Merge pull request #21004 from alexrp/linux-6.10

Linux 6.10 headers/syscalls
This commit is contained in:
Andrew Kelley 2024-08-09 13:08:39 -07:00 committed by GitHub
commit 71a27ebd84
309 changed files with 17431 additions and 3155 deletions

View File

@ -102,5 +102,25 @@
#define HWCAP2_SME_BI32I32 (1UL << 40)
#define HWCAP2_SME_B16B16 (1UL << 41)
#define HWCAP2_SME_F16F16 (1UL << 42)
#define HWCAP2_MOPS (1UL << 43)
#define HWCAP2_HBC (1UL << 44)
#define HWCAP2_SVE_B16B16 (1UL << 45)
#define HWCAP2_LRCPC3 (1UL << 46)
#define HWCAP2_LSE128 (1UL << 47)
#define HWCAP2_FPMR (1UL << 48)
#define HWCAP2_LUT (1UL << 49)
#define HWCAP2_FAMINMAX (1UL << 50)
#define HWCAP2_F8CVT (1UL << 51)
#define HWCAP2_F8FMA (1UL << 52)
#define HWCAP2_F8DP4 (1UL << 53)
#define HWCAP2_F8DP2 (1UL << 54)
#define HWCAP2_F8E4M3 (1UL << 55)
#define HWCAP2_F8E5M2 (1UL << 56)
#define HWCAP2_SME_LUTV2 (1UL << 57)
#define HWCAP2_SME_F8F16 (1UL << 58)
#define HWCAP2_SME_F8F32 (1UL << 59)
#define HWCAP2_SME_SF8FMA (1UL << 60)
#define HWCAP2_SME_SF8DP4 (1UL << 61)
#define HWCAP2_SME_SF8DP2 (1UL << 62)
#endif /* __ASM_HWCAP_H */
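
A minimal userspace sketch (not part of this diff) of how the new HWCAP2 bits above are typically probed at runtime via getauxval(AT_HWCAP2); it assumes the installed <asm/hwcap.h> already carries these definitions.

/* Hedged sketch: probe the new arm64 hwcaps from userspace. */
#include <stdio.h>
#include <sys/auxv.h>      /* getauxval, AT_HWCAP2 */
#include <asm/hwcap.h>     /* HWCAP2_* bits from the hunk above */

int main(void)
{
    unsigned long hwcap2 = getauxval(AT_HWCAP2);

    if (hwcap2 & HWCAP2_MOPS)
        puts("FEAT_MOPS memory-copy/set instructions available");
    if (hwcap2 & HWCAP2_LSE128)
        puts("128-bit atomics (FEAT_LSE128) available");
    return 0;
}
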

View File

@ -37,9 +37,7 @@
#include <asm/ptrace.h>
#include <asm/sve_context.h>
#define __KVM_HAVE_GUEST_DEBUG
#define __KVM_HAVE_IRQ_LINE
#define __KVM_HAVE_READONLY_MEM
#define __KVM_HAVE_VCPU_EVENTS
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
@ -76,11 +74,11 @@ struct kvm_regs {
/* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */
#define KVM_ARM_DEVICE_TYPE_SHIFT 0
#define KVM_ARM_DEVICE_TYPE_MASK GENMASK(KVM_ARM_DEVICE_TYPE_SHIFT + 15, \
KVM_ARM_DEVICE_TYPE_SHIFT)
#define KVM_ARM_DEVICE_TYPE_MASK __GENMASK(KVM_ARM_DEVICE_TYPE_SHIFT + 15, \
KVM_ARM_DEVICE_TYPE_SHIFT)
#define KVM_ARM_DEVICE_ID_SHIFT 16
#define KVM_ARM_DEVICE_ID_MASK GENMASK(KVM_ARM_DEVICE_ID_SHIFT + 15, \
KVM_ARM_DEVICE_ID_SHIFT)
#define KVM_ARM_DEVICE_ID_MASK __GENMASK(KVM_ARM_DEVICE_ID_SHIFT + 15, \
KVM_ARM_DEVICE_ID_SHIFT)
/* Supported device IDs */
#define KVM_ARM_DEVICE_VGIC_V2 0
@ -162,6 +160,11 @@ struct kvm_sync_regs {
__u64 device_irq_level;
};
/* Bits for run->s.regs.device_irq_level */
#define KVM_ARM_DEV_EL1_VTIMER (1 << 0)
#define KVM_ARM_DEV_EL1_PTIMER (1 << 1)
#define KVM_ARM_DEV_PMU (1 << 2)
/*
* PMU filter structure. Describe a range of events with a particular
* action. To be used with KVM_ARM_VCPU_PMU_V3_FILTER.
@ -198,6 +201,15 @@ struct kvm_arm_copy_mte_tags {
__u64 reserved[2];
};
/*
* Counter/Timer offset structure. Describe the virtual/physical offset.
* To be used with KVM_ARM_SET_COUNTER_OFFSET.
*/
struct kvm_arm_counter_offset {
__u64 counter_offset;
__u64 reserved;
};
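
A hedged sketch of how struct kvm_arm_counter_offset is consumed per the comment above; the KVM_ARM_SET_COUNTER_OFFSET ioctl is assumed to be issued on the VM fd and to be present in the installed <linux/kvm.h>.

/* Sketch only: apply a counter offset to a VM. */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_counter_offset(int vm_fd, __u64 offset)
{
    struct kvm_arm_counter_offset off;

    memset(&off, 0, sizeof(off));       /* reserved field must be zero */
    off.counter_offset = offset;
    return ioctl(vm_fd, KVM_ARM_SET_COUNTER_OFFSET, &off);
}
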
#define KVM_ARM_TAGS_TO_GUEST 0
#define KVM_ARM_TAGS_FROM_GUEST 1
@ -363,6 +375,10 @@ enum {
KVM_REG_ARM_VENDOR_HYP_BIT_PTP = 1,
};
/* Device Control API on vm fd */
#define KVM_ARM_VM_SMCCC_CTRL 0
#define KVM_ARM_VM_SMCCC_FILTER 0
/* Device Control API: ARM VGIC */
#define KVM_DEV_ARM_VGIC_GRP_ADDR 0
#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
@ -402,6 +418,8 @@ enum {
#define KVM_ARM_VCPU_TIMER_CTRL 1
#define KVM_ARM_VCPU_TIMER_IRQ_VTIMER 0
#define KVM_ARM_VCPU_TIMER_IRQ_PTIMER 1
#define KVM_ARM_VCPU_TIMER_IRQ_HVTIMER 2
#define KVM_ARM_VCPU_TIMER_IRQ_HPTIMER 3
#define KVM_ARM_VCPU_PVTIME_CTRL 2
#define KVM_ARM_VCPU_PVTIME_IPA 0
@ -458,6 +476,56 @@ enum {
/* run->fail_entry.hardware_entry_failure_reason codes. */
#define KVM_EXIT_FAIL_ENTRY_CPU_UNSUPPORTED (1ULL << 0)
enum kvm_smccc_filter_action {
KVM_SMCCC_FILTER_HANDLE = 0,
KVM_SMCCC_FILTER_DENY,
KVM_SMCCC_FILTER_FWD_TO_USER,
};
struct kvm_smccc_filter {
__u32 base;
__u32 nr_functions;
__u8 action;
__u8 pad[15];
};
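
The filter above is installed through KVM's device-attribute interface; a hedged sketch follows, using the group/attr names from this hunk and assuming KVM_SET_DEVICE_ATTR is accepted on the VM fd by the host kernel.

/* Sketch: deny a range of SMCCC function IDs for a VM. */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int deny_smccc_range(int vm_fd, __u32 base, __u32 count)
{
    struct kvm_smccc_filter filter;
    struct kvm_device_attr attr;

    memset(&filter, 0, sizeof(filter));
    filter.base = base;
    filter.nr_functions = count;
    filter.action = KVM_SMCCC_FILTER_DENY;

    memset(&attr, 0, sizeof(attr));
    attr.group = KVM_ARM_VM_SMCCC_CTRL;
    attr.attr = KVM_ARM_VM_SMCCC_FILTER;
    attr.addr = (__u64)(unsigned long)&filter;

    return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
}
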
/* arm64-specific KVM_EXIT_HYPERCALL flags */
#define KVM_HYPERCALL_EXIT_SMC (1U << 0)
#define KVM_HYPERCALL_EXIT_16BIT (1U << 1)
/*
* Get feature ID registers userspace writable mask.
*
* From DDI0487J.a, D19.2.66 ("ID_AA64MMFR2_EL1, AArch64 Memory Model
* Feature Register 2"):
*
* "The Feature ID space is defined as the System register space in
* AArch64 with op0==3, op1=={0, 1, 3}, CRn==0, CRm=={0-7},
* op2=={0-7}."
*
* This covers all currently known R/O registers that indicate
* anything useful feature wise, including the ID registers.
*
* If we ever need to introduce a new range, it will be described as
* such in the range field.
*/
#define KVM_ARM_FEATURE_ID_RANGE_IDX(op0, op1, crn, crm, op2) \
({ \
__u64 __op1 = (op1) & 3; \
__op1 -= (__op1 == 3); \
(__op1 << 6 | ((crm) & 7) << 3 | (op2)); \
})
#define KVM_ARM_FEATURE_ID_RANGE 0
#define KVM_ARM_FEATURE_ID_RANGE_SIZE (3 * 8 * 8)
struct reg_mask_range {
__u64 addr; /* Pointer to mask array */
__u32 range; /* Requested range */
__u32 reserved[13];
};
#endif
#endif /* __ARM_KVM_H__ */
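
A hedged sketch of how the feature-ID mask query above is intended to be used: fetch the mask array, then index it with KVM_ARM_FEATURE_ID_RANGE_IDX. The encoding for ID_AA64PFR0_EL1 (op0=3, op1=0, CRn=0, CRm=4, op2=0) is only an illustration, and the KVM_ARM_GET_REG_WRITABLE_MASKS ioctl name is an assumption (it does not appear in this hunk).

/* Sketch: query which feature-ID register bits userspace may modify. */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int get_writable_mask_for_pfr0(int vm_fd, __u64 *mask_out)
{
    __u64 masks[KVM_ARM_FEATURE_ID_RANGE_SIZE];
    struct reg_mask_range range;

    memset(masks, 0, sizeof(masks));
    memset(&range, 0, sizeof(range));
    range.addr = (__u64)(unsigned long)masks;
    range.range = KVM_ARM_FEATURE_ID_RANGE;

    if (ioctl(vm_fd, KVM_ARM_GET_REG_WRITABLE_MASKS, &range))
        return -1;

    /* ID_AA64PFR0_EL1 is op0==3, op1==0, CRn==0, CRm==4, op2==0. */
    *mask_out = masks[KVM_ARM_FEATURE_ID_RANGE_IDX(3, 0, 0, 4, 0)];
    return 0;
}
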

View File

@ -152,6 +152,14 @@ struct tpidr2_context {
__u64 tpidr2;
};
/* FPMR context */
#define FPMR_MAGIC 0x46504d52
struct fpmr_context {
struct _aarch64_ctx head;
__u64 fpmr;
};
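
A hedged sketch of locating the new FPMR record in an arm64 signal frame; it relies on the usual arm64 convention that uc_mcontext.__reserved holds a chain of _aarch64_ctx headers terminated by a zero magic, and is illustrative only.

/* Sketch: walk the signal-frame record chain looking for FPMR_MAGIC. */
#include <stddef.h>
#include <ucontext.h>
#include <asm/sigcontext.h>

static __u64 *find_fpmr(ucontext_t *uc)
{
    unsigned char *p = (unsigned char *)uc->uc_mcontext.__reserved;

    for (;;) {
        struct _aarch64_ctx *head = (struct _aarch64_ctx *)p;

        if (head->magic == 0)
            return NULL;                          /* end of records */
        if (head->magic == FPMR_MAGIC)
            return &((struct fpmr_context *)p)->fpmr;
        p += head->size;                          /* size includes the header */
    }
}
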
#define ZA_MAGIC 0x54366345
struct za_context {
@ -177,7 +185,7 @@ struct zt_context {
* vector length beyond its initial architectural limit of 2048 bits
* (16 quadwords).
*
* See linux/Documentation/arm64/sve.rst for a description of the VL/VQ
* See linux/Documentation/arch/arm64/sve.rst for a description of the VL/VQ
* terminology.
*/
#define SVE_VQ_BYTES __SVE_VQ_BYTES /* bytes per quadword */

View File

@ -13,6 +13,17 @@
#define __SVE_VQ_BYTES 16 /* number of bytes per quadword */
/*
* Yes, __SVE_VQ_MAX is 512 QUADWORDS.
*
* To help ensure forward portability, this is much larger than the
* current maximum value defined by the SVE architecture. While arrays
* or static allocations can be sized based on this value, watch out!
* It will waste a surprisingly large amount of memory.
*
* Dynamic sizing based on the actual runtime vector length is likely to
* be preferable for most purposes.
*/
#define __SVE_VQ_MIN 1
#define __SVE_VQ_MAX 512
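
As the comment above recommends, sizing from the runtime vector length is usually preferable to __SVE_VQ_MAX; a hedged sketch using prctl(PR_SVE_GET_VL), assuming the libc's <sys/prctl.h> exposes the PR_SVE_* constants.

/* Sketch: size SVE state from the current vector length. */
#include <stdlib.h>
#include <sys/prctl.h>
#include <asm/sve_context.h>   /* __SVE_VQ_BYTES */

static void *alloc_one_z_reg(void)
{
    int vl = prctl(PR_SVE_GET_VL);      /* VL in bytes plus flag bits, or -1 */

    if (vl < 0)
        return NULL;                    /* SVE not supported */

    vl &= PR_SVE_VL_LEN_MASK;           /* strip the flag bits */
    /* One Z register holds VL bytes, i.e. vl / __SVE_VQ_BYTES quadwords. */
    return malloc(vl);
}
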

View File

@ -2,6 +2,17 @@
#ifndef __ASM_GENERIC_BITS_PER_LONG
#define __ASM_GENERIC_BITS_PER_LONG
#ifndef __BITS_PER_LONG
/*
* In order to keep safe and avoid regression, only unify uapi
* bitsperlong.h for some archs which are using newer toolchains
* that have the definitions of __CHAR_BIT__ and __SIZEOF_LONG__.
* See the following link for more info:
* https://lore.kernel.org/linux-arch/b9624545-2c80-49a1-ac3c-39264a591f7b@app.fastmail.com/
*/
#if defined(__CHAR_BIT__) && defined(__SIZEOF_LONG__)
#define __BITS_PER_LONG (__CHAR_BIT__ * __SIZEOF_LONG__)
#else
/*
* There seems to be no way of detecting this automatically from user
* space, so 64 bit architectures should override this in their
@ -9,8 +20,12 @@
* both 32 and 64 bit user space must not rely on CONFIG_64BIT
* to decide it, but rather check a compiler provided macro.
*/
#ifndef __BITS_PER_LONG
#define __BITS_PER_LONG 32
#endif
#endif
#ifndef __BITS_PER_LONG_LONG
#define __BITS_PER_LONG_LONG 64
#endif
#endif /* __ASM_GENERIC_BITS_PER_LONG */
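
A tiny, purely illustrative check of what the unification above means: on toolchains providing __CHAR_BIT__ and __SIZEOF_LONG__, the value is derived (8 * 8 = 64 on LP64, 8 * 4 = 32 on ILP32) rather than hard-coded per architecture.

#include <asm/bitsperlong.h>

_Static_assert(__BITS_PER_LONG == 8 * sizeof(long),
               "__BITS_PER_LONG must match the ABI's long width in bits");
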

View File

@ -68,11 +68,6 @@ union __sifields {
/* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGTRAP, SIGEMT */
struct {
void *_addr; /* faulting insn/memory ref. */
#ifdef __ia64__
int _imm; /* immediate value for "break" */
unsigned int _flags; /* see ia64 si_flags */
unsigned long _isr; /* isr */
#endif
#define __ADDR_BND_PKEY_PAD (__alignof__(void *) < sizeof(short) ? \
sizeof(short) : __alignof__(void *))
@ -242,7 +237,8 @@ typedef struct siginfo {
#define SEGV_ADIPERR 7 /* Precise MCD exception */
#define SEGV_MTEAERR 8 /* Asynchronous ARM MTE error */
#define SEGV_MTESERR 9 /* Synchronous ARM MTE exception */
#define NSIGSEGV 9
#define SEGV_CPERR 10 /* Control protection fault */
#define NSIGSEGV 10
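
A hedged sketch of reacting to the new si_code in a SIGSEGV handler; SEGV_CPERR (value 10, from the hunk above) may not yet be defined by older libc headers.

/* Sketch: report control-protection faults (e.g. shadow-stack violations). */
#include <signal.h>
#include <string.h>
#include <unistd.h>

static void segv_handler(int sig, siginfo_t *info, void *ctx)
{
    (void)sig; (void)ctx;
    if (info->si_code == SEGV_CPERR) {
        static const char msg[] = "control protection fault\n";
        write(STDERR_FILENO, msg, sizeof(msg) - 1);
    }
    _exit(1);
}

static void install_handler(void)
{
    struct sigaction sa;

    memset(&sa, 0, sizeof(sa));
    sa.sa_sigaction = segv_handler;
    sa.sa_flags = SA_SIGINFO;
    sigaction(SIGSEGV, &sa, NULL);
}
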
/*
* SIGBUS si_codes

View File

@ -132,6 +132,9 @@
#define SO_RCVMARK 75
#define SO_PASSPIDFD 76
#define SO_PEERPIDFD 77
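
A hedged sketch of the new SO_PEERPIDFD option above: on a connected AF_UNIX socket, getsockopt is assumed to return a fresh pidfd referring to the peer process (6.5+ kernels; libc headers may not define the constant yet).

/* Sketch: fetch a pidfd for the peer of a connected unix socket. */
#include <sys/socket.h>

static int peer_pidfd(int unix_sock_fd)
{
    int pidfd = -1;
    socklen_t len = sizeof(pidfd);

    if (getsockopt(unix_sock_fd, SOL_SOCKET, SO_PEERPIDFD, &pidfd, &len) < 0)
        return -1;
    return pidfd;   /* caller owns the pidfd and must close() it */
}
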
#if __BITS_PER_LONG == 64 || (defined(__x86_64__) && defined(__ILP32__))
/* on 64-bit and x32, avoid the ?: operator */

View File

@ -38,12 +38,12 @@ __SYSCALL(__NR_io_destroy, sys_io_destroy)
__SC_COMP(__NR_io_submit, sys_io_submit, compat_sys_io_submit)
#define __NR_io_cancel 3
__SYSCALL(__NR_io_cancel, sys_io_cancel)
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_io_getevents 4
__SC_3264(__NR_io_getevents, sys_io_getevents_time32, sys_io_getevents)
#endif
/* fs/xattr.c */
#define __NR_setxattr 5
__SYSCALL(__NR_setxattr, sys_setxattr)
#define __NR_lsetxattr 6
@ -68,58 +68,38 @@ __SYSCALL(__NR_removexattr, sys_removexattr)
__SYSCALL(__NR_lremovexattr, sys_lremovexattr)
#define __NR_fremovexattr 16
__SYSCALL(__NR_fremovexattr, sys_fremovexattr)
/* fs/dcache.c */
#define __NR_getcwd 17
__SYSCALL(__NR_getcwd, sys_getcwd)
/* fs/cookies.c */
#define __NR_lookup_dcookie 18
__SC_COMP(__NR_lookup_dcookie, sys_lookup_dcookie, compat_sys_lookup_dcookie)
/* fs/eventfd.c */
__SYSCALL(__NR_lookup_dcookie, sys_ni_syscall)
#define __NR_eventfd2 19
__SYSCALL(__NR_eventfd2, sys_eventfd2)
/* fs/eventpoll.c */
#define __NR_epoll_create1 20
__SYSCALL(__NR_epoll_create1, sys_epoll_create1)
#define __NR_epoll_ctl 21
__SYSCALL(__NR_epoll_ctl, sys_epoll_ctl)
#define __NR_epoll_pwait 22
__SC_COMP(__NR_epoll_pwait, sys_epoll_pwait, compat_sys_epoll_pwait)
/* fs/fcntl.c */
#define __NR_dup 23
__SYSCALL(__NR_dup, sys_dup)
#define __NR_dup3 24
__SYSCALL(__NR_dup3, sys_dup3)
#define __NR3264_fcntl 25
__SC_COMP_3264(__NR3264_fcntl, sys_fcntl64, sys_fcntl, compat_sys_fcntl64)
/* fs/inotify_user.c */
#define __NR_inotify_init1 26
__SYSCALL(__NR_inotify_init1, sys_inotify_init1)
#define __NR_inotify_add_watch 27
__SYSCALL(__NR_inotify_add_watch, sys_inotify_add_watch)
#define __NR_inotify_rm_watch 28
__SYSCALL(__NR_inotify_rm_watch, sys_inotify_rm_watch)
/* fs/ioctl.c */
#define __NR_ioctl 29
__SC_COMP(__NR_ioctl, sys_ioctl, compat_sys_ioctl)
/* fs/ioprio.c */
#define __NR_ioprio_set 30
__SYSCALL(__NR_ioprio_set, sys_ioprio_set)
#define __NR_ioprio_get 31
__SYSCALL(__NR_ioprio_get, sys_ioprio_get)
/* fs/locks.c */
#define __NR_flock 32
__SYSCALL(__NR_flock, sys_flock)
/* fs/namei.c */
#define __NR_mknodat 33
__SYSCALL(__NR_mknodat, sys_mknodat)
#define __NR_mkdirat 34
@ -130,25 +110,21 @@ __SYSCALL(__NR_unlinkat, sys_unlinkat)
__SYSCALL(__NR_symlinkat, sys_symlinkat)
#define __NR_linkat 37
__SYSCALL(__NR_linkat, sys_linkat)
#ifdef __ARCH_WANT_RENAMEAT
/* renameat is superseded with flags by renameat2 */
#define __NR_renameat 38
__SYSCALL(__NR_renameat, sys_renameat)
#endif /* __ARCH_WANT_RENAMEAT */
/* fs/namespace.c */
#define __NR_umount2 39
__SYSCALL(__NR_umount2, sys_umount)
#define __NR_mount 40
__SYSCALL(__NR_mount, sys_mount)
#define __NR_pivot_root 41
__SYSCALL(__NR_pivot_root, sys_pivot_root)
/* fs/nfsctl.c */
#define __NR_nfsservctl 42
__SYSCALL(__NR_nfsservctl, sys_ni_syscall)
/* fs/open.c */
#define __NR3264_statfs 43
__SC_COMP_3264(__NR3264_statfs, sys_statfs64, sys_statfs, \
compat_sys_statfs64)
@ -161,7 +137,6 @@ __SC_COMP_3264(__NR3264_truncate, sys_truncate64, sys_truncate, \
#define __NR3264_ftruncate 46
__SC_COMP_3264(__NR3264_ftruncate, sys_ftruncate64, sys_ftruncate, \
compat_sys_ftruncate64)
#define __NR_fallocate 47
__SC_COMP(__NR_fallocate, sys_fallocate, compat_sys_fallocate)
#define __NR_faccessat 48
@ -186,20 +161,12 @@ __SYSCALL(__NR_openat, sys_openat)
__SYSCALL(__NR_close, sys_close)
#define __NR_vhangup 58
__SYSCALL(__NR_vhangup, sys_vhangup)
/* fs/pipe.c */
#define __NR_pipe2 59
__SYSCALL(__NR_pipe2, sys_pipe2)
/* fs/quota.c */
#define __NR_quotactl 60
__SYSCALL(__NR_quotactl, sys_quotactl)
/* fs/readdir.c */
#define __NR_getdents64 61
__SYSCALL(__NR_getdents64, sys_getdents64)
/* fs/read_write.c */
#define __NR3264_lseek 62
__SC_3264(__NR3264_lseek, sys_llseek, sys_lseek)
#define __NR_read 63
@ -218,12 +185,9 @@ __SC_COMP(__NR_pwrite64, sys_pwrite64, compat_sys_pwrite64)
__SC_COMP(__NR_preadv, sys_preadv, compat_sys_preadv)
#define __NR_pwritev 70
__SC_COMP(__NR_pwritev, sys_pwritev, compat_sys_pwritev)
/* fs/sendfile.c */
#define __NR3264_sendfile 71
__SYSCALL(__NR3264_sendfile, sys_sendfile64)
/* fs/select.c */
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_pselect6 72
__SC_COMP_3264(__NR_pselect6, sys_pselect6_time32, sys_pselect6, compat_sys_pselect6_time32)
@ -231,21 +195,17 @@ __SC_COMP_3264(__NR_pselect6, sys_pselect6_time32, sys_pselect6, compat_sys_psel
__SC_COMP_3264(__NR_ppoll, sys_ppoll_time32, sys_ppoll, compat_sys_ppoll_time32)
#endif
/* fs/signalfd.c */
#define __NR_signalfd4 74
__SC_COMP(__NR_signalfd4, sys_signalfd4, compat_sys_signalfd4)
/* fs/splice.c */
#define __NR_vmsplice 75
__SYSCALL(__NR_vmsplice, sys_vmsplice)
#define __NR_splice 76
__SYSCALL(__NR_splice, sys_splice)
#define __NR_tee 77
__SYSCALL(__NR_tee, sys_tee)
/* fs/stat.c */
#define __NR_readlinkat 78
__SYSCALL(__NR_readlinkat, sys_readlinkat)
#if defined(__ARCH_WANT_NEW_STAT) || defined(__ARCH_WANT_STAT64)
#define __NR3264_fstatat 79
__SC_3264(__NR3264_fstatat, sys_fstatat64, sys_newfstatat)
@ -253,13 +213,13 @@ __SC_3264(__NR3264_fstatat, sys_fstatat64, sys_newfstatat)
__SC_3264(__NR3264_fstat, sys_fstat64, sys_newfstat)
#endif
/* fs/sync.c */
#define __NR_sync 81
__SYSCALL(__NR_sync, sys_sync)
#define __NR_fsync 82
__SYSCALL(__NR_fsync, sys_fsync)
#define __NR_fdatasync 83
__SYSCALL(__NR_fdatasync, sys_fdatasync)
#ifdef __ARCH_WANT_SYNC_FILE_RANGE2
#define __NR_sync_file_range2 84
__SC_COMP(__NR_sync_file_range2, sys_sync_file_range2, \
@ -270,9 +230,9 @@ __SC_COMP(__NR_sync_file_range, sys_sync_file_range, \
compat_sys_sync_file_range)
#endif
/* fs/timerfd.c */
#define __NR_timerfd_create 85
__SYSCALL(__NR_timerfd_create, sys_timerfd_create)
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_timerfd_settime 86
__SC_3264(__NR_timerfd_settime, sys_timerfd_settime32, \
@ -282,45 +242,35 @@ __SC_3264(__NR_timerfd_gettime, sys_timerfd_gettime32, \
sys_timerfd_gettime)
#endif
/* fs/utimes.c */
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_utimensat 88
__SC_3264(__NR_utimensat, sys_utimensat_time32, sys_utimensat)
#endif
/* kernel/acct.c */
#define __NR_acct 89
__SYSCALL(__NR_acct, sys_acct)
/* kernel/capability.c */
#define __NR_capget 90
__SYSCALL(__NR_capget, sys_capget)
#define __NR_capset 91
__SYSCALL(__NR_capset, sys_capset)
/* kernel/exec_domain.c */
#define __NR_personality 92
__SYSCALL(__NR_personality, sys_personality)
/* kernel/exit.c */
#define __NR_exit 93
__SYSCALL(__NR_exit, sys_exit)
#define __NR_exit_group 94
__SYSCALL(__NR_exit_group, sys_exit_group)
#define __NR_waitid 95
__SC_COMP(__NR_waitid, sys_waitid, compat_sys_waitid)
/* kernel/fork.c */
#define __NR_set_tid_address 96
__SYSCALL(__NR_set_tid_address, sys_set_tid_address)
#define __NR_unshare 97
__SYSCALL(__NR_unshare, sys_unshare)
/* kernel/futex.c */
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_futex 98
__SC_3264(__NR_futex, sys_futex_time32, sys_futex)
#endif
#define __NR_set_robust_list 99
__SC_COMP(__NR_set_robust_list, sys_set_robust_list, \
compat_sys_set_robust_list)
@ -328,43 +278,40 @@ __SC_COMP(__NR_set_robust_list, sys_set_robust_list, \
__SC_COMP(__NR_get_robust_list, sys_get_robust_list, \
compat_sys_get_robust_list)
/* kernel/hrtimer.c */
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_nanosleep 101
__SC_3264(__NR_nanosleep, sys_nanosleep_time32, sys_nanosleep)
#endif
/* kernel/itimer.c */
#define __NR_getitimer 102
__SC_COMP(__NR_getitimer, sys_getitimer, compat_sys_getitimer)
#define __NR_setitimer 103
__SC_COMP(__NR_setitimer, sys_setitimer, compat_sys_setitimer)
/* kernel/kexec.c */
#define __NR_kexec_load 104
__SC_COMP(__NR_kexec_load, sys_kexec_load, compat_sys_kexec_load)
/* kernel/module.c */
#define __NR_init_module 105
__SYSCALL(__NR_init_module, sys_init_module)
#define __NR_delete_module 106
__SYSCALL(__NR_delete_module, sys_delete_module)
/* kernel/posix-timers.c */
#define __NR_timer_create 107
__SC_COMP(__NR_timer_create, sys_timer_create, compat_sys_timer_create)
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_timer_gettime 108
__SC_3264(__NR_timer_gettime, sys_timer_gettime32, sys_timer_gettime)
#endif
#define __NR_timer_getoverrun 109
__SYSCALL(__NR_timer_getoverrun, sys_timer_getoverrun)
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_timer_settime 110
__SC_3264(__NR_timer_settime, sys_timer_settime32, sys_timer_settime)
#endif
#define __NR_timer_delete 111
__SYSCALL(__NR_timer_delete, sys_timer_delete)
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_clock_settime 112
__SC_3264(__NR_clock_settime, sys_clock_settime32, sys_clock_settime)
@ -377,15 +324,10 @@ __SC_3264(__NR_clock_nanosleep, sys_clock_nanosleep_time32, \
sys_clock_nanosleep)
#endif
/* kernel/printk.c */
#define __NR_syslog 116
__SYSCALL(__NR_syslog, sys_syslog)
/* kernel/ptrace.c */
#define __NR_ptrace 117
__SC_COMP(__NR_ptrace, sys_ptrace, compat_sys_ptrace)
/* kernel/sched/core.c */
#define __NR_sched_setparam 118
__SYSCALL(__NR_sched_setparam, sys_sched_setparam)
#define __NR_sched_setscheduler 119
@ -406,13 +348,13 @@ __SYSCALL(__NR_sched_yield, sys_sched_yield)
__SYSCALL(__NR_sched_get_priority_max, sys_sched_get_priority_max)
#define __NR_sched_get_priority_min 126
__SYSCALL(__NR_sched_get_priority_min, sys_sched_get_priority_min)
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_sched_rr_get_interval 127
__SC_3264(__NR_sched_rr_get_interval, sys_sched_rr_get_interval_time32, \
sys_sched_rr_get_interval)
#endif
/* kernel/signal.c */
#define __NR_restart_syscall 128
__SYSCALL(__NR_restart_syscall, sys_restart_syscall)
#define __NR_kill 129
@ -431,18 +373,18 @@ __SC_COMP(__NR_rt_sigaction, sys_rt_sigaction, compat_sys_rt_sigaction)
__SC_COMP(__NR_rt_sigprocmask, sys_rt_sigprocmask, compat_sys_rt_sigprocmask)
#define __NR_rt_sigpending 136
__SC_COMP(__NR_rt_sigpending, sys_rt_sigpending, compat_sys_rt_sigpending)
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_rt_sigtimedwait 137
__SC_COMP_3264(__NR_rt_sigtimedwait, sys_rt_sigtimedwait_time32, \
sys_rt_sigtimedwait, compat_sys_rt_sigtimedwait_time32)
#endif
#define __NR_rt_sigqueueinfo 138
__SC_COMP(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo, \
compat_sys_rt_sigqueueinfo)
#define __NR_rt_sigreturn 139
__SC_COMP(__NR_rt_sigreturn, sys_rt_sigreturn, compat_sys_rt_sigreturn)
/* kernel/sys.c */
#define __NR_setpriority 140
__SYSCALL(__NR_setpriority, sys_setpriority)
#define __NR_getpriority 141
@ -507,7 +449,6 @@ __SYSCALL(__NR_prctl, sys_prctl)
#define __NR_getcpu 168
__SYSCALL(__NR_getcpu, sys_getcpu)
/* kernel/time.c */
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_gettimeofday 169
__SC_COMP(__NR_gettimeofday, sys_gettimeofday, compat_sys_gettimeofday)
@ -517,7 +458,6 @@ __SC_COMP(__NR_settimeofday, sys_settimeofday, compat_sys_settimeofday)
__SC_3264(__NR_adjtimex, sys_adjtimex_time32, sys_adjtimex)
#endif
/* kernel/sys.c */
#define __NR_getpid 172
__SYSCALL(__NR_getpid, sys_getpid)
#define __NR_getppid 173
@ -534,12 +474,11 @@ __SYSCALL(__NR_getegid, sys_getegid)
__SYSCALL(__NR_gettid, sys_gettid)
#define __NR_sysinfo 179
__SC_COMP(__NR_sysinfo, sys_sysinfo, compat_sys_sysinfo)
/* ipc/mqueue.c */
#define __NR_mq_open 180
__SC_COMP(__NR_mq_open, sys_mq_open, compat_sys_mq_open)
#define __NR_mq_unlink 181
__SYSCALL(__NR_mq_unlink, sys_mq_unlink)
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_mq_timedsend 182
__SC_3264(__NR_mq_timedsend, sys_mq_timedsend_time32, sys_mq_timedsend)
@ -547,12 +486,11 @@ __SC_3264(__NR_mq_timedsend, sys_mq_timedsend_time32, sys_mq_timedsend)
__SC_3264(__NR_mq_timedreceive, sys_mq_timedreceive_time32, \
sys_mq_timedreceive)
#endif
#define __NR_mq_notify 184
__SC_COMP(__NR_mq_notify, sys_mq_notify, compat_sys_mq_notify)
#define __NR_mq_getsetattr 185
__SC_COMP(__NR_mq_getsetattr, sys_mq_getsetattr, compat_sys_mq_getsetattr)
/* ipc/msg.c */
#define __NR_msgget 186
__SYSCALL(__NR_msgget, sys_msgget)
#define __NR_msgctl 187
@ -561,20 +499,18 @@ __SC_COMP(__NR_msgctl, sys_msgctl, compat_sys_msgctl)
__SC_COMP(__NR_msgrcv, sys_msgrcv, compat_sys_msgrcv)
#define __NR_msgsnd 189
__SC_COMP(__NR_msgsnd, sys_msgsnd, compat_sys_msgsnd)
/* ipc/sem.c */
#define __NR_semget 190
__SYSCALL(__NR_semget, sys_semget)
#define __NR_semctl 191
__SC_COMP(__NR_semctl, sys_semctl, compat_sys_semctl)
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_semtimedop 192
__SC_3264(__NR_semtimedop, sys_semtimedop_time32, sys_semtimedop)
#endif
#define __NR_semop 193
__SYSCALL(__NR_semop, sys_semop)
/* ipc/shm.c */
#define __NR_shmget 194
__SYSCALL(__NR_shmget, sys_shmget)
#define __NR_shmctl 195
@ -583,8 +519,6 @@ __SC_COMP(__NR_shmctl, sys_shmctl, compat_sys_shmctl)
__SC_COMP(__NR_shmat, sys_shmat, compat_sys_shmat)
#define __NR_shmdt 197
__SYSCALL(__NR_shmdt, sys_shmdt)
/* net/socket.c */
#define __NR_socket 198
__SYSCALL(__NR_socket, sys_socket)
#define __NR_socketpair 199
@ -615,40 +549,30 @@ __SYSCALL(__NR_shutdown, sys_shutdown)
__SC_COMP(__NR_sendmsg, sys_sendmsg, compat_sys_sendmsg)
#define __NR_recvmsg 212
__SC_COMP(__NR_recvmsg, sys_recvmsg, compat_sys_recvmsg)
/* mm/filemap.c */
#define __NR_readahead 213
__SC_COMP(__NR_readahead, sys_readahead, compat_sys_readahead)
/* mm/nommu.c, also with MMU */
#define __NR_brk 214
__SYSCALL(__NR_brk, sys_brk)
#define __NR_munmap 215
__SYSCALL(__NR_munmap, sys_munmap)
#define __NR_mremap 216
__SYSCALL(__NR_mremap, sys_mremap)
/* security/keys/keyctl.c */
#define __NR_add_key 217
__SYSCALL(__NR_add_key, sys_add_key)
#define __NR_request_key 218
__SYSCALL(__NR_request_key, sys_request_key)
#define __NR_keyctl 219
__SC_COMP(__NR_keyctl, sys_keyctl, compat_sys_keyctl)
/* arch/example/kernel/sys_example.c */
#define __NR_clone 220
__SYSCALL(__NR_clone, sys_clone)
#define __NR_execve 221
__SC_COMP(__NR_execve, sys_execve, compat_sys_execve)
#define __NR3264_mmap 222
__SC_3264(__NR3264_mmap, sys_mmap2, sys_mmap)
/* mm/fadvise.c */
#define __NR3264_fadvise64 223
__SC_COMP(__NR3264_fadvise64, sys_fadvise64_64, compat_sys_fadvise64_64)
/* mm/, CONFIG_MMU only */
/* CONFIG_MMU only */
#ifndef __ARCH_NOMMU
#define __NR_swapon 224
__SYSCALL(__NR_swapon, sys_swapon)
@ -691,6 +615,7 @@ __SC_COMP(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo, \
__SYSCALL(__NR_perf_event_open, sys_perf_event_open)
#define __NR_accept4 242
__SYSCALL(__NR_accept4, sys_accept4)
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_recvmmsg 243
__SC_COMP_3264(__NR_recvmmsg, sys_recvmmsg_time32, sys_recvmmsg, compat_sys_recvmmsg_time32)
@ -706,6 +631,7 @@ __SC_COMP_3264(__NR_recvmmsg, sys_recvmmsg_time32, sys_recvmmsg, compat_sys_recv
#define __NR_wait4 260
__SC_COMP(__NR_wait4, sys_wait4, compat_sys_wait4)
#endif
#define __NR_prlimit64 261
__SYSCALL(__NR_prlimit64, sys_prlimit64)
#define __NR_fanotify_init 262
@ -716,10 +642,12 @@ __SYSCALL(__NR_fanotify_mark, sys_fanotify_mark)
__SYSCALL(__NR_name_to_handle_at, sys_name_to_handle_at)
#define __NR_open_by_handle_at 265
__SYSCALL(__NR_open_by_handle_at, sys_open_by_handle_at)
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_clock_adjtime 266
__SC_3264(__NR_clock_adjtime, sys_clock_adjtime32, sys_clock_adjtime)
#endif
#define __NR_syncfs 267
__SYSCALL(__NR_syncfs, sys_syncfs)
#define __NR_setns 268
@ -770,15 +698,19 @@ __SYSCALL(__NR_pkey_alloc, sys_pkey_alloc)
__SYSCALL(__NR_pkey_free, sys_pkey_free)
#define __NR_statx 291
__SYSCALL(__NR_statx, sys_statx)
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_io_pgetevents 292
__SC_COMP_3264(__NR_io_pgetevents, sys_io_pgetevents_time32, sys_io_pgetevents, compat_sys_io_pgetevents)
#endif
#define __NR_rseq 293
__SYSCALL(__NR_rseq, sys_rseq)
#define __NR_kexec_file_load 294
__SYSCALL(__NR_kexec_file_load, sys_kexec_file_load)
/* 295 through 402 are unassigned to sync up with generic numbers, don't use */
#if defined(__SYSCALL_COMPAT) || __BITS_PER_LONG == 32
#define __NR_clock_gettime64 403
__SYSCALL(__NR_clock_gettime64, sys_clock_gettime)
@ -805,7 +737,7 @@ __SC_COMP(__NR_pselect6_time64, sys_pselect6, compat_sys_pselect6_time64)
#define __NR_ppoll_time64 414
__SC_COMP(__NR_ppoll_time64, sys_ppoll, compat_sys_ppoll_time64)
#define __NR_io_pgetevents_time64 416
__SYSCALL(__NR_io_pgetevents_time64, sys_io_pgetevents)
__SC_COMP(__NR_io_pgetevents_time64, sys_io_pgetevents, compat_sys_io_pgetevents_time64)
#define __NR_recvmmsg_time64 417
__SC_COMP(__NR_recvmmsg_time64, sys_recvmmsg, compat_sys_recvmmsg_time64)
#define __NR_mq_timedsend_time64 418
@ -844,13 +776,14 @@ __SYSCALL(__NR_fsmount, sys_fsmount)
__SYSCALL(__NR_fspick, sys_fspick)
#define __NR_pidfd_open 434
__SYSCALL(__NR_pidfd_open, sys_pidfd_open)
#ifdef __ARCH_WANT_SYS_CLONE3
#define __NR_clone3 435
__SYSCALL(__NR_clone3, sys_clone3)
#endif
#define __NR_close_range 436
__SYSCALL(__NR_close_range, sys_close_range)
#define __NR_openat2 437
__SYSCALL(__NR_openat2, sys_openat2)
#define __NR_pidfd_getfd 438
@ -865,7 +798,6 @@ __SC_COMP(__NR_epoll_pwait2, sys_epoll_pwait2, compat_sys_epoll_pwait2)
__SYSCALL(__NR_mount_setattr, sys_mount_setattr)
#define __NR_quotactl_fd 443
__SYSCALL(__NR_quotactl_fd, sys_quotactl_fd)
#define __NR_landlock_create_ruleset 444
__SYSCALL(__NR_landlock_create_ruleset, sys_landlock_create_ruleset)
#define __NR_landlock_add_rule 445
@ -877,17 +809,44 @@ __SYSCALL(__NR_landlock_restrict_self, sys_landlock_restrict_self)
#define __NR_memfd_secret 447
__SYSCALL(__NR_memfd_secret, sys_memfd_secret)
#endif
#define __NR_process_mrelease 448
__SYSCALL(__NR_process_mrelease, sys_process_mrelease)
#define __NR_futex_waitv 449
__SYSCALL(__NR_futex_waitv, sys_futex_waitv)
#define __NR_set_mempolicy_home_node 450
__SYSCALL(__NR_set_mempolicy_home_node, sys_set_mempolicy_home_node)
#define __NR_cachestat 451
__SYSCALL(__NR_cachestat, sys_cachestat)
#define __NR_fchmodat2 452
__SYSCALL(__NR_fchmodat2, sys_fchmodat2)
#define __NR_map_shadow_stack 453
__SYSCALL(__NR_map_shadow_stack, sys_map_shadow_stack)
#define __NR_futex_wake 454
__SYSCALL(__NR_futex_wake, sys_futex_wake)
#define __NR_futex_wait 455
__SYSCALL(__NR_futex_wait, sys_futex_wait)
#define __NR_futex_requeue 456
__SYSCALL(__NR_futex_requeue, sys_futex_requeue)
#define __NR_statmount 457
__SYSCALL(__NR_statmount, sys_statmount)
#define __NR_listmount 458
__SYSCALL(__NR_listmount, sys_listmount)
#define __NR_lsm_get_self_attr 459
__SYSCALL(__NR_lsm_get_self_attr, sys_lsm_get_self_attr)
#define __NR_lsm_set_self_attr 460
__SYSCALL(__NR_lsm_set_self_attr, sys_lsm_set_self_attr)
#define __NR_lsm_list_modules 461
__SYSCALL(__NR_lsm_list_modules, sys_lsm_list_modules)
#define __NR_mseal 462
__SYSCALL(__NR_mseal, sys_mseal)
#undef __NR_syscalls
#define __NR_syscalls 451
#define __NR_syscalls 463
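
A hedged sketch of invoking one of the newly numbered syscalls (mseal, 462 above) via syscall(2), since libc wrappers typically lag; the mseal(addr, len, flags) signature is assumed from Linux 6.10.

/* Sketch: seal a mapping against further VMA modifications. */
#include <unistd.h>
#include <sys/syscall.h>

static long mseal_region(void *addr, size_t len)
{
    return syscall(__NR_mseal, addr, len, 0UL);
}
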
/*
* 32 bit systems traditionally used different

View File

@ -94,6 +94,9 @@ extern "C" {
*
* %AMDGPU_GEM_DOMAIN_OA Ordered append, used by 3D or Compute engines
* for appending data.
*
* %AMDGPU_GEM_DOMAIN_DOORBELL Doorbell. It is an MMIO region for
* signalling user mode queues.
*/
#define AMDGPU_GEM_DOMAIN_CPU 0x1
#define AMDGPU_GEM_DOMAIN_GTT 0x2
@ -101,12 +104,14 @@ extern "C" {
#define AMDGPU_GEM_DOMAIN_GDS 0x8
#define AMDGPU_GEM_DOMAIN_GWS 0x10
#define AMDGPU_GEM_DOMAIN_OA 0x20
#define AMDGPU_GEM_DOMAIN_DOORBELL 0x40
#define AMDGPU_GEM_DOMAIN_MASK (AMDGPU_GEM_DOMAIN_CPU | \
AMDGPU_GEM_DOMAIN_GTT | \
AMDGPU_GEM_DOMAIN_VRAM | \
AMDGPU_GEM_DOMAIN_GDS | \
AMDGPU_GEM_DOMAIN_GWS | \
AMDGPU_GEM_DOMAIN_OA)
AMDGPU_GEM_DOMAIN_OA | \
AMDGPU_GEM_DOMAIN_DOORBELL)
/* Flag that CPU access will be required for the case of VRAM domain */
#define AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED (1 << 0)
@ -145,7 +150,7 @@ extern "C" {
*/
#define AMDGPU_GEM_CREATE_DISCARDABLE (1 << 12)
/* Flag that BO is shared coherently between multiple devices or CPU threads.
* May depend on GPU instructions to flush caches explicitly
* May depend on GPU instructions to flush caches to system scope explicitly.
*
* This influences the choice of MTYPE in the PTEs on GFXv9 and later GPUs and
* may override the MTYPE selected in AMDGPU_VA_OP_MAP.
@ -158,6 +163,14 @@ extern "C" {
* may override the MTYPE selected in AMDGPU_VA_OP_MAP.
*/
#define AMDGPU_GEM_CREATE_UNCACHED (1 << 14)
/* Flag that BO should be coherent across devices when using device-level
* atomics. May depend on GPU instructions to flush caches to device scope
* explicitly, promoting them to system scope automatically.
*
* This influences the choice of MTYPE in the PTEs on GFXv9 and later GPUs and
* may override the MTYPE selected in AMDGPU_VA_OP_MAP.
*/
#define AMDGPU_GEM_CREATE_EXT_COHERENT (1 << 15)
struct drm_amdgpu_gem_create_in {
/** the requested memory size */
@ -236,15 +249,17 @@ union drm_amdgpu_bo_list {
/* unknown cause */
#define AMDGPU_CTX_UNKNOWN_RESET 3
/* indicate gpu reset occured after ctx created */
/* indicate gpu reset occurred after ctx created */
#define AMDGPU_CTX_QUERY2_FLAGS_RESET (1<<0)
/* indicate vram lost occured after ctx created */
/* indicate vram lost occurred after ctx created */
#define AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST (1<<1)
/* indicate some job from this context once cause gpu hang */
#define AMDGPU_CTX_QUERY2_FLAGS_GUILTY (1<<2)
/* indicate some errors are detected by RAS */
#define AMDGPU_CTX_QUERY2_FLAGS_RAS_CE (1<<3)
#define AMDGPU_CTX_QUERY2_FLAGS_RAS_UE (1<<4)
/* indicate that the reset hasn't completed yet */
#define AMDGPU_CTX_QUERY2_FLAGS_RESET_IN_PROGRESS (1<<5)
/* Context priority level */
#define AMDGPU_CTX_PRIORITY_UNSET -2048
@ -579,7 +594,8 @@ struct drm_amdgpu_gem_va {
*/
#define AMDGPU_HW_IP_VCN_ENC 7
#define AMDGPU_HW_IP_VCN_JPEG 8
#define AMDGPU_HW_IP_NUM 9
#define AMDGPU_HW_IP_VPE 9
#define AMDGPU_HW_IP_NUM 10
#define AMDGPU_HW_IP_INSTANCE_MAX_COUNT 1
@ -592,6 +608,7 @@ struct drm_amdgpu_gem_va {
#define AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES 0x07
#define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT 0x08
#define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL 0x09
#define AMDGPU_CHUNK_ID_CP_GFX_SHADOW 0x0a
struct drm_amdgpu_cs_chunk {
__u32 chunk_id;
@ -708,6 +725,15 @@ struct drm_amdgpu_cs_chunk_data {
};
};
#define AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_INIT_SHADOW 0x1
struct drm_amdgpu_cs_chunk_cp_gfx_shadow {
__u64 shadow_va;
__u64 csa_va;
__u64 gds_va;
__u64 flags;
};
/*
* Query h/w info: Flag that this is integrated (a.h.a. fusion) GPU
*
@ -780,6 +806,8 @@ struct drm_amdgpu_cs_chunk_data {
#define AMDGPU_INFO_FW_MES 0x1a
/* Subquery id: Query IMU firmware version */
#define AMDGPU_INFO_FW_IMU 0x1b
/* Subquery id: Query VPE firmware version */
#define AMDGPU_INFO_FW_VPE 0x1c
/* number of bytes moved for TTM migration */
#define AMDGPU_INFO_NUM_BYTES_MOVED 0x0f
@ -837,6 +865,8 @@ struct drm_amdgpu_cs_chunk_data {
#define AMDGPU_INFO_SENSOR_PEAK_PSTATE_GFX_SCLK 0xa
/* Subquery id: Query GPU peak pstate memory clock */
#define AMDGPU_INFO_SENSOR_PEAK_PSTATE_GFX_MCLK 0xb
/* Subquery id: Query input GPU power */
#define AMDGPU_INFO_SENSOR_GPU_INPUT_POWER 0xc
/* Number of VRAM page faults on CPU access. */
#define AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS 0x1E
#define AMDGPU_INFO_VRAM_LOST_COUNTER 0x1F
@ -876,6 +906,10 @@ struct drm_amdgpu_cs_chunk_data {
#define AMDGPU_INFO_VIDEO_CAPS_DECODE 0
/* Subquery id: Encode */
#define AMDGPU_INFO_VIDEO_CAPS_ENCODE 1
/* Query the max number of IBs per gang per submission */
#define AMDGPU_INFO_MAX_IBS 0x22
/* query last page fault info */
#define AMDGPU_INFO_GPUVM_FAULT 0x23
#define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0
#define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff
@ -1126,6 +1160,14 @@ struct drm_amdgpu_info_device {
__u64 mall_size; /* AKA infinity cache */
/* high 32 bits of the rb pipes mask */
__u32 enabled_rb_pipes_mask_hi;
/* shadow area size for gfx11 */
__u32 shadow_size;
/* shadow area base virtual alignment for gfx11 */
__u32 shadow_alignment;
/* context save area size for gfx11 */
__u32 csa_size;
/* context save area base virtual alignment for gfx11 */
__u32 csa_alignment;
};
struct drm_amdgpu_info_hw_ip {
@ -1193,6 +1235,20 @@ struct drm_amdgpu_info_video_caps {
struct drm_amdgpu_info_video_codec_info codec_info[AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_COUNT];
};
#define AMDGPU_VMHUB_TYPE_MASK 0xff
#define AMDGPU_VMHUB_TYPE_SHIFT 0
#define AMDGPU_VMHUB_TYPE_GFX 0
#define AMDGPU_VMHUB_TYPE_MM0 1
#define AMDGPU_VMHUB_TYPE_MM1 2
#define AMDGPU_VMHUB_IDX_MASK 0xff00
#define AMDGPU_VMHUB_IDX_SHIFT 8
struct drm_amdgpu_info_gpuvm_fault {
__u64 addr;
__u32 status;
__u32 vmhub;
};
/*
* Supported GPU families
*/
@ -1211,6 +1267,7 @@ struct drm_amdgpu_info_video_caps {
#define AMDGPU_FAMILY_GC_11_0_1 148 /* GC 11.0.1 */
#define AMDGPU_FAMILY_GC_10_3_6 149 /* GC 10.3.6 */
#define AMDGPU_FAMILY_GC_10_3_7 151 /* GC 10.3.7 */
#define AMDGPU_FAMILY_GC_11_5_0 150 /* GC 11.5.0 */
#if defined(__cplusplus)
}

View File

@ -667,8 +667,11 @@ struct drm_gem_open {
* Bitfield of supported PRIME sharing capabilities. See &DRM_PRIME_CAP_IMPORT
* and &DRM_PRIME_CAP_EXPORT.
*
* PRIME buffers are exposed as dma-buf file descriptors. See
* Documentation/gpu/drm-mm.rst, section "PRIME Buffer Sharing".
* Starting from kernel version 6.6, both &DRM_PRIME_CAP_IMPORT and
* &DRM_PRIME_CAP_EXPORT are always advertised.
*
* PRIME buffers are exposed as dma-buf file descriptors.
* See :ref:`prime_buffer_sharing`.
*/
#define DRM_CAP_PRIME 0x5
/**
@ -676,6 +679,8 @@ struct drm_gem_open {
*
* If this bit is set in &DRM_CAP_PRIME, the driver supports importing PRIME
* buffers via the &DRM_IOCTL_PRIME_FD_TO_HANDLE ioctl.
*
* Starting from kernel version 6.6, this bit is always set in &DRM_CAP_PRIME.
*/
#define DRM_PRIME_CAP_IMPORT 0x1
/**
@ -683,6 +688,8 @@ struct drm_gem_open {
*
* If this bit is set in &DRM_CAP_PRIME, the driver supports exporting PRIME
* buffers via the &DRM_IOCTL_PRIME_HANDLE_TO_FD ioctl.
*
* Starting from kernel version 6.6, this bit is always set in &DRM_CAP_PRIME.
*/
#define DRM_PRIME_CAP_EXPORT 0x2
/**
@ -700,7 +707,8 @@ struct drm_gem_open {
/**
* DRM_CAP_ASYNC_PAGE_FLIP
*
* If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC.
* If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC for legacy
* page-flips.
*/
#define DRM_CAP_ASYNC_PAGE_FLIP 0x7
/**
@ -750,17 +758,23 @@ struct drm_gem_open {
/**
* DRM_CAP_SYNCOBJ
*
* If set to 1, the driver supports sync objects. See
* Documentation/gpu/drm-mm.rst, section "DRM Sync Objects".
* If set to 1, the driver supports sync objects. See :ref:`drm_sync_objects`.
*/
#define DRM_CAP_SYNCOBJ 0x13
/**
* DRM_CAP_SYNCOBJ_TIMELINE
*
* If set to 1, the driver supports timeline operations on sync objects. See
* Documentation/gpu/drm-mm.rst, section "DRM Sync Objects".
* :ref:`drm_sync_objects`.
*/
#define DRM_CAP_SYNCOBJ_TIMELINE 0x14
/**
* DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP
*
* If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC for atomic
* commits.
*/
#define DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP 0x15
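
A hedged sketch of querying the capabilities documented above through DRM_IOCTL_GET_CAP (struct drm_get_cap carries .capability in and .value out); the include path assumes sanitized kernel headers on the include path.

/* Sketch: check whether atomic async page-flips are advertised. */
#include <sys/ioctl.h>
#include <drm/drm.h>

static int has_async_atomic_flip(int drm_fd)
{
    struct drm_get_cap cap = { .capability = DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP };

    if (ioctl(drm_fd, DRM_IOCTL_GET_CAP, &cap) != 0)
        return 0;           /* unknown cap on older kernels: treat as off */
    return cap.value == 1;
}
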
/* DRM_IOCTL_GET_CAP ioctl argument type */
struct drm_get_cap {
@ -830,6 +844,31 @@ struct drm_get_cap {
*/
#define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS 5
/**
* DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT
*
* Drivers for para-virtualized hardware (e.g. vmwgfx, qxl, virtio and
* virtualbox) have additional restrictions for cursor planes (thus
* making cursor planes on those drivers not truly universal,) e.g.
* they need cursor planes to act like one would expect from a mouse
* cursor and have correctly set hotspot properties.
* If this client cap is not set the DRM core will hide cursor plane on
* those virtualized drivers because not setting it implies that the
* client is not capable of dealing with those extra restrictions.
* Clients which do set cursor hotspot and treat the cursor plane
* like a mouse cursor should set this property.
* The client must enable &DRM_CLIENT_CAP_ATOMIC first.
*
* Setting this property on drivers which do not special case
* cursor planes (i.e. non-virtualized drivers) will return
* EOPNOTSUPP, which can be used by userspace to gauge
* requirements of the hardware/drivers they're running on.
*
* This capability is always supported for atomic-capable virtualized
* drivers starting from kernel version 6.6.
*/
#define DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT 6
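
A hedged sketch of a virtualization-aware client opting in to cursor-plane hotspots as described above; per the comment, DRM_CLIENT_CAP_ATOMIC must be enabled first, and EOPNOTSUPP from a non-virtualized driver is expected.

/* Sketch: enable the cursor-plane hotspot client cap. */
#include <sys/ioctl.h>
#include <drm/drm.h>

static int enable_cursor_hotspot(int drm_fd)
{
    struct drm_set_client_cap atomic_cap = {
        .capability = DRM_CLIENT_CAP_ATOMIC, .value = 1,
    };
    struct drm_set_client_cap hotspot_cap = {
        .capability = DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT, .value = 1,
    };

    if (ioctl(drm_fd, DRM_IOCTL_SET_CLIENT_CAP, &atomic_cap) != 0)
        return -1;
    /* EOPNOTSUPP here simply means a non-virtualized driver. */
    return ioctl(drm_fd, DRM_IOCTL_SET_CLIENT_CAP, &hotspot_cap);
}
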
/* DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
struct drm_set_client_cap {
__u64 capability;
@ -881,6 +920,7 @@ struct drm_syncobj_transfer {
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0)
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (1 << 1)
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE (1 << 2) /* wait for time point to become available */
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE (1 << 3) /* set fence deadline to deadline_nsec */
struct drm_syncobj_wait {
__u64 handles;
/* absolute timeout */
@ -889,6 +929,14 @@ struct drm_syncobj_wait {
__u32 flags;
__u32 first_signaled; /* only valid when not waiting all */
__u32 pad;
/**
* @deadline_nsec - fence deadline hint
*
* Deadline hint, in absolute CLOCK_MONOTONIC, to set on backing
* fence(s) if the DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE flag is
* set.
*/
__u64 deadline_nsec;
};
struct drm_syncobj_timeline_wait {
@ -901,6 +949,35 @@ struct drm_syncobj_timeline_wait {
__u32 flags;
__u32 first_signaled; /* only valid when not waiting all */
__u32 pad;
/**
* @deadline_nsec - fence deadline hint
*
* Deadline hint, in absolute CLOCK_MONOTONIC, to set on backing
* fence(s) if the DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE flag is
* set.
*/
__u64 deadline_nsec;
};
/**
* struct drm_syncobj_eventfd
* @handle: syncobj handle.
* @flags: Zero to wait for the point to be signalled, or
* &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE to wait for a fence to be
* available for the point.
* @point: syncobj timeline point (set to zero for binary syncobjs).
* @fd: Existing eventfd to send events to.
* @pad: Must be zero.
*
* Register an eventfd to be signalled by a syncobj. The eventfd counter will
* be incremented by one.
*/
struct drm_syncobj_eventfd {
__u32 handle;
__u32 flags;
__u64 point;
__s32 fd;
__u32 pad;
};
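
A hedged sketch of registering an eventfd against a timeline point with the struct documented above; DRM_IOCTL_SYNCOBJ_EVENTFD is defined later in this diff.

/* Sketch: get an eventfd poke when a syncobj timeline point signals. */
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

static int watch_timeline_point(int drm_fd, __u32 syncobj_handle, __u64 point)
{
    int efd = eventfd(0, EFD_CLOEXEC);
    struct drm_syncobj_eventfd args = {
        .handle = syncobj_handle,
        .flags  = 0,            /* wait for the point to be signalled */
        .point  = point,
        .fd     = efd,
    };

    if (efd < 0 || ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_EVENTFD, &args) != 0)
        return -1;
    return efd;                 /* becomes readable once the point signals */
}
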
@ -966,6 +1043,19 @@ extern "C" {
#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats)
#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version)
#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl)
/**
* DRM_IOCTL_GEM_CLOSE - Close a GEM handle.
*
* GEM handles are not reference-counted by the kernel. User-space is
* responsible for managing their lifetime. For example, if user-space imports
* the same memory object twice on the same DRM file description, the same GEM
* handle is returned by both imports, and user-space needs to ensure
* &DRM_IOCTL_GEM_CLOSE is performed once only. The same situation can happen
* when a memory object is allocated, then exported and imported again on the
* same DRM file description. The &DRM_IOCTL_MODE_GETFB2 IOCTL is an exception
* and always returns fresh new GEM handles even if an existing GEM handle
* already refers to the same memory object before the IOCTL is performed.
*/
#define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x09, struct drm_gem_close)
#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink)
#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open)
@ -1006,7 +1096,37 @@ extern "C" {
#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, struct drm_lock)
#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, struct drm_lock)
/**
* DRM_IOCTL_PRIME_HANDLE_TO_FD - Convert a GEM handle to a DMA-BUF FD.
*
* User-space sets &drm_prime_handle.handle with the GEM handle to export and
* &drm_prime_handle.flags, and gets back a DMA-BUF file descriptor in
* &drm_prime_handle.fd.
*
* The export can fail for any driver-specific reason, e.g. because export is
* not supported for this specific GEM handle (but might be for others).
*
* Support for exporting DMA-BUFs is advertised via &DRM_PRIME_CAP_EXPORT.
*/
#define DRM_IOCTL_PRIME_HANDLE_TO_FD DRM_IOWR(0x2d, struct drm_prime_handle)
/**
* DRM_IOCTL_PRIME_FD_TO_HANDLE - Convert a DMA-BUF FD to a GEM handle.
*
* User-space sets &drm_prime_handle.fd with a DMA-BUF file descriptor to
* import, and gets back a GEM handle in &drm_prime_handle.handle.
* &drm_prime_handle.flags is unused.
*
* If an existing GEM handle refers to the memory object backing the DMA-BUF,
* that GEM handle is returned. Therefore user-space which needs to handle
* arbitrary DMA-BUFs must have a user-space lookup data structure to manually
* reference-count duplicated GEM handles. For more information see
* &DRM_IOCTL_GEM_CLOSE.
*
* The import can fail for any driver-specific reason, e.g. because import is
* only supported for DMA-BUFs allocated on this DRM device.
*
* Support for importing DMA-BUFs is advertised via &DRM_PRIME_CAP_IMPORT.
*/
#define DRM_IOCTL_PRIME_FD_TO_HANDLE DRM_IOWR(0x2e, struct drm_prime_handle)
#define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30)
@ -1058,6 +1178,26 @@ extern "C" {
#define DRM_IOCTL_MODE_PAGE_FLIP DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip)
#define DRM_IOCTL_MODE_DIRTYFB DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd)
/**
* DRM_IOCTL_MODE_CREATE_DUMB - Create a new dumb buffer object.
*
* KMS dumb buffers provide a very primitive way to allocate a buffer object
* suitable for scanout and map it for software rendering. KMS dumb buffers are
* not suitable for hardware-accelerated rendering nor video decoding. KMS dumb
* buffers are not suitable to be displayed on any other device than the KMS
* device where they were allocated from. Also see
* :ref:`kms_dumb_buffer_objects`.
*
* The IOCTL argument is a struct drm_mode_create_dumb.
*
* User-space is expected to create a KMS dumb buffer via this IOCTL, then add
* it as a KMS framebuffer via &DRM_IOCTL_MODE_ADDFB and map it via
* &DRM_IOCTL_MODE_MAP_DUMB.
*
* &DRM_CAP_DUMB_BUFFER indicates whether this IOCTL is supported.
* &DRM_CAP_DUMB_PREFERRED_DEPTH and &DRM_CAP_DUMB_PREFER_SHADOW indicate
* driver preferences for dumb buffers.
*/
#define DRM_IOCTL_MODE_CREATE_DUMB DRM_IOWR(0xB2, struct drm_mode_create_dumb)
#define DRM_IOCTL_MODE_MAP_DUMB DRM_IOWR(0xB3, struct drm_mode_map_dumb)
#define DRM_IOCTL_MODE_DESTROY_DUMB DRM_IOWR(0xB4, struct drm_mode_destroy_dumb)
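
A hedged sketch of the create-then-map flow described above (the ADDFB step is omitted for brevity); struct drm_mode_map_dumb takes the handle and returns an mmap offset on the DRM fd.

/* Sketch: allocate a 32 bpp dumb buffer and map it for software rendering. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/drm.h>

static void *create_and_map_dumb(int drm_fd, uint32_t w, uint32_t h)
{
    struct drm_mode_create_dumb create = { .width = w, .height = h, .bpp = 32 };
    struct drm_mode_map_dumb map = { 0 };

    if (ioctl(drm_fd, DRM_IOCTL_MODE_CREATE_DUMB, &create) != 0)
        return MAP_FAILED;

    map.handle = create.handle;
    if (ioctl(drm_fd, DRM_IOCTL_MODE_MAP_DUMB, &map) != 0)
        return MAP_FAILED;

    /* create.pitch bytes per line, create.size bytes total. */
    return mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
                drm_fd, map.offset);
}
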
@ -1098,8 +1238,13 @@ extern "C" {
* struct as the output.
*
* If the client is DRM master or has &CAP_SYS_ADMIN, &drm_mode_fb_cmd2.handles
* will be filled with GEM buffer handles. Planes are valid until one has a
* zero handle -- this can be used to compute the number of planes.
* will be filled with GEM buffer handles. Fresh new GEM handles are always
* returned, even if another GEM handle referring to the same memory object
* already exists on the DRM file description. The caller is responsible for
* removing the new handles, e.g. via the &DRM_IOCTL_GEM_CLOSE IOCTL. The same
* new handle will be returned for multiple planes in case they use the same
* memory object. Planes are valid until one has a zero handle -- this can be
* used to compute the number of planes.
*
* Otherwise, &drm_mode_fb_cmd2.handles will be zeroed and planes are valid
* until one has a zero &drm_mode_fb_cmd2.pitches.
@ -1107,9 +1252,36 @@ extern "C" {
* If the framebuffer has a format modifier, &DRM_MODE_FB_MODIFIERS will be set
* in &drm_mode_fb_cmd2.flags and &drm_mode_fb_cmd2.modifier will contain the
* modifier. Otherwise, user-space must ignore &drm_mode_fb_cmd2.modifier.
*
* To obtain DMA-BUF FDs for each plane without leaking GEM handles, user-space
* can export each handle via &DRM_IOCTL_PRIME_HANDLE_TO_FD, then immediately
* close each unique handle via &DRM_IOCTL_GEM_CLOSE, making sure to not
* double-close handles which are specified multiple times in the array.
*/
#define DRM_IOCTL_MODE_GETFB2 DRM_IOWR(0xCE, struct drm_mode_fb_cmd2)
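
A hedged sketch of the "export, then immediately close the fresh GEM handle" pattern described above, shown for plane 0 of a GETFB2 result only; multi-planar framebuffers additionally need the de-duplication noted in the comment.

/* Sketch: turn plane 0 of a framebuffer into a DMA-BUF fd without leaking handles. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

static int fb_plane0_to_dmabuf(int drm_fd, __u32 fb_id)
{
    struct drm_mode_fb_cmd2 fb = { .fb_id = fb_id };
    struct drm_prime_handle prime = { 0 };
    struct drm_gem_close gem_close = { 0 };

    if (ioctl(drm_fd, DRM_IOCTL_MODE_GETFB2, &fb) != 0 || fb.handles[0] == 0)
        return -1;

    prime.handle = fb.handles[0];
    prime.flags = DRM_CLOEXEC;
    if (ioctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &prime) != 0)
        prime.fd = -1;

    /* Drop the freshly returned GEM handle so it doesn't leak. */
    gem_close.handle = fb.handles[0];
    ioctl(drm_fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
    return prime.fd;
}
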
#define DRM_IOCTL_SYNCOBJ_EVENTFD DRM_IOWR(0xCF, struct drm_syncobj_eventfd)
/**
* DRM_IOCTL_MODE_CLOSEFB - Close a framebuffer.
*
* This closes a framebuffer previously added via ADDFB/ADDFB2. The IOCTL
* argument is a framebuffer object ID.
*
* This IOCTL is similar to &DRM_IOCTL_MODE_RMFB, except it doesn't disable
* planes and CRTCs. As long as the framebuffer is used by a plane, it's kept
* alive. When the plane no longer uses the framebuffer (because the
* framebuffer is replaced with another one, or the plane is disabled), the
* framebuffer is cleaned up.
*
* This is useful to implement flicker-free transitions between two processes.
*
* Depending on the threat model, user-space may want to ensure that the
* framebuffer doesn't expose any sensitive user information: closed
* framebuffers attached to a plane can be read back by the next DRM master.
*/
#define DRM_IOCTL_MODE_CLOSEFB DRM_IOWR(0xD0, struct drm_mode_closefb)
/*
* Device specific ioctls should only be in their respective headers
* The device specific ioctl range is from 0x40 to 0x9f.
@ -1121,25 +1293,50 @@ extern "C" {
#define DRM_COMMAND_BASE 0x40
#define DRM_COMMAND_END 0xA0
/*
* Header for events written back to userspace on the drm fd. The
* type defines the type of event, the length specifies the total
* length of the event (including the header), and user_data is
* typically a 64 bit value passed with the ioctl that triggered the
* event. A read on the drm fd will always only return complete
* events, that is, if for example the read buffer is 100 bytes, and
* there are two 64 byte events pending, only one will be returned.
/**
* struct drm_event - Header for DRM events
* @type: event type.
* @length: total number of payload bytes (including header).
*
* Event types 0 - 0x7fffffff are generic drm events, 0x80000000 and
* up are chipset specific.
* This struct is a header for events written back to user-space on the DRM FD.
* A read on the DRM FD will always only return complete events: e.g. if the
* read buffer is 100 bytes large and there are two 64 byte events pending,
* only one will be returned.
*
* Event types 0 - 0x7fffffff are generic DRM events, 0x80000000 and
* up are chipset specific. Generic DRM events include &DRM_EVENT_VBLANK,
* &DRM_EVENT_FLIP_COMPLETE and &DRM_EVENT_CRTC_SEQUENCE.
*/
struct drm_event {
__u32 type;
__u32 length;
};
/**
* DRM_EVENT_VBLANK - vertical blanking event
*
* This event is sent in response to &DRM_IOCTL_WAIT_VBLANK with the
* &_DRM_VBLANK_EVENT flag set.
*
* The event payload is a struct drm_event_vblank.
*/
#define DRM_EVENT_VBLANK 0x01
/**
* DRM_EVENT_FLIP_COMPLETE - page-flip completion event
*
* This event is sent in response to an atomic commit or legacy page-flip with
* the &DRM_MODE_PAGE_FLIP_EVENT flag set.
*
* The event payload is a struct drm_event_vblank.
*/
#define DRM_EVENT_FLIP_COMPLETE 0x02
/**
* DRM_EVENT_CRTC_SEQUENCE - CRTC sequence event
*
* This event is sent in response to &DRM_IOCTL_CRTC_QUEUE_SEQUENCE.
*
* The event payload is a struct drm_event_crtc_sequence.
*/
#define DRM_EVENT_CRTC_SEQUENCE 0x03
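
A hedged sketch of the parsing rule documented above: read() on the DRM fd returns whole events only, so the buffer can be walked header by header using drm_event.length.

/* Sketch: drain and decode pending DRM events. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <drm/drm.h>

static void drain_drm_events(int drm_fd)
{
    char buf[1024];
    ssize_t n = read(drm_fd, buf, sizeof(buf));

    for (ssize_t off = 0; n > 0 && off < n; ) {
        struct drm_event ev;

        memcpy(&ev, buf + off, sizeof(ev));
        if (ev.type == DRM_EVENT_FLIP_COMPLETE) {
            struct drm_event_vblank flip;

            memcpy(&flip, buf + off, sizeof(flip));
            printf("flip done on crtc %u, seq %u\n", flip.crtc_id, flip.sequence);
        }
        off += ev.length;       /* length includes the header */
    }
}
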
struct drm_event_vblank {

View File

@ -54,7 +54,7 @@ extern "C" {
* Format modifiers may change any property of the buffer, including the number
* of planes and/or the required allocation size. Format modifiers are
* vendor-namespaced, and as such the relationship between a fourcc code and a
* modifier is specific to the modifer being used. For example, some modifiers
* modifier is specific to the modifier being used. For example, some modifiers
* may preserve meaning - such as number of planes - from the fourcc code,
* whereas others may not.
*
@ -79,7 +79,7 @@ extern "C" {
* format.
* - Higher-level programs interfacing with KMS/GBM/EGL/Vulkan/etc: these users
* see modifiers as opaque tokens they can check for equality and intersect.
* These users musn't need to know to reason about the modifier value
* These users mustn't need to know to reason about the modifier value
* (i.e. they are not expected to extract information out of the modifier).
*
* Vendors should document their modifier usage in as much detail as
@ -323,6 +323,8 @@ extern "C" {
* index 1 = Cr:Cb plane, [39:0] Cr1:Cb1:Cr0:Cb0 little endian
*/
#define DRM_FORMAT_NV15 fourcc_code('N', 'V', '1', '5') /* 2x2 subsampled Cr:Cb plane */
#define DRM_FORMAT_NV20 fourcc_code('N', 'V', '2', '0') /* 2x1 subsampled Cr:Cb plane */
#define DRM_FORMAT_NV30 fourcc_code('N', 'V', '3', '0') /* non-subsampled Cr:Cb plane */
/*
* 2 plane YCbCr MSB aligned
@ -538,7 +540,7 @@ extern "C" {
* This is a tiled layout using 4Kb tiles in row-major layout.
* Within the tile pixels are laid out in 16 256 byte units / sub-tiles which
* are arranged in four groups (two wide, two high) with column-major layout.
* Each group therefore consits out of four 256 byte units, which are also laid
* Each group therefore consists out of four 256 byte units, which are also laid
* out as 2x2 column-major.
* 256 byte units are made out of four 64 byte blocks of pixels, producing
* either a square block or a 2:1 unit.
@ -657,6 +659,49 @@ extern "C" {
*/
#define I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC fourcc_mod_code(INTEL, 12)
/*
* Intel Color Control Surfaces (CCS) for display ver. 14 render compression.
*
* The main surface is tile4 and at plane index 0, the CCS is linear and
* at index 1. A 64B CCS cache line corresponds to an area of 4x1 tiles in
* main surface. In other words, 4 bits in CCS map to a main surface cache
* line pair. The main surface pitch is required to be a multiple of four
* tile4 widths.
*/
#define I915_FORMAT_MOD_4_TILED_MTL_RC_CCS fourcc_mod_code(INTEL, 13)
/*
* Intel Color Control Surfaces (CCS) for display ver. 14 media compression
*
* The main surface is tile4 and at plane index 0, the CCS is linear and
* at index 1. A 64B CCS cache line corresponds to an area of 4x1 tiles in
* main surface. In other words, 4 bits in CCS map to a main surface cache
* line pair. The main surface pitch is required to be a multiple of four
* tile4 widths. For semi-planar formats like NV12, CCS planes follow the
* Y and UV planes i.e., planes 0 and 1 are used for Y and UV surfaces,
* planes 2 and 3 for the respective CCS.
*/
#define I915_FORMAT_MOD_4_TILED_MTL_MC_CCS fourcc_mod_code(INTEL, 14)
/*
* Intel Color Control Surface with Clear Color (CCS) for display ver. 14 render
* compression.
*
* The main surface is tile4 and is at plane index 0 whereas CCS is linear
* and at index 1. The clear color is stored at index 2, and the pitch should
* be ignored. The clear color structure is 256 bits. The first 128 bits
* represents Raw Clear Color Red, Green, Blue and Alpha color each represented
* by 32 bits. The raw clear color is consumed by the 3d engine and generates
* the converted clear color of size 64 bits. The first 32 bits store the Lower
* Converted Clear Color value and the next 32 bits store the Higher Converted
* Clear Color value when applicable. The Converted Clear Color values are
* consumed by the DE. The last 64 bits are used to store Color Discard Enable
* and Depth Clear Value Valid which are ignored by the DE. A CCS cache line
* corresponds to an area of 4x1 tiles in the main surface. The main surface
* pitch is required to be a multiple of 4 tile widths.
*/
#define I915_FORMAT_MOD_4_TILED_MTL_RC_CCS_CC fourcc_mod_code(INTEL, 15)
/*
* Tiled, NV12MT, grouped in 64 (pixels) x 32 (lines) -sized macroblocks
*
@ -1058,7 +1103,7 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
*/
/*
* The top 4 bits (out of the 56 bits alloted for specifying vendor specific
* The top 4 bits (out of the 56 bits allotted for specifying vendor specific
* modifiers) denote the category for modifiers. Currently we have three
* categories of modifiers ie AFBC, MISC and AFRC. We can have a maximum of
* sixteen different categories.
@ -1374,7 +1419,7 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
* Amlogic FBC Memory Saving mode
*
* Indicates the storage is packed when pixel size is multiple of word
* boudaries, i.e. 8bit should be stored in this mode to save allocation
* boundaries, i.e. 8bit should be stored in this mode to save allocation
* memory.
*
* This mode reduces body layout to 3072 bytes per 64x32 superblock with

View File

@ -36,10 +36,10 @@ extern "C" {
/**
* DOC: overview
*
* DRM exposes many UAPI and structure definition to have a consistent
* and standardized interface with user.
* DRM exposes many UAPI and structure definitions to have a consistent
* and standardized interface with users.
* Userspace can refer to these structure definitions and UAPI formats
* to communicate to driver
* to communicate to drivers.
*/
#define DRM_CONNECTOR_NAME_LEN 32
@ -488,6 +488,9 @@ struct drm_mode_get_connector {
* This is not an object ID. This is a per-type connector number. Each
* (type, type_id) combination is unique across all connectors of a DRM
* device.
*
* The (type, type_id) combination is not a stable identifier: the
* type_id can change depending on the driver probe order.
*/
__u32 connector_type_id;
@ -537,7 +540,7 @@ struct drm_mode_get_connector {
/* the PROP_ATOMIC flag is used to hide properties from userspace that
* is not aware of atomic properties. This is mostly to work around
* older userspace (DDX drivers) that read/write each prop they find,
* witout being aware that this could be triggering a lengthy modeset.
* without being aware that this could be triggering a lengthy modeset.
*/
#define DRM_MODE_PROP_ATOMIC 0x80000000
@ -661,7 +664,7 @@ struct drm_mode_fb_cmd {
};
#define DRM_MODE_FB_INTERLACED (1<<0) /* for interlaced framebuffers */
#define DRM_MODE_FB_MODIFIERS (1<<1) /* enables ->modifer[] */
#define DRM_MODE_FB_MODIFIERS (1<<1) /* enables ->modifier[] */
/**
* struct drm_mode_fb_cmd2 - Frame-buffer metadata.
@ -834,10 +837,23 @@ struct drm_color_ctm {
/*
* Conversion matrix in S31.32 sign-magnitude
* (not two's complement!) format.
*
* out matrix in
* |R| |0 1 2| |R|
* |G| = |3 4 5| x |G|
* |B| |6 7 8| |B|
*/
__u64 matrix[9];
};
struct drm_color_ctm_3x4 {
/*
* Conversion matrix with 3x4 dimensions in S31.32 sign-magnitude
* (not two's complement!) format.
*/
__u64 matrix[12];
};
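
A small, illustrative helper for the S31.32 sign-magnitude encoding used by both drm_color_ctm and the new drm_color_ctm_3x4 above (sign in bit 63, magnitude scaled by 2^32, not two's complement).

/* Sketch: pack a double into S31.32 sign-magnitude fixed point. */
#include <math.h>
#include <stdint.h>

static uint64_t ctm_s31_32(double v)
{
    uint64_t sign = v < 0 ? (1ULL << 63) : 0;   /* sign bit, not two's complement */
    double mag = fabs(v);

    /* 31 integer bits . 32 fractional bits of magnitude */
    return sign | (uint64_t)llround(mag * 4294967296.0 /* 2^32 */);
}
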
struct drm_color_lut {
/*
* Values are mapped linearly to 0.0 - 1.0 range, with 0x0 == 0.0 and
@ -849,6 +865,17 @@ struct drm_color_lut {
__u16 reserved;
};
/**
* struct drm_plane_size_hint - Plane size hints
*
* The plane SIZE_HINTS property blob contains an
* array of struct drm_plane_size_hint.
*/
struct drm_plane_size_hint {
__u16 width;
__u16 height;
};
/**
* struct hdr_metadata_infoframe - HDR Metadata Infoframe Data.
*
@ -873,23 +900,23 @@ struct hdr_metadata_infoframe {
* These are coded as unsigned 16-bit values in units of
* 0.00002, where 0x0000 represents zero and 0xC350
* represents 1.0000.
* @display_primaries.x: X cordinate of color primary.
* @display_primaries.y: Y cordinate of color primary.
* @display_primaries.x: X coordinate of color primary.
* @display_primaries.y: Y coordinate of color primary.
*/
struct {
__u16 x, y;
} display_primaries[3];
} display_primaries[3];
/**
* @white_point: White Point of Colorspace Data.
* These are coded as unsigned 16-bit values in units of
* 0.00002, where 0x0000 represents zero and 0xC350
* represents 1.0000.
* @white_point.x: X cordinate of whitepoint of color primary.
* @white_point.y: Y cordinate of whitepoint of color primary.
* @white_point.x: X coordinate of whitepoint of color primary.
* @white_point.y: Y coordinate of whitepoint of color primary.
*/
struct {
__u16 x, y;
} white_point;
} white_point;
/**
* @max_display_mastering_luminance: Max Mastering Display Luminance.
* This value is coded as an unsigned 16-bit value in units of 1 cd/m2,
@ -949,6 +976,15 @@ struct hdr_output_metadata {
* Request that the page-flip is performed as soon as possible, ie. with no
* delay due to waiting for vblank. This may cause tearing to be visible on
* the screen.
*
* When used with atomic uAPI, the driver will return an error if the hardware
* doesn't support performing an asynchronous page-flip for this update.
* User-space should handle this, e.g. by falling back to a regular page-flip.
*
* Note, some hardware might need to perform one last synchronous page-flip
* before being able to switch to asynchronous page-flips. As an exception,
* the driver will return success even though that first page-flip is not
* asynchronous.
*/
#define DRM_MODE_PAGE_FLIP_ASYNC 0x02
#define DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE 0x4
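
A hedged sketch of the fallback described above for atomic commits, using libdrm's drmModeAtomicCommit; `req` is assumed to already carry the plane/FB updates, and -EINVAL is treated as "async flip not possible for this update".

/* Sketch: try an async atomic page-flip, fall back to a vblank-synced one. */
#include <errno.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static int commit_flip(int drm_fd, drmModeAtomicReq *req, void *user_data)
{
    int ret = drmModeAtomicCommit(drm_fd, req,
                                  DRM_MODE_PAGE_FLIP_EVENT | DRM_MODE_PAGE_FLIP_ASYNC,
                                  user_data);
    if (ret == -EINVAL)
        ret = drmModeAtomicCommit(drm_fd, req, DRM_MODE_PAGE_FLIP_EVENT,
                                  user_data);
    return ret;
}
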
@ -1024,13 +1060,25 @@ struct drm_mode_crtc_page_flip_target {
__u64 user_data;
};
/* create a dumb scanout buffer */
/**
* struct drm_mode_create_dumb - Create a KMS dumb buffer for scanout.
* @height: buffer height in pixels
* @width: buffer width in pixels
* @bpp: bits per pixel
* @flags: must be zero
* @handle: buffer object handle
* @pitch: number of bytes between two consecutive lines
* @size: size of the whole buffer in bytes
*
* User-space fills @height, @width, @bpp and @flags. If the IOCTL succeeds,
* the kernel fills @handle, @pitch and @size.
*/
struct drm_mode_create_dumb {
__u32 height;
__u32 width;
__u32 bpp;
__u32 flags;
/* handle, pitch, size will be returned */
__u32 handle;
__u32 pitch;
__u64 size;
@ -1303,6 +1351,16 @@ struct drm_mode_rect {
__s32 y2;
};
/**
* struct drm_mode_closefb
* @fb_id: Framebuffer ID.
* @pad: Must be zero.
*/
struct drm_mode_closefb {
__u32 fb_id;
__u32 pad;
};
#if defined(__cplusplus)
}
#endif

View File

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
*
* Copyright 2016-2022 HabanaLabs, Ltd.
* Copyright 2016-2023 HabanaLabs, Ltd.
* All Rights Reserved.
*
*/
@ -8,8 +8,7 @@
#ifndef HABANALABS_H_
#define HABANALABS_H_
#include <linux/types.h>
#include <linux/ioctl.h>
#include <drm/drm.h>
/*
* Defines that are asic-specific but constitutes as ABI between kernel driver
@ -607,9 +606,9 @@ enum gaudi2_engine_id {
/*
* ASIC specific PLL index
*
* Used to retrieve in frequency info of different IPs via
* HL_INFO_PLL_FREQUENCY under HL_IOCTL_INFO IOCTL. The enums need to be
* used as an index in struct hl_pll_frequency_info
* Used to retrieve frequency info of different IPs via HL_INFO_PLL_FREQUENCY under
* DRM_IOCTL_HL_INFO IOCTL.
* The enums need to be used as an index in struct hl_pll_frequency_info.
*/
enum hl_goya_pll_index {
@ -708,7 +707,8 @@ enum hl_server_type {
HL_SERVER_GAUDI_HLS1H = 2,
HL_SERVER_GAUDI_TYPE1 = 3,
HL_SERVER_GAUDI_TYPE2 = 4,
HL_SERVER_GAUDI2_HLS2 = 5
HL_SERVER_GAUDI2_HLS2 = 5,
HL_SERVER_GAUDI2_TYPE1 = 7
};
/*
@ -723,6 +723,10 @@ enum hl_server_type {
* HL_NOTIFIER_EVENT_GENERAL_HW_ERR - Indicates device HW error
* HL_NOTIFIER_EVENT_RAZWI - Indicates razwi happened
* HL_NOTIFIER_EVENT_PAGE_FAULT - Indicates page fault happened
* HL_NOTIFIER_EVENT_CRITICAL_HW_ERR - Indicates a HW error that requires SW abort and
* HW reset
* HL_NOTIFIER_EVENT_CRITICAL_FW_ERR - Indicates a FW error that requires SW abort and
* HW reset
*/
#define HL_NOTIFIER_EVENT_TPC_ASSERT (1ULL << 0)
#define HL_NOTIFIER_EVENT_UNDEFINED_OPCODE (1ULL << 1)
@ -733,6 +737,8 @@ enum hl_server_type {
#define HL_NOTIFIER_EVENT_GENERAL_HW_ERR (1ULL << 6)
#define HL_NOTIFIER_EVENT_RAZWI (1ULL << 7)
#define HL_NOTIFIER_EVENT_PAGE_FAULT (1ULL << 8)
#define HL_NOTIFIER_EVENT_CRITICL_HW_ERR (1ULL << 9)
#define HL_NOTIFIER_EVENT_CRITICL_FW_ERR (1ULL << 10)
/* Opcode for management ioctl
*
@ -780,16 +786,29 @@ enum hl_server_type {
* The address which accessing it caused the razwi.
* Razwi initiator.
* Razwi cause, was it a page fault or MMU access error.
* May return 0 even though no new data is available, in that case
* timestamp will be 0.
* HL_INFO_DEV_MEM_ALLOC_PAGE_SIZES - Retrieve valid page sizes for device memory allocation
* HL_INFO_SECURED_ATTESTATION - Retrieve attestation report of the boot.
* HL_INFO_REGISTER_EVENTFD - Register eventfd for event notifications.
* HL_INFO_UNREGISTER_EVENTFD - Unregister eventfd
* HL_INFO_GET_EVENTS - Retrieve the last occurred events
* HL_INFO_UNDEFINED_OPCODE_EVENT - Retrieve last undefined opcode error information.
* May return 0 even though no new data is available, in that case
* timestamp will be 0.
* HL_INFO_ENGINE_STATUS - Retrieve the status of all the h/w engines in the asic.
* HL_INFO_PAGE_FAULT_EVENT - Retrieve parameters of captured page fault.
* May return 0 even though no new data is available, in that case
* timestamp will be 0.
* HL_INFO_USER_MAPPINGS - Retrieve user mappings, captured after page fault event.
* HL_INFO_FW_GENERIC_REQ - Send generic request to FW.
* HL_INFO_HW_ERR_EVENT - Retrieve information on the reported HW error.
* May return 0 even though no new data is available, in that case
* timestamp will be 0.
* HL_INFO_FW_ERR_EVENT - Retrieve information on the reported FW error.
* May return 0 even though no new data is available, in that case
* timestamp will be 0.
* HL_INFO_USER_ENGINE_ERR_EVENT - Retrieve the last engine id that reported an error.
*/
#define HL_INFO_HW_IP_INFO 0
#define HL_INFO_HW_EVENTS 1
@ -824,6 +843,10 @@ enum hl_server_type {
#define HL_INFO_PAGE_FAULT_EVENT 33
#define HL_INFO_USER_MAPPINGS 34
#define HL_INFO_FW_GENERIC_REQ 35
#define HL_INFO_HW_ERR_EVENT 36
#define HL_INFO_FW_ERR_EVENT 37
#define HL_INFO_USER_ENGINE_ERR_EVENT 38
#define HL_INFO_DEV_SIGNED 40
#define HL_INFO_VERSION_MAX_LEN 128
#define HL_INFO_CARD_NAME_MAX_LEN 16
@ -863,11 +886,11 @@ enum hl_server_type {
* @dram_enabled: Whether the DRAM is enabled.
* @security_enabled: Whether security is enabled on device.
* @mme_master_slave_mode: Indicate whether the MME is working in master/slave
* configuration. Relevant for Greco and later.
* configuration. Relevant for Gaudi2 and later.
* @cpucp_version: The CPUCP f/w version.
* @card_name: The card name as passed by the f/w.
* @tpc_enabled_mask_ext: Bit-mask that represents which TPCs are enabled.
* Relevant for Greco and later.
* Relevant for Gaudi2 and later.
* @dram_page_size: The DRAM physical page size.
* @edma_enabled_mask: Bit-mask that represents which EDMAs are enabled.
* Relevant for Gaudi2 and later.
@ -875,6 +898,12 @@ enum hl_server_type {
* application to use. Relevant for Gaudi2 and later.
* @device_mem_alloc_default_page_size: default page size used in device memory allocation.
* @revision_id: PCI revision ID of the ASIC.
* @tpc_interrupt_id: interrupt id for TPC to use in order to raise events towards the host.
* @rotator_enabled_mask: Bit-mask that represents which rotators are enabled.
* Relevant for Gaudi3 and later.
* @engine_core_interrupt_reg_addr: interrupt register address for engine core to use
* in order to raise events toward FW.
* @reserved_dram_size: DRAM size reserved for driver and firmware.
*/
struct hl_info_hw_ip_info {
__u64 sram_base_address;
@ -902,15 +931,20 @@ struct hl_info_hw_ip_info {
__u64 dram_page_size;
__u32 edma_enabled_mask;
__u16 number_of_user_interrupts;
__u16 pad2;
__u64 reserved4;
__u8 reserved1;
__u8 reserved2;
__u64 reserved3;
__u64 device_mem_alloc_default_page_size;
__u64 reserved4;
__u64 reserved5;
__u64 reserved6;
__u32 reserved7;
__u8 reserved8;
__u32 reserved6;
__u8 reserved7;
__u8 revision_id;
__u8 pad[2];
__u16 tpc_interrupt_id;
__u32 rotator_enabled_mask;
__u32 reserved9;
__u64 engine_core_interrupt_reg_addr;
__u64 reserved_dram_size;
};
struct hl_info_dram_usage {
@ -958,6 +992,7 @@ struct hl_info_reset_count {
struct hl_info_time_sync {
__u64 device_time;
__u64 host_time;
__u64 tsc_time;
};
/**
@ -1161,6 +1196,53 @@ struct hl_info_undefined_opcode_event {
__u32 stream_id;
};
/**
* struct hl_info_hw_err_event - info about HW error
* @timestamp: timestamp of error occurrence
* @event_id: The async event ID (specific to each device type).
* @pad: size padding for u64 granularity.
*/
struct hl_info_hw_err_event {
__s64 timestamp;
__u16 event_id;
__u16 pad[3];
};
/* FW error definition for event_type in struct hl_info_fw_err_event */
enum hl_info_fw_err_type {
HL_INFO_FW_HEARTBEAT_ERR,
HL_INFO_FW_REPORTED_ERR,
};
/**
* struct hl_info_fw_err_event - info about FW error
* @timestamp: time-stamp of error occurrence
* @err_type: The type of event as defined in hl_info_fw_err_type.
* @event_id: The async event ID (specific to each device type, applicable only when event type is
* HL_INFO_FW_REPORTED_ERR).
* @pad: size padding for u64 granularity.
*/
struct hl_info_fw_err_event {
__s64 timestamp;
__u16 err_type;
__u16 event_id;
__u32 pad;
};
/**
* struct hl_info_engine_err_event - engine error info
* @timestamp: time-stamp of error occurrence
* @engine_id: engine id who reported the error.
* @error_count: Amount of errors reported.
* @pad: size padding for u64 granularity.
*/
struct hl_info_engine_err_event {
__s64 timestamp;
__u16 engine_id;
__u16 error_count;
__u32 pad;
};
/**
* struct hl_info_dev_memalloc_page_sizes - valid page sizes in device mem alloc information.
* @page_order_bitmask: bitmap in which a set bit represents the order of the supported page size
@ -1175,6 +1257,7 @@ struct hl_info_dev_memalloc_page_sizes {
#define SEC_SIGNATURE_BUF_SZ 255 /* (256 - 1) 1 byte used for size */
#define SEC_PUB_DATA_BUF_SZ 510 /* (512 - 2) 2 bytes used for size */
#define SEC_CERTIFICATE_BUF_SZ 2046 /* (2048 - 2) 2 bytes used for size */
#define SEC_DEV_INFO_BUF_SZ 5120
/*
* struct hl_info_sec_attest - attestation report of the boot
@ -1209,6 +1292,32 @@ struct hl_info_sec_attest {
__u8 pad0[2];
};
/*
* struct hl_info_signed - device information signed by a secured device.
* @nonce: number used only once. A random number provided by the host; it is also passed to the
* quote command as qualifying data.
* @pub_data_len: length of the public data (bytes)
* @certificate_len: length of the certificate (bytes)
* @info_sig_len: length of the attestation signature (bytes)
* @public_data: public key info signed info data (outPublic + name + qualifiedName)
* @certificate: certificate for the signing key
* @info_sig: signature of the info + nonce data.
* @dev_info_len: length of device info (bytes)
* @dev_info: device info as byte array.
*/
struct hl_info_signed {
__u32 nonce;
__u16 pub_data_len;
__u16 certificate_len;
__u8 info_sig_len;
__u8 public_data[SEC_PUB_DATA_BUF_SZ];
__u8 certificate[SEC_CERTIFICATE_BUF_SZ];
__u8 info_sig[SEC_SIGNATURE_BUF_SZ];
__u16 dev_info_len;
__u8 dev_info[SEC_DEV_INFO_BUF_SZ];
__u8 pad[2];
};
/**
* struct hl_page_fault_info - page fault information.
* @timestamp: timestamp of page fault.
@ -1344,7 +1453,7 @@ union hl_cb_args {
*
* HL_CS_CHUNK_FLAGS_USER_ALLOC_CB:
* Indicates if the CB was allocated and mapped by userspace
* (relevant to greco and above). User allocated CB is a command buffer,
* (relevant to Gaudi2 and later). User allocated CB is a command buffer,
* allocated by the user, via malloc (or similar). After allocating the
* CB, the user invokes - memory ioctl to map the user memory into a
* device virtual address. The user provides this address via the
@ -1369,7 +1478,7 @@ struct hl_cs_chunk {
* a DRAM address of the internal CB. In Gaudi, this might also
* represent a mapped host address of the CB.
*
* Greco onwards:
* Gaudi2 onwards:
* For H/W queue, this represents either a Handle of CB on the
* Host, or an SRAM, a DRAM, or a mapped host address of the CB.
*
@ -1486,17 +1595,31 @@ struct hl_cs_chunk {
*/
#define HL_CS_FLAGS_FLUSH_PCI_HBW_WRITES 0x8000
/*
* The engines CS is merged into the existing CS ioctls.
* Use it to control engines modes.
*/
#define HL_CS_FLAGS_ENGINES_COMMAND 0x10000
#define HL_CS_STATUS_SUCCESS 0
#define HL_MAX_JOBS_PER_CS 512
/* HL_ENGINE_CORE_ values
/*
* enum hl_engine_command - engine command
*
* HL_ENGINE_CORE_HALT: engine core halt
* HL_ENGINE_CORE_RUN: engine core run
* @HL_ENGINE_CORE_HALT: engine core halt
* @HL_ENGINE_CORE_RUN: engine core run
* @HL_ENGINE_STALL: user engine/s stall
* @HL_ENGINE_RESUME: user engine/s resume
*/
#define HL_ENGINE_CORE_HALT (1 << 0)
#define HL_ENGINE_CORE_RUN (1 << 1)
enum hl_engine_command {
HL_ENGINE_CORE_HALT = 1,
HL_ENGINE_CORE_RUN = 2,
HL_ENGINE_STALL = 3,
HL_ENGINE_RESUME = 4,
HL_ENGINE_COMMAND_MAX
};
struct hl_cs_in {
@ -1520,6 +1643,18 @@ struct hl_cs_in {
/* the core command to be sent towards engine cores */
__u32 core_command;
};
/* Valid only when HL_CS_FLAGS_ENGINES_COMMAND is set */
struct {
/* this holds address of array of uint32 for engines */
__u64 engines;
/* number of engines in engines array */
__u32 num_engines;
/* the engine command to be sent towards engines */
__u32 engine_command;
};
};
union {
@ -2056,6 +2191,13 @@ struct hl_debug_args {
__u32 ctx_id;
};
#define HL_IOCTL_INFO 0x00
#define HL_IOCTL_CB 0x01
#define HL_IOCTL_CS 0x02
#define HL_IOCTL_WAIT_CS 0x03
#define HL_IOCTL_MEMORY 0x04
#define HL_IOCTL_DEBUG 0x05
/*
* Various information operations such as:
* - H/W IP information
@ -2070,8 +2212,7 @@ struct hl_debug_args {
* definitions of structures in kernel and userspace, e.g. in case of old
* userspace and new kernel driver
*/
#define HL_IOCTL_INFO \
_IOWR('H', 0x01, struct hl_info_args)
#define DRM_IOCTL_HL_INFO DRM_IOWR(DRM_COMMAND_BASE + HL_IOCTL_INFO, struct hl_info_args)
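/*
 * A rough sketch of issuing the INFO ioctl through its new DRM-style number.
 * The hl_info_args layout (return_pointer/return_size/op) is an assumption
 * taken from the rest of this file and may need checking. Needs <stdint.h>,
 * <string.h>, <sys/ioctl.h>.
 */
static int hl_query_hw_ip_info(int accel_fd, struct hl_info_hw_ip_info *info)
{
	struct hl_info_args args;

	memset(&args, 0, sizeof(args));
	memset(info, 0, sizeof(*info));
	args.op = HL_INFO_HW_IP_INFO;
	args.return_pointer = (__u64)(uintptr_t)info;
	args.return_size = sizeof(*info);
	/* Same opcode as before, now routed through the DRM/accel node. */
	return ioctl(accel_fd, DRM_IOCTL_HL_INFO, &args);
}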
/*
* Command Buffer
@ -2092,8 +2233,7 @@ struct hl_debug_args {
* and won't be returned to user.
*
*/
#define HL_IOCTL_CB \
_IOWR('H', 0x02, union hl_cb_args)
#define DRM_IOCTL_HL_CB DRM_IOWR(DRM_COMMAND_BASE + HL_IOCTL_CB, union hl_cb_args)
/*
* Command Submission
@ -2115,7 +2255,7 @@ struct hl_debug_args {
* internal. The driver will get completion notifications from the device only
* on JOBS which are enqueued in the external queues.
*
* Greco onwards:
* Gaudi2 onwards:
* There is a single type of queue for all types of engines, either DMA engines
* for transfers from/to the host or inside the device, or compute engines.
* The driver will get completion notifications from the device for all queues.
@ -2145,8 +2285,7 @@ struct hl_debug_args {
* and only if CS N and CS N-1 are exactly the same (same CBs for the same
* queues).
*/
#define HL_IOCTL_CS \
_IOWR('H', 0x03, union hl_cs_args)
#define DRM_IOCTL_HL_CS DRM_IOWR(DRM_COMMAND_BASE + HL_IOCTL_CS, union hl_cs_args)
/*
* Wait for Command Submission
@ -2178,9 +2317,7 @@ struct hl_debug_args {
* HL_WAIT_CS_STATUS_ABORTED - The CS was aborted, usually because the
* device was reset (EIO)
*/
#define HL_IOCTL_WAIT_CS \
_IOWR('H', 0x04, union hl_wait_cs_args)
#define DRM_IOCTL_HL_WAIT_CS DRM_IOWR(DRM_COMMAND_BASE + HL_IOCTL_WAIT_CS, union hl_wait_cs_args)
/*
* Memory
@ -2197,8 +2334,7 @@ struct hl_debug_args {
* There is an option for the user to specify the requested virtual address.
*
*/
#define HL_IOCTL_MEMORY \
_IOWR('H', 0x05, union hl_mem_args)
#define DRM_IOCTL_HL_MEMORY DRM_IOWR(DRM_COMMAND_BASE + HL_IOCTL_MEMORY, union hl_mem_args)
/*
* Debug
@ -2224,10 +2360,9 @@ struct hl_debug_args {
* The driver can decide to "kick out" the user if he abuses this interface.
*
*/
#define HL_IOCTL_DEBUG \
_IOWR('H', 0x06, struct hl_debug_args)
#define DRM_IOCTL_HL_DEBUG DRM_IOWR(DRM_COMMAND_BASE + HL_IOCTL_DEBUG, struct hl_debug_args)
#define HL_COMMAND_START 0x01
#define HL_COMMAND_END 0x07
#define HL_COMMAND_START (DRM_COMMAND_BASE + HL_IOCTL_INFO)
#define HL_COMMAND_END (DRM_COMMAND_BASE + HL_IOCTL_DEBUG + 1)
#endif /* HABANALABS_H_ */

View File

@ -38,13 +38,13 @@ extern "C" {
*/
/**
* DOC: uevents generated by i915 on it's device node
* DOC: uevents generated by i915 on its device node
*
* I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch
* event from the gpu l3 cache. Additional information supplied is ROW,
* event from the GPU L3 cache. Additional information supplied is ROW,
* BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep
* track of these events and if a specific cache-line seems to have a
* persistent error remap it with the l3 remapping tool supplied in
* track of these events, and if a specific cache-line seems to have a
* persistent error, remap it with the L3 remapping tool supplied in
* intel-gpu-tools. The value supplied with the event is always 1.
*
* I915_ERROR_UEVENT - Generated upon error detection, currently only via
@ -280,7 +280,16 @@ enum drm_i915_pmu_engine_sample {
#define I915_PMU_ENGINE_SEMA(class, instance) \
__I915_PMU_ENGINE(class, instance, I915_SAMPLE_SEMA)
#define __I915_PMU_OTHER(x) (__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x))
/*
* Top 4 bits of every non-engine counter are GT id.
*/
#define __I915_PMU_GT_SHIFT (60)
#define ___I915_PMU_OTHER(gt, x) \
(((__u64)__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x)) | \
((__u64)(gt) << __I915_PMU_GT_SHIFT))
#define __I915_PMU_OTHER(x) ___I915_PMU_OTHER(0, x)
#define I915_PMU_ACTUAL_FREQUENCY __I915_PMU_OTHER(0)
#define I915_PMU_REQUESTED_FREQUENCY __I915_PMU_OTHER(1)
@ -290,6 +299,12 @@ enum drm_i915_pmu_engine_sample {
#define I915_PMU_LAST /* Deprecated - do not use */ I915_PMU_RC6_RESIDENCY
#define __I915_PMU_ACTUAL_FREQUENCY(gt) ___I915_PMU_OTHER(gt, 0)
#define __I915_PMU_REQUESTED_FREQUENCY(gt) ___I915_PMU_OTHER(gt, 1)
#define __I915_PMU_INTERRUPTS(gt) ___I915_PMU_OTHER(gt, 2)
#define __I915_PMU_RC6_RESIDENCY(gt) ___I915_PMU_OTHER(gt, 3)
#define __I915_PMU_SOFTWARE_GT_AWAKE_TIME(gt) ___I915_PMU_OTHER(gt, 4)
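/*
 * Sketch of opening one of the per-GT counters with perf_event_open(). The
 * i915 PMU type id is assumed to have been read from
 * /sys/bus/event_source/devices/i915/type by the caller. Needs
 * <linux/perf_event.h>, <string.h>, <sys/syscall.h>, <unistd.h>.
 */
static int open_gt_actual_freq(int i915_pmu_type, unsigned int gt)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = i915_pmu_type;
	/* GT id goes into the top 4 bits of the counter config. */
	attr.config = __I915_PMU_ACTUAL_FREQUENCY(gt);
	/* Uncore PMU: pid = -1, a single CPU, no group, no flags. */
	return syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0UL);
}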
/* Each region is a minimum of 16k, and there are at most 255 of them.
*/
#define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use
@ -659,7 +674,8 @@ typedef struct drm_i915_irq_wait {
* If the IOCTL is successful, the returned parameter will be set to one of the
* following values:
* * 0 if HuC firmware load is not complete,
* * 1 if HuC firmware is authenticated and running.
* * 1 if HuC firmware is loaded and fully authenticated,
* * 2 if HuC firmware is loaded and authenticated for clear media only
*/
#define I915_PARAM_HUC_STATUS 42
@ -677,7 +693,7 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_HAS_EXEC_FENCE 44
/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to capture
* user specified bufffers for post-mortem debugging of GPU hangs. See
* user-specified buffers for post-mortem debugging of GPU hangs. See
* EXEC_OBJECT_CAPTURE.
*/
#define I915_PARAM_HAS_EXEC_CAPTURE 45
@ -771,6 +787,31 @@ typedef struct drm_i915_irq_wait {
*/
#define I915_PARAM_OA_TIMESTAMP_FREQUENCY 57
/*
* Query the status of PXP support in i915.
*
* The query can fail in the following scenarios with the listed error codes:
* -ENODEV = PXP support is not available on the GPU device or in the
* kernel due to missing component drivers or kernel configs.
*
* If the IOCTL is successful, the returned parameter will be set to one of
* the following values:
* 1 = PXP feature is supported and is ready for use.
* 2 = PXP feature is supported but should be ready soon (pending
* initialization of non-i915 system dependencies).
*
* NOTE: When param is supported (positive return values), user space should
* still refer to the GEM PXP context-creation UAPI header specs to be
* aware of possible failure due to system state machine at the time.
*/
#define I915_PARAM_PXP_STATUS 58
/*
* Query if kernel allows marking a context to send a Freq hint to SLPC. This
* will enable use of the strategies allowed by the SLPC algorithm.
*/
#define I915_PARAM_HAS_CONTEXT_FREQ_HINT 59
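/*
 * Sketch of probing these parameters with the GETPARAM ioctl on an open
 * i915 fd; an error return (e.g. EINVAL on older kernels) means the
 * parameter is unknown. Needs <string.h>, <sys/ioctl.h>.
 */
static int i915_getparam(int i915_fd, __s32 param, int *value)
{
	drm_i915_getparam_t gp;

	memset(&gp, 0, sizeof(gp));
	gp.param = param;	/* e.g. I915_PARAM_PXP_STATUS */
	gp.value = value;
	return ioctl(i915_fd, DRM_IOCTL_I915_GETPARAM, &gp);
}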
/* Must be kept compact -- no holes and well documented */
/**
@ -1571,7 +1612,7 @@ struct drm_i915_gem_busy {
* is accurate.
*
* The returned dword is split into two fields to indicate both
* the engine classess on which the object is being read, and the
* the engine classes on which the object is being read, and the
* engine class on which it is currently being written (if any).
*
* The low word (bits 0:15) indicate if the object is being written
@ -1780,7 +1821,7 @@ struct drm_i915_gem_madvise {
__u32 handle;
/* Advice: either the buffer will be needed again in the near future,
* or wont be and could be discarded under memory pressure.
* or won't be and could be discarded under memory pressure.
*/
__u32 madv;
@ -2096,8 +2137,32 @@ struct drm_i915_gem_context_param {
*
* -ENODEV: feature not available
* -EPERM: trying to mark a recoverable or not bannable context as protected
* -ENXIO: A dependency such as a component driver or firmware is not yet
* loaded so user space may need to attempt again. Depending on the
* device, this error may be reported if protected context creation is
* attempted very early after kernel start because the internal timeout
* waiting for such dependencies is not guaranteed to be larger than
* required (numbers differ depending on system and kernel config):
* - ADL/RPL: dependencies may take up to 3 seconds from kernel start
* while context creation internal timeout is 250 milliseconds
* - MTL: dependencies may take up to 8 seconds from kernel start
* while context creation internal timeout is 250 milliseconds
* NOTE: such dependencies happen once, so a subsequent call to create a
* protected context after a prior successful call will not experience
* such timeouts and will not return -ENXIO (unless the driver is reloaded,
* or, depending on the device, resumes from a suspended state).
* -EIO: The firmware did not succeed in creating the protected context.
*/
#define I915_CONTEXT_PARAM_PROTECTED_CONTENT 0xd
/*
* I915_CONTEXT_PARAM_LOW_LATENCY:
*
* Mark this context as a low latency workload which requires aggressive GT
* frequency scaling. Use I915_PARAM_HAS_CONTEXT_FREQ_HINT to check if the kernel
* supports this per context flag.
*/
#define I915_CONTEXT_PARAM_LOW_LATENCY 0xe
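/*
 * Sketch of marking a context as low latency, assuming the
 * I915_PARAM_HAS_CONTEXT_FREQ_HINT check above succeeded and ctx_id refers
 * to a context created earlier. Needs <string.h>, <sys/ioctl.h>.
 */
static int i915_ctx_set_low_latency(int i915_fd, __u32 ctx_id)
{
	struct drm_i915_gem_context_param p;

	memset(&p, 0, sizeof(p));
	p.ctx_id = ctx_id;
	p.param = I915_CONTEXT_PARAM_LOW_LATENCY;
	p.value = 1;
	return ioctl(i915_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
}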
/* Must be kept compact -- no holes and well documented */
/** @value: Context parameter value to be set or queried */
@ -2491,7 +2556,7 @@ struct i915_context_param_engines {
#define I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE 0 /* see i915_context_engines_load_balance */
#define I915_CONTEXT_ENGINES_EXT_BOND 1 /* see i915_context_engines_bond */
#define I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT 2 /* see i915_context_engines_parallel_submit */
struct i915_engine_class_instance engines[0];
struct i915_engine_class_instance engines[];
} __attribute__((packed));
#define I915_DEFINE_CONTEXT_PARAM_ENGINES(name__, N__) struct { \
@ -2573,19 +2638,29 @@ struct drm_i915_reg_read {
*
*/
/*
* struct drm_i915_reset_stats - Return global reset and other context stats
*
* Driver keeps a few stats for each context and also a global reset count.
* This struct can be used to query those stats.
*/
struct drm_i915_reset_stats {
/** @ctx_id: ID of the requested context */
__u32 ctx_id;
/** @flags: MBZ */
__u32 flags;
/* All resets since boot/module reload, for all contexts */
/** @reset_count: All resets since boot/module reload, for all contexts */
__u32 reset_count;
/* Number of batches lost when active in GPU, for this context */
/** @batch_active: Number of batches lost when active in GPU, for this context */
__u32 batch_active;
/* Number of batches lost pending for execution, for this context */
/** @batch_pending: Number of batches lost pending for execution, for this context */
__u32 batch_pending;
/** @pad: MBZ */
__u32 pad;
};
@ -2676,6 +2751,10 @@ enum drm_i915_oa_format {
I915_OAR_FORMAT_A32u40_A4u32_B8_C8,
I915_OA_FORMAT_A24u40_A14u32_B8_C8,
/* MTL OAM */
I915_OAM_FORMAT_MPEC8u64_B8_C8,
I915_OAM_FORMAT_MPEC8u32_B8_C8,
I915_OA_FORMAT_MAX /* non-ABI */
};
@ -2758,6 +2837,25 @@ enum drm_i915_perf_property_id {
*/
DRM_I915_PERF_PROP_POLL_OA_PERIOD,
/**
* Multiple engines may be mapped to the same OA unit. The OA unit is
* identified by class:instance of any engine mapped to it.
*
* This parameter specifies the engine class and must be passed along
* with DRM_I915_PERF_PROP_OA_ENGINE_INSTANCE.
*
* This property is available in perf revision 6.
*/
DRM_I915_PERF_PROP_OA_ENGINE_CLASS,
/**
* This parameter specifies the engine instance and must be passed along
* with DRM_I915_PERF_PROP_OA_ENGINE_CLASS.
*
* This property is available in perf revision 6.
*/
DRM_I915_PERF_PROP_OA_ENGINE_INSTANCE,
DRM_I915_PERF_PROP_MAX /* non-ABI */
};
@ -2940,6 +3038,7 @@ struct drm_i915_query_item {
* - %DRM_I915_QUERY_MEMORY_REGIONS (see struct drm_i915_query_memory_regions)
* - %DRM_I915_QUERY_HWCONFIG_BLOB (see `GuC HWCONFIG blob uAPI`)
* - %DRM_I915_QUERY_GEOMETRY_SUBSLICES (see struct drm_i915_query_topology_info)
* - %DRM_I915_QUERY_GUC_SUBMISSION_VERSION (see struct drm_i915_query_guc_submission_version)
*/
__u64 query_id;
#define DRM_I915_QUERY_TOPOLOGY_INFO 1
@ -2948,6 +3047,7 @@ struct drm_i915_query_item {
#define DRM_I915_QUERY_MEMORY_REGIONS 4
#define DRM_I915_QUERY_HWCONFIG_BLOB 5
#define DRM_I915_QUERY_GEOMETRY_SUBSLICES 6
#define DRM_I915_QUERY_GUC_SUBMISSION_VERSION 7
/* Must be kept compact -- no holes and well documented */
/**
@ -3173,7 +3273,7 @@ struct drm_i915_query_topology_info {
* // enough to hold our array of engines. The kernel will fill out the
* // item.length for us, which is the number of bytes we need.
* //
* // Alternatively a large buffer can be allocated straight away enabling
* // Alternatively a large buffer can be allocated straightaway enabling
* // querying in one pass, in which case item.length should contain the
* // length of the provided buffer.
* err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
@ -3183,7 +3283,7 @@ struct drm_i915_query_topology_info {
* // Now that we allocated the required number of bytes, we call the ioctl
* // again, this time with the data_ptr pointing to our newly allocated
* // blob, which the kernel can then populate with info on all engines.
* item.data_ptr = (uintptr_t)&info,
* item.data_ptr = (uintptr_t)&info;
*
* err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
* if (err) ...
@ -3213,7 +3313,7 @@ struct drm_i915_query_topology_info {
/**
* struct drm_i915_engine_info
*
* Describes one engine and it's capabilities as known to the driver.
* Describes one engine and its capabilities as known to the driver.
*/
struct drm_i915_engine_info {
/** @engine: Engine class and instance. */
@ -3493,6 +3593,20 @@ struct drm_i915_query_memory_regions {
struct drm_i915_memory_region_info regions[];
};
/**
* struct drm_i915_query_guc_submission_version - query GuC submission interface version
*/
struct drm_i915_query_guc_submission_version {
/** @branch: Firmware branch version. */
__u32 branch;
/** @major: Firmware major version. */
__u32 major;
/** @minor: Firmware minor version. */
__u32 minor;
/** @patch: Firmware patch version. */
__u32 patch;
};
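/*
 * Sketch of fetching this struct via the query uAPI. Because the output has
 * a fixed size, a single pass with item.length preset is enough here; the
 * two-pass probe shown in the query DOC example also works. Needs
 * <stdint.h>, <string.h>, <sys/ioctl.h>.
 */
static int i915_query_guc_version(int i915_fd,
				  struct drm_i915_query_guc_submission_version *v)
{
	struct drm_i915_query_item item;
	struct drm_i915_query q;

	memset(&item, 0, sizeof(item));
	memset(&q, 0, sizeof(q));
	memset(v, 0, sizeof(*v));
	item.query_id = DRM_I915_QUERY_GUC_SUBMISSION_VERSION;
	item.length = sizeof(*v);
	item.data_ptr = (__u64)(uintptr_t)v;
	q.num_items = 1;
	q.items_ptr = (__u64)(uintptr_t)&item;
	return ioctl(i915_fd, DRM_IOCTL_I915_QUERY, &q);
}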
/**
* DOC: GuC HWCONFIG blob uAPI
*
@ -3607,9 +3721,13 @@ struct drm_i915_gem_create_ext {
*
* For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
* struct drm_i915_gem_create_ext_protected_content.
*
* For I915_GEM_CREATE_EXT_SET_PAT usage see
* struct drm_i915_gem_create_ext_set_pat.
*/
#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
#define I915_GEM_CREATE_EXT_SET_PAT 2
__u64 extensions;
};
@ -3724,6 +3842,43 @@ struct drm_i915_gem_create_ext_protected_content {
__u32 flags;
};
/**
* struct drm_i915_gem_create_ext_set_pat - The
* I915_GEM_CREATE_EXT_SET_PAT extension.
*
* If this extension is provided, the specified caching policy (PAT index) is
* applied to the buffer object.
*
* Below is an example on how to create an object with specific caching policy:
*
* .. code-block:: C
*
* struct drm_i915_gem_create_ext_set_pat set_pat_ext = {
* .base = { .name = I915_GEM_CREATE_EXT_SET_PAT },
* .pat_index = 0,
* };
* struct drm_i915_gem_create_ext create_ext = {
* .size = PAGE_SIZE,
* .extensions = (uintptr_t)&set_pat_ext,
* };
*
* int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
* if (err) ...
*/
struct drm_i915_gem_create_ext_set_pat {
/** @base: Extension link. See struct i915_user_extension. */
struct i915_user_extension base;
/**
* @pat_index: PAT index to be set
* PAT index is a bit field in Page Table Entry to control caching
* behaviors for GPU accesses. The definition of PAT index is
* platform dependent and can be found in hardware specifications.
*/
__u32 pat_index;
/** @rsvd: reserved for future use */
__u32 rsvd;
};
/* ID of the protected content session managed by i915 when PXP is active */
#define I915_PROTECTED_CONTENT_DEFAULT_SESSION 0xf

View File

@ -53,21 +53,44 @@ extern "C" {
#define DRM_IVPU_PARAM_CORE_CLOCK_RATE 3
#define DRM_IVPU_PARAM_NUM_CONTEXTS 4
#define DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS 5
#define DRM_IVPU_PARAM_CONTEXT_PRIORITY 6
#define DRM_IVPU_PARAM_CONTEXT_PRIORITY 6 /* Deprecated */
#define DRM_IVPU_PARAM_CONTEXT_ID 7
#define DRM_IVPU_PARAM_FW_API_VERSION 8
#define DRM_IVPU_PARAM_ENGINE_HEARTBEAT 9
#define DRM_IVPU_PARAM_UNIQUE_INFERENCE_ID 10
#define DRM_IVPU_PARAM_TILE_CONFIG 11
#define DRM_IVPU_PARAM_SKU 12
#define DRM_IVPU_PARAM_CAPABILITIES 13
#define DRM_IVPU_PLATFORM_TYPE_SILICON 0
/* Deprecated, use DRM_IVPU_JOB_PRIORITY */
#define DRM_IVPU_CONTEXT_PRIORITY_IDLE 0
#define DRM_IVPU_CONTEXT_PRIORITY_NORMAL 1
#define DRM_IVPU_CONTEXT_PRIORITY_FOCUS 2
#define DRM_IVPU_CONTEXT_PRIORITY_REALTIME 3
#define DRM_IVPU_JOB_PRIORITY_DEFAULT 0
#define DRM_IVPU_JOB_PRIORITY_IDLE 1
#define DRM_IVPU_JOB_PRIORITY_NORMAL 2
#define DRM_IVPU_JOB_PRIORITY_FOCUS 3
#define DRM_IVPU_JOB_PRIORITY_REALTIME 4
/**
* DRM_IVPU_CAP_METRIC_STREAMER
*
* Metric streamer support. Provides sampling of various hardware performance
* metrics like DMA bandwidth and cache miss/hits. Can be used for profiling.
*/
#define DRM_IVPU_CAP_METRIC_STREAMER 1
/**
* DRM_IVPU_CAP_DMA_MEMORY_RANGE
*
* Driver has capability to allocate separate memory range
* accessible by hardware DMA.
*/
#define DRM_IVPU_CAP_DMA_MEMORY_RANGE 2
/**
* struct drm_ivpu_param - Get/Set VPU parameters
*/
@ -96,10 +119,6 @@ struct drm_ivpu_param {
* %DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
* Lowest VPU virtual address available in the current context (read-only)
*
* %DRM_IVPU_PARAM_CONTEXT_PRIORITY:
* Value of current context scheduling priority (read-write).
* See DRM_IVPU_CONTEXT_PRIORITY_* for possible values.
*
* %DRM_IVPU_PARAM_CONTEXT_ID:
* Current context ID, always greater than 0 (read-only)
*
@ -119,6 +138,8 @@ struct drm_ivpu_param {
* %DRM_IVPU_PARAM_SKU:
* VPU SKU ID (read-only)
*
* %DRM_IVPU_PARAM_CAPABILITIES:
* Supported capabilities (read-only)
*/
__u32 param;
@ -129,8 +150,10 @@ struct drm_ivpu_param {
__u64 value;
};
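/*
 * Sketch of checking one of the new capability bits. DRM_IOCTL_IVPU_GET_PARAM
 * and the @index field are assumptions taken from the rest of this header
 * (not visible in this hunk). Needs <string.h>, <sys/ioctl.h>.
 */
static int ivpu_has_cap(int vpu_fd, __u32 cap)
{
	struct drm_ivpu_param p;

	memset(&p, 0, sizeof(p));
	p.param = DRM_IVPU_PARAM_CAPABILITIES;
	p.index = cap;		/* e.g. DRM_IVPU_CAP_METRIC_STREAMER */
	if (ioctl(vpu_fd, DRM_IOCTL_IVPU_GET_PARAM, &p))
		return 0;
	return p.value != 0;
}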
#define DRM_IVPU_BO_HIGH_MEM 0x00000001
#define DRM_IVPU_BO_SHAVE_MEM 0x00000001
#define DRM_IVPU_BO_HIGH_MEM DRM_IVPU_BO_SHAVE_MEM
#define DRM_IVPU_BO_MAPPABLE 0x00000002
#define DRM_IVPU_BO_DMA_MEM 0x00000004
#define DRM_IVPU_BO_CACHED 0x00000000
#define DRM_IVPU_BO_UNCACHED 0x00010000
@ -140,6 +163,7 @@ struct drm_ivpu_param {
#define DRM_IVPU_BO_FLAGS \
(DRM_IVPU_BO_HIGH_MEM | \
DRM_IVPU_BO_MAPPABLE | \
DRM_IVPU_BO_DMA_MEM | \
DRM_IVPU_BO_CACHE_MASK)
/**
@ -175,7 +199,7 @@ struct drm_ivpu_bo_create {
*
* %DRM_IVPU_BO_UNCACHED:
*
* Allocated BO will not be cached on host side nor snooped on the VPU side.
* Not supported. Use DRM_IVPU_BO_WC instead.
*
* %DRM_IVPU_BO_WC:
*
@ -265,10 +289,23 @@ struct drm_ivpu_submit {
* to be executed. The offset has to be 8-byte aligned.
*/
__u32 commands_offset;
/**
* @priority:
*
* Priority to be set for related job command queue, can be one of the following:
* %DRM_IVPU_JOB_PRIORITY_DEFAULT
* %DRM_IVPU_JOB_PRIORITY_IDLE
* %DRM_IVPU_JOB_PRIORITY_NORMAL
* %DRM_IVPU_JOB_PRIORITY_FOCUS
* %DRM_IVPU_JOB_PRIORITY_REALTIME
*/
__u32 priority;
};
/* drm_ivpu_bo_wait job status codes */
#define DRM_IVPU_JOB_STATUS_SUCCESS 0
#define DRM_IVPU_JOB_STATUS_ABORTED 256
/**
* struct drm_ivpu_bo_wait - Wait for BO to become inactive

View File

@ -86,6 +86,7 @@ struct drm_msm_timespec {
#define MSM_PARAM_CMDLINE 0x0d /* WO: override for task cmdline */
#define MSM_PARAM_VA_START 0x0e /* RO: start of valid GPU iova range */
#define MSM_PARAM_VA_SIZE 0x0f /* RO: size of valid GPU iova range (bytes) */
#define MSM_PARAM_HIGHEST_BANK_BIT 0x10 /* RO */
/* For backwards compat. The original support for preemption was based on
* a single ring per priority level so # of priority levels equals the #
@ -139,6 +140,8 @@ struct drm_msm_gem_new {
#define MSM_INFO_GET_NAME 0x03 /* get debug name, returned by pointer */
#define MSM_INFO_SET_IOVA 0x04 /* set the iova, passed by value */
#define MSM_INFO_GET_FLAGS 0x05 /* get the MSM_BO_x flags */
#define MSM_INFO_SET_METADATA 0x06 /* set userspace metadata */
#define MSM_INFO_GET_METADATA 0x07 /* get userspace metadata */
struct drm_msm_gem_info {
__u32 handle; /* in */
@ -151,8 +154,13 @@ struct drm_msm_gem_info {
#define MSM_PREP_READ 0x01
#define MSM_PREP_WRITE 0x02
#define MSM_PREP_NOSYNC 0x04
#define MSM_PREP_BOOST 0x08
#define MSM_PREP_FLAGS (MSM_PREP_READ | MSM_PREP_WRITE | MSM_PREP_NOSYNC)
#define MSM_PREP_FLAGS (MSM_PREP_READ | \
MSM_PREP_WRITE | \
MSM_PREP_NOSYNC | \
MSM_PREP_BOOST | \
0)
struct drm_msm_gem_cpu_prep {
__u32 handle; /* in */
@ -181,7 +189,11 @@ struct drm_msm_gem_cpu_fini {
*/
struct drm_msm_gem_submit_reloc {
__u32 submit_offset; /* in, offset from submit_bo */
#ifdef __cplusplus
__u32 _or; /* in, value OR'd with result */
#else
__u32 or; /* in, value OR'd with result */
#endif
__s32 shift; /* in, amount of left shift (can be negative) */
__u32 reloc_idx; /* in, index of reloc_bo buffer */
__u64 reloc_offset; /* in, offset from start of reloc_bo */
@ -286,6 +298,11 @@ struct drm_msm_gem_submit {
};
#define MSM_WAIT_FENCE_BOOST 0x00000001
#define MSM_WAIT_FENCE_FLAGS ( \
MSM_WAIT_FENCE_BOOST | \
0)
/* The normal way to synchronize with the GPU is just to CPU_PREP on
* a buffer if you need to access it from the CPU (other cmdstream
* submission from same or other contexts, PAGE_FLIP ioctl, etc, all
@ -295,7 +312,7 @@ struct drm_msm_gem_submit {
*/
struct drm_msm_wait_fence {
__u32 fence; /* in */
__u32 pad;
__u32 flags; /* in, bitmask of MSM_WAIT_FENCE_x */
struct drm_msm_timespec timeout; /* in */
__u32 queueid; /* in, submitqueue id */
};
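/*
 * Sketch of waiting on a fence with the new boost hint, assuming an open msm
 * fd and a fence/queue id returned by an earlier submit;
 * DRM_IOCTL_MSM_WAIT_FENCE is defined elsewhere in this header. Needs
 * <string.h>, <sys/ioctl.h>.
 */
static int msm_wait_fence_boosted(int msm_fd, __u32 fence, __u32 queueid,
				  struct drm_msm_timespec timeout)
{
	struct drm_msm_wait_fence wait;

	memset(&wait, 0, sizeof(wait));
	wait.fence = fence;
	wait.flags = MSM_WAIT_FENCE_BOOST;	/* ask the GPU to clock up */
	wait.timeout = timeout;
	wait.queueid = queueid;
	return ioctl(msm_fd, DRM_IOCTL_MSM_WAIT_FENCE, &wait);
}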

View File

@ -33,11 +33,104 @@
extern "C" {
#endif
#define NOUVEAU_GETPARAM_PCI_VENDOR 3
#define NOUVEAU_GETPARAM_PCI_DEVICE 4
#define NOUVEAU_GETPARAM_BUS_TYPE 5
#define NOUVEAU_GETPARAM_FB_SIZE 8
#define NOUVEAU_GETPARAM_AGP_SIZE 9
#define NOUVEAU_GETPARAM_CHIPSET_ID 11
#define NOUVEAU_GETPARAM_VM_VRAM_BASE 12
#define NOUVEAU_GETPARAM_GRAPH_UNITS 13
#define NOUVEAU_GETPARAM_PTIMER_TIME 14
#define NOUVEAU_GETPARAM_HAS_BO_USAGE 15
#define NOUVEAU_GETPARAM_HAS_PAGEFLIP 16
/*
* NOUVEAU_GETPARAM_EXEC_PUSH_MAX - query max pushes through getparam
*
* Query the maximum amount of IBs that can be pushed through a single
* &drm_nouveau_exec structure and hence a single &DRM_IOCTL_NOUVEAU_EXEC
* ioctl().
*/
#define NOUVEAU_GETPARAM_EXEC_PUSH_MAX 17
/*
* NOUVEAU_GETPARAM_VRAM_BAR_SIZE - query bar size
*
* Query the VRAM BAR size.
*/
#define NOUVEAU_GETPARAM_VRAM_BAR_SIZE 18
/*
* NOUVEAU_GETPARAM_VRAM_USED
*
* Get remaining VRAM size.
*/
#define NOUVEAU_GETPARAM_VRAM_USED 19
/*
* NOUVEAU_GETPARAM_HAS_VMA_TILEMODE
*
* Query whether tile mode and PTE kind are accepted with VM allocs or not.
*/
#define NOUVEAU_GETPARAM_HAS_VMA_TILEMODE 20
struct drm_nouveau_getparam {
__u64 param;
__u64 value;
};
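/*
 * Sketch of reading one of the parameters listed above, e.g. the maximum
 * number of pushes accepted by a single EXEC, on an open nouveau fd.
 * DRM_IOCTL_NOUVEAU_GETPARAM is defined further down in this header. Needs
 * <string.h>, <sys/ioctl.h>.
 */
static int nouveau_getparam(int nv_fd, __u64 param, __u64 *value)
{
	struct drm_nouveau_getparam gp;
	int ret;

	memset(&gp, 0, sizeof(gp));
	gp.param = param;	/* e.g. NOUVEAU_GETPARAM_EXEC_PUSH_MAX */
	ret = ioctl(nv_fd, DRM_IOCTL_NOUVEAU_GETPARAM, &gp);
	if (!ret)
		*value = gp.value;
	return ret;
}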
/*
* Those are used to support selecting the main engine used on Kepler.
* This goes into drm_nouveau_channel_alloc::tt_ctxdma_handle
*/
#define NOUVEAU_FIFO_ENGINE_GR 0x01
#define NOUVEAU_FIFO_ENGINE_VP 0x02
#define NOUVEAU_FIFO_ENGINE_PPP 0x04
#define NOUVEAU_FIFO_ENGINE_BSP 0x08
#define NOUVEAU_FIFO_ENGINE_CE 0x30
struct drm_nouveau_channel_alloc {
__u32 fb_ctxdma_handle;
__u32 tt_ctxdma_handle;
__s32 channel;
__u32 pushbuf_domains;
/* Notifier memory */
__u32 notifier_handle;
/* DRM-enforced subchannel assignments */
struct {
__u32 handle;
__u32 grclass;
} subchan[8];
__u32 nr_subchan;
};
struct drm_nouveau_channel_free {
__s32 channel;
};
struct drm_nouveau_notifierobj_alloc {
__u32 channel;
__u32 handle;
__u32 size;
__u32 offset;
};
struct drm_nouveau_gpuobj_free {
__s32 channel;
__u32 handle;
};
#define NOUVEAU_GEM_DOMAIN_CPU (1 << 0)
#define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1)
#define NOUVEAU_GEM_DOMAIN_GART (1 << 2)
#define NOUVEAU_GEM_DOMAIN_MAPPABLE (1 << 3)
#define NOUVEAU_GEM_DOMAIN_COHERENT (1 << 4)
/* The BO will never be shared via import or export. */
#define NOUVEAU_GEM_DOMAIN_NO_SHARE (1 << 5)
#define NOUVEAU_GEM_TILE_COMP 0x00030000 /* nv50-only */
#define NOUVEAU_GEM_TILE_LAYOUT_MASK 0x0000ff00
@ -98,6 +191,7 @@ struct drm_nouveau_gem_pushbuf_push {
__u32 pad;
__u64 offset;
__u64 length;
#define NOUVEAU_GEM_PUSHBUF_NO_PREFETCH (1 << 23)
};
struct drm_nouveau_gem_pushbuf {
@ -126,16 +220,231 @@ struct drm_nouveau_gem_cpu_fini {
__u32 handle;
};
#define DRM_NOUVEAU_GETPARAM 0x00 /* deprecated */
/**
* struct drm_nouveau_sync - sync object
*
* This structure serves as synchronization mechanism for (potentially)
* asynchronous operations such as EXEC or VM_BIND.
*/
struct drm_nouveau_sync {
/**
* @flags: the flags for a sync object
*
* The first 8 bits are used to determine the type of the sync object.
*/
__u32 flags;
#define DRM_NOUVEAU_SYNC_SYNCOBJ 0x0
#define DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ 0x1
#define DRM_NOUVEAU_SYNC_TYPE_MASK 0xf
/**
* @handle: the handle of the sync object
*/
__u32 handle;
/**
* @timeline_value:
*
* The timeline point of the sync object in case the syncobj is of
* type DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ.
*/
__u64 timeline_value;
};
/**
* struct drm_nouveau_vm_init - GPU VA space init structure
*
* Used to initialize the GPU's VA space for a user client, telling the kernel
* which portion of the VA space is managed by the UMD and kernel respectively.
*
* For the UMD to use the VM_BIND uAPI, this must be called before any BOs or
* channels are created; if called afterwards DRM_IOCTL_NOUVEAU_VM_INIT fails
* with -ENOSYS.
*/
struct drm_nouveau_vm_init {
/**
* @kernel_managed_addr: start address of the kernel managed VA space
* region
*/
__u64 kernel_managed_addr;
/**
* @kernel_managed_size: size of the kernel managed VA space region in
* bytes
*/
__u64 kernel_managed_size;
};
/**
* struct drm_nouveau_vm_bind_op - VM_BIND operation
*
* This structure represents a single VM_BIND operation. UMDs should pass
* an array of this structure via struct drm_nouveau_vm_bind's &op_ptr field.
*/
struct drm_nouveau_vm_bind_op {
/**
* @op: the operation type
*
* Supported values:
*
* %DRM_NOUVEAU_VM_BIND_OP_MAP - Map a GEM object to the GPU's VA
* space. Optionally, the &DRM_NOUVEAU_VM_BIND_SPARSE flag can be
* passed to instruct the kernel to create sparse mappings for the
* given range.
*
* %DRM_NOUVEAU_VM_BIND_OP_UNMAP - Unmap an existing mapping in the
* GPU's VA space. If the region the mapping is located in is a
* sparse region, new sparse mappings are created where the unmapped
* (memory backed) mapping was mapped previously. To remove a sparse
* region the &DRM_NOUVEAU_VM_BIND_SPARSE must be set.
*/
__u32 op;
#define DRM_NOUVEAU_VM_BIND_OP_MAP 0x0
#define DRM_NOUVEAU_VM_BIND_OP_UNMAP 0x1
/**
* @flags: the flags for a &drm_nouveau_vm_bind_op
*
* Supported values:
*
* %DRM_NOUVEAU_VM_BIND_SPARSE - Indicates that an allocated VA
* space region should be sparse.
*/
__u32 flags;
#define DRM_NOUVEAU_VM_BIND_SPARSE (1 << 8)
/**
* @handle: the handle of the DRM GEM object to map
*/
__u32 handle;
/**
* @pad: 32 bit padding, should be 0
*/
__u32 pad;
/**
* @addr:
*
* the address the VA space region or (memory backed) mapping should be mapped to
*/
__u64 addr;
/**
* @bo_offset: the offset within the BO backing the mapping
*/
__u64 bo_offset;
/**
* @range: the size of the requested mapping in bytes
*/
__u64 range;
};
/**
* struct drm_nouveau_vm_bind - structure for DRM_IOCTL_NOUVEAU_VM_BIND
*/
struct drm_nouveau_vm_bind {
/**
* @op_count: the number of &drm_nouveau_vm_bind_op
*/
__u32 op_count;
/**
* @flags: the flags for a &drm_nouveau_vm_bind ioctl
*
* Supported values:
*
* %DRM_NOUVEAU_VM_BIND_RUN_ASYNC - Indicates that the given VM_BIND
* operation should be executed asynchronously by the kernel.
*
* If this flag is not supplied the kernel executes the associated
* operations synchronously and doesn't accept any &drm_nouveau_sync
* objects.
*/
__u32 flags;
#define DRM_NOUVEAU_VM_BIND_RUN_ASYNC 0x1
/**
* @wait_count: the number of wait &drm_nouveau_syncs
*/
__u32 wait_count;
/**
* @sig_count: the number of &drm_nouveau_syncs to signal when finished
*/
__u32 sig_count;
/**
* @wait_ptr: pointer to &drm_nouveau_syncs to wait for
*/
__u64 wait_ptr;
/**
* @sig_ptr: pointer to &drm_nouveau_syncs to signal when finished
*/
__u64 sig_ptr;
/**
* @op_ptr: pointer to the &drm_nouveau_vm_bind_ops to execute
*/
__u64 op_ptr;
};
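/*
 * Sketch of a single synchronous MAP operation: RUN_ASYNC is not set, so no
 * drm_nouveau_sync objects are attached, matching the rule described in
 * @flags above. DRM_IOCTL_NOUVEAU_VM_BIND is defined further down. Needs
 * <stdint.h>, <string.h>, <sys/ioctl.h>.
 */
static int nouveau_vm_map(int nv_fd, __u32 bo_handle, __u64 gpu_va,
			  __u64 bo_offset, __u64 size)
{
	struct drm_nouveau_vm_bind_op op;
	struct drm_nouveau_vm_bind bind;

	memset(&op, 0, sizeof(op));
	memset(&bind, 0, sizeof(bind));
	op.op = DRM_NOUVEAU_VM_BIND_OP_MAP;
	op.handle = bo_handle;
	op.addr = gpu_va;
	op.bo_offset = bo_offset;
	op.range = size;
	bind.op_count = 1;
	bind.op_ptr = (__u64)(uintptr_t)&op;
	return ioctl(nv_fd, DRM_IOCTL_NOUVEAU_VM_BIND, &bind);
}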
/**
* struct drm_nouveau_exec_push - EXEC push operation
*
* This structure represents a single EXEC push operation. UMDs should pass an
* array of this structure via struct drm_nouveau_exec's &push_ptr field.
*/
struct drm_nouveau_exec_push {
/**
* @va: the virtual address of the push buffer mapping
*/
__u64 va;
/**
* @va_len: the length of the push buffer mapping
*/
__u32 va_len;
/**
* @flags: the flags for this push buffer mapping
*/
__u32 flags;
#define DRM_NOUVEAU_EXEC_PUSH_NO_PREFETCH 0x1
};
/**
* struct drm_nouveau_exec - structure for DRM_IOCTL_NOUVEAU_EXEC
*/
struct drm_nouveau_exec {
/**
* @channel: the channel to execute the push buffer in
*/
__u32 channel;
/**
* @push_count: the number of &drm_nouveau_exec_push ops
*/
__u32 push_count;
/**
* @wait_count: the number of wait &drm_nouveau_syncs
*/
__u32 wait_count;
/**
* @sig_count: the number of &drm_nouveau_syncs to signal when finished
*/
__u32 sig_count;
/**
* @wait_ptr: pointer to &drm_nouveau_syncs to wait for
*/
__u64 wait_ptr;
/**
* @sig_ptr: pointer to &drm_nouveau_syncs to signal when finished
*/
__u64 sig_ptr;
/**
* @push_ptr: pointer to &drm_nouveau_exec_push ops
*/
__u64 push_ptr;
};
#define DRM_NOUVEAU_GETPARAM 0x00
#define DRM_NOUVEAU_SETPARAM 0x01 /* deprecated */
#define DRM_NOUVEAU_CHANNEL_ALLOC 0x02 /* deprecated */
#define DRM_NOUVEAU_CHANNEL_FREE 0x03 /* deprecated */
#define DRM_NOUVEAU_CHANNEL_ALLOC 0x02
#define DRM_NOUVEAU_CHANNEL_FREE 0x03
#define DRM_NOUVEAU_GROBJ_ALLOC 0x04 /* deprecated */
#define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC 0x05 /* deprecated */
#define DRM_NOUVEAU_GPUOBJ_FREE 0x06 /* deprecated */
#define DRM_NOUVEAU_NVIF 0x07
#define DRM_NOUVEAU_SVM_INIT 0x08
#define DRM_NOUVEAU_SVM_BIND 0x09
#define DRM_NOUVEAU_VM_INIT 0x10
#define DRM_NOUVEAU_VM_BIND 0x11
#define DRM_NOUVEAU_EXEC 0x12
#define DRM_NOUVEAU_GEM_NEW 0x40
#define DRM_NOUVEAU_GEM_PUSHBUF 0x41
#define DRM_NOUVEAU_GEM_CPU_PREP 0x42
@ -188,6 +497,10 @@ struct drm_nouveau_svm_bind {
#define NOUVEAU_SVM_BIND_TARGET__GPU_VRAM (1UL << 31)
#define DRM_IOCTL_NOUVEAU_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GETPARAM, struct drm_nouveau_getparam)
#define DRM_IOCTL_NOUVEAU_CHANNEL_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_ALLOC, struct drm_nouveau_channel_alloc)
#define DRM_IOCTL_NOUVEAU_CHANNEL_FREE DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_FREE, struct drm_nouveau_channel_free)
#define DRM_IOCTL_NOUVEAU_SVM_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SVM_INIT, struct drm_nouveau_svm_init)
#define DRM_IOCTL_NOUVEAU_SVM_BIND DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SVM_BIND, struct drm_nouveau_svm_bind)
@ -197,6 +510,9 @@ struct drm_nouveau_svm_bind {
#define DRM_IOCTL_NOUVEAU_GEM_CPU_FINI DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_CPU_FINI, struct drm_nouveau_gem_cpu_fini)
#define DRM_IOCTL_NOUVEAU_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_INFO, struct drm_nouveau_gem_info)
#define DRM_IOCTL_NOUVEAU_VM_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_VM_INIT, struct drm_nouveau_vm_init)
#define DRM_IOCTL_NOUVEAU_VM_BIND DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_VM_BIND, struct drm_nouveau_vm_bind)
#define DRM_IOCTL_NOUVEAU_EXEC DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_EXEC, struct drm_nouveau_exec)
#if defined(__cplusplus)
}
#endif

View File

@ -0,0 +1,962 @@
/* SPDX-License-Identifier: MIT */
/* Copyright (C) 2023 Collabora ltd. */
#ifndef _PANTHOR_DRM_H_
#define _PANTHOR_DRM_H_
#include "drm.h"
#if defined(__cplusplus)
extern "C" {
#endif
/**
* DOC: Introduction
*
* This documentation describes the Panthor IOCTLs.
*
* Just a few generic rules about the data passed to the Panthor IOCTLs:
*
* - Structures must be aligned on 64-bit/8-byte. If the object is not
* naturally aligned, a padding field must be added.
* - Fields must be explicitly aligned to their natural type alignment with
* pad[0..N] fields.
* - All padding fields will be checked by the driver to make sure they are
* zeroed.
* - Flags can be added, but not removed/replaced.
* - New fields can be added to the main structures (the structures
* directly passed to the ioctl). Those fields can be added at the end of
* the structure, or replace existing padding fields. Any new field being
* added must preserve the behavior that existed before those fields were
* added when a value of zero is passed.
* - New fields can be added to indirect objects (objects pointed by the
* main structure), iff those objects are passed a size to reflect the
* size known by the userspace driver (see drm_panthor_obj_array::stride
* or drm_panthor_dev_query::size).
* - If the kernel driver is too old to know some fields, those will be
* ignored if zero, and otherwise rejected (and so will be zero on output).
* - If userspace is too old to know some fields, those will be zeroed
* (input) before the structure is parsed by the kernel driver.
* - Each new flag/field addition must come with a driver version update so
* the userspace driver doesn't have to trial and error to know which
* flags are supported.
* - Structures should not contain unions, as this would defeat the
* extensibility of such structures.
* - IOCTLs can't be removed or replaced. New IOCTL IDs should be placed
* at the end of the drm_panthor_ioctl_id enum.
*/
/**
* DOC: MMIO regions exposed to userspace.
*
* .. c:macro:: DRM_PANTHOR_USER_MMIO_OFFSET
*
* File offset for all MMIO regions being exposed to userspace. Don't use
* this value directly, use DRM_PANTHOR_USER_<name>_OFFSET values instead.
* pgoffset passed to mmap2() is an unsigned long, which forces us to use a
* different offset on 32-bit and 64-bit systems.
*
* .. c:macro:: DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET
*
* File offset for the LATEST_FLUSH_ID register. The Userspace driver controls
* GPU cache flushing through CS instructions, but the flush reduction
* mechanism requires a flush_id. This flush_id could be queried with an
* ioctl, but Arm provides a well-isolated register page containing only this
* read-only register, so let's expose this page through a static mmap offset
* and allow direct mapping of this MMIO region so we can avoid the
* user <-> kernel round-trip.
*/
#define DRM_PANTHOR_USER_MMIO_OFFSET_32BIT (1ull << 43)
#define DRM_PANTHOR_USER_MMIO_OFFSET_64BIT (1ull << 56)
#define DRM_PANTHOR_USER_MMIO_OFFSET (sizeof(unsigned long) < 8 ? \
DRM_PANTHOR_USER_MMIO_OFFSET_32BIT : \
DRM_PANTHOR_USER_MMIO_OFFSET_64BIT)
#define DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET (DRM_PANTHOR_USER_MMIO_OFFSET | 0)
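/*
 * Sketch of mapping the flush-id page described above so the UMD can read
 * LATEST_FLUSH_ID without a system call; the register is assumed to be a
 * 32-bit read-only value at the start of the page. Needs <sys/mman.h>,
 * <unistd.h>.
 */
static volatile __u32 *panthor_map_flush_id(int panthor_fd)
{
	void *page = mmap(NULL, getpagesize(), PROT_READ, MAP_SHARED,
			  panthor_fd, DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET);

	return page == MAP_FAILED ? NULL : (volatile __u32 *)page;
}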
/**
* DOC: IOCTL IDs
*
* enum drm_panthor_ioctl_id - IOCTL IDs
*
* Place new ioctls at the end, don't re-order, don't replace or remove entries.
*
* These IDs are not meant to be used directly. Use the DRM_IOCTL_PANTHOR_xxx
* definitions instead.
*/
enum drm_panthor_ioctl_id {
/** @DRM_PANTHOR_DEV_QUERY: Query device information. */
DRM_PANTHOR_DEV_QUERY = 0,
/** @DRM_PANTHOR_VM_CREATE: Create a VM. */
DRM_PANTHOR_VM_CREATE,
/** @DRM_PANTHOR_VM_DESTROY: Destroy a VM. */
DRM_PANTHOR_VM_DESTROY,
/** @DRM_PANTHOR_VM_BIND: Bind/unbind memory to a VM. */
DRM_PANTHOR_VM_BIND,
/** @DRM_PANTHOR_VM_GET_STATE: Get VM state. */
DRM_PANTHOR_VM_GET_STATE,
/** @DRM_PANTHOR_BO_CREATE: Create a buffer object. */
DRM_PANTHOR_BO_CREATE,
/**
* @DRM_PANTHOR_BO_MMAP_OFFSET: Get the file offset to pass to
* mmap to map a GEM object.
*/
DRM_PANTHOR_BO_MMAP_OFFSET,
/** @DRM_PANTHOR_GROUP_CREATE: Create a scheduling group. */
DRM_PANTHOR_GROUP_CREATE,
/** @DRM_PANTHOR_GROUP_DESTROY: Destroy a scheduling group. */
DRM_PANTHOR_GROUP_DESTROY,
/**
* @DRM_PANTHOR_GROUP_SUBMIT: Submit jobs to queues belonging
* to a specific scheduling group.
*/
DRM_PANTHOR_GROUP_SUBMIT,
/** @DRM_PANTHOR_GROUP_GET_STATE: Get the state of a scheduling group. */
DRM_PANTHOR_GROUP_GET_STATE,
/** @DRM_PANTHOR_TILER_HEAP_CREATE: Create a tiler heap. */
DRM_PANTHOR_TILER_HEAP_CREATE,
/** @DRM_PANTHOR_TILER_HEAP_DESTROY: Destroy a tiler heap. */
DRM_PANTHOR_TILER_HEAP_DESTROY,
};
/**
* DRM_IOCTL_PANTHOR() - Build a Panthor IOCTL number
* @__access: Access type. Must be R, W or RW.
* @__id: One of the DRM_PANTHOR_xxx id.
* @__type: Suffix of the type being passed to the IOCTL.
*
* Don't use this macro directly, use the DRM_IOCTL_PANTHOR_xxx
* values instead.
*
* Return: An IOCTL number to be passed to ioctl() from userspace.
*/
#define DRM_IOCTL_PANTHOR(__access, __id, __type) \
DRM_IO ## __access(DRM_COMMAND_BASE + DRM_PANTHOR_ ## __id, \
struct drm_panthor_ ## __type)
#define DRM_IOCTL_PANTHOR_DEV_QUERY \
DRM_IOCTL_PANTHOR(WR, DEV_QUERY, dev_query)
#define DRM_IOCTL_PANTHOR_VM_CREATE \
DRM_IOCTL_PANTHOR(WR, VM_CREATE, vm_create)
#define DRM_IOCTL_PANTHOR_VM_DESTROY \
DRM_IOCTL_PANTHOR(WR, VM_DESTROY, vm_destroy)
#define DRM_IOCTL_PANTHOR_VM_BIND \
DRM_IOCTL_PANTHOR(WR, VM_BIND, vm_bind)
#define DRM_IOCTL_PANTHOR_VM_GET_STATE \
DRM_IOCTL_PANTHOR(WR, VM_GET_STATE, vm_get_state)
#define DRM_IOCTL_PANTHOR_BO_CREATE \
DRM_IOCTL_PANTHOR(WR, BO_CREATE, bo_create)
#define DRM_IOCTL_PANTHOR_BO_MMAP_OFFSET \
DRM_IOCTL_PANTHOR(WR, BO_MMAP_OFFSET, bo_mmap_offset)
#define DRM_IOCTL_PANTHOR_GROUP_CREATE \
DRM_IOCTL_PANTHOR(WR, GROUP_CREATE, group_create)
#define DRM_IOCTL_PANTHOR_GROUP_DESTROY \
DRM_IOCTL_PANTHOR(WR, GROUP_DESTROY, group_destroy)
#define DRM_IOCTL_PANTHOR_GROUP_SUBMIT \
DRM_IOCTL_PANTHOR(WR, GROUP_SUBMIT, group_submit)
#define DRM_IOCTL_PANTHOR_GROUP_GET_STATE \
DRM_IOCTL_PANTHOR(WR, GROUP_GET_STATE, group_get_state)
#define DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE \
DRM_IOCTL_PANTHOR(WR, TILER_HEAP_CREATE, tiler_heap_create)
#define DRM_IOCTL_PANTHOR_TILER_HEAP_DESTROY \
DRM_IOCTL_PANTHOR(WR, TILER_HEAP_DESTROY, tiler_heap_destroy)
/**
* DOC: IOCTL arguments
*/
/**
* struct drm_panthor_obj_array - Object array.
*
* This object is used to pass an array of objects whose size is subject to changes in
* future versions of the driver. In order to support this mutability, we pass a stride
* describing the size of the object as known by userspace.
*
* You shouldn't fill drm_panthor_obj_array fields directly. You should instead use
* the DRM_PANTHOR_OBJ_ARRAY() macro that takes care of initializing the stride to
* the object size.
*/
struct drm_panthor_obj_array {
/** @stride: Stride of object struct. Used for versioning. */
__u32 stride;
/** @count: Number of objects in the array. */
__u32 count;
/** @array: User pointer to an array of objects. */
__u64 array;
};
/**
* DRM_PANTHOR_OBJ_ARRAY() - Initialize a drm_panthor_obj_array field.
* @cnt: Number of elements in the array.
* @ptr: Pointer to the array to pass to the kernel.
*
* Macro initializing a drm_panthor_obj_array based on the object size as known
* by userspace.
*/
#define DRM_PANTHOR_OBJ_ARRAY(cnt, ptr) \
{ .stride = sizeof((ptr)[0]), .count = (cnt), .array = (__u64)(uintptr_t)(ptr) }
/**
* enum drm_panthor_sync_op_flags - Synchronization operation flags.
*/
enum drm_panthor_sync_op_flags {
/** @DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK: Synchronization handle type mask. */
DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK = 0xff,
/** @DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ: Synchronization object type. */
DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ = 0,
/**
* @DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ: Timeline synchronization
* object type.
*/
DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ = 1,
/** @DRM_PANTHOR_SYNC_OP_WAIT: Wait operation. */
DRM_PANTHOR_SYNC_OP_WAIT = 0 << 31,
/** @DRM_PANTHOR_SYNC_OP_SIGNAL: Signal operation. */
DRM_PANTHOR_SYNC_OP_SIGNAL = (int)(1u << 31),
};
/**
* struct drm_panthor_sync_op - Synchronization operation.
*/
struct drm_panthor_sync_op {
/** @flags: Synchronization operation flags. Combination of DRM_PANTHOR_SYNC_OP values. */
__u32 flags;
/** @handle: Sync handle. */
__u32 handle;
/**
* @timeline_value: MBZ if
* (flags & DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK) !=
* DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ.
*/
__u64 timeline_value;
};
/**
* enum drm_panthor_dev_query_type - Query type
*
* Place new types at the end, don't re-order, don't remove or replace.
*/
enum drm_panthor_dev_query_type {
/** @DRM_PANTHOR_DEV_QUERY_GPU_INFO: Query GPU information. */
DRM_PANTHOR_DEV_QUERY_GPU_INFO = 0,
/** @DRM_PANTHOR_DEV_QUERY_CSIF_INFO: Query command-stream interface information. */
DRM_PANTHOR_DEV_QUERY_CSIF_INFO,
};
/**
* struct drm_panthor_gpu_info - GPU information
*
* Structure grouping all queryable information relating to the GPU.
*/
struct drm_panthor_gpu_info {
/** @gpu_id : GPU ID. */
__u32 gpu_id;
#define DRM_PANTHOR_ARCH_MAJOR(x) ((x) >> 28)
#define DRM_PANTHOR_ARCH_MINOR(x) (((x) >> 24) & 0xf)
#define DRM_PANTHOR_ARCH_REV(x) (((x) >> 20) & 0xf)
#define DRM_PANTHOR_PRODUCT_MAJOR(x) (((x) >> 16) & 0xf)
#define DRM_PANTHOR_VERSION_MAJOR(x) (((x) >> 12) & 0xf)
#define DRM_PANTHOR_VERSION_MINOR(x) (((x) >> 4) & 0xff)
#define DRM_PANTHOR_VERSION_STATUS(x) ((x) & 0xf)
/** @gpu_rev: GPU revision. */
__u32 gpu_rev;
/** @csf_id: Command stream frontend ID. */
__u32 csf_id;
#define DRM_PANTHOR_CSHW_MAJOR(x) (((x) >> 26) & 0x3f)
#define DRM_PANTHOR_CSHW_MINOR(x) (((x) >> 20) & 0x3f)
#define DRM_PANTHOR_CSHW_REV(x) (((x) >> 16) & 0xf)
#define DRM_PANTHOR_MCU_MAJOR(x) (((x) >> 10) & 0x3f)
#define DRM_PANTHOR_MCU_MINOR(x) (((x) >> 4) & 0x3f)
#define DRM_PANTHOR_MCU_REV(x) ((x) & 0xf)
/** @l2_features: L2-cache features. */
__u32 l2_features;
/** @tiler_features: Tiler features. */
__u32 tiler_features;
/** @mem_features: Memory features. */
__u32 mem_features;
/** @mmu_features: MMU features. */
__u32 mmu_features;
#define DRM_PANTHOR_MMU_VA_BITS(x) ((x) & 0xff)
/** @thread_features: Thread features. */
__u32 thread_features;
/** @max_threads: Maximum number of threads. */
__u32 max_threads;
/** @thread_max_workgroup_size: Maximum workgroup size. */
__u32 thread_max_workgroup_size;
/**
* @thread_max_barrier_size: Maximum number of threads that can wait
* simultaneously on a barrier.
*/
__u32 thread_max_barrier_size;
/** @coherency_features: Coherency features. */
__u32 coherency_features;
/** @texture_features: Texture features. */
__u32 texture_features[4];
/** @as_present: Bitmask encoding the number of address-space exposed by the MMU. */
__u32 as_present;
/** @shader_present: Bitmask encoding the shader cores exposed by the GPU. */
__u64 shader_present;
/** @l2_present: Bitmask encoding the L2 caches exposed by the GPU. */
__u64 l2_present;
/** @tiler_present: Bitmask encoding the tiler units exposed by the GPU. */
__u64 tiler_present;
/** @core_features: Used to discriminate core variants when they exist. */
__u32 core_features;
/** @pad: MBZ. */
__u32 pad;
};
/**
* struct drm_panthor_csif_info - Command stream interface information
*
* Structure grouping all queryable information relating to the command stream interface.
*/
struct drm_panthor_csif_info {
/** @csg_slot_count: Number of command stream group slots exposed by the firmware. */
__u32 csg_slot_count;
/** @cs_slot_count: Number of command stream slots per group. */
__u32 cs_slot_count;
/** @cs_reg_count: Number of command stream registers. */
__u32 cs_reg_count;
/** @scoreboard_slot_count: Number of scoreboard slots. */
__u32 scoreboard_slot_count;
/**
* @unpreserved_cs_reg_count: Number of command stream registers reserved by
* the kernel driver to call a userspace command stream.
*
* All registers can be used by a userspace command stream, but the
* [cs_slot_count - unpreserved_cs_reg_count .. cs_slot_count] registers are
* used by the kernel when DRM_PANTHOR_IOCTL_GROUP_SUBMIT is called.
*/
__u32 unpreserved_cs_reg_count;
/**
* @pad: Padding field, set to zero.
*/
__u32 pad;
};
/**
* struct drm_panthor_dev_query - Arguments passed to DRM_PANTHOR_IOCTL_DEV_QUERY
*/
struct drm_panthor_dev_query {
/** @type: the query type (see drm_panthor_dev_query_type). */
__u32 type;
/**
* @size: size of the type being queried.
*
* If pointer is NULL, size is updated by the driver to provide the
* output structure size. If pointer is not NULL, the driver will
* only copy min(size, actual_structure_size) bytes to the pointer,
* and update the size accordingly. This allows us to extend query
* types without breaking userspace.
*/
__u32 size;
/**
* @pointer: user pointer to a query type struct.
*
* Pointer can be NULL, in which case, nothing is copied, but the
* actual structure size is returned. If not NULL, it must point to
* a location that's large enough to hold size bytes.
*/
__u64 pointer;
};
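/*
 * Sketch of the query flow described above, here with a non-NULL pointer and
 * a caller-provided drm_panthor_gpu_info: the kernel copies at most @size
 * bytes and updates @size accordingly. Needs <stdint.h>, <string.h>,
 * <sys/ioctl.h>.
 */
static int panthor_query_gpu_info(int fd, struct drm_panthor_gpu_info *info)
{
	struct drm_panthor_dev_query q;

	memset(&q, 0, sizeof(q));
	memset(info, 0, sizeof(*info));
	q.type = DRM_PANTHOR_DEV_QUERY_GPU_INFO;
	q.size = sizeof(*info);
	q.pointer = (__u64)(uintptr_t)info;
	return ioctl(fd, DRM_IOCTL_PANTHOR_DEV_QUERY, &q);
}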
/**
* struct drm_panthor_vm_create - Arguments passed to DRM_PANTHOR_IOCTL_VM_CREATE
*/
struct drm_panthor_vm_create {
/** @flags: VM flags, MBZ. */
__u32 flags;
/** @id: Returned VM ID. */
__u32 id;
/**
* @user_va_range: Size of the VA space reserved for user objects.
*
* The kernel will pick the remaining space to map kernel-only objects to the
* VM (heap chunks, heap context, ring buffers, kernel synchronization objects,
* ...). If the space left for kernel objects is too small, kernel object
* allocation will fail further down the road. One can use
* drm_panthor_gpu_info::mmu_features to extract the total virtual address
* range, and choose a user_va_range that leaves some space for the kernel.
*
* If user_va_range is zero, the kernel will pick a sensible value based on
* TASK_SIZE and the virtual range supported by the GPU MMU (the kernel/user
* split should leave enough VA space for userspace processes to support SVM,
* while still allowing the kernel to map some amount of kernel objects in
* the kernel VA range). The value chosen by the driver will be returned in
* @user_va_range.
*
* User VA space always starts at 0x0, kernel VA space is always placed after
* the user VA range.
*/
__u64 user_va_range;
};
/**
* struct drm_panthor_vm_destroy - Arguments passed to DRM_PANTHOR_IOCTL_VM_DESTROY
*/
struct drm_panthor_vm_destroy {
/** @id: ID of the VM to destroy. */
__u32 id;
/** @pad: MBZ. */
__u32 pad;
};
/**
* enum drm_panthor_vm_bind_op_flags - VM bind operation flags
*/
enum drm_panthor_vm_bind_op_flags {
/**
* @DRM_PANTHOR_VM_BIND_OP_MAP_READONLY: Map the memory read-only.
*
* Only valid with DRM_PANTHOR_VM_BIND_OP_TYPE_MAP.
*/
DRM_PANTHOR_VM_BIND_OP_MAP_READONLY = 1 << 0,
/**
* @DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC: Map the memory not-executable.
*
* Only valid with DRM_PANTHOR_VM_BIND_OP_TYPE_MAP.
*/
DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC = 1 << 1,
/**
* @DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED: Map the memory uncached.
*
* Only valid with DRM_PANTHOR_VM_BIND_OP_TYPE_MAP.
*/
DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED = 1 << 2,
/**
* @DRM_PANTHOR_VM_BIND_OP_TYPE_MASK: Mask used to determine the type of operation.
*/
DRM_PANTHOR_VM_BIND_OP_TYPE_MASK = (int)(0xfu << 28),
/** @DRM_PANTHOR_VM_BIND_OP_TYPE_MAP: Map operation. */
DRM_PANTHOR_VM_BIND_OP_TYPE_MAP = 0 << 28,
/** @DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP: Unmap operation. */
DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP = 1 << 28,
/**
* @DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY: No VM operation.
*
* Just serves as a synchronization point on a VM queue.
*
* Only valid if %DRM_PANTHOR_VM_BIND_ASYNC is set in drm_panthor_vm_bind::flags,
* and drm_panthor_vm_bind_op::syncs contains at least one element.
*/
DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY = 2 << 28,
};
/**
* struct drm_panthor_vm_bind_op - VM bind operation
*/
struct drm_panthor_vm_bind_op {
/** @flags: Combination of drm_panthor_vm_bind_op_flags flags. */
__u32 flags;
/**
* @bo_handle: Handle of the buffer object to map.
* MBZ for unmap or sync-only operations.
*/
__u32 bo_handle;
/**
* @bo_offset: Buffer object offset.
* MBZ for unmap or sync-only operations.
*/
__u64 bo_offset;
/**
* @va: Virtual address to map/unmap.
* MBZ for sync-only operations.
*/
__u64 va;
/**
* @size: Size to map/unmap.
* MBZ for sync-only operations.
*/
__u64 size;
/**
* @syncs: Array of struct drm_panthor_sync_op synchronization
* operations.
*
* This array must be empty if %DRM_PANTHOR_VM_BIND_ASYNC is not set on
* the drm_panthor_vm_bind object containing this VM bind operation.
*
* This array shall not be empty for sync-only operations.
*/
struct drm_panthor_obj_array syncs;
};
/**
* enum drm_panthor_vm_bind_flags - VM bind flags
*/
enum drm_panthor_vm_bind_flags {
/**
* @DRM_PANTHOR_VM_BIND_ASYNC: VM bind operations are queued to the VM
* queue instead of being executed synchronously.
*/
DRM_PANTHOR_VM_BIND_ASYNC = 1 << 0,
};
/**
* struct drm_panthor_vm_bind - Arguments passed to DRM_IOCTL_PANTHOR_VM_BIND
*/
struct drm_panthor_vm_bind {
/** @vm_id: VM targeted by the bind request. */
__u32 vm_id;
/** @flags: Combination of drm_panthor_vm_bind_flags flags. */
__u32 flags;
/** @ops: Array of struct drm_panthor_vm_bind_op bind operations. */
struct drm_panthor_obj_array ops;
};
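/*
 * A usage sketch (not part of the UAPI): a synchronous MAP of a BO into a
 * VM using the two structures above. DRM_IOCTL_PANTHOR_VM_BIND and the
 * {stride, count, array} layout of drm_panthor_obj_array are assumed from
 * earlier in this header; no syncs are attached since the bind is not async.
 */
#include <stdint.h>
#include <sys/ioctl.h>

static int panthor_vm_map(int fd, __u32 vm_id, __u32 bo_handle,
			  __u64 bo_offset, __u64 va, __u64 size)
{
	struct drm_panthor_vm_bind_op op = {
		.flags = DRM_PANTHOR_VM_BIND_OP_TYPE_MAP,
		.bo_handle = bo_handle,
		.bo_offset = bo_offset,
		.va = va,
		.size = size,
		/* .syncs left empty: required when the bind is synchronous */
	};
	struct drm_panthor_vm_bind req = {
		.vm_id = vm_id,
		.flags = 0, /* DRM_PANTHOR_VM_BIND_ASYNC not set */
		.ops = {
			.stride = sizeof(op),
			.count = 1,
			.array = (__u64)(uintptr_t)&op,
		},
	};

	return ioctl(fd, DRM_IOCTL_PANTHOR_VM_BIND, &req);
}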
/**
* enum drm_panthor_vm_state - VM states.
*/
enum drm_panthor_vm_state {
/**
* @DRM_PANTHOR_VM_STATE_USABLE: VM is usable.
*
* New VM operations will be accepted on this VM.
*/
DRM_PANTHOR_VM_STATE_USABLE,
/**
* @DRM_PANTHOR_VM_STATE_UNUSABLE: VM is unusable.
*
* Something put the VM in an unusable state (like an asynchronous
* VM_BIND request failing for any reason).
*
* Once the VM is in this state, all new MAP operations will be
* rejected, and any GPU job targeting this VM will fail.
* UNMAP operations are still accepted.
*
* The only way to recover from an unusable VM is to create a new
* VM, and destroy the old one.
*/
DRM_PANTHOR_VM_STATE_UNUSABLE,
};
/**
* struct drm_panthor_vm_get_state - Get VM state.
*/
struct drm_panthor_vm_get_state {
/** @vm_id: VM targeted by the get_state request. */
__u32 vm_id;
/**
* @state: state returned by the driver.
*
* Must be one of the enum drm_panthor_vm_state values.
*/
__u32 state;
};
/**
* enum drm_panthor_bo_flags - Buffer object flags, passed at creation time.
*/
enum drm_panthor_bo_flags {
/** @DRM_PANTHOR_BO_NO_MMAP: The buffer object will never be CPU-mapped in userspace. */
DRM_PANTHOR_BO_NO_MMAP = (1 << 0),
};
/**
* struct drm_panthor_bo_create - Arguments passed to DRM_IOCTL_PANTHOR_BO_CREATE.
*/
struct drm_panthor_bo_create {
/**
* @size: Requested size for the object
*
* The (page-aligned) allocated size for the object will be returned.
*/
__u64 size;
/**
* @flags: Flags. Must be a combination of drm_panthor_bo_flags flags.
*/
__u32 flags;
/**
* @exclusive_vm_id: Exclusive VM this buffer object will be mapped to.
*
* If not zero, the field must refer to a valid VM ID, and implies that:
* - the buffer object will only ever be bound to that VM
* - it cannot be exported as a PRIME fd
*/
__u32 exclusive_vm_id;
/**
* @handle: Returned handle for the object.
*
* Object handles are nonzero.
*/
__u32 handle;
/** @pad: MBZ. */
__u32 pad;
};
/**
* struct drm_panthor_bo_mmap_offset - Arguments passed to DRM_IOCTL_PANTHOR_BO_MMAP_OFFSET.
*/
struct drm_panthor_bo_mmap_offset {
/** @handle: Handle of the object we want an mmap offset for. */
__u32 handle;
/** @pad: MBZ. */
__u32 pad;
/** @offset: The fake offset to use for subsequent mmap calls. */
__u64 offset;
};
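/*
 * A usage sketch (not part of the UAPI): creating a BO and CPU-mapping it
 * through the fake offset returned above. DRM_IOCTL_PANTHOR_BO_CREATE and
 * DRM_IOCTL_PANTHOR_BO_MMAP_OFFSET are assumed from earlier in this header.
 */
#include <sys/ioctl.h>
#include <sys/mman.h>

static void *panthor_bo_create_and_map(int fd, __u64 size, __u32 *handle)
{
	struct drm_panthor_bo_create create = {
		.size = size,
		.flags = 0,
		.exclusive_vm_id = 0, /* 0: not tied to a single VM */
	};
	struct drm_panthor_bo_mmap_offset moff = { 0 };

	if (ioctl(fd, DRM_IOCTL_PANTHOR_BO_CREATE, &create))
		return MAP_FAILED;
	moff.handle = create.handle;
	if (ioctl(fd, DRM_IOCTL_PANTHOR_BO_MMAP_OFFSET, &moff))
		return MAP_FAILED;
	*handle = create.handle;
	/* create.size now holds the page-aligned size actually allocated */
	return mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, moff.offset);
}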
/**
* struct drm_panthor_queue_create - Queue creation arguments.
*/
struct drm_panthor_queue_create {
/**
* @priority: Defines the priority of queues inside a group. Goes from 0 to 15,
* 15 being the highest priority.
*/
__u8 priority;
/** @pad: Padding fields, MBZ. */
__u8 pad[3];
/** @ringbuf_size: Size of the ring buffer to allocate to this queue. */
__u32 ringbuf_size;
};
/**
* enum drm_panthor_group_priority - Scheduling group priority
*/
enum drm_panthor_group_priority {
/** @PANTHOR_GROUP_PRIORITY_LOW: Low priority group. */
PANTHOR_GROUP_PRIORITY_LOW = 0,
/** @PANTHOR_GROUP_PRIORITY_MEDIUM: Medium priority group. */
PANTHOR_GROUP_PRIORITY_MEDIUM,
/** @PANTHOR_GROUP_PRIORITY_HIGH: High priority group. */
PANTHOR_GROUP_PRIORITY_HIGH,
};
/**
* struct drm_panthor_group_create - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_CREATE
*/
struct drm_panthor_group_create {
/** @queues: Array of drm_panthor_queue_create elements. */
struct drm_panthor_obj_array queues;
/**
* @max_compute_cores: Maximum number of cores that can be used by compute
* jobs across CS queues bound to this group.
*
* Must be less than or equal to the number of bits set in @compute_core_mask.
*/
__u8 max_compute_cores;
/**
* @max_fragment_cores: Maximum number of cores that can be used by fragment
* jobs across CS queues bound to this group.
*
* Must be less than or equal to the number of bits set in @fragment_core_mask.
*/
__u8 max_fragment_cores;
/**
* @max_tiler_cores: Maximum number of tilers that can be used by tiler jobs
* across CS queues bound to this group.
*
* Must be less than or equal to the number of bits set in @tiler_core_mask.
*/
__u8 max_tiler_cores;
/** @priority: Group priority (see enum drm_panthor_group_priority). */
__u8 priority;
/** @pad: Padding field, MBZ. */
__u32 pad;
/**
* @compute_core_mask: Mask encoding cores that can be used for compute jobs.
*
* This field must have at least @max_compute_cores bits set.
*
* The bits set here should also be set in drm_panthor_gpu_info::shader_present.
*/
__u64 compute_core_mask;
/**
* @fragment_core_mask: Mask encoding cores that can be used for fragment jobs.
*
* This field must have at least @max_fragment_cores bits set.
*
* The bits set here should also be set in drm_panthor_gpu_info::shader_present.
*/
__u64 fragment_core_mask;
/**
* @tiler_core_mask: Mask encoding cores that can be used for tiler jobs.
*
* This field must have at least @max_tiler_cores bits set.
*
* The bits set here should also be set in drm_panthor_gpu_info::tiler_present.
*/
__u64 tiler_core_mask;
/**
* @vm_id: VM ID to bind this group to.
*
* All submission to queues bound to this group will use this VM.
*/
__u32 vm_id;
/**
* @group_handle: Returned group handle. Passed back when submitting jobs or
* destroying a group.
*/
__u32 group_handle;
};
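/*
 * A usage sketch (not part of the UAPI): creating a group with a single
 * queue. DRM_IOCTL_PANTHOR_GROUP_CREATE and the {stride, count, array}
 * layout of drm_panthor_obj_array are assumed from earlier in this header;
 * the ring buffer size is purely illustrative, and the core masks are taken
 * from drm_panthor_gpu_info as the comments above require.
 */
#include <stdint.h>
#include <sys/ioctl.h>

static int panthor_group_create(int fd, __u32 vm_id, __u64 shader_present,
				__u64 tiler_present, __u32 *group_handle)
{
	struct drm_panthor_queue_create queue = {
		.priority = 0,
		.ringbuf_size = 64 * 1024, /* illustrative */
	};
	struct drm_panthor_group_create req = {
		.queues = {
			.stride = sizeof(queue),
			.count = 1,
			.array = (__u64)(uintptr_t)&queue,
		},
		.max_compute_cores = __builtin_popcountll(shader_present),
		.max_fragment_cores = __builtin_popcountll(shader_present),
		.max_tiler_cores = __builtin_popcountll(tiler_present),
		.priority = PANTHOR_GROUP_PRIORITY_MEDIUM,
		.compute_core_mask = shader_present,
		.fragment_core_mask = shader_present,
		.tiler_core_mask = tiler_present,
		.vm_id = vm_id,
	};

	if (ioctl(fd, DRM_IOCTL_PANTHOR_GROUP_CREATE, &req))
		return -1;
	*group_handle = req.group_handle;
	return 0;
}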
/**
* struct drm_panthor_group_destroy - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_DESTROY
*/
struct drm_panthor_group_destroy {
/** @group_handle: Group to destroy */
__u32 group_handle;
/** @pad: Padding field, MBZ. */
__u32 pad;
};
/**
* struct drm_panthor_queue_submit - Job submission arguments.
*
* This describes the userspace command stream to call from the kernel
* command stream ring-buffer. Queue submission is always part of a group
* submission, taking one or more jobs to submit to the underlying queues.
*/
struct drm_panthor_queue_submit {
/** @queue_index: Index of the queue inside a group. */
__u32 queue_index;
/**
* @stream_size: Size of the command stream to execute.
*
* Must be 64-bit/8-byte aligned (the size of a CS instruction)
*
* Can be zero if stream_addr is zero too.
*
* When the stream size is zero, the queue submit serves as a
* synchronization point.
*/
__u32 stream_size;
/**
* @stream_addr: GPU address of the command stream to execute.
*
* Must be 64-byte aligned.
*
* Can be zero if stream_size is zero too.
*/
__u64 stream_addr;
/**
* @latest_flush: FLUSH_ID read at the time the stream was built.
*
* This allows cache flush elimination for the automatic
* flush+invalidate(all) done at submission time, which is needed to
* ensure the GPU doesn't get garbage when reading the indirect command
* stream buffers. If you want the cache flush to happen
* unconditionally, pass a zero here.
*
* Ignored when stream_size is zero.
*/
__u32 latest_flush;
/** @pad: MBZ. */
__u32 pad;
/** @syncs: Array of struct drm_panthor_sync_op sync operations. */
struct drm_panthor_obj_array syncs;
};
/**
* struct drm_panthor_group_submit - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_SUBMIT
*/
struct drm_panthor_group_submit {
/** @group_handle: Handle of the group to queue jobs to. */
__u32 group_handle;
/** @pad: MBZ. */
__u32 pad;
/** @queue_submits: Array of drm_panthor_queue_submit objects. */
struct drm_panthor_obj_array queue_submits;
};
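/*
 * A usage sketch (not part of the UAPI): submitting one userspace command
 * stream to queue 0 of a group. DRM_IOCTL_PANTHOR_GROUP_SUBMIT and the
 * drm_panthor_obj_array layout are assumed from earlier in this header.
 */
#include <stdint.h>
#include <sys/ioctl.h>

static int panthor_submit_one(int fd, __u32 group_handle, __u64 stream_addr,
			      __u32 stream_size, __u32 latest_flush)
{
	struct drm_panthor_queue_submit qsubmit = {
		.queue_index = 0,
		.stream_size = stream_size,   /* multiple of 8 bytes */
		.stream_addr = stream_addr,   /* 64-byte aligned GPU VA */
		.latest_flush = latest_flush, /* 0: always flush+invalidate */
	};
	struct drm_panthor_group_submit req = {
		.group_handle = group_handle,
		.queue_submits = {
			.stride = sizeof(qsubmit),
			.count = 1,
			.array = (__u64)(uintptr_t)&qsubmit,
		},
	};

	return ioctl(fd, DRM_IOCTL_PANTHOR_GROUP_SUBMIT, &req);
}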
/**
* enum drm_panthor_group_state_flags - Group state flags
*/
enum drm_panthor_group_state_flags {
/**
* @DRM_PANTHOR_GROUP_STATE_TIMEDOUT: Group had unfinished jobs.
*
* When a group ends up with this flag set, no jobs can be submitted to its queues.
*/
DRM_PANTHOR_GROUP_STATE_TIMEDOUT = 1 << 0,
/**
* @DRM_PANTHOR_GROUP_STATE_FATAL_FAULT: Group had fatal faults.
*
* When a group ends up with this flag set, no jobs can be submitted to its queues.
*/
DRM_PANTHOR_GROUP_STATE_FATAL_FAULT = 1 << 1,
};
/**
* struct drm_panthor_group_get_state - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_GET_STATE
*
* Used to query the state of a group and decide whether a new group should be created to
* replace it.
*/
struct drm_panthor_group_get_state {
/** @group_handle: Handle of the group to query state on */
__u32 group_handle;
/**
* @state: Combination of DRM_PANTHOR_GROUP_STATE_* flags encoding the
* group state.
*/
__u32 state;
/** @fatal_queues: Bitmask of queues that faced fatal faults. */
__u32 fatal_queues;
/** @pad: MBZ */
__u32 pad;
};
/**
* struct drm_panthor_tiler_heap_create - Arguments passed to DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE
*/
struct drm_panthor_tiler_heap_create {
/** @vm_id: VM ID the tiler heap should be mapped to */
__u32 vm_id;
/** @initial_chunk_count: Initial number of chunks to allocate. Must be at least one. */
__u32 initial_chunk_count;
/**
* @chunk_size: Chunk size.
*
* Must be page-aligned and lie in the [128k:8M] range.
*/
__u32 chunk_size;
/**
* @max_chunks: Maximum number of chunks that can be allocated.
*
* Must be at least @initial_chunk_count.
*/
__u32 max_chunks;
/**
* @target_in_flight: Maximum number of in-flight render passes.
*
* If the heap has more than @target_in_flight tiler jobs in-flight, the FW will wait for render
* passes to finish before queuing new tiler jobs.
*/
__u32 target_in_flight;
/** @handle: Returned heap handle. Passed back to DESTROY_TILER_HEAP. */
__u32 handle;
/** @tiler_heap_ctx_gpu_va: Returned heap context GPU virtual address. */
__u64 tiler_heap_ctx_gpu_va;
/**
* @first_heap_chunk_gpu_va: First heap chunk.
*
* The tiler heap is formed of heap chunks forming a singly-linked list. This
* is the first element in the list.
*/
__u64 first_heap_chunk_gpu_va;
};
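/*
 * A usage sketch (not part of the UAPI): creating a growable tiler heap.
 * DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE is assumed from earlier in this
 * header; the chunk counts and sizes below are illustrative, only bound by
 * the constraints documented above.
 */
#include <sys/ioctl.h>

static int panthor_tiler_heap_create(int fd, __u32 vm_id, __u32 *handle,
				     __u64 *heap_ctx_gpu_va)
{
	struct drm_panthor_tiler_heap_create req = {
		.vm_id = vm_id,
		.initial_chunk_count = 1,
		.chunk_size = 2 * 1024 * 1024, /* page-aligned, in [128k:8M] */
		.max_chunks = 64,
		.target_in_flight = 65535,
	};

	if (ioctl(fd, DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE, &req))
		return -1;
	*handle = req.handle;
	*heap_ctx_gpu_va = req.tiler_heap_ctx_gpu_va;
	return 0;
}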
/**
* struct drm_panthor_tiler_heap_destroy - Arguments passed to DRM_IOCTL_PANTHOR_TILER_HEAP_DESTROY
*/
struct drm_panthor_tiler_heap_destroy {
/**
* @handle: Handle of the tiler heap to destroy.
*
* Must be a valid heap handle returned by DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE.
*/
__u32 handle;
/** @pad: Padding field, MBZ. */
__u32 pad;
};
#if defined(__cplusplus)
}
#endif
#endif /* _PANTHOR_DRM_H_ */

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,399 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
*
* Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef QAIC_ACCEL_H_
#define QAIC_ACCEL_H_
#include "drm.h"
#if defined(__cplusplus)
extern "C" {
#endif
/* The length (4K) includes the len and count fields of qaic_manage_msg */
#define QAIC_MANAGE_MAX_MSG_LENGTH SZ_4K
/* semaphore flags */
#define QAIC_SEM_INSYNCFENCE 2
#define QAIC_SEM_OUTSYNCFENCE 1
/* Semaphore commands */
#define QAIC_SEM_NOP 0
#define QAIC_SEM_INIT 1
#define QAIC_SEM_INC 2
#define QAIC_SEM_DEC 3
#define QAIC_SEM_WAIT_EQUAL 4
#define QAIC_SEM_WAIT_GT_EQ 5 /* Greater than or equal */
#define QAIC_SEM_WAIT_GT_0 6 /* Greater than 0 */
#define QAIC_TRANS_UNDEFINED 0
#define QAIC_TRANS_PASSTHROUGH_FROM_USR 1
#define QAIC_TRANS_PASSTHROUGH_TO_USR 2
#define QAIC_TRANS_PASSTHROUGH_FROM_DEV 3
#define QAIC_TRANS_PASSTHROUGH_TO_DEV 4
#define QAIC_TRANS_DMA_XFER_FROM_USR 5
#define QAIC_TRANS_DMA_XFER_TO_DEV 6
#define QAIC_TRANS_ACTIVATE_FROM_USR 7
#define QAIC_TRANS_ACTIVATE_FROM_DEV 8
#define QAIC_TRANS_ACTIVATE_TO_DEV 9
#define QAIC_TRANS_DEACTIVATE_FROM_USR 10
#define QAIC_TRANS_DEACTIVATE_FROM_DEV 11
#define QAIC_TRANS_STATUS_FROM_USR 12
#define QAIC_TRANS_STATUS_TO_USR 13
#define QAIC_TRANS_STATUS_FROM_DEV 14
#define QAIC_TRANS_STATUS_TO_DEV 15
#define QAIC_TRANS_TERMINATE_FROM_DEV 16
#define QAIC_TRANS_TERMINATE_TO_DEV 17
#define QAIC_TRANS_DMA_XFER_CONT 18
#define QAIC_TRANS_VALIDATE_PARTITION_FROM_DEV 19
#define QAIC_TRANS_VALIDATE_PARTITION_TO_DEV 20
/**
* struct qaic_manage_trans_hdr - Header for a transaction in a manage message.
* @type: In. Identifies this transaction. See QAIC_TRANS_* defines.
* @len: In. Length of this transaction, including this header.
*/
struct qaic_manage_trans_hdr {
__u32 type;
__u32 len;
};
/**
* struct qaic_manage_trans_passthrough - Defines a passthrough transaction.
* @hdr: In. Header to identify this transaction.
* @data: In. Payload of this transaction. Opaque to the driver. Userspace must
* encode in little endian and align/pad to 64-bit.
*/
struct qaic_manage_trans_passthrough {
struct qaic_manage_trans_hdr hdr;
__u8 data[];
};
/**
* struct qaic_manage_trans_dma_xfer - Defines a DMA transfer transaction.
* @hdr: In. Header to identify this transaction.
* @tag: In. Identifies this transfer in other transactions. Opaque to the
* driver.
* @pad: Structure padding.
* @addr: In. Address of the data to DMA to the device.
* @size: In. Length of the data to DMA to the device.
*/
struct qaic_manage_trans_dma_xfer {
struct qaic_manage_trans_hdr hdr;
__u32 tag;
__u32 pad;
__u64 addr;
__u64 size;
};
/**
* struct qaic_manage_trans_activate_to_dev - Defines an activate request.
* @hdr: In. Header to identify this transaction.
* @queue_size: In. Number of elements for DBC request and response queues.
* @eventfd: Unused.
* @options: In. Device specific options for this activate.
* @pad: Structure padding. Must be 0.
*/
struct qaic_manage_trans_activate_to_dev {
struct qaic_manage_trans_hdr hdr;
__u32 queue_size;
__u32 eventfd;
__u32 options;
__u32 pad;
};
/**
* struct qaic_manage_trans_activate_from_dev - Defines an activate response.
* @hdr: Out. Header to identify this transaction.
* @status: Out. Return code of the request from the device.
* @dbc_id: Out. Id of the assigned DBC for successful request.
* @options: Out. Device specific options for this activate.
*/
struct qaic_manage_trans_activate_from_dev {
struct qaic_manage_trans_hdr hdr;
__u32 status;
__u32 dbc_id;
__u64 options;
};
/**
* struct qaic_manage_trans_deactivate - Defines a deactivate request.
* @hdr: In. Header to identify this transaction.
* @dbc_id: In. Id of assigned DBC.
* @pad: Structure padding. Must be 0.
*/
struct qaic_manage_trans_deactivate {
struct qaic_manage_trans_hdr hdr;
__u32 dbc_id;
__u32 pad;
};
/**
* struct qaic_manage_trans_status_to_dev - Defines a status request.
* @hdr: In. Header to identify this transaction.
*/
struct qaic_manage_trans_status_to_dev {
struct qaic_manage_trans_hdr hdr;
};
/**
* struct qaic_manage_trans_status_from_dev - Defines a status response.
* @hdr: Out. Header to identify this transaction.
* @major: Out. NNC protocol version major number.
* @minor: Out. NNC protocol version minor number.
* @status: Out. Return code from device.
* @status_flags: Out. Flags from device. Bit 0 indicates if CRCs are required.
*/
struct qaic_manage_trans_status_from_dev {
struct qaic_manage_trans_hdr hdr;
__u16 major;
__u16 minor;
__u32 status;
__u64 status_flags;
};
/**
* struct qaic_manage_msg - Defines a message to the device.
* @len: In. Length of all the transactions contained within this message.
* @count: In. Number of transactions in this message.
* @data: In. Address to an array where the transactions can be found.
*/
struct qaic_manage_msg {
__u32 len;
__u32 count;
__u64 data;
};
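/*
 * A usage sketch (not part of the UAPI): building a manage message that
 * carries a single status request transaction. It assumes
 * QAIC_TRANS_STATUS_FROM_USR is the transaction type used for a
 * userspace-initiated status query and that DRM_IOCTL_QAIC_MANAGE (defined
 * later in this file) is the entry point; @len covers the transactions only,
 * within the overall QAIC_MANAGE_MAX_MSG_LENGTH budget.
 */
#include <stdint.h>
#include <sys/ioctl.h>

static int qaic_query_status(int fd)
{
	struct qaic_manage_trans_status_to_dev status = {
		.hdr = {
			.type = QAIC_TRANS_STATUS_FROM_USR,
			.len = sizeof(status),
		},
	};
	struct qaic_manage_msg msg = {
		.len = sizeof(status), /* total length of all transactions */
		.count = 1,
		.data = (__u64)(uintptr_t)&status,
	};

	return ioctl(fd, DRM_IOCTL_QAIC_MANAGE, &msg);
}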
/**
* struct qaic_create_bo - Defines a request to create a buffer object.
* @size: In. Size of the buffer in bytes.
* @handle: Out. GEM handle for the BO.
* @pad: Structure padding. Must be 0.
*/
struct qaic_create_bo {
__u64 size;
__u32 handle;
__u32 pad;
};
/**
* struct qaic_mmap_bo - Defines a request to prepare a BO for mmap().
* @handle: In. Handle of the GEM BO to prepare for mmap().
* @pad: Structure padding. Must be 0.
* @offset: Out. Offset value to provide to mmap().
*/
struct qaic_mmap_bo {
__u32 handle;
__u32 pad;
__u64 offset;
};
/**
* struct qaic_sem - Defines a semaphore command for a BO slice.
* @val: In. Only lower 12 bits are valid.
* @index: In. Only lower 5 bits are valid.
* @presync: In. 1 if presync operation, 0 if postsync.
* @cmd: In. One of QAIC_SEM_*.
* @flags: In. Bitfield. See QAIC_SEM_INSYNCFENCE and QAIC_SEM_OUTSYNCFENCE
* @pad: Structure padding. Must be 0.
*/
struct qaic_sem {
__u16 val;
__u8 index;
__u8 presync;
__u8 cmd;
__u8 flags;
__u16 pad;
};
/**
* struct qaic_attach_slice_entry - Defines a single BO slice.
* @size: In. Size of this slice in bytes.
* @sem0: In. Semaphore command 0. Must be 0 if not valid.
* @sem1: In. Semaphore command 1. Must be 0 if not valid.
* @sem2: In. Semaphore command 2. Must be 0 if not valid.
* @sem3: In. Semaphore command 3. Must be 0 if not valid.
* @dev_addr: In. Device address this slice pushes to or pulls from.
* @db_addr: In. Address of the doorbell to ring.
* @db_data: In. Data to write to the doorbell.
* @db_len: In. Size of the doorbell data in bits - 32, 16, or 8. 0 is for
* inactive doorbells.
* @offset: In. Start of this slice as an offset from the start of the BO.
*/
struct qaic_attach_slice_entry {
__u64 size;
struct qaic_sem sem0;
struct qaic_sem sem1;
struct qaic_sem sem2;
struct qaic_sem sem3;
__u64 dev_addr;
__u64 db_addr;
__u32 db_data;
__u32 db_len;
__u64 offset;
};
/**
* struct qaic_attach_slice_hdr - Defines metadata for a set of BO slices.
* @count: In. Number of slices for this BO.
* @dbc_id: In. Associate the sliced BO with this DBC.
* @handle: In. GEM handle of the BO to slice.
* @dir: In. Direction of data flow. 1 = DMA_TO_DEVICE, 2 = DMA_FROM_DEVICE
* @size: Deprecated. This value is ignored and size of @handle is used instead.
*/
struct qaic_attach_slice_hdr {
__u32 count;
__u32 dbc_id;
__u32 handle;
__u32 dir;
__u64 size;
};
/**
* struct qaic_attach_slice - Defines a set of BO slices.
* @hdr: In. Metadata of the set of slices.
* @data: In. Pointer to an array containing the slice definitions.
*/
struct qaic_attach_slice {
struct qaic_attach_slice_hdr hdr;
__u64 data;
};
/**
* struct qaic_execute_entry - Defines a BO to submit to the device.
* @handle: In. GEM handle of the BO to commit to the device.
* @dir: In. Direction of data. 1 = to device, 2 = from device.
*/
struct qaic_execute_entry {
__u32 handle;
__u32 dir;
};
/**
* struct qaic_partial_execute_entry - Defines a BO to resize and submit.
* @handle: In. GEM handle of the BO to commit to the device.
* @dir: In. Direction of data. 1 = to device, 2 = from device.
* @resize: In. New size of the BO. Must be <= the original BO size.
* A @resize of 0 is interpreted as no DMA transfer being
* involved.
*/
struct qaic_partial_execute_entry {
__u32 handle;
__u32 dir;
__u64 resize;
};
/**
* struct qaic_execute_hdr - Defines metadata for BO submission.
* @count: In. Number of BOs to submit.
* @dbc_id: In. DBC to submit the BOs on.
*/
struct qaic_execute_hdr {
__u32 count;
__u32 dbc_id;
};
/**
* struct qaic_execute - Defines a list of BOs to submit to the device.
* @hdr: In. BO list metadata.
* @data: In. Pointer to an array of BOs to submit.
*/
struct qaic_execute {
struct qaic_execute_hdr hdr;
__u64 data;
};
/**
* struct qaic_wait - Defines a blocking wait for BO execution.
* @handle: In. GEM handle of the BO to wait on.
* @timeout: In. Maximum time in ms to wait for the BO.
* @dbc_id: In. DBC the BO is submitted to.
* @pad: Structure padding. Must be 0.
*/
struct qaic_wait {
__u32 handle;
__u32 timeout;
__u32 dbc_id;
__u32 pad;
};
/**
* struct qaic_perf_stats_hdr - Defines metadata for getting BO perf info.
* @count: In. Number of BOs requested.
* @pad: Structure padding. Must be 0.
* @dbc_id: In. DBC the BOs are associated with.
*/
struct qaic_perf_stats_hdr {
__u16 count;
__u16 pad;
__u32 dbc_id;
};
/**
* struct qaic_perf_stats - Defines a request for getting BO perf info.
* @hdr: In. Request metadata
* @data: In. Pointer to array of stats structures that will receive the data.
*/
struct qaic_perf_stats {
struct qaic_perf_stats_hdr hdr;
__u64 data;
};
/**
* struct qaic_perf_stats_entry - Defines a BO perf info.
* @handle: In. GEM handle of the BO to get perf stats for.
* @queue_level_before: Out. Number of elements in the queue before this BO
* was submitted.
* @num_queue_element: Out. Number of elements added to the queue to submit
* this BO.
* @submit_latency_us: Out. Time taken by the driver to submit this BO.
* @device_latency_us: Out. Time taken by the device to execute this BO.
* @pad: Structure padding. Must be 0.
*/
struct qaic_perf_stats_entry {
__u32 handle;
__u32 queue_level_before;
__u32 num_queue_element;
__u32 submit_latency_us;
__u32 device_latency_us;
__u32 pad;
};
/**
* struct qaic_detach_slice - Detaches slicing configuration from BO.
* @handle: In. GEM handle of the BO to detach slicing configuration.
* @pad: Structure padding. Must be 0.
*/
struct qaic_detach_slice {
__u32 handle;
__u32 pad;
};
#define DRM_QAIC_MANAGE 0x00
#define DRM_QAIC_CREATE_BO 0x01
#define DRM_QAIC_MMAP_BO 0x02
#define DRM_QAIC_ATTACH_SLICE_BO 0x03
#define DRM_QAIC_EXECUTE_BO 0x04
#define DRM_QAIC_PARTIAL_EXECUTE_BO 0x05
#define DRM_QAIC_WAIT_BO 0x06
#define DRM_QAIC_PERF_STATS_BO 0x07
#define DRM_QAIC_DETACH_SLICE_BO 0x08
#define DRM_IOCTL_QAIC_MANAGE DRM_IOWR(DRM_COMMAND_BASE + DRM_QAIC_MANAGE, struct qaic_manage_msg)
#define DRM_IOCTL_QAIC_CREATE_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_QAIC_CREATE_BO, struct qaic_create_bo)
#define DRM_IOCTL_QAIC_MMAP_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_QAIC_MMAP_BO, struct qaic_mmap_bo)
#define DRM_IOCTL_QAIC_ATTACH_SLICE_BO DRM_IOW(DRM_COMMAND_BASE + DRM_QAIC_ATTACH_SLICE_BO, struct qaic_attach_slice)
#define DRM_IOCTL_QAIC_EXECUTE_BO DRM_IOW(DRM_COMMAND_BASE + DRM_QAIC_EXECUTE_BO, struct qaic_execute)
#define DRM_IOCTL_QAIC_PARTIAL_EXECUTE_BO DRM_IOW(DRM_COMMAND_BASE + DRM_QAIC_PARTIAL_EXECUTE_BO, struct qaic_execute)
#define DRM_IOCTL_QAIC_WAIT_BO DRM_IOW(DRM_COMMAND_BASE + DRM_QAIC_WAIT_BO, struct qaic_wait)
#define DRM_IOCTL_QAIC_PERF_STATS_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_QAIC_PERF_STATS_BO, struct qaic_perf_stats)
#define DRM_IOCTL_QAIC_DETACH_SLICE_BO DRM_IOW(DRM_COMMAND_BASE + DRM_QAIC_DETACH_SLICE_BO, struct qaic_detach_slice)
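/*
 * A usage sketch (not part of the UAPI): queueing one already-sliced BO on a
 * DBC and blocking until it completes, using the ioctls defined just above.
 * The BO is assumed to have been created, mmap'd and attached to @dbc_id via
 * DRM_IOCTL_QAIC_ATTACH_SLICE_BO beforehand; the timeout value is arbitrary.
 */
#include <stdint.h>
#include <sys/ioctl.h>

static int qaic_execute_and_wait(int fd, __u32 dbc_id, __u32 bo_handle)
{
	struct qaic_execute_entry entry = {
		.handle = bo_handle,
		.dir = 1, /* 1 = to device, 2 = from device */
	};
	struct qaic_execute exec = {
		.hdr = { .count = 1, .dbc_id = dbc_id },
		.data = (__u64)(uintptr_t)&entry,
	};
	struct qaic_wait wait = {
		.handle = bo_handle,
		.timeout = 1000, /* ms */
		.dbc_id = dbc_id,
	};

	if (ioctl(fd, DRM_IOCTL_QAIC_EXECUTE_BO, &exec))
		return -1;
	return ioctl(fd, DRM_IOCTL_QAIC_WAIT_BO, &wait);
}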
#if defined(__cplusplus)
}
#endif
#endif /* QAIC_ACCEL_H_ */

View File

@ -41,6 +41,7 @@ extern "C" {
#define DRM_V3D_PERFMON_CREATE 0x08
#define DRM_V3D_PERFMON_DESTROY 0x09
#define DRM_V3D_PERFMON_GET_VALUES 0x0a
#define DRM_V3D_SUBMIT_CPU 0x0b
#define DRM_IOCTL_V3D_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CL, struct drm_v3d_submit_cl)
#define DRM_IOCTL_V3D_WAIT_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_WAIT_BO, struct drm_v3d_wait_bo)
@ -56,6 +57,7 @@ extern "C" {
struct drm_v3d_perfmon_destroy)
#define DRM_IOCTL_V3D_PERFMON_GET_VALUES DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_PERFMON_GET_VALUES, \
struct drm_v3d_perfmon_get_values)
#define DRM_IOCTL_V3D_SUBMIT_CPU DRM_IOW(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CPU, struct drm_v3d_submit_cpu)
#define DRM_V3D_SUBMIT_CL_FLUSH_CACHE 0x01
#define DRM_V3D_SUBMIT_EXTENSION 0x02
@ -69,7 +71,13 @@ extern "C" {
struct drm_v3d_extension {
__u64 next;
__u32 id;
#define DRM_V3D_EXT_ID_MULTI_SYNC 0x01
#define DRM_V3D_EXT_ID_MULTI_SYNC 0x01
#define DRM_V3D_EXT_ID_CPU_INDIRECT_CSD 0x02
#define DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY 0x03
#define DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY 0x04
#define DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY 0x05
#define DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY 0x06
#define DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY 0x07
__u32 flags; /* mbz */
};
@ -93,6 +101,7 @@ enum v3d_queue {
V3D_TFU,
V3D_CSD,
V3D_CACHE_CLEAN,
V3D_CPU,
};
/**
@ -276,6 +285,7 @@ enum drm_v3d_param {
DRM_V3D_PARAM_SUPPORTS_CACHE_FLUSH,
DRM_V3D_PARAM_SUPPORTS_PERFMON,
DRM_V3D_PARAM_SUPPORTS_MULTISYNC_EXT,
DRM_V3D_PARAM_SUPPORTS_CPU_QUEUE,
};
struct drm_v3d_get_param {
@ -319,6 +329,11 @@ struct drm_v3d_submit_tfu {
/* Pointer to an array of ioctl extensions */
__u64 extensions;
struct {
__u32 ioc;
__u32 pad;
} v71;
};
/* Submits a compute shader for dispatch. This job will block on any
@ -356,6 +371,234 @@ struct drm_v3d_submit_csd {
__u32 pad;
};
/**
* struct drm_v3d_indirect_csd - ioctl extension for the CPU job to create an
* indirect CSD
*
* When an extension of DRM_V3D_EXT_ID_CPU_INDIRECT_CSD id is defined, it
* points to this extension to define an indirect CSD submission. It creates a
* CPU job linked to a CSD job. The CPU job waits for the indirect CSD
* dependencies and, once they are signaled, it updates the CSD job config
* before allowing the CSD job execution.
*/
struct drm_v3d_indirect_csd {
struct drm_v3d_extension base;
/* Indirect CSD */
struct drm_v3d_submit_csd submit;
/* Handle of the indirect BO, that should be also attached to the
* indirect CSD.
*/
__u32 indirect;
/* Offset within the BO where the workgroup counts are stored */
__u32 offset;
/* Workgroups size */
__u32 wg_size;
/* Indices of the uniforms with the workgroup dispatch counts
* in the uniform stream. If the uniform rewrite is not needed,
* the offset must be 0xffffffff.
*/
__u32 wg_uniform_offsets[3];
};
/**
* struct drm_v3d_timestamp_query - ioctl extension for the CPU job to calculate
* a timestamp query
*
* When an extension DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY is defined, it points to
* this extension to define a timestamp query submission. This CPU job will
* calculate the timestamp query and update the query value within the
* timestamp BO. Moreover, it will signal the timestamp syncobj to indicate
* query availability.
*/
struct drm_v3d_timestamp_query {
struct drm_v3d_extension base;
/* Array of queries' offsets within the timestamp BO for their value */
__u64 offsets;
/* Array of timestamp syncobjs to indicate their availability */
__u64 syncs;
/* Number of queries */
__u32 count;
/* mbz */
__u32 pad;
};
/**
* struct drm_v3d_reset_timestamp_query - ioctl extension for the CPU job to
* reset timestamp queries
*
* When an extension DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY is defined, it
* points to this extension to define a reset timestamp submission. This CPU
* job will reset the timestamp queries based on value offset of the first
* query. Moreover, it will reset the timestamp syncobj to reset query
* availability.
*/
struct drm_v3d_reset_timestamp_query {
struct drm_v3d_extension base;
/* Array of timestamp syncobjs to indicate their availability */
__u64 syncs;
/* Offset of the first query within the timestamp BO for its value */
__u32 offset;
/* Number of queries */
__u32 count;
};
/**
* struct drm_v3d_copy_timestamp_query - ioctl extension for the CPU job to copy
* query results to a buffer
*
* When an extension DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY is defined, it
* points to this extension to define a copy timestamp query submission. This
* CPU job will copy the timestamp queries results to a BO with the offset
* and stride defined in the extension.
*/
struct drm_v3d_copy_timestamp_query {
struct drm_v3d_extension base;
/* Define if should write to buffer using 64 or 32 bits */
__u8 do_64bit;
/* Define if it can write to buffer even if the query is not available */
__u8 do_partial;
/* Define if it should write availability bit to buffer */
__u8 availability_bit;
/* mbz */
__u8 pad;
/* Offset of the buffer in the BO */
__u32 offset;
/* Stride of the buffer in the BO */
__u32 stride;
/* Number of queries */
__u32 count;
/* Array of queries' offsets within the timestamp BO for their value */
__u64 offsets;
/* Array of timestamp syncobjs to indicate their availability */
__u64 syncs;
};
/**
* struct drm_v3d_reset_performance_query - ioctl extension for the CPU job to
* reset performance queries
*
* When an extension DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY is defined, it
* points to this extension to define a reset performance submission. This CPU
* job will reset the performance queries by resetting the values of the
* performance monitors. Moreover, it will reset the syncobj to reset query
* availability.
*/
struct drm_v3d_reset_performance_query {
struct drm_v3d_extension base;
/* Array of performance queries' syncobjs to indicate their availability */
__u64 syncs;
/* Number of queries */
__u32 count;
/* Number of performance monitors */
__u32 nperfmons;
/* Array of u64 user-pointers that point to an array of kperfmon_ids */
__u64 kperfmon_ids;
};
/**
* struct drm_v3d_copy_performance_query - ioctl extension for the CPU job to copy
* performance query results to a buffer
*
* When an extension DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY is defined, it
* points to this extension to define a copy performance query submission. This
* CPU job will copy the performance queries results to a BO with the offset
* and stride defined in the extension.
*/
struct drm_v3d_copy_performance_query {
struct drm_v3d_extension base;
/* Define if should write to buffer using 64 or 32 bits */
__u8 do_64bit;
/* Define if it can write to buffer even if the query is not available */
__u8 do_partial;
/* Define if it should write availability bit to buffer */
__u8 availability_bit;
/* mbz */
__u8 pad;
/* Offset of the buffer in the BO */
__u32 offset;
/* Stride of the buffer in the BO */
__u32 stride;
/* Number of performance monitors */
__u32 nperfmons;
/* Number of performance counters related to this query pool */
__u32 ncounters;
/* Number of queries */
__u32 count;
/* Array of performance queries' syncobjs to indicate their availability */
__u64 syncs;
/* Array of u64 user-pointers that point to an array of kperfmon_ids */
__u64 kperfmon_ids;
};
struct drm_v3d_submit_cpu {
/* Pointer to a u32 array of the BOs that are referenced by the job.
*
* For DRM_V3D_EXT_ID_CPU_INDIRECT_CSD, it must contain only one BO,
* that contains the workgroup counts.
*
* For DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY, it must contain only one BO,
* that will contain the timestamp.
*
* For DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY, it must contain only
* one BO, that contains the timestamp.
*
* For DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY, it must contain two
* BOs. The first is the BO where the timestamp queries will be written
* to. The second is the BO that contains the timestamp.
*
* For DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY, it must contain no
* BOs.
*
* For DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY, it must contain one
* BO, where the performance queries will be written.
*/
__u64 bo_handles;
/* Number of BO handles passed in (size is that times 4). */
__u32 bo_handle_count;
__u32 flags;
/* Pointer to an array of ioctl extensions */
__u64 extensions;
};
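/*
 * A usage sketch (not part of the UAPI): a CPU job writing one timestamp
 * query, chaining a DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY extension into
 * drm_v3d_submit_cpu and submitting it with DRM_IOCTL_V3D_SUBMIT_CPU from
 * above. The @offsets and @syncs arrays are assumed to hold 32-bit entries
 * (a query offset and a syncobj handle respectively); treat this as an
 * illustration of the extension chaining, not a reference implementation.
 */
#include <stdint.h>
#include <sys/ioctl.h>

static int v3d_submit_timestamp_query(int fd, __u32 bo_handle,
				      __u32 value_offset, __u32 syncobj)
{
	__u32 offsets[1] = { value_offset };
	__u32 syncs[1] = { syncobj };
	struct drm_v3d_timestamp_query query = {
		.base = {
			.next = 0, /* last extension in the chain */
			.id = DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY,
			.flags = 0,
		},
		.offsets = (__u64)(uintptr_t)offsets,
		.syncs = (__u64)(uintptr_t)syncs,
		.count = 1,
	};
	struct drm_v3d_submit_cpu submit = {
		.bo_handles = (__u64)(uintptr_t)&bo_handle,
		.bo_handle_count = 1, /* the BO receiving the timestamp */
		.flags = DRM_V3D_SUBMIT_EXTENSION,
		.extensions = (__u64)(uintptr_t)&query,
	};

	return ioctl(fd, DRM_IOCTL_V3D_SUBMIT_CPU, &submit);
}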
enum {
V3D_PERFCNT_FEP_VALID_PRIMTS_NO_PIXELS,
V3D_PERFCNT_FEP_VALID_PRIMS,

View File

@ -64,6 +64,16 @@ struct drm_virtgpu_map {
__u32 pad;
};
#define VIRTGPU_EXECBUF_SYNCOBJ_RESET 0x01
#define VIRTGPU_EXECBUF_SYNCOBJ_FLAGS ( \
VIRTGPU_EXECBUF_SYNCOBJ_RESET | \
0)
struct drm_virtgpu_execbuffer_syncobj {
__u32 handle;
__u32 flags;
__u64 point;
};
/* fence_fd is modified on success if VIRTGPU_EXECBUF_FENCE_FD_OUT flag is set. */
struct drm_virtgpu_execbuffer {
__u32 flags;
@ -73,7 +83,11 @@ struct drm_virtgpu_execbuffer {
__u32 num_bo_handles;
__s32 fence_fd; /* in/out fence fd (see VIRTGPU_EXECBUF_FENCE_FD_IN/OUT) */
__u32 ring_idx; /* command ring index (see VIRTGPU_EXECBUF_RING_IDX) */
__u32 pad;
__u32 syncobj_stride; /* size of @drm_virtgpu_execbuffer_syncobj */
__u32 num_in_syncobjs;
__u32 num_out_syncobjs;
__u64 in_syncobjs;
__u64 out_syncobjs;
};
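/*
 * A usage sketch (not part of the UAPI): attaching in/out syncobj arrays to
 * an execbuffer that has otherwise been filled in by the caller (command
 * buffer, BO handles, ...). Only the fields introduced above are touched;
 * the ioctl itself and syncobj creation are assumed to be handled elsewhere.
 */
#include <stdint.h>

static void virtgpu_attach_syncobjs(struct drm_virtgpu_execbuffer *exec,
				    struct drm_virtgpu_execbuffer_syncobj *in,
				    __u32 num_in,
				    struct drm_virtgpu_execbuffer_syncobj *out,
				    __u32 num_out)
{
	exec->syncobj_stride = sizeof(*in); /* size of one syncobj entry */
	exec->num_in_syncobjs = num_in;
	exec->in_syncobjs = (__u64)(uintptr_t)in;
	exec->num_out_syncobjs = num_out;
	exec->out_syncobjs = (__u64)(uintptr_t)out;
}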
#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
@ -83,6 +97,7 @@ struct drm_virtgpu_execbuffer {
#define VIRTGPU_PARAM_CROSS_DEVICE 5 /* Cross virtio-device resource sharing */
#define VIRTGPU_PARAM_CONTEXT_INIT 6 /* DRM_VIRTGPU_CONTEXT_INIT */
#define VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs 7 /* Bitmask of supported capability set ids */
#define VIRTGPU_PARAM_EXPLICIT_DEBUG_NAME 8 /* Ability to set debug name from userspace */
struct drm_virtgpu_getparam {
__u64 param;
@ -184,6 +199,7 @@ struct drm_virtgpu_resource_create_blob {
#define VIRTGPU_CONTEXT_PARAM_CAPSET_ID 0x0001
#define VIRTGPU_CONTEXT_PARAM_NUM_RINGS 0x0002
#define VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK 0x0003
#define VIRTGPU_CONTEXT_PARAM_DEBUG_NAME 0x0004
struct drm_virtgpu_context_set_param {
__u64 param;
__u64 value;

View File

@ -1,6 +1,7 @@
/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR MIT */
/**************************************************************************
*
* Copyright © 2009-2022 VMware, Inc., Palo Alto, CA., USA
* Copyright © 2009-2023 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -902,7 +903,8 @@ struct drm_vmw_shader_arg {
/**
* enum drm_vmw_surface_flags
*
* @drm_vmw_surface_flag_shareable: Whether the surface is shareable
* @drm_vmw_surface_flag_shareable: Deprecated - all userspace surfaces are
* shareable.
* @drm_vmw_surface_flag_scanout: Whether the surface is a scanout
* surface.
* @drm_vmw_surface_flag_create_buffer: Create a backup buffer if none is

File diff suppressed because it is too large Load Diff

View File

@ -7,42 +7,42 @@
/* Just the needed definitions for the RDB of an Amiga HD. */
struct RigidDiskBlock {
__u32 rdb_ID;
__be32 rdb_ID;
__be32 rdb_SummedLongs;
__s32 rdb_ChkSum;
__u32 rdb_HostID;
__be32 rdb_ChkSum;
__be32 rdb_HostID;
__be32 rdb_BlockBytes;
__u32 rdb_Flags;
__u32 rdb_BadBlockList;
__be32 rdb_Flags;
__be32 rdb_BadBlockList;
__be32 rdb_PartitionList;
__u32 rdb_FileSysHeaderList;
__u32 rdb_DriveInit;
__u32 rdb_Reserved1[6];
__u32 rdb_Cylinders;
__u32 rdb_Sectors;
__u32 rdb_Heads;
__u32 rdb_Interleave;
__u32 rdb_Park;
__u32 rdb_Reserved2[3];
__u32 rdb_WritePreComp;
__u32 rdb_ReducedWrite;
__u32 rdb_StepRate;
__u32 rdb_Reserved3[5];
__u32 rdb_RDBBlocksLo;
__u32 rdb_RDBBlocksHi;
__u32 rdb_LoCylinder;
__u32 rdb_HiCylinder;
__u32 rdb_CylBlocks;
__u32 rdb_AutoParkSeconds;
__u32 rdb_HighRDSKBlock;
__u32 rdb_Reserved4;
__be32 rdb_FileSysHeaderList;
__be32 rdb_DriveInit;
__be32 rdb_Reserved1[6];
__be32 rdb_Cylinders;
__be32 rdb_Sectors;
__be32 rdb_Heads;
__be32 rdb_Interleave;
__be32 rdb_Park;
__be32 rdb_Reserved2[3];
__be32 rdb_WritePreComp;
__be32 rdb_ReducedWrite;
__be32 rdb_StepRate;
__be32 rdb_Reserved3[5];
__be32 rdb_RDBBlocksLo;
__be32 rdb_RDBBlocksHi;
__be32 rdb_LoCylinder;
__be32 rdb_HiCylinder;
__be32 rdb_CylBlocks;
__be32 rdb_AutoParkSeconds;
__be32 rdb_HighRDSKBlock;
__be32 rdb_Reserved4;
char rdb_DiskVendor[8];
char rdb_DiskProduct[16];
char rdb_DiskRevision[4];
char rdb_ControllerVendor[8];
char rdb_ControllerProduct[16];
char rdb_ControllerRevision[4];
__u32 rdb_Reserved5[10];
__be32 rdb_Reserved5[10];
};
#define IDNAME_RIGIDDISK 0x5244534B /* "RDSK" */
@ -50,16 +50,16 @@ struct RigidDiskBlock {
struct PartitionBlock {
__be32 pb_ID;
__be32 pb_SummedLongs;
__s32 pb_ChkSum;
__u32 pb_HostID;
__be32 pb_ChkSum;
__be32 pb_HostID;
__be32 pb_Next;
__u32 pb_Flags;
__u32 pb_Reserved1[2];
__u32 pb_DevFlags;
__be32 pb_Flags;
__be32 pb_Reserved1[2];
__be32 pb_DevFlags;
__u8 pb_DriveName[32];
__u32 pb_Reserved2[15];
__be32 pb_Reserved2[15];
__be32 pb_Environment[17];
__u32 pb_EReserved[15];
__be32 pb_EReserved[15];
};
#define IDNAME_PARTITION 0x50415254 /* "PART" */

View File

@ -251,20 +251,22 @@ struct binder_extended_error {
__s32 param;
};
#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64)
#define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32)
#define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, __s32)
#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, __s32)
#define BINDER_THREAD_EXIT _IOW('b', 8, __s32)
#define BINDER_VERSION _IOWR('b', 9, struct binder_version)
#define BINDER_GET_NODE_DEBUG_INFO _IOWR('b', 11, struct binder_node_debug_info)
#define BINDER_GET_NODE_INFO_FOR_REF _IOWR('b', 12, struct binder_node_info_for_ref)
#define BINDER_SET_CONTEXT_MGR_EXT _IOW('b', 13, struct flat_binder_object)
#define BINDER_FREEZE _IOW('b', 14, struct binder_freeze_info)
#define BINDER_GET_FROZEN_INFO _IOWR('b', 15, struct binder_frozen_status_info)
#define BINDER_ENABLE_ONEWAY_SPAM_DETECTION _IOW('b', 16, __u32)
#define BINDER_GET_EXTENDED_ERROR _IOWR('b', 17, struct binder_extended_error)
enum {
BINDER_WRITE_READ = _IOWR('b', 1, struct binder_write_read),
BINDER_SET_IDLE_TIMEOUT = _IOW('b', 3, __s64),
BINDER_SET_MAX_THREADS = _IOW('b', 5, __u32),
BINDER_SET_IDLE_PRIORITY = _IOW('b', 6, __s32),
BINDER_SET_CONTEXT_MGR = _IOW('b', 7, __s32),
BINDER_THREAD_EXIT = _IOW('b', 8, __s32),
BINDER_VERSION = _IOWR('b', 9, struct binder_version),
BINDER_GET_NODE_DEBUG_INFO = _IOWR('b', 11, struct binder_node_debug_info),
BINDER_GET_NODE_INFO_FOR_REF = _IOWR('b', 12, struct binder_node_info_for_ref),
BINDER_SET_CONTEXT_MGR_EXT = _IOW('b', 13, struct flat_binder_object),
BINDER_FREEZE = _IOW('b', 14, struct binder_freeze_info),
BINDER_GET_FROZEN_INFO = _IOWR('b', 15, struct binder_frozen_status_info),
BINDER_ENABLE_ONEWAY_SPAM_DETECTION = _IOW('b', 16, __u32),
BINDER_GET_EXTENDED_ERROR = _IOWR('b', 17, struct binder_extended_error),
};
/*
* NOTE: Two special error codes you should check for when calling

View File

@ -101,10 +101,6 @@ struct atm_dev_stats {
/* use backend to make new if */
#define ATM_ADDPARTY _IOW('a', ATMIOC_SPECIAL+4,struct atm_iobuf)
/* add party to p2mp call */
#ifdef CONFIG_COMPAT
/* It actually takes struct sockaddr_atmsvc, not struct atm_iobuf */
#define COMPAT_ATM_ADDPARTY _IOW('a', ATMIOC_SPECIAL+4,struct compat_atm_iobuf)
#endif
#define ATM_DROPPARTY _IOW('a', ATMIOC_SPECIAL+5,int)
/* drop party from p2mp call */

View File

@ -109,7 +109,7 @@ struct autofs_dev_ioctl {
struct args_ismountpoint ismountpoint;
};
char path[0];
char path[];
};
static __inline__ void init_autofs_dev_ioctl(struct autofs_dev_ioctl *in)

View File

@ -32,6 +32,8 @@
#define AT_HWCAP2 26 /* extension of AT_HWCAP */
#define AT_RSEQ_FEATURE_SIZE 27 /* rseq supported feature size */
#define AT_RSEQ_ALIGN 28 /* rseq allocation alignment */
#define AT_HWCAP3 29 /* extension of AT_HWCAP */
#define AT_HWCAP4 30 /* extension of AT_HWCAP */
#define AT_EXECFN 31 /* filename of program */

View File

@ -116,6 +116,9 @@ enum batadv_icmp_packettype {
* only need routable IPv4 multicast packets we signed up for explicitly
* @BATADV_MCAST_WANT_NO_RTR6: we have no IPv6 multicast router and therefore
* only need routable IPv6 multicast packets we signed up for explicitly
* @BATADV_MCAST_HAVE_MC_PTYPE_CAPA: we can parse, receive and forward
* batman-adv multicast packets with a multicast tracker TVLV. And all our
* hard interfaces have an MTU of at least 1280 bytes.
*/
enum batadv_mcast_flags {
BATADV_MCAST_WANT_ALL_UNSNOOPABLES = 1UL << 0,
@ -123,6 +126,7 @@ enum batadv_mcast_flags {
BATADV_MCAST_WANT_ALL_IPV6 = 1UL << 2,
BATADV_MCAST_WANT_NO_RTR4 = 1UL << 3,
BATADV_MCAST_WANT_NO_RTR6 = 1UL << 4,
BATADV_MCAST_HAVE_MC_PTYPE_CAPA = 1UL << 5,
};
/* tt data subtypes */
@ -174,14 +178,16 @@ enum batadv_bla_claimframe {
* @BATADV_TVLV_TT: translation table tvlv
* @BATADV_TVLV_ROAM: roaming advertisement tvlv
* @BATADV_TVLV_MCAST: multicast capability tvlv
* @BATADV_TVLV_MCAST_TRACKER: multicast tracker tvlv
*/
enum batadv_tvlv_type {
BATADV_TVLV_GW = 0x01,
BATADV_TVLV_DAT = 0x02,
BATADV_TVLV_NC = 0x03,
BATADV_TVLV_TT = 0x04,
BATADV_TVLV_ROAM = 0x05,
BATADV_TVLV_MCAST = 0x06,
BATADV_TVLV_GW = 0x01,
BATADV_TVLV_DAT = 0x02,
BATADV_TVLV_NC = 0x03,
BATADV_TVLV_TT = 0x04,
BATADV_TVLV_ROAM = 0x05,
BATADV_TVLV_MCAST = 0x06,
BATADV_TVLV_MCAST_TRACKER = 0x07,
};
#pragma pack(2)
@ -487,6 +493,25 @@ struct batadv_bcast_packet {
*/
};
/**
* struct batadv_mcast_packet - multicast packet for network payload
* @packet_type: batman-adv packet type, part of the general header
* @version: batman-adv protocol version, part of the general header
* @ttl: time to live for this packet, part of the general header
* @reserved: reserved byte for alignment
* @tvlv_len: length of the appended tvlv buffer (in bytes)
*/
struct batadv_mcast_packet {
__u8 packet_type;
__u8 version;
__u8 ttl;
__u8 reserved;
__be16 tvlv_len;
/* "4 bytes boundary + 2 bytes" long to make the payload after the
* following ethernet header again 4 bytes boundary aligned
*/
};
/**
* struct batadv_coded_packet - network coded packet
* @packet_type: batman-adv packet type, part of the general header
@ -628,6 +653,14 @@ struct batadv_tvlv_mcast_data {
__u8 reserved[3];
};
/**
* struct batadv_tvlv_mcast_tracker - payload of a multicast tracker tvlv
* @num_dests: number of subsequent destination originator MAC addresses
*/
struct batadv_tvlv_mcast_tracker {
__be16 num_dests;
};
#pragma pack()
#endif /* _LINUX_BATADV_PACKET_H_ */

View File

@ -0,0 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/* bits.h: Macros for dealing with bitmasks. */
#ifndef _LINUX_BITS_H
#define _LINUX_BITS_H
#define __GENMASK(h, l) \
(((~_UL(0)) - (_UL(1) << (l)) + 1) & \
(~_UL(0) >> (__BITS_PER_LONG - 1 - (h))))
#define __GENMASK_ULL(h, l) \
(((~_ULL(0)) - (_ULL(1) << (l)) + 1) & \
(~_ULL(0) >> (__BITS_PER_LONG_LONG - 1 - (h))))
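/*
 * A worked example of the formula above, assuming __BITS_PER_LONG == 64:
 * __GENMASK(15, 8) evaluates to
 *   ((~0UL - (1UL << 8) + 1) & (~0UL >> (64 - 1 - 15)))
 * = (0xffffffffffffff00 & 0x000000000000ffff)
 * = 0xff00,
 * i.e. a contiguous mask with bits 8..15 (inclusive) set.
 */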
#endif /* _LINUX_BITS_H */

View File

@ -51,13 +51,13 @@ enum blk_zone_type {
*
* The Zone Condition state machine in the ZBC/ZAC standards maps the above
* definitions as:
* - ZC1: Empty | BLK_ZONE_EMPTY
* - ZC1: Empty | BLK_ZONE_COND_EMPTY
* - ZC2: Implicit Open | BLK_ZONE_COND_IMP_OPEN
* - ZC3: Explicit Open | BLK_ZONE_COND_EXP_OPEN
* - ZC4: Closed | BLK_ZONE_CLOSED
* - ZC5: Full | BLK_ZONE_FULL
* - ZC6: Read Only | BLK_ZONE_READONLY
* - ZC7: Offline | BLK_ZONE_OFFLINE
* - ZC4: Closed | BLK_ZONE_COND_CLOSED
* - ZC5: Full | BLK_ZONE_COND_FULL
* - ZC6: Read Only | BLK_ZONE_COND_READONLY
* - ZC7: Offline | BLK_ZONE_COND_OFFLINE
*
* Conditions 0x5 to 0xC are reserved by the current ZBC/ZAC spec and should
* be considered invalid.

View File

@ -19,6 +19,7 @@
/* ld/ldx fields */
#define BPF_DW 0x18 /* double word (64-bit) */
#define BPF_MEMSX 0x80 /* load with sign extension */
#define BPF_ATOMIC 0xc0 /* atomic memory ops - op type in immediate */
#define BPF_XADD 0xc0 /* exclusive add - legacy name */
@ -41,6 +42,7 @@
#define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */
#define BPF_JSLT 0xc0 /* SLT is signed, '<' */
#define BPF_JSLE 0xd0 /* SLE is signed, '<=' */
#define BPF_JCOND 0xe0 /* conditional pseudo jumps: may_goto, goto_or_nop */
#define BPF_CALL 0x80 /* function call */
#define BPF_EXIT 0x90 /* function return */
@ -49,6 +51,10 @@
#define BPF_XCHG (0xe0 | BPF_FETCH) /* atomic exchange */
#define BPF_CMPXCHG (0xf0 | BPF_FETCH) /* atomic compare-and-write */
enum bpf_cond_pseudo_jmp {
BPF_MAY_GOTO = 0,
};
/* Register numbers */
enum {
BPF_REG_0 = 0,
@ -76,12 +82,29 @@ struct bpf_insn {
__s32 imm; /* signed immediate constant */
};
/* Key of a BPF_MAP_TYPE_LPM_TRIE entry */
/* Deprecated: use struct bpf_lpm_trie_key_u8 (when the "data" member is needed for
* byte access) or struct bpf_lpm_trie_key_hdr (when using an alternative type for
* the trailing flexible array member) instead.
*/
struct bpf_lpm_trie_key {
__u32 prefixlen; /* up to 32 for AF_INET, 128 for AF_INET6 */
__u8 data[0]; /* Arbitrary size */
};
/* Header for bpf_lpm_trie_key structs */
struct bpf_lpm_trie_key_hdr {
__u32 prefixlen;
};
/* Key of a BPF_MAP_TYPE_LPM_TRIE entry, with trailing byte array. */
struct bpf_lpm_trie_key_u8 {
union {
struct bpf_lpm_trie_key_hdr hdr;
__u32 prefixlen;
};
__u8 data[]; /* Arbitrary size */
};
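/* A usage sketch (not part of the UAPI): looking up an IPv4 /24 prefix in a
 * BPF_MAP_TYPE_LPM_TRIE map with the non-deprecated key layout above. The
 * libbpf bpf_map_lookup_elem() wrapper and a map created with a key size of
 * sizeof(struct bpf_lpm_trie_key_hdr) + 4 are assumed.
 */
#include <arpa/inet.h>
#include <bpf/bpf.h>

static int lpm_lookup_ipv4(int map_fd, const char *addr, void *value)
{
	struct {
		struct bpf_lpm_trie_key_hdr hdr;
		__u8 data[4]; /* AF_INET address bytes */
	} key;

	key.hdr.prefixlen = 24; /* match on the first 24 bits */
	if (inet_pton(AF_INET, addr, key.data) != 1)
		return -1;
	return bpf_map_lookup_elem(map_fd, &key, value);
}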
struct bpf_cgroup_storage_key {
__u64 cgroup_inode_id; /* cgroup inode id */
__u32 attach_type; /* program attach type (enum bpf_attach_type) */
@ -616,7 +639,11 @@ union bpf_iter_link_info {
* to NULL to begin the batched operation. After each subsequent
* **BPF_MAP_LOOKUP_BATCH**, the caller should pass the resultant
* *out_batch* as the *in_batch* for the next operation to
* continue iteration from the current point.
* continue iteration from the current point. Both *in_batch* and
* *out_batch* must point to memory large enough to hold a key,
* except for maps of type **BPF_MAP_TYPE_{HASH, PERCPU_HASH,
* LRU_HASH, LRU_PERCPU_HASH}**, for which batch parameters
* must be at least 4 bytes wide regardless of key size.
*
* The *keys* and *values* are output parameters which must point
* to memory large enough to hold *count* items based on the key
@ -846,6 +873,36 @@ union bpf_iter_link_info {
* Returns zero on success. On error, -1 is returned and *errno*
* is set appropriately.
*
* BPF_TOKEN_CREATE
* Description
* Create BPF token with embedded information about what
* BPF-related functionality it allows:
* - a set of allowed bpf() syscall commands;
* - a set of allowed BPF map types to be created with
* BPF_MAP_CREATE command, if BPF_MAP_CREATE itself is allowed;
* - a set of allowed BPF program types and BPF program attach
* types to be loaded with BPF_PROG_LOAD command, if
* BPF_PROG_LOAD itself is allowed.
*
* BPF token is created (derived) from an instance of BPF FS,
* assuming it has necessary delegation mount options specified.
* This BPF token can be passed as an extra parameter to various
* bpf() syscall commands to grant BPF subsystem functionality to
* unprivileged processes.
*
* When created, BPF token is "associated" with the owning
* user namespace of BPF FS instance (super block) that it was
* derived from, and subsequent BPF operations performed with
* BPF token would be performing capabilities checks (i.e.,
* CAP_BPF, CAP_PERFMON, CAP_NET_ADMIN, CAP_SYS_ADMIN) within
* that user namespace. Without BPF token, such capabilities
* have to be granted in init user namespace, making bpf()
* syscall incompatible with user namespace, for the most part.
*
* Return
* A new file descriptor (a nonnegative integer), or -1 if an
* error occurred (in which case, *errno* is set appropriately).
*
* NOTES
* eBPF objects (maps and programs) can be shared between processes.
*
@ -900,6 +957,8 @@ enum bpf_cmd {
BPF_ITER_CREATE,
BPF_LINK_DETACH,
BPF_PROG_BIND_MAP,
BPF_TOKEN_CREATE,
__MAX_BPF_CMD,
};
enum bpf_map_type {
@ -931,7 +990,14 @@ enum bpf_map_type {
*/
BPF_MAP_TYPE_CGROUP_STORAGE = BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED,
BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED,
/* BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE is available to bpf programs
* attaching to a cgroup. The new mechanism (BPF_MAP_TYPE_CGRP_STORAGE +
* local percpu kptr) supports all BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE
* functionality and more. So mark BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE
* deprecated.
*/
BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED,
BPF_MAP_TYPE_QUEUE,
BPF_MAP_TYPE_STACK,
BPF_MAP_TYPE_SK_STORAGE,
@ -943,6 +1009,8 @@ enum bpf_map_type {
BPF_MAP_TYPE_BLOOM_FILTER,
BPF_MAP_TYPE_USER_RINGBUF,
BPF_MAP_TYPE_CGRP_STORAGE,
BPF_MAP_TYPE_ARENA,
__MAX_BPF_MAP_TYPE
};
/* Note that tracing related programs such as
@ -986,6 +1054,8 @@ enum bpf_prog_type {
BPF_PROG_TYPE_LSM,
BPF_PROG_TYPE_SK_LOOKUP,
BPF_PROG_TYPE_SYSCALL, /* a program that can execute syscalls */
BPF_PROG_TYPE_NETFILTER,
__MAX_BPF_PROG_TYPE
};
enum bpf_attach_type {
@ -1033,6 +1103,19 @@ enum bpf_attach_type {
BPF_PERF_EVENT,
BPF_TRACE_KPROBE_MULTI,
BPF_LSM_CGROUP,
BPF_STRUCT_OPS,
BPF_NETFILTER,
BPF_TCX_INGRESS,
BPF_TCX_EGRESS,
BPF_TRACE_UPROBE_MULTI,
BPF_CGROUP_UNIX_CONNECT,
BPF_CGROUP_UNIX_SENDMSG,
BPF_CGROUP_UNIX_RECVMSG,
BPF_CGROUP_UNIX_GETPEERNAME,
BPF_CGROUP_UNIX_GETSOCKNAME,
BPF_NETKIT_PRIMARY,
BPF_NETKIT_PEER,
BPF_TRACE_KPROBE_SESSION,
__MAX_BPF_ATTACH_TYPE
};
@ -1049,8 +1132,24 @@ enum bpf_link_type {
BPF_LINK_TYPE_PERF_EVENT = 7,
BPF_LINK_TYPE_KPROBE_MULTI = 8,
BPF_LINK_TYPE_STRUCT_OPS = 9,
BPF_LINK_TYPE_NETFILTER = 10,
BPF_LINK_TYPE_TCX = 11,
BPF_LINK_TYPE_UPROBE_MULTI = 12,
BPF_LINK_TYPE_NETKIT = 13,
BPF_LINK_TYPE_SOCKMAP = 14,
__MAX_BPF_LINK_TYPE,
};
MAX_BPF_LINK_TYPE,
#define MAX_BPF_LINK_TYPE __MAX_BPF_LINK_TYPE
enum bpf_perf_event_type {
BPF_PERF_EVENT_UNSPEC = 0,
BPF_PERF_EVENT_UPROBE = 1,
BPF_PERF_EVENT_URETPROBE = 2,
BPF_PERF_EVENT_KPROBE = 3,
BPF_PERF_EVENT_KRETPROBE = 4,
BPF_PERF_EVENT_TRACEPOINT = 5,
BPF_PERF_EVENT_EVENT = 6,
};
/* cgroup-bpf attach flags used in BPF_PROG_ATTACH command
@ -1099,7 +1198,12 @@ enum bpf_link_type {
*/
#define BPF_F_ALLOW_OVERRIDE (1U << 0)
#define BPF_F_ALLOW_MULTI (1U << 1)
/* Generic attachment flags. */
#define BPF_F_REPLACE (1U << 2)
#define BPF_F_BEFORE (1U << 3)
#define BPF_F_AFTER (1U << 4)
#define BPF_F_ID (1U << 5)
#define BPF_F_LINK BPF_F_LINK /* 1 << 13 */
/* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the
* verifier will perform strict alignment checking as if the kernel
@ -1108,7 +1212,7 @@ enum bpf_link_type {
*/
#define BPF_F_STRICT_ALIGNMENT (1U << 0)
/* If BPF_F_ANY_ALIGNMENT is used in BPF_PROF_LOAD command, the
/* If BPF_F_ANY_ALIGNMENT is used in BPF_PROG_LOAD command, the
* verifier will allow any alignment whatsoever. On platforms
* with strict alignment requirements for loads and stores (such
* as sparc and mips) the verifier validates that all loads and
@ -1161,10 +1265,27 @@ enum bpf_link_type {
*/
#define BPF_F_XDP_DEV_BOUND_ONLY (1U << 6)
/* The verifier internal test flag. Behavior is undefined */
#define BPF_F_TEST_REG_INVARIANTS (1U << 7)
/* link_create.kprobe_multi.flags used in LINK_CREATE command for
* BPF_TRACE_KPROBE_MULTI attach type to create return probe.
*/
#define BPF_F_KPROBE_MULTI_RETURN (1U << 0)
enum {
BPF_F_KPROBE_MULTI_RETURN = (1U << 0)
};
/* link_create.uprobe_multi.flags used in LINK_CREATE command for
* BPF_TRACE_UPROBE_MULTI attach type to create return probe.
*/
enum {
BPF_F_UPROBE_MULTI_RETURN = (1U << 0)
};
/* link_create.netfilter.flags used in LINK_CREATE command for
* BPF_PROG_TYPE_NETFILTER to enable IP packet defragmentation.
*/
#define BPF_F_NETFILTER_IP_DEFRAG (1U << 0)
/* When BPF ldimm64's insn[0].src_reg != 0 then this can have
* the following extensions:
@ -1220,6 +1341,10 @@ enum bpf_link_type {
*/
#define BPF_PSEUDO_KFUNC_CALL 2
enum bpf_addr_space_cast {
BPF_ADDR_SPACE_CAST = 1,
};
/* flags for BPF_MAP_UPDATE_ELEM command */
enum {
BPF_ANY = 0, /* create new element or update existing */
@ -1266,6 +1391,24 @@ enum {
/* Create a map that is suitable to be an inner map with dynamic max entries */
BPF_F_INNER_MAP = (1U << 12),
/* Create a map that will be registered/unregistered by the backing bpf_link */
BPF_F_LINK = (1U << 13),
/* Get path from provided FD in BPF_OBJ_PIN/BPF_OBJ_GET commands */
BPF_F_PATH_FD = (1U << 14),
/* Flag for value_type_btf_obj_fd, the fd is available */
BPF_F_VTYPE_BTF_OBJ_FD = (1U << 15),
/* BPF token FD is passed in a corresponding command's token_fd field */
BPF_F_TOKEN_FD = (1U << 16),
/* When user space page faults in bpf_arena send SIGSEGV instead of inserting new page */
BPF_F_SEGV_ON_FAULT = (1U << 17),
/* Do not translate kernel bpf_arena pointers to user pointers */
BPF_F_NO_USER_CONV = (1U << 18),
};
/* Flags for BPF_PROG_QUERY. */
@ -1337,8 +1480,20 @@ union bpf_attr {
* BPF_MAP_TYPE_BLOOM_FILTER - the lowest 4 bits indicate the
* number of hash functions (if 0, the bloom filter will default
* to using 5 hash functions).
*
* BPF_MAP_TYPE_ARENA - contains the address where user space
* is going to mmap() the arena. It has to be page aligned.
*/
__u64 map_extra;
__s32 value_type_btf_obj_fd; /* fd pointing to a BTF
* type data for
* btf_vmlinux_value_type_id.
*/
/* BPF token FD to use with BPF_MAP_CREATE operation.
* If provided, map_flags should have BPF_F_TOKEN_FD flag set.
*/
__s32 map_token_fd;
};
struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
@ -1403,23 +1558,44 @@ union bpf_attr {
__aligned_u64 fd_array; /* array of FDs */
__aligned_u64 core_relos;
__u32 core_relo_rec_size; /* sizeof(struct bpf_core_relo) */
/* output: actual total log contents size (including terminating zero).
* It could be both larger than original log_size (if log was
* truncated), or smaller (if log buffer wasn't filled completely).
*/
__u32 log_true_size;
/* BPF token FD to use with BPF_PROG_LOAD operation.
* If provided, prog_flags should have BPF_F_TOKEN_FD flag set.
*/
__s32 prog_token_fd;
};
struct { /* anonymous struct used by BPF_OBJ_* commands */
__aligned_u64 pathname;
__u32 bpf_fd;
__u32 file_flags;
/* Same as dirfd in openat() syscall; see openat(2)
* manpage for details of path FD and pathname semantics;
* path_fd should be accompanied by BPF_F_PATH_FD flag set in
* file_flags field, otherwise it should be set to zero;
* if BPF_F_PATH_FD flag is not set, AT_FDCWD is assumed.
*/
__s32 path_fd;
};
struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
__u32 target_fd; /* container object to attach to */
__u32 attach_bpf_fd; /* eBPF program to attach */
union {
__u32 target_fd; /* target object to attach to or ... */
__u32 target_ifindex; /* target ifindex */
};
__u32 attach_bpf_fd;
__u32 attach_type;
__u32 attach_flags;
__u32 replace_bpf_fd; /* previously attached eBPF
* program to replace if
* BPF_F_REPLACE is used
*/
__u32 replace_bpf_fd;
union {
__u32 relative_fd;
__u32 relative_id;
};
__u64 expected_revision;
};
struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
@ -1465,21 +1641,33 @@ union bpf_attr {
} info;
struct { /* anonymous struct used by BPF_PROG_QUERY command */
__u32 target_fd; /* container object to query */
union {
__u32 target_fd; /* target object to query or ... */
__u32 target_ifindex; /* target ifindex */
};
__u32 attach_type;
__u32 query_flags;
__u32 attach_flags;
__aligned_u64 prog_ids;
__u32 prog_cnt;
union {
__u32 prog_cnt;
__u32 count;
};
__u32 :32;
/* output: per-program attach_flags.
* not allowed to be set during effective query.
*/
__aligned_u64 prog_attach_flags;
__aligned_u64 link_ids;
__aligned_u64 link_attach_flags;
__u64 revision;
} query;
struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */
__u64 name;
__u32 prog_fd;
__u64 name;
__u32 prog_fd;
__u32 :32;
__aligned_u64 cookie;
} raw_tracepoint;
struct { /* anonymous struct for BPF_BTF_LOAD */
@ -1488,6 +1676,16 @@ union bpf_attr {
__u32 btf_size;
__u32 btf_log_size;
__u32 btf_log_level;
/* output: actual total log contents size (including terminating zero).
* It could be both larger than original log_size (if log was
* truncated), or smaller (if log buffer wasn't filled completely).
*/
__u32 btf_log_true_size;
__u32 btf_flags;
/* BPF token FD to use with BPF_BTF_LOAD operation.
* If provided, btf_flags should have BPF_F_TOKEN_FD flag set.
*/
__s32 btf_token_fd;
};
struct {
@ -1507,15 +1705,18 @@ union bpf_attr {
} task_fd_query;
struct { /* struct used by BPF_LINK_CREATE command */
__u32 prog_fd; /* eBPF program to attach */
union {
__u32 target_fd; /* object to attach to */
__u32 target_ifindex; /* target ifindex */
__u32 prog_fd; /* eBPF program to attach */
__u32 map_fd; /* struct_ops to attach */
};
union {
__u32 target_fd; /* target object to attach to or ... */
__u32 target_ifindex; /* target ifindex */
};
__u32 attach_type; /* attach type */
__u32 flags; /* extra flags */
union {
__u32 target_btf_id; /* btf_id of target to attach to */
__u32 target_btf_id; /* btf_id of target to attach to */
struct {
__aligned_u64 iter_info; /* extra bpf_iter_link_info */
__u32 iter_info_len; /* iter_info length */
@ -1543,17 +1744,57 @@ union bpf_attr {
*/
__u64 cookie;
} tracing;
struct {
__u32 pf;
__u32 hooknum;
__s32 priority;
__u32 flags;
} netfilter;
struct {
union {
__u32 relative_fd;
__u32 relative_id;
};
__u64 expected_revision;
} tcx;
struct {
__aligned_u64 path;
__aligned_u64 offsets;
__aligned_u64 ref_ctr_offsets;
__aligned_u64 cookies;
__u32 cnt;
__u32 flags;
__u32 pid;
} uprobe_multi;
struct {
union {
__u32 relative_fd;
__u32 relative_id;
};
__u64 expected_revision;
} netkit;
};
} link_create;
struct { /* struct used by BPF_LINK_UPDATE command */
__u32 link_fd; /* link fd */
/* new program fd to update link with */
__u32 new_prog_fd;
union {
/* new program fd to update link with */
__u32 new_prog_fd;
/* new struct_ops map fd to update link with */
__u32 new_map_fd;
};
__u32 flags; /* extra flags */
/* expected link's program fd; is specified only if
* BPF_F_REPLACE flag is set in flags */
__u32 old_prog_fd;
union {
/* expected link's program fd; is specified only if
* BPF_F_REPLACE flag is set in flags.
*/
__u32 old_prog_fd;
/* expected link's map fd; is specified only
* if BPF_F_REPLACE flag is set.
*/
__u32 old_map_fd;
};
} link_update;
struct {
@ -1575,6 +1816,11 @@ union bpf_attr {
__u32 flags; /* extra flags */
} prog_bind_map;
struct { /* struct used by BPF_TOKEN_CREATE command */
__u32 flags;
__u32 bpffs_fd;
} token_create;
} __attribute__((aligned(8)));
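/*
 * Userspace sketch (illustrative, not part of this header): creating a BPF
 * token from an already-open bpffs mount fd with the new BPF_TOKEN_CREATE
 * command. The returned fd can then be passed via prog_token_fd or
 * btf_token_fd together with BPF_F_TOKEN_FD. Error handling and the bpffs
 * mount setup are elided.
 */
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int create_bpf_token(int bpffs_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.token_create.bpffs_fd = bpffs_fd;

	/* returns a new token fd, or -1 with errno set */
	return syscall(__NR_bpf, BPF_TOKEN_CREATE, &attr, sizeof(attr));
}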
/* The description below is an attempt at providing documentation to eBPF
@ -1647,17 +1893,17 @@ union bpf_attr {
* Description
* This helper is a "printk()-like" facility for debugging. It
* prints a message defined by format *fmt* (of size *fmt_size*)
* to file *\/sys/kernel/debug/tracing/trace* from DebugFS, if
* to file *\/sys/kernel/tracing/trace* from TraceFS, if
* available. It can take up to three additional **u64**
* arguments (as an eBPF helpers, the total number of arguments is
* limited to five).
*
* Each time the helper is called, it appends a line to the trace.
* Lines are discarded while *\/sys/kernel/debug/tracing/trace* is
* open, use *\/sys/kernel/debug/tracing/trace_pipe* to avoid this.
* Lines are discarded while *\/sys/kernel/tracing/trace* is
* open, use *\/sys/kernel/tracing/trace_pipe* to avoid this.
* The format of the trace is customizable, and the exact output
* one will get depends on the options set in
* *\/sys/kernel/debug/tracing/trace_options* (see also the
* *\/sys/kernel/tracing/trace_options* (see also the
* *README* file under the same directory). However, it usually
* defaults to something like:
*
@ -1850,7 +2096,9 @@ union bpf_attr {
* performed again, if the helper is used in combination with
* direct packet access.
* Return
* 0 on success, or a negative error in case of failure.
* 0 on success, or a negative error in case of failure. Positive
* error indicates a potential drop or congestion in the target
* device. The particular positive error codes are not defined.
*
* u64 bpf_get_current_pid_tgid(void)
* Description
@ -2583,8 +2831,8 @@ union bpf_attr {
* *bpf_socket* should be one of the following:
*
* * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**.
* * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**
* and **BPF_CGROUP_INET6_CONNECT**.
* * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**,
* **BPF_CGROUP_INET6_CONNECT** and **BPF_CGROUP_UNIX_CONNECT**.
*
* This helper actually implements a subset of **setsockopt()**.
* It supports the following *level*\ s:
@ -2822,8 +3070,8 @@ union bpf_attr {
* *bpf_socket* should be one of the following:
*
* * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**.
* * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**
* and **BPF_CGROUP_INET6_CONNECT**.
* * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**,
* **BPF_CGROUP_INET6_CONNECT** and **BPF_CGROUP_UNIX_CONNECT**.
*
* This helper actually implements a subset of **getsockopt()**.
* It supports the same set of *optname*\ s that is supported by
@ -3131,6 +3379,10 @@ union bpf_attr {
* **BPF_FIB_LOOKUP_DIRECT**
* Do a direct table lookup vs full lookup using FIB
* rules.
* **BPF_FIB_LOOKUP_TBID**
* Used with BPF_FIB_LOOKUP_DIRECT.
* Use the routing table ID present in *params*->tbid
* for the fib lookup.
* **BPF_FIB_LOOKUP_OUTPUT**
* Perform lookup from an egress perspective (default is
* ingress).
@ -3139,6 +3391,15 @@ union bpf_attr {
* and *params*->smac will not be set as output. A common
* use case is to call **bpf_redirect_neigh**\ () after
* doing **bpf_fib_lookup**\ ().
* **BPF_FIB_LOOKUP_SRC**
* Derive and set source IP addr in *params*->ipv{4,6}_src
* for the nexthop. If the src addr cannot be derived,
* **BPF_FIB_LKUP_RET_NO_SRC_ADDR** is returned. In this
* case, *params*->dmac and *params*->smac are not set either.
* **BPF_FIB_LOOKUP_MARK**
* Use the mark present in *params*->mark for the fib lookup.
* This option should not be used with BPF_FIB_LOOKUP_DIRECT,
* as it only has meaning for full lookups.
*
* *ctx* is either **struct xdp_md** for XDP programs or
* **struct sk_buff** tc cls_act programs.
@ -4108,9 +4369,6 @@ union bpf_attr {
* **-EOPNOTSUPP** if the operation is not supported, for example
* a call from outside of TC ingress.
*
* **-ESOCKTNOSUPPORT** if the socket type is not supported
* (reuseport).
*
* long bpf_sk_assign(struct bpf_sk_lookup *ctx, struct bpf_sock *sk, u64 flags)
* Description
* Helper is overloaded depending on BPF program type. This
@ -4375,6 +4633,8 @@ union bpf_attr {
* long bpf_get_task_stack(struct task_struct *task, void *buf, u32 size, u64 flags)
* Description
* Return a user or a kernel stack in bpf program provided buffer.
* Note: the user stack will only be populated if the *task* is
* the current task; all other tasks will return -EOPNOTSUPP.
* To achieve this, the helper needs *task*, which is a valid
* pointer to **struct task_struct**. To store the stacktrace, the
* bpf program provides *buf* with a nonnegative *size*.
@ -4386,6 +4646,7 @@ union bpf_attr {
*
* **BPF_F_USER_STACK**
* Collect a user space stack instead of a kernel stack.
* The *task* must be the current task.
* **BPF_F_USER_BUILD_ID**
* Collect buildid+offset instead of ips for user stack,
* only valid if **BPF_F_USER_STACK** is also specified.
@ -4689,9 +4950,9 @@ union bpf_attr {
* going through the CPU's backlog queue.
*
* The *flags* argument is reserved and must be 0. The helper is
* currently only supported for tc BPF program types at the ingress
* hook and for veth device types. The peer device must reside in a
* different network namespace.
* currently only supported for tc BPF program types at the
* ingress hook and for veth and netkit target device types. The
* peer device must reside in a different network namespace.
* Return
* The helper returns **TC_ACT_REDIRECT** on success or
* **TC_ACT_SHOT** on error.
@ -4767,7 +5028,7 @@ union bpf_attr {
* bytes will be copied to *dst*
* Return
* The **hash_algo** is returned on success,
* **-EOPNOTSUP** if IMA is disabled or **-EINVAL** if
* **-EOPNOTSUPP** if IMA is disabled or **-EINVAL** if
* invalid arguments are passed.
*
* struct socket *bpf_sock_from_file(struct file *file)
@ -4969,6 +5230,14 @@ union bpf_attr {
* different maps if key/value layout matches across maps.
* Every bpf_timer_set_callback() can have different callback_fn.
*
* *flags* can be one of:
*
* **BPF_F_TIMER_ABS**
* Start the timer in absolute expire value instead of the
* default relative one.
* **BPF_F_TIMER_CPU_PIN**
* Timer will be pinned to the CPU of the caller.
*
* Return
* 0 on success.
* **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier
@ -4987,9 +5256,14 @@ union bpf_attr {
* u64 bpf_get_func_ip(void *ctx)
* Description
* Get address of the traced function (for tracing and kprobe programs).
*
* When called for kprobe program attached as uprobe it returns
* probe address for both entry and return uprobe.
*
* Return
* Address of the traced function.
* Address of the traced function for kprobe.
* 0 for kprobes placed within the function (not at the entry).
* Address of the probe for uprobe and return uprobe.
*
* u64 bpf_get_attach_cookie(void *ctx)
* Description
@ -5240,7 +5514,7 @@ union bpf_attr {
* bytes will be copied to *dst*
* Return
* The **hash_algo** is returned on success,
* **-EOPNOTSUP** if the hash calculation failed or **-EINVAL** if
* **-EOPNOTSUPP** if the hash calculation failed or **-EINVAL** if
* invalid arguments are passed.
*
* void *bpf_kptr_xchg(void *map_value, void *ptr)
@ -5325,11 +5599,22 @@ union bpf_attr {
* Description
* Write *len* bytes from *src* into *dst*, starting from *offset*
* into *dst*.
* *flags* is currently unused.
*
* *flags* must be 0 except for skb-type dynptrs.
*
* For skb-type dynptrs:
* * All data slices of the dynptr are automatically
* invalidated after **bpf_dynptr_write**\ (). This is
* because writing may pull the skb and change the
* underlying packet buffer.
*
* * For *flags*, please see the flags accepted by
* **bpf_skb_store_bytes**\ ().
* Return
* 0 on success, -E2BIG if *offset* + *len* exceeds the length
* of *dst*'s data, -EINVAL if *dst* is an invalid dynptr or if *dst*
* is a read-only dynptr or if *flags* is not 0.
* is a read-only dynptr or if *flags* is not correct. For skb-type dynptrs,
* other errors correspond to errors returned by **bpf_skb_store_bytes**\ ().
*
* void *bpf_dynptr_data(const struct bpf_dynptr *ptr, u32 offset, u32 len)
* Description
@ -5337,6 +5622,9 @@ union bpf_attr {
*
* *len* must be a statically known value. The returned data slice
* is invalidated whenever the dynptr is invalidated.
*
* skb and xdp type dynptrs may not use bpf_dynptr_data. They should
* instead use bpf_dynptr_slice and bpf_dynptr_slice_rdwr.
* Return
* Pointer to the underlying dynptr data, NULL if the dynptr is
* read-only, if the dynptr is invalid, or if the offset and length
@ -6116,6 +6404,19 @@ struct bpf_sock_tuple {
};
};
/* (Simplified) user return codes for tcx prog type.
* A valid tcx program must return one of these defined values. All other
* return codes are reserved for future use. Must remain compatible with
* their TC_ACT_* counter-parts. For compatibility in behavior, unknown
* return codes are mapped to TCX_NEXT.
*/
enum tcx_action_base {
TCX_NEXT = -1,
TCX_PASS = 0,
TCX_DROP = 2,
TCX_REDIRECT = 7,
};
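/*
 * Minimal sketch of a tcx program returning one of the codes above; assumes
 * libbpf's bpf_helpers.h and its "tcx/ingress" section-name convention,
 * compiled with clang -target bpf. Purely illustrative.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tcx/ingress")
int tcx_accept(struct __sk_buff *skb)
{
	/* hand the packet to the next tcx program, or to the stack if none */
	return TCX_NEXT;
}

char LICENSE[] SEC("license") = "GPL";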
struct bpf_xdp_sock {
__u32 queue_id;
};
@ -6297,7 +6598,7 @@ struct bpf_map_info {
__u32 btf_id;
__u32 btf_key_type_id;
__u32 btf_value_type_id;
__u32 :32; /* alignment pad */
__u32 btf_vmlinux_id;
__u64 map_extra;
} __attribute__((aligned(8)));
@ -6359,6 +6660,76 @@ struct bpf_link_info {
struct {
__u32 ifindex;
} xdp;
struct {
__u32 map_id;
} struct_ops;
struct {
__u32 pf;
__u32 hooknum;
__s32 priority;
__u32 flags;
} netfilter;
struct {
__aligned_u64 addrs;
__u32 count; /* in/out: kprobe_multi function count */
__u32 flags;
__u64 missed;
__aligned_u64 cookies;
} kprobe_multi;
struct {
__aligned_u64 path;
__aligned_u64 offsets;
__aligned_u64 ref_ctr_offsets;
__aligned_u64 cookies;
__u32 path_size; /* in/out: real path size on success, including zero byte */
__u32 count; /* in/out: uprobe_multi offsets/ref_ctr_offsets/cookies count */
__u32 flags;
__u32 pid;
} uprobe_multi;
struct {
__u32 type; /* enum bpf_perf_event_type */
__u32 :32;
union {
struct {
__aligned_u64 file_name; /* in/out */
__u32 name_len;
__u32 offset; /* offset from file_name */
__u64 cookie;
} uprobe; /* BPF_PERF_EVENT_UPROBE, BPF_PERF_EVENT_URETPROBE */
struct {
__aligned_u64 func_name; /* in/out */
__u32 name_len;
__u32 offset; /* offset from func_name */
__u64 addr;
__u64 missed;
__u64 cookie;
} kprobe; /* BPF_PERF_EVENT_KPROBE, BPF_PERF_EVENT_KRETPROBE */
struct {
__aligned_u64 tp_name; /* in/out */
__u32 name_len;
__u32 :32;
__u64 cookie;
} tracepoint; /* BPF_PERF_EVENT_TRACEPOINT */
struct {
__u64 config;
__u32 type;
__u32 :32;
__u64 cookie;
} event; /* BPF_PERF_EVENT_EVENT */
};
} perf_event;
struct {
__u32 ifindex;
__u32 attach_type;
} tcx;
struct {
__u32 ifindex;
__u32 attach_type;
} netkit;
struct {
__u32 map_id;
__u32 attach_type;
} sockmap;
};
} __attribute__((aligned(8)));
@ -6577,6 +6948,8 @@ enum {
* socket transition to LISTEN state.
*/
BPF_SOCK_OPS_RTT_CB, /* Called on every RTT.
* Arg1: measured RTT input (mrtt)
* Arg2: updated srtt
*/
BPF_SOCK_OPS_PARSE_HDR_OPT_CB, /* Parse the header option.
* It will be called to handle
@ -6655,6 +7028,7 @@ enum {
BPF_TCP_LISTEN,
BPF_TCP_CLOSING, /* Now a valid state */
BPF_TCP_NEW_SYN_RECV,
BPF_TCP_BOUND_INACTIVE,
BPF_TCP_MAX_STATES /* Leave at the end! */
};
@ -6756,6 +7130,9 @@ enum {
BPF_FIB_LOOKUP_DIRECT = (1U << 0),
BPF_FIB_LOOKUP_OUTPUT = (1U << 1),
BPF_FIB_LOOKUP_SKIP_NEIGH = (1U << 2),
BPF_FIB_LOOKUP_TBID = (1U << 3),
BPF_FIB_LOOKUP_SRC = (1U << 4),
BPF_FIB_LOOKUP_MARK = (1U << 5),
};
enum {
@ -6768,6 +7145,7 @@ enum {
BPF_FIB_LKUP_RET_UNSUPP_LWT, /* fwd requires encapsulation */
BPF_FIB_LKUP_RET_NO_NEIGH, /* no neighbor entry for nh */
BPF_FIB_LKUP_RET_FRAG_NEEDED, /* fragmentation required to fwd */
BPF_FIB_LKUP_RET_NO_SRC_ADDR, /* failed to derive IP src addr */
};
struct bpf_fib_lookup {
@ -6787,7 +7165,7 @@ struct bpf_fib_lookup {
/* output: MTU value */
__u16 mtu_result;
};
} __attribute__((packed, aligned(2)));
/* input: L3 device index for lookup
* output: device index from FIB lookup
*/
@ -6802,6 +7180,9 @@ struct bpf_fib_lookup {
__u32 rt_metric;
};
/* input: source address to consider for lookup
* output: source address result from lookup
*/
union {
__be32 ipv4_src;
__u32 ipv6_src[4]; /* in6_addr; network order */
@ -6816,11 +7197,32 @@ struct bpf_fib_lookup {
__u32 ipv6_dst[4]; /* in6_addr; network order */
};
/* output */
__be16 h_vlan_proto;
__be16 h_vlan_TCI;
__u8 smac[6]; /* ETH_ALEN */
__u8 dmac[6]; /* ETH_ALEN */
union {
struct {
/* output */
__be16 h_vlan_proto;
__be16 h_vlan_TCI;
};
/* input: when accompanied with the
* 'BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_TBID` flags, a
* specific routing table to use for the fib lookup.
*/
__u32 tbid;
};
union {
/* input */
struct {
__u32 mark; /* policy routing */
/* 2 4-byte holes for input */
};
/* output: source and dest mac */
struct {
__u8 smac[6]; /* ETH_ALEN */
__u8 dmac[6]; /* ETH_ALEN */
};
};
};
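/*
 * Sketch of an XDP program exercising the new tbid field together with
 * BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_TBID; assumes libbpf's
 * bpf_helpers.h, and the routing table id 100 is purely illustrative.
 * A real program would also fill ipv4_dst, tot_len, etc. from the packet.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int fib_lookup_tbid(struct xdp_md *ctx)
{
	struct bpf_fib_lookup params = {};
	long rc;

	params.family  = 2;			/* AF_INET */
	params.ifindex = ctx->ingress_ifindex;
	params.tbid    = 100;			/* lookup in routing table 100 */
	/* ... fill params.ipv4_dst and L4 fields from the packet here ... */

	rc = bpf_fib_lookup(ctx, &params, sizeof(params),
			    BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_TBID);
	if (rc == BPF_FIB_LKUP_RET_SUCCESS) {
		/* params.dmac / params.smac now hold the next-hop addresses */
	}
	return XDP_PASS;
}

char LICENSE[] SEC("license") = "GPL";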
struct bpf_redir_neigh {
@ -6904,36 +7306,37 @@ struct bpf_spin_lock {
};
struct bpf_timer {
__u64 :64;
__u64 :64;
__u64 __opaque[2];
} __attribute__((aligned(8)));
struct bpf_wq {
__u64 __opaque[2];
} __attribute__((aligned(8)));
struct bpf_dynptr {
__u64 :64;
__u64 :64;
__u64 __opaque[2];
} __attribute__((aligned(8)));
struct bpf_list_head {
__u64 :64;
__u64 :64;
__u64 __opaque[2];
} __attribute__((aligned(8)));
struct bpf_list_node {
__u64 :64;
__u64 :64;
__u64 __opaque[3];
} __attribute__((aligned(8)));
struct bpf_rb_root {
__u64 :64;
__u64 :64;
__u64 __opaque[2];
} __attribute__((aligned(8)));
struct bpf_rb_node {
__u64 :64;
__u64 :64;
__u64 :64;
__u64 __opaque[4];
} __attribute__((aligned(8)));
struct bpf_refcount {
__u32 __opaque[1];
} __attribute__((aligned(4)));
struct bpf_sysctl {
__u32 write; /* Sysctl is being read (= 0) or written (= 1).
* Allows 1,2,4-byte read, but no write.
@ -7083,4 +7486,23 @@ struct bpf_core_relo {
enum bpf_core_relo_kind kind;
};
/*
* Flags to control bpf_timer_start() behaviour.
* - BPF_F_TIMER_ABS: Timeout passed is absolute time, by default it is
* relative to current time.
* - BPF_F_TIMER_CPU_PIN: Timer will be pinned to the CPU of the caller.
*/
enum {
BPF_F_TIMER_ABS = (1ULL << 0),
BPF_F_TIMER_CPU_PIN = (1ULL << 1),
};
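/*
 * Sketch of arming a timer with BPF_F_TIMER_ABS; assumes libbpf's
 * bpf_helpers.h, an array map holding the timer, and an attach point from
 * which the timer helpers are usable. The tracepoint and the 1 s expiry are
 * illustrative.
 */
#include <linux/bpf.h>
#include <linux/time.h>
#include <bpf/bpf_helpers.h>

struct elem {
	struct bpf_timer t;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, struct elem);
} timer_map SEC(".maps");

static int timer_cb(void *map, __u32 *key, struct elem *val)
{
	/* runs once the absolute expiry time is reached */
	return 0;
}

SEC("tracepoint/syscalls/sys_enter_getpid")
int arm_abs_timer(void *ctx)
{
	__u32 key = 0;
	struct elem *val = bpf_map_lookup_elem(&timer_map, &key);

	if (!val)
		return 0;

	bpf_timer_init(&val->t, &timer_map, CLOCK_MONOTONIC);
	bpf_timer_set_callback(&val->t, timer_cb);
	/* absolute expiry: one second from now on the monotonic clock */
	bpf_timer_start(&val->t, bpf_ktime_get_ns() + 1000000000ULL,
			BPF_F_TIMER_ABS);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";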
/* BPF numbers iterator state */
struct bpf_iter_num {
/* opaque iterator state; having __u64 here allows to preserve correct
* alignment requirements in vmlinux.h, generated from BTF
*/
__u64 __opaque[1];
} __attribute__((aligned(8)));
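/*
 * Sketch of the numbers iterator in use; assumes libbpf's bpf_helpers.h,
 * which wraps the bpf_iter_num_new/next/destroy kfuncs behind the bpf_for()
 * convenience macro. The attach point is illustrative.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tracepoint/syscalls/sys_enter_getpid")
int sum_first_ten(void *ctx)
{
	int i, sum = 0;

	bpf_for(i, 0, 10)	/* open-coded iterator over [0, 10) */
		sum += i;

	bpf_printk("sum = %d", sum);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";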
#endif /* __LINUX_BPF_H__ */

View File

@ -1,21 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _LINUX_BPFILTER_H
#define _LINUX_BPFILTER_H
#include <linux/if.h>
enum {
BPFILTER_IPT_SO_SET_REPLACE = 64,
BPFILTER_IPT_SO_SET_ADD_COUNTERS = 65,
BPFILTER_IPT_SET_MAX,
};
enum {
BPFILTER_IPT_SO_GET_INFO = 64,
BPFILTER_IPT_SO_GET_ENTRIES = 65,
BPFILTER_IPT_SO_GET_REVISION_MATCH = 66,
BPFILTER_IPT_SO_GET_REVISION_TARGET = 67,
BPFILTER_IPT_GET_MAX,
};
#endif /* _LINUX_BPFILTER_H */

View File

@ -90,6 +90,7 @@ struct btrfs_qgroup_limit {
* struct btrfs_qgroup_inherit.flags
*/
#define BTRFS_QGROUP_INHERIT_SET_LIMITS (1ULL << 0)
#define BTRFS_QGROUP_INHERIT_FLAGS_SUPP (BTRFS_QGROUP_INHERIT_SET_LIMITS)
struct btrfs_qgroup_inherit {
__u64 flags;
@ -331,6 +332,8 @@ struct btrfs_ioctl_fs_info_args {
#define BTRFS_FEATURE_INCOMPAT_RAID1C34 (1ULL << 11)
#define BTRFS_FEATURE_INCOMPAT_ZONED (1ULL << 12)
#define BTRFS_FEATURE_INCOMPAT_EXTENT_TREE_V2 (1ULL << 13)
#define BTRFS_FEATURE_INCOMPAT_RAID_STRIPE_TREE (1ULL << 14)
#define BTRFS_FEATURE_INCOMPAT_SIMPLE_QUOTA (1ULL << 16)
struct btrfs_ioctl_feature_flags {
__u64 compat_flags;
@ -610,6 +613,9 @@ struct btrfs_ioctl_clone_range_args {
*/
#define BTRFS_DEFRAG_RANGE_COMPRESS 1
#define BTRFS_DEFRAG_RANGE_START_IO 2
#define BTRFS_DEFRAG_RANGE_FLAGS_SUPP (BTRFS_DEFRAG_RANGE_COMPRESS | \
BTRFS_DEFRAG_RANGE_START_IO)
struct btrfs_ioctl_defrag_range_args {
/* start of the defrag operation */
__u64 start;
@ -751,6 +757,7 @@ struct btrfs_ioctl_get_dev_stats {
#define BTRFS_QUOTA_CTL_ENABLE 1
#define BTRFS_QUOTA_CTL_DISABLE 2
#define BTRFS_QUOTA_CTL_RESCAN__NOTUSED 3
#define BTRFS_QUOTA_CTL_ENABLE_SIMPLE_QUOTA 4
struct btrfs_ioctl_quota_ctl_args {
__u64 cmd;
__u64 status;

View File

@ -69,6 +69,9 @@
/* Holds the block group items for extent tree v2. */
#define BTRFS_BLOCK_GROUP_TREE_OBJECTID 11ULL
/* Tracks RAID stripes in block groups. */
#define BTRFS_RAID_STRIPE_TREE_OBJECTID 12ULL
/* device stats in the device tree */
#define BTRFS_DEV_STATS_OBJECTID 0ULL
@ -212,11 +215,31 @@
*/
#define BTRFS_METADATA_ITEM_KEY 169
/*
* Special __inline__ ref key which stores the id of the subvolume which originally
* created the extent. This subvolume owns the extent permanently from the
* perspective of simple quotas. Needed to know which subvolume to free quota
* usage from when the extent is deleted.
*
 * Stored as an __inline__ ref to avoid wasting space on a separate item on
 * top of the existing extent item. However, unlike the other __inline__ refs,
 * there is only one owner ref per extent rather than one per extent ref.
*
* Because of this, it goes at the front of the list of __inline__ refs, and thus
* must have a lower type value than any other __inline__ ref type (to satisfy the
* disk format rule that __inline__ refs have non-decreasing type).
*/
#define BTRFS_EXTENT_OWNER_REF_KEY 172
#define BTRFS_TREE_BLOCK_REF_KEY 176
#define BTRFS_EXTENT_DATA_REF_KEY 178
#define BTRFS_EXTENT_REF_V0_KEY 180
/*
 * Obsolete key. Definition removed in 6.6, value may be reused in the future.
*
* #define BTRFS_EXTENT_REF_V0_KEY 180
*/
#define BTRFS_SHARED_BLOCK_REF_KEY 182
@ -253,6 +276,8 @@
#define BTRFS_DEV_ITEM_KEY 216
#define BTRFS_CHUNK_ITEM_KEY 228
#define BTRFS_RAID_STRIPE_KEY 230
/*
* Records the overall state of the qgroups.
* There's only one instance of this key present,
@ -711,6 +736,30 @@ struct btrfs_free_space_header {
__le64 num_bitmaps;
} __attribute__ ((__packed__));
struct btrfs_raid_stride {
/* The id of device this raid extent lives on. */
__le64 devid;
/* The physical location on disk. */
__le64 physical;
} __attribute__ ((__packed__));
/* The stripe_extent::encoding, 1:1 mapping of enum btrfs_raid_types. */
#define BTRFS_STRIPE_RAID0 1
#define BTRFS_STRIPE_RAID1 2
#define BTRFS_STRIPE_DUP 3
#define BTRFS_STRIPE_RAID10 4
#define BTRFS_STRIPE_RAID5 5
#define BTRFS_STRIPE_RAID6 6
#define BTRFS_STRIPE_RAID1C3 7
#define BTRFS_STRIPE_RAID1C4 8
struct btrfs_stripe_extent {
__u8 encoding;
__u8 reserved[7];
/* An array of raid strides this stripe is composed of. */
struct btrfs_raid_stride strides[];
} __attribute__ ((__packed__));
#define BTRFS_HEADER_FLAG_WRITTEN (1ULL << 0)
#define BTRFS_HEADER_FLAG_RELOC (1ULL << 1)
@ -779,6 +828,10 @@ struct btrfs_shared_data_ref {
__le32 count;
} __attribute__ ((__packed__));
struct btrfs_extent_owner_ref {
__le64 root_id;
} __attribute__ ((__packed__));
struct btrfs_extent_inline_ref {
__u8 type;
__le64 offset;
@ -1196,9 +1249,17 @@ static __inline__ __u16 btrfs_qgroup_level(__u64 qgroupid)
*/
#define BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT (1ULL << 2)
/*
* Whether or not this filesystem is using simple quotas. Not exactly the
* incompat bit, because we support using simple quotas, disabling it, then
* going back to full qgroup quotas.
*/
#define BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE (1ULL << 3)
#define BTRFS_QGROUP_STATUS_FLAGS_MASK (BTRFS_QGROUP_STATUS_FLAG_ON | \
BTRFS_QGROUP_STATUS_FLAG_RESCAN | \
BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT)
BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT | \
BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE)
#define BTRFS_QGROUP_STATUS_VERSION 1
@ -1220,6 +1281,15 @@ struct btrfs_qgroup_status_item {
* of the scan. It contains a logical address
*/
__le64 rescan;
/*
* The generation when quotas were last enabled. Used by simple quotas to
* avoid decrementing when freeing an extent that was written before
* enable.
*
* Set only if flags contain BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE.
*/
__le64 enable_gen;
} __attribute__ ((__packed__));
struct btrfs_qgroup_info_item {

View File

@ -193,9 +193,14 @@ struct canfd_frame {
#define CANXL_XLF 0x80 /* mandatory CAN XL frame flag (must always be set!) */
#define CANXL_SEC 0x01 /* Simple Extended Content (security/segmentation) */
/* the 8-bit VCID is optionally placed in the canxl_frame.prio element */
#define CANXL_VCID_OFFSET 16 /* bit offset of VCID in prio element */
#define CANXL_VCID_VAL_MASK 0xFFUL /* VCID is an 8-bit value */
#define CANXL_VCID_MASK (CANXL_VCID_VAL_MASK << CANXL_VCID_OFFSET)
/**
* struct canxl_frame - CAN with e'X'tended frame 'L'ength frame structure
* @prio: 11 bit arbitration priority with zero'ed CAN_*_FLAG flags
* @prio: 11 bit arbitration priority with zero'ed CAN_*_FLAG flags / VCID
* @flags: additional flags for CAN XL
* @sdt: SDU (service data unit) type
* @len: frame payload length in byte (CANXL_MIN_DLEN .. CANXL_MAX_DLEN)
@ -205,7 +210,7 @@ struct canfd_frame {
* @prio shares the same position as @can_id from struct can[fd]_frame.
*/
struct canxl_frame {
canid_t prio; /* 11 bit priority for arbitration (canid_t) */
canid_t prio; /* 11 bit priority for arbitration / 8 bit VCID */
__u8 flags; /* additional flags for CAN XL */
__u8 sdt; /* SDU (service data unit) type */
__u16 len; /* frame payload length in byte */
@ -285,6 +290,5 @@ struct can_filter {
};
#define CAN_INV_FILTER 0x20000000U /* to be set in can_filter.can_id */
#define CAN_RAW_FILTER_MAX 512 /* maximum number of can_filter set via setsockopt() */
#endif /* !_UAPI_CAN_H */

View File

@ -137,6 +137,7 @@ struct can_isotp_ll_options {
#define CAN_ISOTP_WAIT_TX_DONE 0x0400 /* wait for tx completion */
#define CAN_ISOTP_SF_BROADCAST 0x0800 /* 1-to-N functional addressing */
#define CAN_ISOTP_CF_BROADCAST 0x1000 /* 1-to-N transmission w/o FC */
#define CAN_ISOTP_DYN_FC_PARMS 0x2000 /* dynamic FC parameters BS/STmin */
/* protocol machine default values */

View File

@ -49,6 +49,8 @@
#include <linux/can.h>
#define SOL_CAN_RAW (SOL_CAN_BASE + CAN_RAW)
#define CAN_RAW_FILTER_MAX 512 /* maximum number of can_filter set via setsockopt() */
enum {
SCM_CAN_RAW_ERRQUEUE = 1,
};
@ -63,6 +65,22 @@ enum {
CAN_RAW_FD_FRAMES, /* allow CAN FD frames (default:off) */
CAN_RAW_JOIN_FILTERS, /* all filters must match to trigger */
CAN_RAW_XL_FRAMES, /* allow CAN XL frames (default:off) */
CAN_RAW_XL_VCID_OPTS, /* CAN XL VCID configuration options */
};
/* configuration for CAN XL virtual CAN identifier (VCID) handling */
struct can_raw_vcid_options {
__u8 flags; /* flags for vcid (filter) behaviour */
__u8 tx_vcid; /* VCID value set into canxl_frame.prio */
__u8 rx_vcid; /* VCID value for VCID filter */
__u8 rx_vcid_mask; /* VCID mask for VCID filter */
};
/* can_raw_vcid_options.flags for CAN XL virtual CAN identifier handling */
#define CAN_RAW_XL_VCID_TX_SET 0x01
#define CAN_RAW_XL_VCID_TX_PASS 0x02
#define CAN_RAW_XL_VCID_RX_FILTER 0x04
#endif /* !_UAPI_CAN_RAW_H */
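/*
 * Userspace sketch of the new VCID handling: enable CAN XL frames on an
 * already-created and bound CAN_RAW socket and tag transmitted frames with
 * an illustrative VCID of 0x42. Socket creation and error details elided.
 */
#include <string.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/raw.h>

static int enable_xl_vcid(int sock)
{
	int on = 1;
	struct can_raw_vcid_options vcid = {
		.flags   = CAN_RAW_XL_VCID_TX_SET,
		.tx_vcid = 0x42,
	};

	if (setsockopt(sock, SOL_CAN_RAW, CAN_RAW_XL_FRAMES, &on, sizeof(on)))
		return -1;
	return setsockopt(sock, SOL_CAN_RAW, CAN_RAW_XL_VCID_OPTS,
			  &vcid, sizeof(vcid));
}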

View File

@ -41,11 +41,12 @@ typedef struct __user_cap_header_struct {
int pid;
} *cap_user_header_t;
typedef struct __user_cap_data_struct {
struct __user_cap_data_struct {
__u32 effective;
__u32 permitted;
__u32 inheritable;
} *cap_user_data_t;
};
typedef struct __user_cap_data_struct *cap_user_data_t;
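/*
 * Userspace sketch of the (unchanged) capget() usage with the now separately
 * named struct __user_cap_data_struct; with _LINUX_CAPABILITY_VERSION_3 the
 * kernel fills two data elements. Purely illustrative.
 */
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/capability.h>

static int read_own_caps(struct __user_cap_data_struct data[2])
{
	struct __user_cap_header_struct hdr;

	memset(&hdr, 0, sizeof(hdr));
	hdr.version = _LINUX_CAPABILITY_VERSION_3;
	hdr.pid = 0;			/* 0 == calling thread */

	return syscall(SYS_capget, &hdr, data);
}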
#define VFS_CAP_REVISION_MASK 0xFF000000

View File

@ -24,8 +24,6 @@
* basis. This data is shared using taskstats.
*
* Most of these states are derived by looking at the task->state value
* For the nr_io_wait state, a flag in the delay accounting structure
* indicates that the task is waiting on IO
*
* Each member is aligned to a 8 byte boundary.
*/

View File

@ -1,64 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _CM4000_H_
#define _CM4000_H_
#include <linux/types.h>
#include <linux/ioctl.h>
#define MAX_ATR 33
#define CM4000_MAX_DEV 4
/* those two structures are passed via ioctl() from/to userspace. They are
 * used by existing userspace programs, so I kept the awkward "bIFSD" naming
* not to break compilation of userspace apps. -HW */
typedef struct atreq {
__s32 atr_len;
unsigned char atr[64];
__s32 power_act;
unsigned char bIFSD;
unsigned char bIFSC;
} atreq_t;
/* what is particularly stupid in the original driver is the arch-dependent
* member sizes. This leads to CONFIG_COMPAT breakage, since 32bit userspace
* will lay out the structure members differently than the 64bit kernel.
*
* I've changed "ptsreq.protocol" from "unsigned long" to "__u32".
* On 32bit this will make no difference. With 64bit kernels, it will make
* 32bit apps work, too.
*/
typedef struct ptsreq {
__u32 protocol; /*T=0: 2^0, T=1: 2^1*/
unsigned char flags;
unsigned char pts1;
unsigned char pts2;
unsigned char pts3;
} ptsreq_t;
#define CM_IOC_MAGIC 'c'
#define CM_IOC_MAXNR 255
#define CM_IOCGSTATUS _IOR (CM_IOC_MAGIC, 0, unsigned char *)
#define CM_IOCGATR _IOWR(CM_IOC_MAGIC, 1, atreq_t *)
#define CM_IOCSPTS _IOW (CM_IOC_MAGIC, 2, ptsreq_t *)
#define CM_IOCSRDR _IO (CM_IOC_MAGIC, 3)
#define CM_IOCARDOFF _IO (CM_IOC_MAGIC, 4)
#define CM_IOSDBGLVL _IOW(CM_IOC_MAGIC, 250, int*)
/* card and device states */
#define CM_CARD_INSERTED 0x01
#define CM_CARD_POWERED 0x02
#define CM_ATR_PRESENT 0x04
#define CM_ATR_VALID 0x08
#define CM_STATE_VALID 0x0f
/* extra info only from CM4000 */
#define CM_NO_READER 0x10
#define CM_BAD_CARD 0x20
#endif /* _CM4000_H_ */

View File

@ -30,6 +30,48 @@ enum proc_cn_mcast_op {
PROC_CN_MCAST_IGNORE = 2
};
#define PROC_EVENT_ALL (PROC_EVENT_FORK | PROC_EVENT_EXEC | PROC_EVENT_UID | \
PROC_EVENT_GID | PROC_EVENT_SID | PROC_EVENT_PTRACE | \
PROC_EVENT_COMM | PROC_EVENT_NONZERO_EXIT | \
PROC_EVENT_COREDUMP | PROC_EVENT_EXIT)
/*
* If you add an entry in proc_cn_event, make sure you add it in
* PROC_EVENT_ALL above as well.
*/
enum proc_cn_event {
/* Use successive bits so the enums can be used to record
* sets of events as well
*/
PROC_EVENT_NONE = 0x00000000,
PROC_EVENT_FORK = 0x00000001,
PROC_EVENT_EXEC = 0x00000002,
PROC_EVENT_UID = 0x00000004,
PROC_EVENT_GID = 0x00000040,
PROC_EVENT_SID = 0x00000080,
PROC_EVENT_PTRACE = 0x00000100,
PROC_EVENT_COMM = 0x00000200,
/* "next" should be 0x00000400 */
/* "last" is the last process event: exit,
* while "next to last" is coredumping event
* before that is report only if process dies
* with non-zero exit status
*/
PROC_EVENT_NONZERO_EXIT = 0x20000000,
PROC_EVENT_COREDUMP = 0x40000000,
PROC_EVENT_EXIT = 0x80000000
};
struct proc_input {
enum proc_cn_mcast_op mcast_op;
enum proc_cn_event event_type;
};
static __inline__ enum proc_cn_event valid_event(enum proc_cn_event ev_type)
{
return (enum proc_cn_event)(ev_type & PROC_EVENT_ALL);
}
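/*
 * Sketch of building the new struct proc_input payload, which lets a
 * listener subscribe to a subset of process events (here fork + exit).
 * The payload is carried in a struct cn_msg sent on a NETLINK_CONNECTOR
 * socket to the CN_IDX_PROC/CN_VAL_PROC connector; that plumbing is elided.
 */
#include <string.h>
#include <linux/connector.h>
#include <linux/cn_proc.h>

static void fill_proc_filter(struct proc_input *input)
{
	memset(input, 0, sizeof(*input));
	input->mcast_op   = PROC_CN_MCAST_LISTEN;
	input->event_type = PROC_EVENT_FORK | PROC_EVENT_EXIT;
}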
/*
* From the user's point of view, the process
* ID is the thread group ID and thread ID is the internal
@ -44,24 +86,7 @@ enum proc_cn_mcast_op {
*/
struct proc_event {
enum what {
/* Use successive bits so the enums can be used to record
* sets of events as well
*/
PROC_EVENT_NONE = 0x00000000,
PROC_EVENT_FORK = 0x00000001,
PROC_EVENT_EXEC = 0x00000002,
PROC_EVENT_UID = 0x00000004,
PROC_EVENT_GID = 0x00000040,
PROC_EVENT_SID = 0x00000080,
PROC_EVENT_PTRACE = 0x00000100,
PROC_EVENT_COMM = 0x00000200,
/* "next" should be 0x00000400 */
/* "last" is the last process event: exit,
* while "next to last" is coredumping event */
PROC_EVENT_COREDUMP = 0x40000000,
PROC_EVENT_EXIT = 0x80000000
} what;
enum proc_cn_event what;
__u32 cpu;
__u64 __attribute__((aligned(8))) timestamp_ns;
/* Number of nano seconds since system boot */

View File

@ -38,7 +38,7 @@ enum counter_scope {
*
* For example, if the Count 2 ceiling extension of Counter device 4 is desired,
* set type equal to COUNTER_COMPONENT_EXTENSION, scope equal to
* COUNTER_COUNT_SCOPE, parent equal to 2, and id equal to the value provided by
* COUNTER_SCOPE_COUNT, parent equal to 2, and id equal to the value provided by
* the respective /sys/bus/counter/devices/counter4/count2/ceiling_component_id
* sysfs attribute.
*/
@ -127,6 +127,12 @@ enum counter_count_mode {
COUNTER_COUNT_MODE_RANGE_LIMIT,
COUNTER_COUNT_MODE_NON_RECYCLE,
COUNTER_COUNT_MODE_MODULO_N,
COUNTER_COUNT_MODE_INTERRUPT_ON_TERMINAL_COUNT,
COUNTER_COUNT_MODE_HARDWARE_RETRIGGERABLE_ONESHOT,
COUNTER_COUNT_MODE_RATE_GENERATOR,
COUNTER_COUNT_MODE_SQUARE_WAVE_MODE,
COUNTER_COUNT_MODE_SOFTWARE_TRIGGERED_STROBE,
COUNTER_COUNT_MODE_HARDWARE_TRIGGERED_STROBE,
};
/* Count function values */

View File

@ -32,7 +32,7 @@ enum {
CRYPTO_MSG_UPDATEALG,
CRYPTO_MSG_GETALG,
CRYPTO_MSG_DELRNG,
CRYPTO_MSG_GETSTAT,
CRYPTO_MSG_GETSTAT, /* No longer supported, do not use. */
__CRYPTO_MSG_MAX
};
#define CRYPTO_MSG_MAX (__CRYPTO_MSG_MAX - 1)
@ -54,16 +54,16 @@ enum crypto_attr_type_t {
CRYPTOCFGA_REPORT_AKCIPHER, /* struct crypto_report_akcipher */
CRYPTOCFGA_REPORT_KPP, /* struct crypto_report_kpp */
CRYPTOCFGA_REPORT_ACOMP, /* struct crypto_report_acomp */
CRYPTOCFGA_STAT_LARVAL, /* struct crypto_stat */
CRYPTOCFGA_STAT_HASH, /* struct crypto_stat */
CRYPTOCFGA_STAT_BLKCIPHER, /* struct crypto_stat */
CRYPTOCFGA_STAT_AEAD, /* struct crypto_stat */
CRYPTOCFGA_STAT_COMPRESS, /* struct crypto_stat */
CRYPTOCFGA_STAT_RNG, /* struct crypto_stat */
CRYPTOCFGA_STAT_CIPHER, /* struct crypto_stat */
CRYPTOCFGA_STAT_AKCIPHER, /* struct crypto_stat */
CRYPTOCFGA_STAT_KPP, /* struct crypto_stat */
CRYPTOCFGA_STAT_ACOMP, /* struct crypto_stat */
CRYPTOCFGA_STAT_LARVAL, /* No longer supported, do not use. */
CRYPTOCFGA_STAT_HASH, /* No longer supported, do not use. */
CRYPTOCFGA_STAT_BLKCIPHER, /* No longer supported, do not use. */
CRYPTOCFGA_STAT_AEAD, /* No longer supported, do not use. */
CRYPTOCFGA_STAT_COMPRESS, /* No longer supported, do not use. */
CRYPTOCFGA_STAT_RNG, /* No longer supported, do not use. */
CRYPTOCFGA_STAT_CIPHER, /* No longer supported, do not use. */
CRYPTOCFGA_STAT_AKCIPHER, /* No longer supported, do not use. */
CRYPTOCFGA_STAT_KPP, /* No longer supported, do not use. */
CRYPTOCFGA_STAT_ACOMP, /* No longer supported, do not use. */
__CRYPTOCFGA_MAX
#define CRYPTOCFGA_MAX (__CRYPTOCFGA_MAX - 1)
@ -79,6 +79,7 @@ struct crypto_user_alg {
__u32 cru_flags;
};
/* No longer supported, do not use. */
struct crypto_stat_aead {
char type[CRYPTO_MAX_NAME];
__u64 stat_encrypt_cnt;
@ -88,6 +89,7 @@ struct crypto_stat_aead {
__u64 stat_err_cnt;
};
/* No longer supported, do not use. */
struct crypto_stat_akcipher {
char type[CRYPTO_MAX_NAME];
__u64 stat_encrypt_cnt;
@ -99,6 +101,7 @@ struct crypto_stat_akcipher {
__u64 stat_err_cnt;
};
/* No longer supported, do not use. */
struct crypto_stat_cipher {
char type[CRYPTO_MAX_NAME];
__u64 stat_encrypt_cnt;
@ -108,6 +111,7 @@ struct crypto_stat_cipher {
__u64 stat_err_cnt;
};
/* No longer supported, do not use. */
struct crypto_stat_compress {
char type[CRYPTO_MAX_NAME];
__u64 stat_compress_cnt;
@ -117,6 +121,7 @@ struct crypto_stat_compress {
__u64 stat_err_cnt;
};
/* No longer supported, do not use. */
struct crypto_stat_hash {
char type[CRYPTO_MAX_NAME];
__u64 stat_hash_cnt;
@ -124,6 +129,7 @@ struct crypto_stat_hash {
__u64 stat_err_cnt;
};
/* No longer supported, do not use. */
struct crypto_stat_kpp {
char type[CRYPTO_MAX_NAME];
__u64 stat_setsecret_cnt;
@ -132,6 +138,7 @@ struct crypto_stat_kpp {
__u64 stat_err_cnt;
};
/* No longer supported, do not use. */
struct crypto_stat_rng {
char type[CRYPTO_MAX_NAME];
__u64 stat_generate_cnt;
@ -140,6 +147,7 @@ struct crypto_stat_rng {
__u64 stat_err_cnt;
};
/* No longer supported, do not use. */
struct crypto_stat_larval {
char type[CRYPTO_MAX_NAME];
};

View File

@ -40,19 +40,26 @@
___C(SET_ALERT_CONFIG, "Set Alert Configuration"), \
___C(GET_SHUTDOWN_STATE, "Get Shutdown State"), \
___C(SET_SHUTDOWN_STATE, "Set Shutdown State"), \
___C(GET_POISON, "Get Poison List"), \
___C(INJECT_POISON, "Inject Poison"), \
___C(CLEAR_POISON, "Clear Poison"), \
___DEPRECATED(GET_POISON, "Get Poison List"), \
___DEPRECATED(INJECT_POISON, "Inject Poison"), \
___DEPRECATED(CLEAR_POISON, "Clear Poison"), \
___C(GET_SCAN_MEDIA_CAPS, "Get Scan Media Capabilities"), \
___C(SCAN_MEDIA, "Scan Media"), \
___C(GET_SCAN_MEDIA, "Get Scan Media Results"), \
___DEPRECATED(SCAN_MEDIA, "Scan Media"), \
___DEPRECATED(GET_SCAN_MEDIA, "Get Scan Media Results"), \
___C(GET_TIMESTAMP, "Get Timestamp"), \
___C(GET_LOG_CAPS, "Get Log Capabilities"), \
___C(CLEAR_LOG, "Clear Log"), \
___C(GET_SUP_LOG_SUBLIST, "Get Supported Logs Sub-List"), \
___C(MAX, "invalid / last command")
#define ___C(a, b) CXL_MEM_COMMAND_ID_##a
#define ___DEPRECATED(a, b) CXL_MEM_DEPRECATED_ID_##a
enum { CXL_CMDS };
#undef ___C
#undef ___DEPRECATED
#define ___C(a, b) { b }
#define ___DEPRECATED(a, b) { "Deprecated " b }
static const struct {
const char *name;
} cxl_command_names[] __attribute__((__unused__)) = { CXL_CMDS };
@ -68,6 +75,28 @@ static const struct {
*/
#undef ___C
#undef ___DEPRECATED
#define ___C(a, b) (0)
#define ___DEPRECATED(a, b) (1)
static const __u8 cxl_deprecated_commands[]
__attribute__((__unused__)) = { CXL_CMDS };
/*
* Here's how this actually breaks out:
* cxl_deprecated_commands[] = {
* [CXL_MEM_COMMAND_ID_INVALID] = 0,
* [CXL_MEM_COMMAND_ID_IDENTIFY] = 0,
* ...
* [CXL_MEM_DEPRECATED_ID_GET_POISON] = 1,
* [CXL_MEM_DEPRECATED_ID_INJECT_POISON] = 1,
* [CXL_MEM_DEPRECATED_ID_CLEAR_POISON] = 1,
* ...
* };
*/
#undef ___C
#undef ___DEPRECATED
/**
* struct cxl_command_info - Command information returned from a query.

View File

@ -139,6 +139,8 @@ enum devlink_command {
DEVLINK_CMD_SELFTESTS_GET, /* can dump */
DEVLINK_CMD_SELFTESTS_RUN,
DEVLINK_CMD_NOTIFY_FILTER_SET,
/* add new commands above here */
__DEVLINK_CMD_MAX,
DEVLINK_CMD_MAX = __DEVLINK_CMD_MAX - 1
@ -265,7 +267,7 @@ enum {
* Documentation/networking/devlink/devlink-flash.rst
*
*/
enum {
enum devlink_flash_overwrite {
DEVLINK_FLASH_OVERWRITE_SETTINGS_BIT,
DEVLINK_FLASH_OVERWRITE_IDENTIFIERS_BIT,
@ -612,7 +614,10 @@ enum devlink_attr {
DEVLINK_ATTR_REGION_DIRECT, /* flag */
/* add new attributes above here, update the policy in devlink.c */
/* Add new attributes above here, update the spec in
* Documentation/netlink/specs/devlink.yaml and re-generate
* net/devlink/netlink_gen.c.
*/
__DEVLINK_ATTR_MAX,
DEVLINK_ATTR_MAX = __DEVLINK_ATTR_MAX - 1
@ -661,6 +666,8 @@ enum devlink_resource_unit {
enum devlink_port_fn_attr_cap {
DEVLINK_PORT_FN_ATTR_CAP_ROCE_BIT,
DEVLINK_PORT_FN_ATTR_CAP_MIGRATABLE_BIT,
DEVLINK_PORT_FN_ATTR_CAP_IPSEC_CRYPTO_BIT,
DEVLINK_PORT_FN_ATTR_CAP_IPSEC_PACKET_BIT,
/* Add new caps above */
__DEVLINK_PORT_FN_ATTR_CAPS_MAX,
@ -669,6 +676,8 @@ enum devlink_port_fn_attr_cap {
#define DEVLINK_PORT_FN_CAP_ROCE _BITUL(DEVLINK_PORT_FN_ATTR_CAP_ROCE_BIT)
#define DEVLINK_PORT_FN_CAP_MIGRATABLE \
_BITUL(DEVLINK_PORT_FN_ATTR_CAP_MIGRATABLE_BIT)
#define DEVLINK_PORT_FN_CAP_IPSEC_CRYPTO _BITUL(DEVLINK_PORT_FN_ATTR_CAP_IPSEC_CRYPTO_BIT)
#define DEVLINK_PORT_FN_CAP_IPSEC_PACKET _BITUL(DEVLINK_PORT_FN_ATTR_CAP_IPSEC_PACKET_BIT)
enum devlink_port_function_attr {
DEVLINK_PORT_FUNCTION_ATTR_UNSPEC,
@ -676,6 +685,8 @@ enum devlink_port_function_attr {
DEVLINK_PORT_FN_ATTR_STATE, /* u8 */
DEVLINK_PORT_FN_ATTR_OPSTATE, /* u8 */
DEVLINK_PORT_FN_ATTR_CAPS, /* bitfield32 */
DEVLINK_PORT_FN_ATTR_DEVLINK, /* nested */
DEVLINK_PORT_FN_ATTR_MAX_IO_EQS, /* u32 */
__DEVLINK_PORT_FUNCTION_ATTR_MAX,
DEVLINK_PORT_FUNCTION_ATTR_MAX = __DEVLINK_PORT_FUNCTION_ATTR_MAX - 1

View File

@ -68,6 +68,7 @@ struct dlm_lksb {
/* dlm_new_lockspace() flags */
/* DLM_LSFL_TIMEWARN is deprecated and reserved. DO NOT USE! */
#define DLM_LSFL_TIMEWARN 0x00000002
#define DLM_LSFL_NEWEXCL 0x00000008

View File

@ -1,60 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* Copyright (C) 2007 Red Hat, Inc. All rights reserved.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU General Public License v.2.
*/
#ifndef _DLM_NETLINK_H
#define _DLM_NETLINK_H
#include <linux/types.h>
#include <linux/dlmconstants.h>
enum {
DLM_STATUS_WAITING = 1,
DLM_STATUS_GRANTED = 2,
DLM_STATUS_CONVERT = 3,
};
#define DLM_LOCK_DATA_VERSION 1
struct dlm_lock_data {
__u16 version;
__u32 lockspace_id;
int nodeid;
int ownpid;
__u32 id;
__u32 remid;
__u64 xid;
__s8 status;
__s8 grmode;
__s8 rqmode;
unsigned long timestamp;
int resource_namelen;
char resource_name[DLM_RESNAME_MAXLEN];
};
enum {
DLM_CMD_UNSPEC = 0,
DLM_CMD_HELLO, /* user->kernel */
DLM_CMD_TIMEOUT, /* kernel->user */
__DLM_CMD_MAX,
};
#define DLM_CMD_MAX (__DLM_CMD_MAX - 1)
enum {
DLM_TYPE_UNSPEC = 0,
DLM_TYPE_LOCK,
__DLM_TYPE_MAX,
};
#define DLM_TYPE_MAX (__DLM_TYPE_MAX - 1)
#define DLM_GENL_VERSION 0x1
#define DLM_GENL_NAME "DLM"
#endif /* _DLM_NETLINK_H */

View File

@ -22,6 +22,7 @@ enum {
DLM_PLOCK_OP_LOCK = 1,
DLM_PLOCK_OP_UNLOCK,
DLM_PLOCK_OP_GET,
DLM_PLOCK_OP_CANCEL,
};
#define DLM_PLOCK_FL_CLOSE 1

View File

@ -87,7 +87,6 @@
* DLM_LKF_NODLCKWT
*
* Do not cancel the lock if it gets into conversion deadlock.
* Exclude this lock from being monitored due to DLM_LSFL_TIMEWARN.
*
* DLM_LKF_NODLCKBLK
*
@ -132,6 +131,10 @@
* Unlock the lock even if it is converting or waiting or has sublocks.
* Only really for use by the userland device.c code.
*
* DLM_LKF_TIMEOUT
*
* This value is deprecated and reserved. DO NOT USE!
*
*/
#define DLM_LKF_NOQUEUE 0x00000001

View File

@ -286,9 +286,9 @@ enum {
#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
#define DM_VERSION_MAJOR 4
#define DM_VERSION_MINOR 47
#define DM_VERSION_MINOR 48
#define DM_VERSION_PATCHLEVEL 0
#define DM_VERSION_EXTRA "-ioctl (2022-07-28)"
#define DM_VERSION_EXTRA "-ioctl (2023-03-01)"
/* Status bits */
#define DM_READONLY_FLAG (1 << 0) /* In/Out */

View File

@ -0,0 +1,238 @@
/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/dpll.yaml */
/* YNL-GEN uapi header */
#ifndef _LINUX_DPLL_H
#define _LINUX_DPLL_H
#define DPLL_FAMILY_NAME "dpll"
#define DPLL_FAMILY_VERSION 1
/**
* enum dpll_mode - working modes a dpll can support, differentiates if and how
* dpll selects one of its inputs to syntonize with it, valid values for
* DPLL_A_MODE attribute
* @DPLL_MODE_MANUAL: input can be only selected by sending a request to dpll
* @DPLL_MODE_AUTOMATIC: highest prio input pin auto selected by dpll
*/
enum dpll_mode {
DPLL_MODE_MANUAL = 1,
DPLL_MODE_AUTOMATIC,
/* private: */
__DPLL_MODE_MAX,
DPLL_MODE_MAX = (__DPLL_MODE_MAX - 1)
};
/**
* enum dpll_lock_status - provides information of dpll device lock status,
* valid values for DPLL_A_LOCK_STATUS attribute
* @DPLL_LOCK_STATUS_UNLOCKED: dpll was not yet locked to any valid input (or
* forced by setting DPLL_A_MODE to DPLL_MODE_DETACHED)
* @DPLL_LOCK_STATUS_LOCKED: dpll is locked to a valid signal, but no holdover
* available
* @DPLL_LOCK_STATUS_LOCKED_HO_ACQ: dpll is locked and holdover acquired
* @DPLL_LOCK_STATUS_HOLDOVER: dpll is in holdover state - lost a valid lock or
* was forced by disconnecting all the pins (latter possible only when dpll
* lock-state was already DPLL_LOCK_STATUS_LOCKED_HO_ACQ, if dpll lock-state
* was not DPLL_LOCK_STATUS_LOCKED_HO_ACQ, the dpll's lock-state shall remain
* DPLL_LOCK_STATUS_UNLOCKED)
*/
enum dpll_lock_status {
DPLL_LOCK_STATUS_UNLOCKED = 1,
DPLL_LOCK_STATUS_LOCKED,
DPLL_LOCK_STATUS_LOCKED_HO_ACQ,
DPLL_LOCK_STATUS_HOLDOVER,
/* private: */
__DPLL_LOCK_STATUS_MAX,
DPLL_LOCK_STATUS_MAX = (__DPLL_LOCK_STATUS_MAX - 1)
};
/**
* enum dpll_lock_status_error - if previous status change was done due to a
* failure, this provides information of dpll device lock status error. Valid
* values for DPLL_A_LOCK_STATUS_ERROR attribute
* @DPLL_LOCK_STATUS_ERROR_NONE: dpll device lock status was changed without
* any error
* @DPLL_LOCK_STATUS_ERROR_UNDEFINED: dpll device lock status was changed due
 * to an undefined error. The driver reports this value when it cannot
 * determine a more specific error type.
* @DPLL_LOCK_STATUS_ERROR_MEDIA_DOWN: dpll device lock status was changed
* because of associated media got down. This may happen for example if dpll
* device was previously locked on an input pin of type
* PIN_TYPE_SYNCE_ETH_PORT.
* @DPLL_LOCK_STATUS_ERROR_FRACTIONAL_FREQUENCY_OFFSET_TOO_HIGH: the FFO
* (Fractional Frequency Offset) between the RX and TX symbol rate on the
* media got too high. This may happen for example if dpll device was
* previously locked on an input pin of type PIN_TYPE_SYNCE_ETH_PORT.
*/
enum dpll_lock_status_error {
DPLL_LOCK_STATUS_ERROR_NONE = 1,
DPLL_LOCK_STATUS_ERROR_UNDEFINED,
DPLL_LOCK_STATUS_ERROR_MEDIA_DOWN,
DPLL_LOCK_STATUS_ERROR_FRACTIONAL_FREQUENCY_OFFSET_TOO_HIGH,
/* private: */
__DPLL_LOCK_STATUS_ERROR_MAX,
DPLL_LOCK_STATUS_ERROR_MAX = (__DPLL_LOCK_STATUS_ERROR_MAX - 1)
};
#define DPLL_TEMP_DIVIDER 1000
/**
* enum dpll_type - type of dpll, valid values for DPLL_A_TYPE attribute
* @DPLL_TYPE_PPS: dpll produces Pulse-Per-Second signal
* @DPLL_TYPE_EEC: dpll drives the Ethernet Equipment Clock
*/
enum dpll_type {
DPLL_TYPE_PPS = 1,
DPLL_TYPE_EEC,
/* private: */
__DPLL_TYPE_MAX,
DPLL_TYPE_MAX = (__DPLL_TYPE_MAX - 1)
};
/**
* enum dpll_pin_type - defines possible types of a pin, valid values for
* DPLL_A_PIN_TYPE attribute
* @DPLL_PIN_TYPE_MUX: aggregates another layer of selectable pins
* @DPLL_PIN_TYPE_EXT: external input
* @DPLL_PIN_TYPE_SYNCE_ETH_PORT: ethernet port PHY's recovered clock
* @DPLL_PIN_TYPE_INT_OSCILLATOR: device internal oscillator
* @DPLL_PIN_TYPE_GNSS: GNSS recovered clock
*/
enum dpll_pin_type {
DPLL_PIN_TYPE_MUX = 1,
DPLL_PIN_TYPE_EXT,
DPLL_PIN_TYPE_SYNCE_ETH_PORT,
DPLL_PIN_TYPE_INT_OSCILLATOR,
DPLL_PIN_TYPE_GNSS,
/* private: */
__DPLL_PIN_TYPE_MAX,
DPLL_PIN_TYPE_MAX = (__DPLL_PIN_TYPE_MAX - 1)
};
/**
* enum dpll_pin_direction - defines possible direction of a pin, valid values
* for DPLL_A_PIN_DIRECTION attribute
* @DPLL_PIN_DIRECTION_INPUT: pin used as a input of a signal
* @DPLL_PIN_DIRECTION_OUTPUT: pin used to output the signal
*/
enum dpll_pin_direction {
DPLL_PIN_DIRECTION_INPUT = 1,
DPLL_PIN_DIRECTION_OUTPUT,
/* private: */
__DPLL_PIN_DIRECTION_MAX,
DPLL_PIN_DIRECTION_MAX = (__DPLL_PIN_DIRECTION_MAX - 1)
};
#define DPLL_PIN_FREQUENCY_1_HZ 1
#define DPLL_PIN_FREQUENCY_10_KHZ 10000
#define DPLL_PIN_FREQUENCY_77_5_KHZ 77500
#define DPLL_PIN_FREQUENCY_10_MHZ 10000000
/**
* enum dpll_pin_state - defines possible states of a pin, valid values for
* DPLL_A_PIN_STATE attribute
* @DPLL_PIN_STATE_CONNECTED: pin connected, active input of phase locked loop
* @DPLL_PIN_STATE_DISCONNECTED: pin disconnected, not considered as a valid
* input
* @DPLL_PIN_STATE_SELECTABLE: pin enabled for automatic input selection
*/
enum dpll_pin_state {
DPLL_PIN_STATE_CONNECTED = 1,
DPLL_PIN_STATE_DISCONNECTED,
DPLL_PIN_STATE_SELECTABLE,
/* private: */
__DPLL_PIN_STATE_MAX,
DPLL_PIN_STATE_MAX = (__DPLL_PIN_STATE_MAX - 1)
};
/**
* enum dpll_pin_capabilities - defines possible capabilities of a pin, valid
* flags on DPLL_A_PIN_CAPABILITIES attribute
* @DPLL_PIN_CAPABILITIES_DIRECTION_CAN_CHANGE: pin direction can be changed
* @DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE: pin priority can be changed
* @DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE: pin state can be changed
*/
enum dpll_pin_capabilities {
DPLL_PIN_CAPABILITIES_DIRECTION_CAN_CHANGE = 1,
DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE = 2,
DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE = 4,
};
#define DPLL_PHASE_OFFSET_DIVIDER 1000
enum dpll_a {
DPLL_A_ID = 1,
DPLL_A_MODULE_NAME,
DPLL_A_PAD,
DPLL_A_CLOCK_ID,
DPLL_A_MODE,
DPLL_A_MODE_SUPPORTED,
DPLL_A_LOCK_STATUS,
DPLL_A_TEMP,
DPLL_A_TYPE,
DPLL_A_LOCK_STATUS_ERROR,
__DPLL_A_MAX,
DPLL_A_MAX = (__DPLL_A_MAX - 1)
};
enum dpll_a_pin {
DPLL_A_PIN_ID = 1,
DPLL_A_PIN_PARENT_ID,
DPLL_A_PIN_MODULE_NAME,
DPLL_A_PIN_PAD,
DPLL_A_PIN_CLOCK_ID,
DPLL_A_PIN_BOARD_LABEL,
DPLL_A_PIN_PANEL_LABEL,
DPLL_A_PIN_PACKAGE_LABEL,
DPLL_A_PIN_TYPE,
DPLL_A_PIN_DIRECTION,
DPLL_A_PIN_FREQUENCY,
DPLL_A_PIN_FREQUENCY_SUPPORTED,
DPLL_A_PIN_FREQUENCY_MIN,
DPLL_A_PIN_FREQUENCY_MAX,
DPLL_A_PIN_PRIO,
DPLL_A_PIN_STATE,
DPLL_A_PIN_CAPABILITIES,
DPLL_A_PIN_PARENT_DEVICE,
DPLL_A_PIN_PARENT_PIN,
DPLL_A_PIN_PHASE_ADJUST_MIN,
DPLL_A_PIN_PHASE_ADJUST_MAX,
DPLL_A_PIN_PHASE_ADJUST,
DPLL_A_PIN_PHASE_OFFSET,
DPLL_A_PIN_FRACTIONAL_FREQUENCY_OFFSET,
__DPLL_A_PIN_MAX,
DPLL_A_PIN_MAX = (__DPLL_A_PIN_MAX - 1)
};
enum dpll_cmd {
DPLL_CMD_DEVICE_ID_GET = 1,
DPLL_CMD_DEVICE_GET,
DPLL_CMD_DEVICE_SET,
DPLL_CMD_DEVICE_CREATE_NTF,
DPLL_CMD_DEVICE_DELETE_NTF,
DPLL_CMD_DEVICE_CHANGE_NTF,
DPLL_CMD_PIN_ID_GET,
DPLL_CMD_PIN_GET,
DPLL_CMD_PIN_SET,
DPLL_CMD_PIN_CREATE_NTF,
DPLL_CMD_PIN_DELETE_NTF,
DPLL_CMD_PIN_CHANGE_NTF,
__DPLL_CMD_MAX,
DPLL_CMD_MAX = (__DPLL_CMD_MAX - 1)
};
#define DPLL_MCGRP_MONITOR "monitor"
#endif /* _LINUX_DPLL_H */

View File

@ -296,6 +296,10 @@ enum fe_spectral_inversion {
* @FEC_28_45: Forward Error Correction Code 28/45
* @FEC_32_45: Forward Error Correction Code 32/45
* @FEC_77_90: Forward Error Correction Code 77/90
* @FEC_11_45: Forward Error Correction Code 11/45
* @FEC_4_15: Forward Error Correction Code 4/15
* @FEC_14_45: Forward Error Correction Code 14/45
* @FEC_7_15: Forward Error Correction Code 7/15
*
* Please note that not all FEC types are supported by a given standard.
*/
@ -329,6 +333,10 @@ enum fe_code_rate {
FEC_28_45,
FEC_32_45,
FEC_77_90,
FEC_11_45,
FEC_4_15,
FEC_14_45,
FEC_7_15,
};
/**
@ -846,7 +854,7 @@ struct dtv_stats {
union {
__u64 uvalue; /* for counters and relative scales */
__s64 svalue; /* for 0.001 dB measures */
};
} __attribute__ ((packed));
} __attribute__ ((packed));

View File

@ -10,6 +10,6 @@
#define _DVBVERSION_H_
#define DVB_API_VERSION 5
#define DVB_API_VERSION_MINOR 11
#define DVB_API_VERSION_MINOR 12
#endif /*_DVBVERSION_H_*/

View File

@ -32,4 +32,19 @@ struct elf32_fdpic_loadmap {
#define ELF32_FDPIC_LOADMAP_VERSION 0x0000
/* segment mappings for ELF FDPIC libraries/executables/interpreters */
struct elf64_fdpic_loadseg {
Elf64_Addr addr; /* core address to which mapped */
Elf64_Addr p_vaddr; /* VMA recorded in file */
Elf64_Word p_memsz; /* allocation size recorded in file */
};
struct elf64_fdpic_loadmap {
Elf64_Half version; /* version of these structures, just in case... */
Elf64_Half nsegs; /* number of segments */
struct elf64_fdpic_loadseg segs[];
};
#define ELF64_FDPIC_LOADMAP_VERSION 0x0000
#endif /* _LINUX_ELF_FDPIC_H */

View File

@ -140,7 +140,7 @@ typedef __s64 Elf64_Sxword;
#define ELF64_ST_BIND(x) ELF_ST_BIND(x)
#define ELF64_ST_TYPE(x) ELF_ST_TYPE(x)
typedef struct dynamic {
typedef struct {
Elf32_Sword d_tag;
union {
Elf32_Sword d_val;
@ -372,7 +372,8 @@ typedef struct elf64_shdr {
* Notes used in ET_CORE. Architectures export some of the arch register sets
* using the corresponding note types via the PTRACE_GETREGSET and
* PTRACE_SETREGSET requests.
* The note name for all these is "LINUX".
* The note name for these types is "LINUX", except NT_PRFPREG that is named
* "CORE".
*/
#define NT_PRSTATUS 1
#define NT_PRFPREG 2
@ -403,9 +404,13 @@ typedef struct elf64_shdr {
#define NT_PPC_TM_CPPR 0x10e /* TM checkpointed Program Priority Register */
#define NT_PPC_TM_CDSCR 0x10f /* TM checkpointed Data Stream Control Register */
#define NT_PPC_PKEY 0x110 /* Memory Protection Keys registers */
#define NT_PPC_DEXCR 0x111 /* PowerPC DEXCR registers */
#define NT_PPC_HASHKEYR 0x112 /* PowerPC HASHKEYR register */
#define NT_386_TLS 0x200 /* i386 TLS slots (struct user_desc) */
#define NT_386_IOPERM 0x201 /* x86 io permission bitmap (1=deny) */
#define NT_X86_XSTATE 0x202 /* x86 extended state using xsave */
/* Old binutils treats 0x203 as a CET state */
#define NT_X86_SHSTK 0x204 /* x86 SHSTK state */
#define NT_S390_HIGH_GPRS 0x300 /* s390 upper register halves */
#define NT_S390_TIMER 0x301 /* s390 timer register */
#define NT_S390_TODCMP 0x302 /* s390 TOD clock comparator register */
@ -435,11 +440,14 @@ typedef struct elf64_shdr {
#define NT_ARM_SSVE 0x40b /* ARM Streaming SVE registers */
#define NT_ARM_ZA 0x40c /* ARM SME ZA registers */
#define NT_ARM_ZT 0x40d /* ARM SME ZT registers */
#define NT_ARM_FPMR 0x40e /* ARM floating point mode register */
#define NT_ARC_V2 0x600 /* ARCv2 accumulator/extra registers */
#define NT_VMCOREDD 0x700 /* Vmcore Device Dump Note */
#define NT_MIPS_DSP 0x800 /* MIPS DSP ASE registers */
#define NT_MIPS_FP_MODE 0x801 /* MIPS floating-point mode */
#define NT_MIPS_MSA 0x802 /* MIPS SIMD registers */
#define NT_RISCV_CSR 0x900 /* RISC-V Control and Status Registers */
#define NT_RISCV_VECTOR 0x901 /* RISC-V vector registers */
#define NT_LOONGARCH_CPUCFG 0xa00 /* LoongArch CPU config registers */
#define NT_LOONGARCH_CSR 0xa01 /* LoongArch control and status registers */
#define NT_LOONGARCH_LSX 0xa02 /* LoongArch Loongson SIMD Extension registers */

View File

@ -750,6 +750,61 @@ enum ethtool_module_power_mode {
ETHTOOL_MODULE_POWER_MODE_HIGH,
};
/**
* enum ethtool_pse_types - Types of PSE controller.
* @ETHTOOL_PSE_UNKNOWN: Type of PSE controller is unknown
* @ETHTOOL_PSE_PODL: PSE controller which support PoDL
* @ETHTOOL_PSE_C33: PSE controller which support Clause 33 (PoE)
*/
enum ethtool_pse_types {
ETHTOOL_PSE_UNKNOWN = 1 << 0,
ETHTOOL_PSE_PODL = 1 << 1,
ETHTOOL_PSE_C33 = 1 << 2,
};
/**
* enum ethtool_c33_pse_admin_state - operational state of the PoDL PSE
* functions. IEEE 802.3-2022 30.9.1.1.2 aPSEAdminState
* @ETHTOOL_C33_PSE_ADMIN_STATE_UNKNOWN: state of PSE functions is unknown
* @ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED: PSE functions are disabled
* @ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED: PSE functions are enabled
*/
enum ethtool_c33_pse_admin_state {
ETHTOOL_C33_PSE_ADMIN_STATE_UNKNOWN = 1,
ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED,
ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED,
};
/**
* enum ethtool_c33_pse_pw_d_status - power detection status of the PSE.
* IEEE 802.3-2022 30.9.1.1.3 aPoDLPSEPowerDetectionStatus:
* @ETHTOOL_C33_PSE_PW_D_STATUS_UNKNOWN: PSE status is unknown
* @ETHTOOL_C33_PSE_PW_D_STATUS_DISABLED: The enumeration "disabled"
* indicates that the PSE State diagram is in the state DISABLED.
* @ETHTOOL_C33_PSE_PW_D_STATUS_SEARCHING: The enumeration "searching"
* indicates the PSE State diagram is in a state other than those
* listed.
* @ETHTOOL_C33_PSE_PW_D_STATUS_DELIVERING: The enumeration
* "deliveringPower" indicates that the PSE State diagram is in the
* state POWER_ON.
* @ETHTOOL_C33_PSE_PW_D_STATUS_TEST: The enumeration "test" indicates that
* the PSE State diagram is in the state TEST_MODE.
* @ETHTOOL_C33_PSE_PW_D_STATUS_FAULT: The enumeration "fault" indicates that
* the PSE State diagram is in the state TEST_ERROR.
* @ETHTOOL_C33_PSE_PW_D_STATUS_OTHERFAULT: The enumeration "otherFault"
* indicates that the PSE State diagram is in the state IDLE due to
* the variable error_condition = true.
*/
enum ethtool_c33_pse_pw_d_status {
ETHTOOL_C33_PSE_PW_D_STATUS_UNKNOWN = 1,
ETHTOOL_C33_PSE_PW_D_STATUS_DISABLED,
ETHTOOL_C33_PSE_PW_D_STATUS_SEARCHING,
ETHTOOL_C33_PSE_PW_D_STATUS_DELIVERING,
ETHTOOL_C33_PSE_PW_D_STATUS_TEST,
ETHTOOL_C33_PSE_PW_D_STATUS_FAULT,
ETHTOOL_C33_PSE_PW_D_STATUS_OTHERFAULT,
};
/**
* enum ethtool_podl_pse_admin_state - operational state of the PoDL PSE
* functions. IEEE 802.3-2018 30.15.1.1.2 aPoDLPSEAdminState
@ -1264,6 +1319,8 @@ struct ethtool_rxfh_indir {
* hardware hash key.
* @hfunc: Defines the current RSS hash function used by HW (or to be set to).
* Valid values are one of the %ETH_RSS_HASH_*.
* @input_xfrm: Defines how the input data is transformed. Valid values are one
* of %RXH_XFRM_*.
* @rsvd8: Reserved for future use; see the note on reserved space.
* @rsvd32: Reserved for future use; see the note on reserved space.
* @rss_config: RX ring/queue index for each hash value i.e., indirection table
@ -1283,7 +1340,8 @@ struct ethtool_rxfh {
__u32 indir_size;
__u32 key_size;
__u8 hfunc;
__u8 rsvd8[3];
__u8 input_xfrm;
__u8 rsvd8[2];
__u32 rsvd32;
__u32 rss_config[];
};
@ -1990,6 +2048,15 @@ static __inline__ int ethtool_validate_duplex(__u8 duplex)
#define WOL_MODE_COUNT 8
/* RSS hash function data
* XOR the corresponding source and destination fields of each specified
* protocol. Both copies of the XOR'ed fields are fed into the RSS and RXHASH
* calculation. Note that this XORing reduces the input set entropy and could
* be exploited to reduce the RSS queue spread.
*/
#define RXH_XFRM_SYM_XOR (1 << 0)
#define RXH_XFRM_NO_CHANGE 0xff
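/*
 * Userspace sketch of requesting the symmetric-XOR input transformation via
 * ETHTOOL_SRSSH on an ioctl socket; the *_NO_CHANGE values are used here on
 * the assumption that they leave the indirection table, key and hash
 * function untouched (as the ethtool utility does). Illustrative only.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int set_sym_xor(int sock, const char *ifname)
{
	struct ethtool_rxfh rxfh;
	struct ifreq ifr;

	memset(&rxfh, 0, sizeof(rxfh));
	rxfh.cmd        = ETHTOOL_SRSSH;
	rxfh.indir_size = ETH_RXFH_INDIR_NO_CHANGE;
	rxfh.key_size   = 0;			/* keep current hash key */
	rxfh.hfunc      = ETH_RSS_HASH_NO_CHANGE;
	rxfh.input_xfrm = RXH_XFRM_SYM_XOR;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&rxfh;

	return ioctl(sock, SIOCETHTOOL, &ifr);
}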
/* L2-L4 network traffic flow types */
#define TCP_V4_FLOW 0x01 /* hash or spec (tcp_ip4_spec) */
#define UDP_V4_FLOW 0x02 /* hash or spec (udp_ip4_spec) */
@ -2009,6 +2076,53 @@ static __inline__ int ethtool_validate_duplex(__u8 duplex)
#define IPV4_FLOW 0x10 /* hash only */
#define IPV6_FLOW 0x11 /* hash only */
#define ETHER_FLOW 0x12 /* spec only (ether_spec) */
/* Used for GTP-U IPv4 and IPv6.
* The format of GTP packets only includes
* elements such as TEID and GTP version.
* It is primarily intended for data communication of the UE.
*/
#define GTPU_V4_FLOW 0x13 /* hash only */
#define GTPU_V6_FLOW 0x14 /* hash only */
/* Use for GTP-C IPv4 and v6.
* The format of these GTP packets does not include TEID.
* Primarily expected to be used for communication
* to create sessions for UE data communication,
* commonly referred to as CSR (Create Session Request).
*/
#define GTPC_V4_FLOW 0x15 /* hash only */
#define GTPC_V6_FLOW 0x16 /* hash only */
/* Use for GTP-C IPv4 and v6.
* Unlike GTPC_V4_FLOW, the format of these GTP packets includes TEID.
* After session creation, it becomes this packet.
* This is mainly used for requests to realize UE handover.
*/
#define GTPC_TEID_V4_FLOW 0x17 /* hash only */
#define GTPC_TEID_V6_FLOW 0x18 /* hash only */
/* Use for GTP-U and extended headers for the PSC (PDU Session Container).
* The format of these GTP packets includes TEID and QFI.
* In 5G communication using UPF (User Plane Function),
* data communication with this extended header is performed.
*/
#define GTPU_EH_V4_FLOW 0x19 /* hash only */
#define GTPU_EH_V6_FLOW 0x1a /* hash only */
/* Use for GTP-U IPv4 and v6 PSC (PDU Session Container) extended headers.
* This differs from GTPU_EH_V(4|6)_FLOW in that it is distinguished by
* UL/DL included in the PSC.
* There are differences in the data included based on Downlink/Uplink,
* and can be used to distinguish packets.
* The functions described so far are useful when you want to
* handle communication from the mobile network in UPF, PGW, etc.
*/
#define GTPU_UL_V4_FLOW 0x1b /* hash only */
#define GTPU_UL_V6_FLOW 0x1c /* hash only */
#define GTPU_DL_V4_FLOW 0x1d /* hash only */
#define GTPU_DL_V6_FLOW 0x1e /* hash only */
/* Flag to enable additional fields in struct ethtool_rx_flow_spec */
#define FLOW_EXT 0x80000000
#define FLOW_MAC_EXT 0x40000000
@ -2023,6 +2137,7 @@ static __inline__ int ethtool_validate_duplex(__u8 duplex)
#define RXH_IP_DST (1 << 5)
#define RXH_L4_B_0_1 (1 << 6) /* src port in case of TCP/UDP/SCTP */
#define RXH_L4_B_2_3 (1 << 7) /* dst port in case of TCP/UDP/SCTP */
#define RXH_GTP_TEID (1 << 8) /* teid in case of GTP */
#define RXH_DISCARD (1 << 31)
#define RX_CLS_FLOW_DISC 0xffffffffffffffffULL
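/*
 * Userspace sketch of enabling TEID-based hashing for one of the new GTP
 * flow types via ETHTOOL_SRXFH; whether a given NIC accepts RXH_GTP_TEID is
 * driver specific. Illustrative only.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int hash_gtpu_on_teid(int sock, const char *ifname)
{
	struct ethtool_rxnfc nfc;
	struct ifreq ifr;

	memset(&nfc, 0, sizeof(nfc));
	nfc.cmd       = ETHTOOL_SRXFH;
	nfc.flow_type = GTPU_V4_FLOW;
	nfc.data      = RXH_IP_SRC | RXH_IP_DST | RXH_GTP_TEID;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&nfc;

	return ioctl(sock, SIOCETHTOOL, &ifr);
}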
@ -2126,18 +2241,6 @@ enum ethtool_reset_flags {
* refused. For drivers: ignore this field (use kernel's
* __ETHTOOL_LINK_MODE_MASK_NBITS instead), any change to it will
* be overwritten by kernel.
* @supported: Bitmap with each bit meaning given by
* %ethtool_link_mode_bit_indices for the link modes, physical
* connectors and other link features for which the interface
* supports autonegotiation or auto-detection. Read-only.
* @advertising: Bitmap with each bit meaning given by
* %ethtool_link_mode_bit_indices for the link modes, physical
* connectors and other link features that are advertised through
* autonegotiation or enabled for auto-detection.
* @lp_advertising: Bitmap with each bit meaning given by
* %ethtool_link_mode_bit_indices for the link modes, and other
* link features that the link partner advertised through
* autonegotiation; 0 if unknown or not applicable. Read-only.
* @transceiver: Used to distinguish different possible PHY types,
* reported consistently by PHYLIB. Read-only.
* @master_slave_cfg: Master/slave port mode.
@ -2179,6 +2282,21 @@ enum ethtool_reset_flags {
* %set_link_ksettings() should validate all fields other than @cmd
* and @link_mode_masks_nwords that are not described as read-only or
* deprecated, and must ignore all fields described as read-only.
*
* @link_mode_masks is divided into three bitfields, each of length
* @link_mode_masks_nwords:
* - supported: Bitmap with each bit meaning given by
* %ethtool_link_mode_bit_indices for the link modes, physical
* connectors and other link features for which the interface
* supports autonegotiation or auto-detection. Read-only.
* - advertising: Bitmap with each bit meaning given by
* %ethtool_link_mode_bit_indices for the link modes, physical
* connectors and other link features that are advertised through
* autonegotiation or enabled for auto-detection.
* - lp_advertising: Bitmap with each bit meaning given by
* %ethtool_link_mode_bit_indices for the link modes, and other
* link features that the link partner advertised through
* autonegotiation; 0 if unknown or not applicable. Read-only.
*/
struct ethtool_link_settings {
__u32 cmd;

View File

@ -117,12 +117,11 @@ enum {
/* request header */
/* use compact bitsets in reply */
#define ETHTOOL_FLAG_COMPACT_BITSETS (1 << 0)
/* provide optional reply for SET or ACT requests */
#define ETHTOOL_FLAG_OMIT_REPLY (1 << 1)
/* request statistics, if supported by the driver */
#define ETHTOOL_FLAG_STATS (1 << 2)
enum ethtool_header_flags {
ETHTOOL_FLAG_COMPACT_BITSETS = 1 << 0, /* use compact bitsets in reply */
ETHTOOL_FLAG_OMIT_REPLY = 1 << 1, /* provide optional reply for SET or ACT requests */
ETHTOOL_FLAG_STATS = 1 << 2, /* request statistics, if supported by the driver */
};
#define ETHTOOL_FLAG_ALL (ETHTOOL_FLAG_COMPACT_BITSETS | \
ETHTOOL_FLAG_OMIT_REPLY | \
@ -357,6 +356,8 @@ enum {
ETHTOOL_A_RINGS_CQE_SIZE, /* u32 */
ETHTOOL_A_RINGS_TX_PUSH, /* u8 */
ETHTOOL_A_RINGS_RX_PUSH, /* u8 */
ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN, /* u32 */
ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN_MAX, /* u32 */
/* add new constants above here */
__ETHTOOL_A_RINGS_CNT,
@ -476,12 +477,26 @@ enum {
ETHTOOL_A_TSINFO_TX_TYPES, /* bitset */
ETHTOOL_A_TSINFO_RX_FILTERS, /* bitset */
ETHTOOL_A_TSINFO_PHC_INDEX, /* u32 */
ETHTOOL_A_TSINFO_STATS, /* nest - _A_TSINFO_STAT */
/* add new constants above here */
__ETHTOOL_A_TSINFO_CNT,
ETHTOOL_A_TSINFO_MAX = (__ETHTOOL_A_TSINFO_CNT - 1)
};
enum {
ETHTOOL_A_TS_STAT_UNSPEC,
ETHTOOL_A_TS_STAT_TX_PKTS, /* uint */
ETHTOOL_A_TS_STAT_TX_LOST, /* uint */
ETHTOOL_A_TS_STAT_TX_ERR, /* uint */
/* add new constants above here */
__ETHTOOL_A_TS_STAT_CNT,
ETHTOOL_A_TS_STAT_MAX = (__ETHTOOL_A_TS_STAT_CNT - 1)
};
/* PHC VCLOCKS */
enum {
@ -513,6 +528,10 @@ enum {
ETHTOOL_A_CABLE_RESULT_CODE_OPEN,
ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT,
ETHTOOL_A_CABLE_RESULT_CODE_CROSS_SHORT,
/* detected reflection caused by the impedance discontinuity between
 * a regular 100 Ohm cable and a part with an abnormal impedance value
*/
ETHTOOL_A_CABLE_RESULT_CODE_IMPEDANCE_MISMATCH,
};
enum {
@ -781,7 +800,7 @@ enum {
/* add new constants above here */
__ETHTOOL_A_STATS_GRP_CNT,
ETHTOOL_A_STATS_GRP_MAX = (__ETHTOOL_A_STATS_CNT - 1)
ETHTOOL_A_STATS_GRP_MAX = (__ETHTOOL_A_STATS_GRP_CNT - 1)
};
enum {
@ -893,6 +912,9 @@ enum {
ETHTOOL_A_PODL_PSE_ADMIN_STATE, /* u32 */
ETHTOOL_A_PODL_PSE_ADMIN_CONTROL, /* u32 */
ETHTOOL_A_PODL_PSE_PW_D_STATUS, /* u32 */
ETHTOOL_A_C33_PSE_ADMIN_STATE, /* u32 */
ETHTOOL_A_C33_PSE_ADMIN_CONTROL, /* u32 */
ETHTOOL_A_C33_PSE_PW_D_STATUS, /* u32 */
/* add new constants above here */
__ETHTOOL_A_PSE_CNT,
@ -906,6 +928,7 @@ enum {
ETHTOOL_A_RSS_HFUNC, /* u32 */
ETHTOOL_A_RSS_INDIR, /* binary */
ETHTOOL_A_RSS_HKEY, /* binary */
ETHTOOL_A_RSS_INPUT_XFRM, /* u32 */
__ETHTOOL_A_RSS_CNT,
ETHTOOL_A_RSS_MAX = (__ETHTOOL_A_RSS_CNT - 1),

View File

@ -0,0 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _LINUX_EVENTFD_H
#define _LINUX_EVENTFD_H
#include <linux/fcntl.h>
#define EFD_SEMAPHORE (1 << 0)
#define EFD_CLOEXEC O_CLOEXEC
#define EFD_NONBLOCK O_NONBLOCK
#endif /* _LINUX_EVENTFD_H */
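
These are the only definitions the new header carries; a small sketch of how EFD_SEMAPHORE changes read() semantics (illustrative, error handling omitted):

#include <stdint.h>
#include <unistd.h>
#include <sys/eventfd.h>

int eventfd_semaphore_demo(void)
{
    uint64_t val;
    int efd = eventfd(3, EFD_SEMAPHORE | EFD_CLOEXEC); /* counter starts at 3 */

    read(efd, &val, sizeof(val));  /* val == 1, counter is now 2 */
    read(efd, &val, sizeof(val));  /* val == 1, counter is now 1 */
    close(efd);
    return 0;
}
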

View File

@ -85,16 +85,17 @@ struct epoll_event {
__u64 data;
} EPOLL_PACKED;
#ifdef CONFIG_PM_SLEEP
static __inline__ void ep_take_care_of_epollwakeup(struct epoll_event *epev)
{
if ((epev->events & EPOLLWAKEUP) && !capable(CAP_BLOCK_SUSPEND))
epev->events &= ~EPOLLWAKEUP;
}
#else
static __inline__ void ep_take_care_of_epollwakeup(struct epoll_event *epev)
{
epev->events &= ~EPOLLWAKEUP;
}
#endif
struct epoll_params {
__u32 busy_poll_usecs;
__u16 busy_poll_budget;
__u8 prefer_busy_poll;
/* pad the struct to a multiple of 64bits */
__u8 __pad;
};
#define EPOLL_IOC_TYPE 0x8A
#define EPIOCSPARAMS _IOW(EPOLL_IOC_TYPE, 0x01, struct epoll_params)
#define EPIOCGPARAMS _IOR(EPOLL_IOC_TYPE, 0x02, struct epoll_params)
#endif /* _LINUX_EVENTPOLL_H */
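
A minimal sketch of the new busy-poll ioctls, assuming a 6.9+ kernel and uapi headers that provide struct epoll_params; the epoll descriptor and the chosen values are illustrative:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/eventpoll.h>

int set_epoll_busy_poll(int epfd)
{
    struct epoll_params params;

    memset(&params, 0, sizeof(params));
    params.busy_poll_usecs = 64;   /* busy poll up to 64 us before sleeping */
    params.busy_poll_budget = 8;   /* max packets handled per poll attempt */
    params.prefer_busy_poll = 1;

    if (ioctl(epfd, EPIOCSPARAMS, &params) < 0)
        return -1;
    return ioctl(epfd, EPIOCGPARAMS, &params);  /* read the settings back */
}
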

View File

@ -0,0 +1,117 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _LINUX_EXT4_H
#define _LINUX_EXT4_H
#include <linux/fiemap.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/types.h>
/*
* ext4-specific ioctl commands
*/
#define EXT4_IOC_GETVERSION _IOR('f', 3, long)
#define EXT4_IOC_SETVERSION _IOW('f', 4, long)
#define EXT4_IOC_GETVERSION_OLD FS_IOC_GETVERSION
#define EXT4_IOC_SETVERSION_OLD FS_IOC_SETVERSION
#define EXT4_IOC_GETRSVSZ _IOR('f', 5, long)
#define EXT4_IOC_SETRSVSZ _IOW('f', 6, long)
#define EXT4_IOC_GROUP_EXTEND _IOW('f', 7, unsigned long)
#define EXT4_IOC_GROUP_ADD _IOW('f', 8, struct ext4_new_group_input)
#define EXT4_IOC_MIGRATE _IO('f', 9)
/* note ioctl 10 reserved for an early version of the FIEMAP ioctl */
/* note ioctl 11 reserved for filesystem-independent FIEMAP ioctl */
#define EXT4_IOC_ALLOC_DA_BLKS _IO('f', 12)
#define EXT4_IOC_MOVE_EXT _IOWR('f', 15, struct move_extent)
#define EXT4_IOC_RESIZE_FS _IOW('f', 16, __u64)
#define EXT4_IOC_SWAP_BOOT _IO('f', 17)
#define EXT4_IOC_PRECACHE_EXTENTS _IO('f', 18)
/* ioctl codes 19--39 are reserved for fscrypt */
#define EXT4_IOC_CLEAR_ES_CACHE _IO('f', 40)
#define EXT4_IOC_GETSTATE _IOW('f', 41, __u32)
#define EXT4_IOC_GET_ES_CACHE _IOWR('f', 42, struct fiemap)
#define EXT4_IOC_CHECKPOINT _IOW('f', 43, __u32)
#define EXT4_IOC_GETFSUUID _IOR('f', 44, struct fsuuid)
#define EXT4_IOC_SETFSUUID _IOW('f', 44, struct fsuuid)
#define EXT4_IOC_SHUTDOWN _IOR('X', 125, __u32)
/*
* ioctl commands in 32 bit emulation
*/
#define EXT4_IOC32_GETVERSION _IOR('f', 3, int)
#define EXT4_IOC32_SETVERSION _IOW('f', 4, int)
#define EXT4_IOC32_GETRSVSZ _IOR('f', 5, int)
#define EXT4_IOC32_SETRSVSZ _IOW('f', 6, int)
#define EXT4_IOC32_GROUP_EXTEND _IOW('f', 7, unsigned int)
#define EXT4_IOC32_GROUP_ADD _IOW('f', 8, struct compat_ext4_new_group_input)
#define EXT4_IOC32_GETVERSION_OLD FS_IOC32_GETVERSION
#define EXT4_IOC32_SETVERSION_OLD FS_IOC32_SETVERSION
/*
* Flags returned by EXT4_IOC_GETSTATE
*
* We only expose to userspace a subset of the state flags in
* i_state_flags
*/
#define EXT4_STATE_FLAG_EXT_PRECACHED 0x00000001
#define EXT4_STATE_FLAG_NEW 0x00000002
#define EXT4_STATE_FLAG_NEWENTRY 0x00000004
#define EXT4_STATE_FLAG_DA_ALLOC_CLOSE 0x00000008
/*
* Flags for ioctl EXT4_IOC_CHECKPOINT
*/
#define EXT4_IOC_CHECKPOINT_FLAG_DISCARD 0x1
#define EXT4_IOC_CHECKPOINT_FLAG_ZEROOUT 0x2
#define EXT4_IOC_CHECKPOINT_FLAG_DRY_RUN 0x4
#define EXT4_IOC_CHECKPOINT_FLAG_VALID (EXT4_IOC_CHECKPOINT_FLAG_DISCARD | \
EXT4_IOC_CHECKPOINT_FLAG_ZEROOUT | \
EXT4_IOC_CHECKPOINT_FLAG_DRY_RUN)
/*
* Structure for EXT4_IOC_GETFSUUID/EXT4_IOC_SETFSUUID
*/
struct fsuuid {
__u32 fsu_len;
__u32 fsu_flags;
__u8 fsu_uuid[];
};
/*
* Structure for EXT4_IOC_MOVE_EXT
*/
struct move_extent {
__u32 reserved; /* should be zero */
__u32 donor_fd; /* donor file descriptor */
__u64 orig_start; /* logical start offset in block for orig */
__u64 donor_start; /* logical start offset in block for donor */
__u64 len; /* block length to be moved */
__u64 moved_len; /* moved block length */
};
/*
* Flags used by EXT4_IOC_SHUTDOWN
*/
#define EXT4_GOING_FLAGS_DEFAULT 0x0 /* going down */
#define EXT4_GOING_FLAGS_LOGFLUSH 0x1 /* flush log but not data */
#define EXT4_GOING_FLAGS_NOLOGFLUSH 0x2 /* don't flush log nor data */
/* Used to pass group descriptor data when online resize is done */
struct ext4_new_group_input {
__u32 group; /* Group number for this data */
__u64 block_bitmap; /* Absolute block number of block bitmap */
__u64 inode_bitmap; /* Absolute block number of inode bitmap */
__u64 inode_table; /* Absolute block number of inode table start */
__u32 blocks_count; /* Total number of blocks in this group */
__u16 reserved_blocks; /* Number of reserved blocks in this group */
__u16 unused;
};
/*
* Returned by EXT4_IOC_GET_ES_CACHE as an additional possible flag.
* It indicates that the entry in extent status cache is for a hole.
*/
#define EXT4_FIEMAP_EXTENT_HOLE 0x08000000
#endif /* _LINUX_EXT4_H */
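
A short sketch of one of the simpler ioctls in this new uapi header, assuming it is installed as <linux/ext4.h> and that fd refers to a file on an ext4 filesystem:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/ext4.h>

int print_ext4_state(int fd)
{
    __u32 state = 0;

    if (ioctl(fd, EXT4_IOC_GETSTATE, &state) < 0)
        return -1;
    if (state & EXT4_STATE_FLAG_EXT_PRECACHED)
        printf("extent status cache is precached\n");
    if (state & EXT4_STATE_FLAG_DA_ALLOC_CLOSE)
        printf("delayed allocation will be flushed on close\n");
    return 0;
}
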

View File

@ -8,8 +8,8 @@
#define FAN_ACCESS 0x00000001 /* File was accessed */
#define FAN_MODIFY 0x00000002 /* File was modified */
#define FAN_ATTRIB 0x00000004 /* Metadata changed */
#define FAN_CLOSE_WRITE 0x00000008 /* Writtable file closed */
#define FAN_CLOSE_NOWRITE 0x00000010 /* Unwrittable file closed */
#define FAN_CLOSE_WRITE 0x00000008 /* Writable file closed */
#define FAN_CLOSE_NOWRITE 0x00000010 /* Unwritable file closed */
#define FAN_OPEN 0x00000020 /* File was opened */
#define FAN_MOVED_FROM 0x00000040 /* File was moved from X */
#define FAN_MOVED_TO 0x00000080 /* File was moved to Y */

View File

@ -4,6 +4,7 @@
#include <linux/types.h>
#include <linux/i2c.h>
#include <linux/vesa.h>
/* Definitions of frame buffers */
@ -291,13 +292,6 @@ struct fb_con2fbmap {
__u32 framebuffer;
};
/* VESA Blanking Levels */
#define VESA_NO_BLANKING 0
#define VESA_VSYNC_SUSPEND 1
#define VESA_HSYNC_SUSPEND 2
#define VESA_POWERDOWN 3
enum {
/* screen: unblanked, hsync: on, vsync: on */
FB_BLANK_UNBLANK = VESA_NO_BLANKING,

View File

@ -8,6 +8,14 @@
#define F_SETLEASE (F_LINUX_SPECIFIC_BASE + 0)
#define F_GETLEASE (F_LINUX_SPECIFIC_BASE + 1)
/*
 * Request notifications on a directory.
* See below for events that may be notified.
*/
#define F_NOTIFY (F_LINUX_SPECIFIC_BASE + 2)
#define F_DUPFD_QUERY (F_LINUX_SPECIFIC_BASE + 3)
/*
* Cancel a blocking posix lock; internal use only until we expose an
* asynchronous lock api to userspace:
@ -17,12 +25,6 @@
/* Create a file descriptor with FD_CLOEXEC set. */
#define F_DUPFD_CLOEXEC (F_LINUX_SPECIFIC_BASE + 6)
/*
 * Request notifications on a directory.
* See below for events that may be notified.
*/
#define F_NOTIFY (F_LINUX_SPECIFIC_BASE+2)
/*
* Set and get of pipe page size array
*/
@ -112,4 +114,9 @@
#define AT_RECURSIVE 0x8000 /* Apply to the entire subtree */
/* Flags for name_to_handle_at(2). We reuse AT_ flag space to save bits... */
#define AT_HANDLE_FID AT_REMOVEDIR /* file handle is needed to
compare object identity and may not
be usable to open_by_handle_at(2) */
#endif /* _LINUX_FCNTL_H */
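
A sketch of the new F_DUPFD_QUERY command, which reports whether two descriptors refer to the same open file description; the fallback define mirrors the value above for libcs that do not expose it yet:

#include <fcntl.h>

#ifndef F_DUPFD_QUERY
#define F_DUPFD_QUERY 1027   /* F_LINUX_SPECIFIC_BASE (1024) + 3 */
#endif

int same_open_file_description(int fd, int other_fd)
{
    /* Returns 1 if both descriptors refer to the same open file description,
     * 0 if they do not, and -1 on error (e.g. EINVAL on pre-6.10 kernels). */
    return fcntl(fd, F_DUPFD_QUERY, other_fd);
}
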

View File

@ -46,6 +46,12 @@
#define FW_CDEV_EVENT_PHY_PACKET_RECEIVED 0x08
#define FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL 0x09
/* available since kernel version 6.5 */
#define FW_CDEV_EVENT_REQUEST3 0x0a
#define FW_CDEV_EVENT_RESPONSE2 0x0b
#define FW_CDEV_EVENT_PHY_PACKET_SENT2 0x0c
#define FW_CDEV_EVENT_PHY_PACKET_RECEIVED2 0x0d
/**
* struct fw_cdev_event_common - Common part of all fw_cdev_event_* types
* @closure: For arbitrary use by userspace
@ -103,6 +109,32 @@ struct fw_cdev_event_bus_reset {
* @length: Data length, i.e. the response's payload size in bytes
* @data: Payload data, if any
*
 * This event is sent instead of &fw_cdev_event_response2 if the kernel or the client implements
 * ABI version <= 5. It lacks the time stamp fields of &fw_cdev_event_response2.
*/
struct fw_cdev_event_response {
__u64 closure;
__u32 type;
__u32 rcode;
__u32 length;
__u32 data[];
};
/**
* struct fw_cdev_event_response2 - Sent when a response packet was received
* @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_SEND_REQUEST
* or %FW_CDEV_IOC_SEND_BROADCAST_REQUEST
* or %FW_CDEV_IOC_SEND_STREAM_PACKET ioctl
 * @type: See &fw_cdev_event_common; always %FW_CDEV_EVENT_RESPONSE2
* @rcode: Response code returned by the remote node
* @length: Data length, i.e. the response's payload size in bytes
* @request_tstamp: The time stamp of isochronous cycle at which the request was sent.
* @response_tstamp: The time stamp of isochronous cycle at which the response was sent.
 * @padding: Padding to keep the size of the structure a multiple of 8 on all architectures,
 * since the System V ABI for the i386 architecture only uses 4 byte alignment for 8 byte
 * objects.
* @data: Payload data, if any
*
* This event is sent when the stack receives a response to an outgoing request
* sent by %FW_CDEV_IOC_SEND_REQUEST ioctl. The payload data for responses
* carrying data (read and lock responses) follows immediately and can be
@ -112,12 +144,21 @@ struct fw_cdev_event_bus_reset {
* involve response packets. This includes unified write transactions,
* broadcast write transactions, and transmission of asynchronous stream
* packets. @rcode indicates success or failure of such transmissions.
*
* The value of @request_tstamp expresses the isochronous cycle at which the request was sent to
* initiate the transaction. The value of @response_tstamp expresses the isochronous cycle at which
 * the response arrived to complete the transaction. Each value is an unsigned 16 bit integer
 * containing the three low order bits of the second field and all 13 bits of the cycle field,
 * in the format of the CYCLE_TIMER register.
*/
struct fw_cdev_event_response {
struct fw_cdev_event_response2 {
__u64 closure;
__u32 type;
__u32 rcode;
__u32 length;
__u32 request_tstamp;
__u32 response_tstamp;
__u32 padding;
__u32 data[];
};
@ -159,6 +200,41 @@ struct fw_cdev_event_request {
* @length: Data length, i.e. the request's payload size in bytes
* @data: Incoming data, if any
*
 * This event is sent instead of &fw_cdev_event_request3 if the kernel or the client implements
 * ABI version <= 5. It lacks the time stamp field of &fw_cdev_event_request3.
*/
struct fw_cdev_event_request2 {
__u64 closure;
__u32 type;
__u32 tcode;
__u64 offset;
__u32 source_node_id;
__u32 destination_node_id;
__u32 card;
__u32 generation;
__u32 handle;
__u32 length;
__u32 data[];
};
/**
* struct fw_cdev_event_request3 - Sent on incoming request to an address region
* @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_ALLOCATE ioctl
 * @type: See &fw_cdev_event_common; always %FW_CDEV_EVENT_REQUEST3
* @tcode: Transaction code of the incoming request
* @offset: The offset into the 48-bit per-node address space
* @source_node_id: Sender node ID
* @destination_node_id: Destination node ID
* @card: The index of the card from which the request came
* @generation: Bus generation in which the request is valid
* @handle: Reference to the kernel-side pending request
* @length: Data length, i.e. the request's payload size in bytes
* @tstamp: The time stamp of isochronous cycle at which the request arrived.
 * @padding: Padding to keep the size of the structure a multiple of 8 on all architectures,
 * since the System V ABI for the i386 architecture only uses 4 byte alignment for 8 byte
 * objects.
* @data: Incoming data, if any
*
* This event is sent when the stack receives an incoming request to an address
* region registered using the %FW_CDEV_IOC_ALLOCATE ioctl. The request is
* guaranteed to be completely contained in the specified region. Userspace is
@ -191,10 +267,14 @@ struct fw_cdev_event_request {
* sent.
*
* If the client subsequently needs to initiate requests to the sender node of
* an &fw_cdev_event_request2, it needs to use a device file with matching
* an &fw_cdev_event_request3, it needs to use a device file with matching
* card index, node ID, and generation for outbound requests.
*
 * @tstamp is the isochronous cycle at which the request arrived. It is a 16 bit integer value
 * whose higher 3 bits express the three low order bits of the second field and whose remaining
 * 13 bits express the cycle field, in the format of the CYCLE_TIME register.
*/
struct fw_cdev_event_request2 {
struct fw_cdev_event_request3 {
__u64 closure;
__u32 type;
__u32 tcode;
@ -205,6 +285,8 @@ struct fw_cdev_event_request2 {
__u32 generation;
__u32 handle;
__u32 length;
__u32 tstamp;
__u32 padding;
__u32 data[];
};
@ -341,14 +423,12 @@ struct fw_cdev_event_iso_resource {
* @type: %FW_CDEV_EVENT_PHY_PACKET_SENT or %..._RECEIVED
* @rcode: %RCODE_..., indicates success or failure of transmission
* @length: Data length in bytes
* @data: Incoming data
 * @data: Incoming data for %FW_CDEV_IOC_RECEIVE_PHY_PACKETS. For %FW_CDEV_IOC_SEND_PHY_PACKET
 * the field carries the same data as the request, and is thus 8 bytes long.
*
* If @type is %FW_CDEV_EVENT_PHY_PACKET_SENT, @length is 0 and @data empty,
* except in case of a ping packet: Then, @length is 4, and @data[0] is the
* ping time in 49.152MHz clocks if @rcode is %RCODE_COMPLETE.
*
* If @type is %FW_CDEV_EVENT_PHY_PACKET_RECEIVED, @length is 8 and @data
* consists of the two PHY packet quadlets, in host byte order.
 * This event is sent instead of &fw_cdev_event_phy_packet2 if the kernel or
 * the client implements ABI version <= 5. It lacks the time stamp field of
 * &fw_cdev_event_phy_packet2.
*/
struct fw_cdev_event_phy_packet {
__u64 closure;
@ -358,6 +438,47 @@ struct fw_cdev_event_phy_packet {
__u32 data[];
};
/**
* struct fw_cdev_event_phy_packet2 - A PHY packet was transmitted or received with time stamp.
* @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_SEND_PHY_PACKET
* or %FW_CDEV_IOC_RECEIVE_PHY_PACKETS ioctl
* @type: %FW_CDEV_EVENT_PHY_PACKET_SENT2 or %FW_CDEV_EVENT_PHY_PACKET_RECEIVED2
* @rcode: %RCODE_..., indicates success or failure of transmission
* @length: Data length in bytes
* @tstamp: For %FW_CDEV_EVENT_PHY_PACKET_RECEIVED2, the time stamp of isochronous cycle at
* which the packet arrived. For %FW_CDEV_EVENT_PHY_PACKET_SENT2 and non-ping packet,
* the time stamp of isochronous cycle at which the packet was sent. For ping packet,
* the tick count for round-trip time measured by 1394 OHCI controller.
* The time stamp of isochronous cycle at which either the response was sent for
* %FW_CDEV_EVENT_PHY_PACKET_SENT2 or the request arrived for
* %FW_CDEV_EVENT_PHY_PACKET_RECEIVED2.
* @data: Incoming data
*
* If @type is %FW_CDEV_EVENT_PHY_PACKET_SENT2, @length is 8 and @data consists of the two PHY
 * packet quadlets to be sent, in host byte order.
*
* If @type is %FW_CDEV_EVENT_PHY_PACKET_RECEIVED2, @length is 8 and @data consists of the two PHY
* packet quadlets, in host byte order.
*
 * For %FW_CDEV_EVENT_PHY_PACKET_RECEIVED2, the @tstamp is the isochronous cycle at which the
 * packet arrived. It is a 16 bit integer value whose higher 3 bits express the three low order
 * bits of the second field and whose remaining 13 bits express the cycle field, in the format of
 * the CYCLE_TIME register.
*
 * For %FW_CDEV_EVENT_PHY_PACKET_SENT2, the meaning of @tstamp depends on whether the packet was
 * sent for ping or not. If it's not for ping, the @tstamp is the isochronous cycle at which the
 * packet was sent, in the same format as described above. If it's for ping, the @tstamp is the
 * round-trip time measured by the 1394 OHCI controller with 42.195 MHz
 * resolution.
*/
struct fw_cdev_event_phy_packet2 {
__u64 closure;
__u32 type;
__u32 rcode;
__u32 length;
__u32 tstamp;
__u32 data[];
};
/**
* union fw_cdev_event - Convenience union of fw_cdev_event_* types
* @common: Valid for all types
@ -375,6 +496,11 @@ struct fw_cdev_event_phy_packet {
* %FW_CDEV_EVENT_PHY_PACKET_SENT or
* %FW_CDEV_EVENT_PHY_PACKET_RECEIVED
*
* @request3: Valid if @common.type == %FW_CDEV_EVENT_REQUEST3
* @response2: Valid if @common.type == %FW_CDEV_EVENT_RESPONSE2
* @phy_packet2: Valid if @common.type == %FW_CDEV_EVENT_PHY_PACKET_SENT2 or
* %FW_CDEV_EVENT_PHY_PACKET_RECEIVED2
*
* Convenience union for userspace use. Events could be read(2) into an
* appropriately aligned char buffer and then cast to this union for further
* processing. Note that for a request, response or iso_interrupt event,
@ -393,6 +519,9 @@ union fw_cdev_event {
struct fw_cdev_event_iso_interrupt_mc iso_interrupt_mc; /* added in 2.6.36 */
struct fw_cdev_event_iso_resource iso_resource; /* added in 2.6.30 */
struct fw_cdev_event_phy_packet phy_packet; /* added in 2.6.36 */
struct fw_cdev_event_request3 request3; /* added in 6.5 */
struct fw_cdev_event_response2 response2; /* added in 6.5 */
struct fw_cdev_event_phy_packet2 phy_packet2; /* added in 6.5 */
};
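
Following the note above about read(2) into an aligned buffer, a minimal dispatch sketch that handles both the pre-ABI-6 and the new time-stamped response events; the device fd and buffer size are illustrative:

#include <stdio.h>
#include <unistd.h>
#include <linux/firewire-cdev.h>

void read_one_fw_event(int fd)
{
    __u64 buf[512];   /* 8-byte aligned buffer, as the documentation asks */
    ssize_t len = read(fd, buf, sizeof(buf));
    const union fw_cdev_event *event = (const union fw_cdev_event *)buf;

    if (len < (ssize_t)sizeof(struct fw_cdev_event_common))
        return;

    switch (event->common.type) {
    case FW_CDEV_EVENT_RESPONSE:
        printf("response: rcode %u\n", event->response.rcode);
        break;
    case FW_CDEV_EVENT_RESPONSE2:   /* ABI v6: carries request/response time stamps */
        printf("response2: rcode %u, tstamps %u/%u\n",
               event->response2.rcode,
               event->response2.request_tstamp,
               event->response2.response_tstamp);
        break;
    default:
        break;
    }
}
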
/* available since kernel version 2.6.22 */
@ -457,6 +586,11 @@ union fw_cdev_event {
* 5 (3.4) - send %FW_CDEV_EVENT_ISO_INTERRUPT events when needed to
* avoid dropping data
* - added %FW_CDEV_IOC_FLUSH_ISO
* 6 (6.5) - added some event for subactions of asynchronous transaction with time stamp
* - %FW_CDEV_EVENT_REQUEST3
* - %FW_CDEV_EVENT_RESPONSE2
* - %FW_CDEV_EVENT_PHY_PACKET_SENT2
* - %FW_CDEV_EVENT_PHY_PACKET_RECEIVED2
*/
/**
@ -502,11 +636,11 @@ struct fw_cdev_get_info {
* @data: Userspace pointer to payload
* @generation: The bus generation where packet is valid
*
* Send a request to the device. This ioctl implements all outgoing requests.
* Both quadlet and block request specify the payload as a pointer to the data
* in the @data field. Once the transaction completes, the kernel writes an
* &fw_cdev_event_response event back. The @closure field is passed back to
* user space in the response event.
* Send a request to the device. This ioctl implements all outgoing requests. Both quadlet and
 * block requests specify the payload as a pointer to the data in the @data field. Once the
 * transaction completes, the kernel writes either an &fw_cdev_event_response event or an
 * &fw_cdev_event_response2 event back. The @closure field is passed back to user space in the
* response event.
*/
struct fw_cdev_send_request {
__u32 tcode;
@ -989,10 +1123,9 @@ struct fw_cdev_allocate_iso_resource {
* @generation: The bus generation where packet is valid
* @speed: Speed to transmit at
*
* The %FW_CDEV_IOC_SEND_STREAM_PACKET ioctl sends an asynchronous stream packet
* to every device which is listening to the specified channel. The kernel
* writes an &fw_cdev_event_response event which indicates success or failure of
* the transmission.
* The %FW_CDEV_IOC_SEND_STREAM_PACKET ioctl sends an asynchronous stream packet to every device
* which is listening to the specified channel. The kernel writes either &fw_cdev_event_response
* event or &fw_cdev_event_response2 event which indicates success or failure of the transmission.
*/
struct fw_cdev_send_stream_packet {
__u32 length;
@ -1011,8 +1144,8 @@ struct fw_cdev_send_stream_packet {
* @data: First and second quadlet of the PHY packet
* @generation: The bus generation where packet is valid
*
* The %FW_CDEV_IOC_SEND_PHY_PACKET ioctl sends a PHY packet to all nodes
* on the same card as this device. After transmission, an
* The %FW_CDEV_IOC_SEND_PHY_PACKET ioctl sends a PHY packet to all nodes on the same card as this
 * device. After transmission, either a %FW_CDEV_EVENT_PHY_PACKET_SENT event or a
 * %FW_CDEV_EVENT_PHY_PACKET_SENT2 event is generated.
*
* The payload @data\[\] shall be specified in host byte order. Usually,
@ -1031,8 +1164,9 @@ struct fw_cdev_send_phy_packet {
* struct fw_cdev_receive_phy_packets - start reception of PHY packets
* @closure: Passed back to userspace in phy packet events
*
* This ioctl activates issuing of %FW_CDEV_EVENT_PHY_PACKET_RECEIVED due to
* incoming PHY packets from any node on the same bus as the device.
* This ioctl activates issuing of either %FW_CDEV_EVENT_PHY_PACKET_RECEIVED or
* %FW_CDEV_EVENT_PHY_PACKET_RECEIVED2 due to incoming PHY packets from any node on the same bus
* as the device.
*
* The ioctl is only permitted on device files which represent a local node.
*/

View File

@ -60,6 +60,24 @@ struct fstrim_range {
__u64 minlen;
};
/*
* We include a length field because some filesystems (vfat) have an identifier
 * that we do want to expose as a UUID, but which doesn't have the standard length.
*
 * We use a fixed size buffer because this interface will, by fiat, never
* support "UUIDs" longer than 16 bytes; we don't want to force all downstream
* users to have to deal with that.
*/
struct fsuuid2 {
__u8 len;
__u8 uuid[16];
};
struct fs_sysfs_path {
__u8 len;
__u8 name[128];
};
/* extent-same (dedupe) ioctls; these MUST match the btrfs ioctl definitions */
#define FILE_DEDUPE_RANGE_SAME 0
#define FILE_DEDUPE_RANGE_DIFFERS 1
@ -211,6 +229,13 @@ struct fsxattr {
#define FS_IOC_FSSETXATTR _IOW('X', 32, struct fsxattr)
#define FS_IOC_GETFSLABEL _IOR(0x94, 49, char[FSLABEL_MAX])
#define FS_IOC_SETFSLABEL _IOW(0x94, 50, char[FSLABEL_MAX])
/* Returns the external filesystem UUID, the same one blkid returns */
#define FS_IOC_GETFSUUID _IOR(0x15, 0, struct fsuuid2)
/*
* Returns the path component under /sys/fs/ that refers to this filesystem;
* also /sys/kernel/debug/ for filesystems with debugfs exports
*/
#define FS_IOC_GETFSSYSFSPATH _IOR(0x15, 1, struct fs_sysfs_path)
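
A small sketch of the two new ioctls; fd is any open file or directory on the filesystem of interest:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int print_fs_identity(int fd)
{
    struct fsuuid2 uuid = {0};
    struct fs_sysfs_path path = {0};

    if (ioctl(fd, FS_IOC_GETFSUUID, &uuid) == 0)
        printf("uuid: %u bytes, first byte %02x\n", uuid.len, uuid.uuid[0]);
    if (ioctl(fd, FS_IOC_GETFSSYSFSPATH, &path) == 0)
        printf("/sys/fs/%.*s\n", (int)path.len, (const char *)path.name);
    return 0;
}
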
/*
* Inode flags (FS_IOC_GETFLAGS / FS_IOC_SETFLAGS)
@ -297,8 +322,71 @@ typedef int __bitwise __kernel_rwf_t;
/* per-IO O_APPEND */
#define RWF_APPEND ((__kernel_rwf_t)0x00000010)
/* per-IO negation of O_APPEND */
#define RWF_NOAPPEND ((__kernel_rwf_t)0x00000020)
/* mask of flags supported by the kernel */
#define RWF_SUPPORTED (RWF_HIPRI | RWF_DSYNC | RWF_SYNC | RWF_NOWAIT |\
RWF_APPEND)
RWF_APPEND | RWF_NOAPPEND)
/* Pagemap ioctl */
#define PAGEMAP_SCAN _IOWR('f', 16, struct pm_scan_arg)
/* Bitmasks provided in pm_scan_args masks and reported in page_region.categories. */
#define PAGE_IS_WPALLOWED (1 << 0)
#define PAGE_IS_WRITTEN (1 << 1)
#define PAGE_IS_FILE (1 << 2)
#define PAGE_IS_PRESENT (1 << 3)
#define PAGE_IS_SWAPPED (1 << 4)
#define PAGE_IS_PFNZERO (1 << 5)
#define PAGE_IS_HUGE (1 << 6)
#define PAGE_IS_SOFT_DIRTY (1 << 7)
/*
* struct page_region - Page region with flags
* @start: Start of the region
* @end: End of the region (exclusive)
* @categories: PAGE_IS_* category bitmask for the region
*/
struct page_region {
__u64 start;
__u64 end;
__u64 categories;
};
/* Flags for PAGEMAP_SCAN ioctl */
#define PM_SCAN_WP_MATCHING (1 << 0) /* Write protect the pages matched. */
#define PM_SCAN_CHECK_WPASYNC (1 << 1) /* Abort the scan when a non-WP-enabled page is found. */
/*
* struct pm_scan_arg - Pagemap ioctl argument
* @size: Size of the structure
* @flags: Flags for the IOCTL
* @start: Starting address of the region
* @end: Ending address of the region
 * @walk_end: Address where the scan stopped (written by kernel).
 * walk_end == end (address tags cleared) indicates that the scan completed over the entire range.
* @vec: Address of page_region struct array for output
* @vec_len: Length of the page_region struct array
* @max_pages: Optional limit for number of returned pages (0 = disabled)
* @category_inverted: PAGE_IS_* categories which values match if 0 instead of 1
* @category_mask: Skip pages for which any category doesn't match
* @category_anyof_mask: Skip pages for which no category matches
* @return_mask: PAGE_IS_* categories that are to be reported in `page_region`s returned
*/
struct pm_scan_arg {
__u64 size;
__u64 flags;
__u64 start;
__u64 end;
__u64 walk_end;
__u64 vec;
__u64 vec_len;
__u64 max_pages;
__u64 category_inverted;
__u64 category_mask;
__u64 category_anyof_mask;
__u64 return_mask;
};
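
A sketch of the PAGEMAP_SCAN ioctl, reporting the regions of a range that have been written to; the range, vector size and category choice are illustrative:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int scan_written_pages(void *start, void *end)
{
    struct page_region regions[32];
    struct pm_scan_arg arg;
    int i, n;
    int fd = open("/proc/self/pagemap", O_RDONLY);

    if (fd < 0)
        return -1;

    memset(&arg, 0, sizeof(arg));
    arg.size = sizeof(arg);
    arg.start = (__u64)(unsigned long)start;
    arg.end = (__u64)(unsigned long)end;
    arg.vec = (__u64)(unsigned long)regions;
    arg.vec_len = 32;
    arg.category_mask = PAGE_IS_WRITTEN;   /* only consider written pages */
    arg.return_mask = PAGE_IS_WRITTEN;     /* report that category back */

    n = ioctl(fd, PAGEMAP_SCAN, &arg);     /* >= 0: number of regions filled */
    for (i = 0; i < n; i++)
        printf("written: %llx-%llx\n",
               (unsigned long long)regions[i].start,
               (unsigned long long)regions[i].end);
    close(fd);
    return n;
}
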
#endif /* _LINUX_FS_H */

View File

@ -71,7 +71,8 @@ struct fscrypt_policy_v2 {
__u8 contents_encryption_mode;
__u8 filenames_encryption_mode;
__u8 flags;
__u8 __reserved[4];
__u8 log2_data_unit_size;
__u8 __reserved[3];
__u8 master_key_identifier[FSCRYPT_KEY_IDENTIFIER_SIZE];
};
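
The new log2_data_unit_size field is set together with the rest of the v2 policy. A sketch, assuming AES-256 modes and a 4 KiB data unit size; the key identifier would come from a prior FS_IOC_ADD_ENCRYPTION_KEY call:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/fscrypt.h>

int set_policy_4k_units(int dirfd, const __u8 key_id[FSCRYPT_KEY_IDENTIFIER_SIZE])
{
    struct fscrypt_policy_v2 policy;

    memset(&policy, 0, sizeof(policy));
    policy.version = FSCRYPT_POLICY_V2;
    policy.contents_encryption_mode = FSCRYPT_MODE_AES_256_XTS;
    policy.filenames_encryption_mode = FSCRYPT_MODE_AES_256_CTS;
    policy.flags = FSCRYPT_POLICY_FLAGS_PAD_32;
    policy.log2_data_unit_size = 12;   /* 4 KiB data units; 0 keeps the fs block size */
    memcpy(policy.master_key_identifier, key_id, FSCRYPT_KEY_IDENTIFIER_SIZE);

    return ioctl(dirfd, FS_IOC_SET_ENCRYPTION_POLICY, &policy);
}
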

View File

@ -59,6 +59,16 @@ struct scom_access {
* /dev/sbefifo* ioctl interface
*/
/**
* FSI_SBEFIFO_CMD_TIMEOUT sets the timeout for writing data to the SBEFIFO.
*
 * The command timeout is specified in seconds. The minimum value of the command
 * timeout is 1 second (the default) and the maximum value is 120 seconds. A
 * command timeout of 0 will reset the value to the default of 1 second.
*/
#define FSI_SBEFIFO_CMD_TIMEOUT_SECONDS _IOW('s', 0x01, __u32)
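
A one-line usage sketch (the chosen timeout value is arbitrary):

#include <sys/ioctl.h>
#include <linux/fsi.h>

int set_sbefifo_cmd_timeout(int fd)
{
    __u32 timeout = 30;   /* seconds; 0 restores the 1 second default */

    return ioctl(fd, FSI_SBEFIFO_CMD_TIMEOUT_SECONDS, &timeout);
}
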
/**
* FSI_SBEFIFO_READ_TIMEOUT sets the read timeout for response from SBE.
*

View File

@ -206,6 +206,17 @@
* - add extension header
* - add FUSE_EXT_GROUPS
* - add FUSE_CREATE_SUPP_GROUP
* - add FUSE_HAS_EXPIRE_ONLY
*
* 7.39
* - add FUSE_DIRECT_IO_ALLOW_MMAP
* - add FUSE_STATX and related structures
*
* 7.40
* - add max_stack_depth to fuse_init_out, add FUSE_PASSTHROUGH init flag
* - add backing_id to fuse_open_out, add FOPEN_PASSTHROUGH open flag
* - add FUSE_NO_EXPORT_SUPPORT init flag
* - add FUSE_NOTIFY_RESEND, add FUSE_HAS_RESEND init flag
*/
#ifndef _LINUX_FUSE_H
@ -237,7 +248,7 @@
#define FUSE_KERNEL_VERSION 7
/** Minor version number of this interface */
#define FUSE_KERNEL_MINOR_VERSION 38
#define FUSE_KERNEL_MINOR_VERSION 40
/** The node ID of the root inode */
#define FUSE_ROOT_ID 1
@ -264,6 +275,40 @@ struct fuse_attr {
uint32_t flags;
};
/*
* The following structures are bit-for-bit compatible with the statx(2) ABI in
* Linux.
*/
struct fuse_sx_time {
int64_t tv_sec;
uint32_t tv_nsec;
int32_t __reserved;
};
struct fuse_statx {
uint32_t mask;
uint32_t blksize;
uint64_t attributes;
uint32_t nlink;
uint32_t uid;
uint32_t gid;
uint16_t mode;
uint16_t __spare0[1];
uint64_t ino;
uint64_t size;
uint64_t blocks;
uint64_t attributes_mask;
struct fuse_sx_time atime;
struct fuse_sx_time btime;
struct fuse_sx_time ctime;
struct fuse_sx_time mtime;
uint32_t rdev_major;
uint32_t rdev_minor;
uint32_t dev_major;
uint32_t dev_minor;
uint64_t __spare2[14];
};
struct fuse_kstatfs {
uint64_t blocks;
uint64_t bfree;
@ -310,6 +355,7 @@ struct fuse_file_lock {
* FOPEN_STREAM: the file is stream-like (no file position at all)
* FOPEN_NOFLUSH: don't flush data cache on close (unless FUSE_WRITEBACK_CACHE)
* FOPEN_PARALLEL_DIRECT_WRITES: Allow concurrent direct writes on the same inode
* FOPEN_PASSTHROUGH: passthrough read/write io for this open file
*/
#define FOPEN_DIRECT_IO (1 << 0)
#define FOPEN_KEEP_CACHE (1 << 1)
@ -318,6 +364,7 @@ struct fuse_file_lock {
#define FOPEN_STREAM (1 << 4)
#define FOPEN_NOFLUSH (1 << 5)
#define FOPEN_PARALLEL_DIRECT_WRITES (1 << 6)
#define FOPEN_PASSTHROUGH (1 << 7)
/**
* INIT request/reply flags
@ -365,6 +412,11 @@ struct fuse_file_lock {
* FUSE_HAS_INODE_DAX: use per inode DAX
* FUSE_CREATE_SUPP_GROUP: add supplementary group info to create, mkdir,
* symlink and mknod (single group that matches parent)
* FUSE_HAS_EXPIRE_ONLY: kernel supports expiry-only entry invalidation
* FUSE_DIRECT_IO_ALLOW_MMAP: allow shared mmap in FOPEN_DIRECT_IO mode.
* FUSE_NO_EXPORT_SUPPORT: explicitly disable export support
* FUSE_HAS_RESEND: kernel supports resending pending requests, and the high bit
* of the request ID indicates resend requests
*/
#define FUSE_ASYNC_READ (1 << 0)
#define FUSE_POSIX_LOCKS (1 << 1)
@ -402,6 +454,14 @@ struct fuse_file_lock {
#define FUSE_SECURITY_CTX (1ULL << 32)
#define FUSE_HAS_INODE_DAX (1ULL << 33)
#define FUSE_CREATE_SUPP_GROUP (1ULL << 34)
#define FUSE_HAS_EXPIRE_ONLY (1ULL << 35)
#define FUSE_DIRECT_IO_ALLOW_MMAP (1ULL << 36)
#define FUSE_PASSTHROUGH (1ULL << 37)
#define FUSE_NO_EXPORT_SUPPORT (1ULL << 38)
#define FUSE_HAS_RESEND (1ULL << 39)
/* Obsolete alias for FUSE_DIRECT_IO_ALLOW_MMAP */
#define FUSE_DIRECT_IO_RELAX FUSE_DIRECT_IO_ALLOW_MMAP
/**
* CUSE INIT request/reply flags
@ -568,6 +628,7 @@ enum fuse_opcode {
FUSE_REMOVEMAPPING = 49,
FUSE_SYNCFS = 50,
FUSE_TMPFILE = 51,
FUSE_STATX = 52,
/* CUSE specific operations */
CUSE_INIT = 4096,
@ -584,6 +645,7 @@ enum fuse_notify_code {
FUSE_NOTIFY_STORE = 4,
FUSE_NOTIFY_RETRIEVE = 5,
FUSE_NOTIFY_DELETE = 6,
FUSE_NOTIFY_RESEND = 7,
FUSE_NOTIFY_CODE_MAX,
};
@ -632,6 +694,22 @@ struct fuse_attr_out {
struct fuse_attr attr;
};
struct fuse_statx_in {
uint32_t getattr_flags;
uint32_t reserved;
uint64_t fh;
uint32_t sx_flags;
uint32_t sx_mask;
};
struct fuse_statx_out {
uint64_t attr_valid; /* Cache timeout for the attributes */
uint32_t attr_valid_nsec;
uint32_t flags;
uint64_t spare[2];
struct fuse_statx stat;
};
#define FUSE_COMPAT_MKNOD_IN_SIZE 8
struct fuse_mknod_in {
@ -694,7 +772,7 @@ struct fuse_create_in {
struct fuse_open_out {
uint64_t fh;
uint32_t open_flags;
uint32_t padding;
int32_t backing_id;
};
struct fuse_release_in {
@ -810,7 +888,8 @@ struct fuse_init_out {
uint16_t max_pages;
uint16_t map_alignment;
uint32_t flags2;
uint32_t unused[7];
uint32_t max_stack_depth;
uint32_t unused[6];
};
#define CUSE_INIT_INFO_MAX 4096
@ -893,6 +972,14 @@ struct fuse_fallocate_in {
uint32_t padding;
};
/**
* FUSE request unique ID flag
*
* Indicates whether this is a resend request. The receiver should handle this
* request accordingly.
*/
#define FUSE_UNIQUE_RESEND (1ULL << 63)
struct fuse_in_header {
uint32_t len;
uint32_t opcode;
@ -982,9 +1069,18 @@ struct fuse_notify_retrieve_in {
uint64_t dummy4;
};
struct fuse_backing_map {
int32_t fd;
uint32_t flags;
uint64_t padding;
};
/* Device ioctls: */
#define FUSE_DEV_IOC_MAGIC 229
#define FUSE_DEV_IOC_CLONE _IOR(FUSE_DEV_IOC_MAGIC, 0, uint32_t)
#define FUSE_DEV_IOC_BACKING_OPEN _IOW(FUSE_DEV_IOC_MAGIC, 1, \
struct fuse_backing_map)
#define FUSE_DEV_IOC_BACKING_CLOSE _IOW(FUSE_DEV_IOC_MAGIC, 2, uint32_t)
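
A sketch of how a FUSE server would use the new backing-file ioctls for passthrough; fuse_dev_fd is the /dev/fuse connection and backing_fd a regular file the server has opened:

#include <sys/ioctl.h>
#include <linux/fuse.h>

int register_backing_file(int fuse_dev_fd, int backing_fd)
{
    struct fuse_backing_map map = {
        .fd = backing_fd,
        .flags = 0,
        .padding = 0,
    };

    /* Returns a positive backing id; the server later replies to FUSE_OPEN
     * with fuse_open_out.backing_id set to it and FOPEN_PASSTHROUGH set. */
    return ioctl(fuse_dev_fd, FUSE_DEV_IOC_BACKING_OPEN, &map);
}
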
struct fuse_lseek_in {
uint64_t fh;

View File

@ -44,10 +44,35 @@
FUTEX_PRIVATE_FLAG)
/*
* Flags to specify the bit length of the futex word for futex2 syscalls.
* Currently, only 32 is supported.
* Flags for futex2 syscalls.
*
* NOTE: these are not pure flags, they can also be seen as:
*
* union {
* u32 flags;
* struct {
* u32 size : 2,
* numa : 1,
* : 4,
* private : 1;
* };
* };
*/
#define FUTEX_32 2
#define FUTEX2_SIZE_U8 0x00
#define FUTEX2_SIZE_U16 0x01
#define FUTEX2_SIZE_U32 0x02
#define FUTEX2_SIZE_U64 0x03
#define FUTEX2_NUMA 0x04
/* 0x08 */
/* 0x10 */
/* 0x20 */
/* 0x40 */
#define FUTEX2_PRIVATE FUTEX_PRIVATE_FLAG
#define FUTEX2_SIZE_MASK 0x03
/* do not use */
#define FUTEX_32 FUTEX2_SIZE_U32 /* historical accident :-( */
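
The size flags are consumed per waiter by futex_waitv() and the other futex2 syscalls. A sketch waiting on a single 32-bit word, assuming a libc that defines SYS_futex_waitv:

#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/futex.h>

int wait_on_u32(uint32_t *word, uint32_t expected)
{
    struct futex_waitv waiter = {
        .val = expected,
        .uaddr = (uintptr_t)word,
        .flags = FUTEX2_SIZE_U32 | FUTEX2_PRIVATE,
        .__reserved = 0,
    };

    /* Sleeps until woken if *word still equals expected, otherwise fails
     * with EAGAIN; nr_futexes = 1, no timeout, default clock. */
    return syscall(SYS_futex_waitv, &waiter, 1, 0, NULL, 0);
}
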
/*
* Max numbers of elements in a futex_waitv array

View File

@ -67,7 +67,7 @@ struct gpiochip_info {
* @GPIO_V2_LINE_FLAG_BIAS_DISABLED: line has bias disabled
* @GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME: line events contain REALTIME timestamps
* @GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE: line events contain timestamps from
* hardware timestamp engine
* the hardware timestamping engine (HTE) subsystem
*/
enum gpio_v2_line_flag {
GPIO_V2_LINE_FLAG_USED = _BITULL(0),
@ -88,10 +88,10 @@ enum gpio_v2_line_flag {
/**
* struct gpio_v2_line_values - Values of GPIO lines
* @bits: a bitmap containing the value of the lines, set to 1 for active
* and 0 for inactive.
* and 0 for inactive
* @mask: a bitmap identifying the lines to get or set, with each bit
* number corresponding to the index into &struct
* gpio_v2_line_request.offsets.
* gpio_v2_line_request.offsets
*/
struct gpio_v2_line_values {
__aligned_u64 bits;
@ -123,7 +123,7 @@ enum gpio_v2_line_attr_id {
* @values: if id is %GPIO_V2_LINE_ATTR_ID_OUTPUT_VALUES, a bitmap
* containing the values to which the lines will be set, with each bit
* number corresponding to the index into &struct
* gpio_v2_line_request.offsets.
* gpio_v2_line_request.offsets
* @debounce_period_us: if id is %GPIO_V2_LINE_ATTR_ID_DEBOUNCE, the
* desired debounce period, in microseconds
*/
@ -143,7 +143,7 @@ struct gpio_v2_line_attribute {
* @attr: the configurable attribute
* @mask: a bitmap identifying the lines to which the attribute applies,
* with each bit number corresponding to the index into &struct
* gpio_v2_line_request.offsets.
* gpio_v2_line_request.offsets
*/
struct gpio_v2_line_config_attribute {
struct gpio_v2_line_attribute attr;
@ -178,7 +178,7 @@ struct gpio_v2_line_config {
* associated GPIO chip
* @consumer: a desired consumer label for the selected GPIO lines such as
* "my-bitbanged-relay"
* @config: requested configuration for the lines.
* @config: requested configuration for the lines
* @num_lines: number of lines requested in this request, i.e. the number
* of valid fields in the %GPIO_V2_LINES_MAX sized arrays, set to 1 to
* request a single line
@ -189,9 +189,8 @@ struct gpio_v2_line_config {
* buffer. If this field is zero then the buffer size defaults to a minimum
* of @num_lines * 16.
* @padding: reserved for future use and must be zero filled
* @fd: if successful this field will contain a valid anonymous file handle
* after a %GPIO_GET_LINE_IOCTL operation, zero or negative value means
* error
* @fd: after a successful %GPIO_V2_GET_LINE_IOCTL operation, contains
* a valid anonymous file descriptor representing the request
*/
struct gpio_v2_line_request {
__u32 offsets[GPIO_V2_LINES_MAX];
@ -217,7 +216,7 @@ struct gpio_v2_line_request {
* @num_attrs: the number of attributes in @attrs
* @flags: flags for this GPIO line, with values from &enum
* gpio_v2_line_flag, such as %GPIO_V2_LINE_FLAG_ACTIVE_LOW,
* %GPIO_V2_LINE_FLAG_OUTPUT etc, added together.
* %GPIO_V2_LINE_FLAG_OUTPUT etc, added together
* @attrs: the configuration attributes associated with the line
* @padding: reserved for future use
*/
@ -274,7 +273,7 @@ enum gpio_v2_line_event_id {
/**
* struct gpio_v2_line_event - The actual event being pushed to userspace
* @timestamp_ns: best estimate of time of event occurrence, in nanoseconds.
* @timestamp_ns: best estimate of time of event occurrence, in nanoseconds
* @id: event identifier with value from &enum gpio_v2_line_event_id
* @offset: the offset of the line that triggered the event
* @seqno: the sequence number for this event in the sequence of events for
@ -289,6 +288,10 @@ enum gpio_v2_line_event_id {
*
* If the %GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME flag is set then the
* @timestamp_ns is read from %CLOCK_REALTIME.
*
* If the %GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE flag is set then the
* @timestamp_ns is provided by the hardware timestamping engine (HTE)
* subsystem.
*/
struct gpio_v2_line_event {
__aligned_u64 timestamp_ns;
@ -330,7 +333,7 @@ struct gpio_v2_line_event {
* also be empty if the consumer doesn't set this up
*
* Note: This struct is part of ABI v1 and is deprecated.
* Use &struct gpio_v2_line_info instead.
* Use ABI v2 and &struct gpio_v2_line_info instead.
*/
struct gpioline_info {
__u32 line_offset;
@ -365,7 +368,7 @@ enum {
* at the end of the structure on 64-bit architectures.
*
* Note: This struct is part of ABI v1 and is deprecated.
* Use &struct gpio_v2_line_info_changed instead.
* Use ABI v2 and &struct gpio_v2_line_info_changed instead.
*/
struct gpioline_info_changed {
struct gpioline_info info;
@ -396,18 +399,17 @@ struct gpioline_info_changed {
* a batch of input or output lines, but they must all have the same
* characteristics, i.e. all inputs or all outputs, all active low etc
* @default_values: if the %GPIOHANDLE_REQUEST_OUTPUT is set for a requested
* line, this specifies the default output value, should be 0 (low) or
* 1 (high), anything else than 0 or 1 will be interpreted as 1 (high)
* line, this specifies the default output value, should be 0 (inactive) or
* 1 (active). Anything other than 0 or 1 will be interpreted as active.
* @consumer_label: a desired consumer label for the selected GPIO line(s)
* such as "my-bitbanged-relay"
* @lines: number of lines requested in this request, i.e. the number of
* valid fields in the above arrays, set to 1 to request a single line
* @fd: if successful this field will contain a valid anonymous file handle
* after a %GPIO_GET_LINEHANDLE_IOCTL operation, zero or negative value
* means error
* @fd: after a successful %GPIO_GET_LINEHANDLE_IOCTL operation, contains
* a valid anonymous file descriptor representing the request
*
* Note: This struct is part of ABI v1 and is deprecated.
* Use &struct gpio_v2_line_request instead.
* Use ABI v2 and &struct gpio_v2_line_request instead.
*/
struct gpiohandle_request {
__u32 lineoffsets[GPIOHANDLES_MAX];
@ -424,12 +426,12 @@ struct gpiohandle_request {
* %GPIOHANDLE_REQUEST_OUTPUT, %GPIOHANDLE_REQUEST_ACTIVE_LOW etc, added
* together
* @default_values: if the %GPIOHANDLE_REQUEST_OUTPUT is set in flags,
* this specifies the default output value, should be 0 (low) or
* 1 (high), anything else than 0 or 1 will be interpreted as 1 (high)
* this specifies the default output value, should be 0 (inactive) or
* 1 (active). Anything other than 0 or 1 will be interpreted as active.
* @padding: reserved for future use and should be zero filled
*
* Note: This struct is part of ABI v1 and is deprecated.
* Use &struct gpio_v2_line_config instead.
* Use ABI v2 and &struct gpio_v2_line_config instead.
*/
struct gpiohandle_config {
__u32 flags;
@ -441,10 +443,11 @@ struct gpiohandle_config {
* struct gpiohandle_data - Information of values on a GPIO handle
* @values: when getting the state of lines this contains the current
* state of a line, when setting the state of lines these should contain
* the desired target state
* the desired target state. States are 0 (inactive) or 1 (active).
* When setting, anything other than 0 or 1 will be interpreted as active.
*
* Note: This struct is part of ABI v1 and is deprecated.
* Use &struct gpio_v2_line_values instead.
* Use ABI v2 and &struct gpio_v2_line_values instead.
*/
struct gpiohandle_data {
__u8 values[GPIOHANDLES_MAX];
@ -465,12 +468,11 @@ struct gpiohandle_data {
* %GPIOEVENT_REQUEST_RISING_EDGE or %GPIOEVENT_REQUEST_FALLING_EDGE
* @consumer_label: a desired consumer label for the selected GPIO line(s)
* such as "my-listener"
* @fd: if successful this field will contain a valid anonymous file handle
* after a %GPIO_GET_LINEEVENT_IOCTL operation, zero or negative value
* means error
* @fd: after a successful %GPIO_GET_LINEEVENT_IOCTL operation, contains a
* valid anonymous file descriptor representing the request
*
* Note: This struct is part of ABI v1 and is deprecated.
* Use &struct gpio_v2_line_request instead.
* Use ABI v2 and &struct gpio_v2_line_request instead.
*/
struct gpioevent_request {
__u32 lineoffset;
@ -489,10 +491,11 @@ struct gpioevent_request {
/**
* struct gpioevent_data - The actual event being pushed to userspace
* @timestamp: best estimate of time of event occurrence, in nanoseconds
* @id: event identifier
* @id: event identifier, one of %GPIOEVENT_EVENT_RISING_EDGE or
* %GPIOEVENT_EVENT_FALLING_EDGE
*
* Note: This struct is part of ABI v1 and is deprecated.
* Use &struct gpio_v2_line_event instead.
* Use ABI v2 and &struct gpio_v2_line_event instead.
*/
struct gpioevent_data {
__u64 timestamp;

View File

@ -1,11 +1,47 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/* Copyright (c) 2022/23 Siemens Mobility GmbH */
#ifndef _LINUX_GSMMUX_H
#define _LINUX_GSMMUX_H
#include <linux/const.h>
#include <linux/if.h>
#include <linux/ioctl.h>
#include <linux/types.h>
/*
* flags definition for n_gsm
*
* Used by:
* struct gsm_config_ext.flags
* struct gsm_dlci_config.flags
*/
/* Forces a DLCI reset if set. Otherwise, a DLCI reset is only done if
* incompatible settings were provided. Always cleared on retrieval.
*/
#define GSM_FL_RESTART _BITUL(0)
/**
* struct gsm_config - n_gsm basic configuration parameters
*
* This structure is used in combination with GSMIOC_GETCONF and GSMIOC_SETCONF
* to retrieve and set the basic parameters of an n_gsm ldisc.
* struct gsm_config_ext can be used to configure extended ldisc parameters.
*
* All timers are in units of 1/100th of a second.
*
* @adaption: Convergence layer type
* @encapsulation: Framing (0 = basic option, 1 = advanced option)
* @initiator: Initiator or responder
* @t1: Acknowledgment timer
* @t2: Response timer for multiplexer control channel
* @t3: Response timer for wake-up procedure
* @n2: Maximum number of retransmissions
* @mru: Maximum incoming frame payload size
* @mtu: Maximum outgoing frame payload size
* @k: Window size
* @i: Frame type (1 = UIH, 2 = UI)
* @unused: Can not be used
*/
struct gsm_config
{
unsigned int adaption;
@ -19,18 +55,32 @@ struct gsm_config
unsigned int mtu;
unsigned int k;
unsigned int i;
unsigned int unused[8]; /* Can not be used */
unsigned int unused[8];
};
#define GSMIOC_GETCONF _IOR('G', 0, struct gsm_config)
#define GSMIOC_SETCONF _IOW('G', 1, struct gsm_config)
/**
* struct gsm_netconfig - n_gsm network configuration parameters
*
* This structure is used in combination with GSMIOC_ENABLE_NET and
* GSMIOC_DISABLE_NET to enable or disable a network data connection
* over a mux virtual tty channel. This is for modems that support
* data connections with raw IP frames instead of PPP.
*
* @adaption: Adaption to use in network mode.
* @protocol: Protocol to use - only ETH_P_IP supported.
* @unused2: Can not be used.
* @if_name: Interface name format string.
* @unused: Can not be used.
*/
struct gsm_netconfig {
unsigned int adaption; /* Adaption to use in network mode */
unsigned short protocol;/* Protocol to use - only ETH_P_IP supported */
unsigned short unused2; /* Can not be used */
char if_name[IFNAMSIZ]; /* interface name format string */
__u8 unused[28]; /* Can not be used */
unsigned int adaption;
unsigned short protocol;
unsigned short unused2;
char if_name[IFNAMSIZ];
__u8 unused[28];
};
#define GSMIOC_ENABLE_NET _IOW('G', 2, struct gsm_netconfig)
@ -39,14 +89,60 @@ struct gsm_netconfig {
/* get the base tty number for a configured gsmmux tty */
#define GSMIOC_GETFIRST _IOR('G', 4, __u32)
/**
* struct gsm_config_ext - n_gsm extended configuration parameters
*
* This structure is used in combination with GSMIOC_GETCONF_EXT and
* GSMIOC_SETCONF_EXT to retrieve and set the extended parameters of an
* n_gsm ldisc.
*
* All timers are in units of 1/100th of a second.
*
* @keep_alive: Control channel keep-alive in 1/100th of a second (0 to disable).
* @wait_config: Wait for DLCI config before opening virtual link?
* @flags: Mux specific flags.
* @reserved: For future use, must be initialized to zero.
*/
struct gsm_config_ext {
__u32 keep_alive; /* Control channel keep-alive in 1/100th of a
* second (0 to disable)
*/
__u32 reserved[7]; /* For future use, must be initialized to zero */
__u32 keep_alive;
__u32 wait_config;
__u32 flags;
__u32 reserved[5];
};
#define GSMIOC_GETCONF_EXT _IOR('G', 5, struct gsm_config_ext)
#define GSMIOC_SETCONF_EXT _IOW('G', 6, struct gsm_config_ext)
/**
* struct gsm_dlci_config - n_gsm channel configuration parameters
*
* This structure is used in combination with GSMIOC_GETCONF_DLCI and
* GSMIOC_SETCONF_DLCI to retrieve and set the channel specific parameters
* of an n_gsm ldisc.
*
* Set the channel accordingly before calling GSMIOC_GETCONF_DLCI.
*
* @channel: DLCI (0 for the associated DLCI).
* @adaption: Convergence layer type.
* @mtu: Maximum transfer unit.
* @priority: Priority (0 for default value).
* @i: Frame type (1 = UIH, 2 = UI).
* @k: Window size (0 for default value).
* @flags: DLCI specific flags.
* @reserved: For future use, must be initialized to zero.
*/
struct gsm_dlci_config {
__u32 channel;
__u32 adaption;
__u32 mtu;
__u32 priority;
__u32 i;
__u32 k;
__u32 flags;
__u32 reserved[7];
};
#define GSMIOC_GETCONF_DLCI _IOWR('G', 7, struct gsm_dlci_config)
#define GSMIOC_SETCONF_DLCI _IOW('G', 8, struct gsm_dlci_config)
#endif
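
A sketch of the extended and per-DLCI configuration ioctls on a tty that already has the n_gsm line discipline attached; the keep-alive and MTU values are illustrative:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/gsmmux.h>

int tune_gsm_mux(int tty_fd)
{
    struct gsm_config_ext ce;
    struct gsm_dlci_config dc;

    memset(&ce, 0, sizeof(ce));
    if (ioctl(tty_fd, GSMIOC_GETCONF_EXT, &ce) < 0)
        return -1;
    ce.keep_alive = 500;                 /* 5 s, in 1/100th of a second units */
    if (ioctl(tty_fd, GSMIOC_SETCONF_EXT, &ce) < 0)
        return -1;

    memset(&dc, 0, sizeof(dc));
    dc.channel = 1;                      /* select the DLCI before GETCONF_DLCI */
    if (ioctl(tty_fd, GSMIOC_GETCONF_DLCI, &dc) < 0)
        return -1;
    dc.mtu = 1400;
    return ioctl(tty_fd, GSMIOC_SETCONF_DLCI, &dc);
}
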

View File

@ -31,8 +31,11 @@ enum gtp_attrs {
GTPA_I_TEI, /* for GTPv1 only */
GTPA_O_TEI, /* for GTPv1 only */
GTPA_PAD,
GTPA_PEER_ADDR6,
GTPA_MS_ADDR6,
GTPA_FAMILY,
__GTPA_MAX,
};
#define GTPA_MAX (__GTPA_MAX + 1)
#define GTPA_MAX (__GTPA_MAX - 1)
#endif /* _LINUX_GTP_H_ */

View File

@ -0,0 +1,74 @@
/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/handshake.yaml */
/* YNL-GEN uapi header */
#ifndef _LINUX_HANDSHAKE_H
#define _LINUX_HANDSHAKE_H
#define HANDSHAKE_FAMILY_NAME "handshake"
#define HANDSHAKE_FAMILY_VERSION 1
enum handshake_handler_class {
HANDSHAKE_HANDLER_CLASS_NONE,
HANDSHAKE_HANDLER_CLASS_TLSHD,
HANDSHAKE_HANDLER_CLASS_MAX,
};
enum handshake_msg_type {
HANDSHAKE_MSG_TYPE_UNSPEC,
HANDSHAKE_MSG_TYPE_CLIENTHELLO,
HANDSHAKE_MSG_TYPE_SERVERHELLO,
};
enum handshake_auth {
HANDSHAKE_AUTH_UNSPEC,
HANDSHAKE_AUTH_UNAUTH,
HANDSHAKE_AUTH_PSK,
HANDSHAKE_AUTH_X509,
};
enum {
HANDSHAKE_A_X509_CERT = 1,
HANDSHAKE_A_X509_PRIVKEY,
__HANDSHAKE_A_X509_MAX,
HANDSHAKE_A_X509_MAX = (__HANDSHAKE_A_X509_MAX - 1)
};
enum {
HANDSHAKE_A_ACCEPT_SOCKFD = 1,
HANDSHAKE_A_ACCEPT_HANDLER_CLASS,
HANDSHAKE_A_ACCEPT_MESSAGE_TYPE,
HANDSHAKE_A_ACCEPT_TIMEOUT,
HANDSHAKE_A_ACCEPT_AUTH_MODE,
HANDSHAKE_A_ACCEPT_PEER_IDENTITY,
HANDSHAKE_A_ACCEPT_CERTIFICATE,
HANDSHAKE_A_ACCEPT_PEERNAME,
__HANDSHAKE_A_ACCEPT_MAX,
HANDSHAKE_A_ACCEPT_MAX = (__HANDSHAKE_A_ACCEPT_MAX - 1)
};
enum {
HANDSHAKE_A_DONE_STATUS = 1,
HANDSHAKE_A_DONE_SOCKFD,
HANDSHAKE_A_DONE_REMOTE_AUTH,
__HANDSHAKE_A_DONE_MAX,
HANDSHAKE_A_DONE_MAX = (__HANDSHAKE_A_DONE_MAX - 1)
};
enum {
HANDSHAKE_CMD_READY = 1,
HANDSHAKE_CMD_ACCEPT,
HANDSHAKE_CMD_DONE,
__HANDSHAKE_CMD_MAX,
HANDSHAKE_CMD_MAX = (__HANDSHAKE_CMD_MAX - 1)
};
#define HANDSHAKE_MCGRP_NONE "none"
#define HANDSHAKE_MCGRP_TLSHD "tlshd"
#endif /* _LINUX_HANDSHAKE_H */

View File

@ -35,6 +35,9 @@ enum hash_algo {
HASH_ALGO_SM3_256,
HASH_ALGO_STREEBOG_256,
HASH_ALGO_STREEBOG_512,
HASH_ALGO_SHA3_256,
HASH_ALGO_SHA3_384,
HASH_ALGO_SHA3_512,
HASH_ALGO__LAST
};

View File

@ -22,14 +22,4 @@ enum {
HW_BREAKPOINT_INVALID = HW_BREAKPOINT_RW | HW_BREAKPOINT_X,
};
enum bp_type_idx {
TYPE_INST = 0,
#ifdef CONFIG_HAVE_MIXED_BREAKPOINTS_REGS
TYPE_DATA = 0,
#else
TYPE_DATA = 1,
#endif
TYPE_MAX
};
#endif /* _LINUX_HW_BREAKPOINT_H */

View File

@ -112,6 +112,7 @@ struct icmp6hdr {
#define ICMPV6_MOBILE_PREFIX_ADV 147
#define ICMPV6_MRDISC_ADV 151
#define ICMPV6_MRDISC_SOL 152
#define ICMPV6_MSG_MAX 255

View File

@ -26,6 +26,8 @@ enum idxd_scmd_stat {
IDXD_SCMD_WQ_NO_PRIV = 0x800f0000,
IDXD_SCMD_WQ_IRQ_ERR = 0x80100000,
IDXD_SCMD_WQ_USER_NO_IOMMU = 0x80110000,
IDXD_SCMD_DEV_EVL_ERR = 0x80120000,
IDXD_SCMD_WQ_NO_DRV_NAME = 0x80200000,
};
#define IDXD_SCMD_SOFTERR_MASK 0x80000000
@ -68,12 +70,14 @@ enum dsa_opcode {
DSA_OPCODE_CR_DELTA,
DSA_OPCODE_AP_DELTA,
DSA_OPCODE_DUALCAST,
DSA_OPCODE_TRANSL_FETCH,
DSA_OPCODE_CRCGEN = 0x10,
DSA_OPCODE_COPY_CRC,
DSA_OPCODE_DIF_CHECK,
DSA_OPCODE_DIF_INS,
DSA_OPCODE_DIF_STRP,
DSA_OPCODE_DIF_UPDT,
DSA_OPCODE_DIX_GEN = 0x17,
DSA_OPCODE_CFLUSH = 0x20,
};
@ -128,6 +132,8 @@ enum dsa_completion_status {
DSA_COMP_HW_ERR1,
DSA_COMP_HW_ERR_DRB,
DSA_COMP_TRANSLATION_FAIL,
DSA_COMP_DRAIN_EVL = 0x26,
DSA_COMP_BATCH_EVL_ERR,
};
enum iax_completion_status {
@ -163,6 +169,7 @@ enum iax_completion_status {
#define DSA_COMP_STATUS_MASK 0x7f
#define DSA_COMP_STATUS_WRITE 0x80
#define DSA_COMP_STATUS(status) ((status) & DSA_COMP_STATUS_MASK)
struct dsa_hw_desc {
uint32_t pasid:20;
@ -176,6 +183,8 @@ struct dsa_hw_desc {
uint64_t rdback_addr;
uint64_t pattern;
uint64_t desc_list_addr;
uint64_t pattern_lower;
uint64_t transl_fetch_addr;
};
union {
uint64_t dst_addr;
@ -186,6 +195,7 @@ struct dsa_hw_desc {
union {
uint32_t xfer_size;
uint32_t desc_count;
uint32_t region_size;
};
uint16_t int_handle;
uint16_t rsvd1;
@ -240,6 +250,26 @@ struct dsa_hw_desc {
uint16_t dest_app_tag_seed;
};
/* Fill */
uint64_t pattern_upper;
/* Translation fetch */
struct {
uint64_t transl_fetch_res;
uint32_t region_stride;
};
/* DIX generate */
struct {
uint8_t dix_gen_res;
uint8_t dest_dif_flags;
uint8_t dif_flags;
uint8_t dix_gen_res2[13];
uint32_t ref_tag_seed;
uint16_t app_tag_mask;
uint16_t app_tag_seed;
};
uint8_t op_specific[24];
};
} __attribute__((packed));
@ -280,8 +310,12 @@ struct dsa_completion_record {
uint8_t result;
uint8_t dif_status;
};
uint16_t rsvd;
uint32_t bytes_completed;
uint8_t fault_info;
uint8_t rsvd;
union {
uint32_t bytes_completed;
uint32_t descs_completed;
};
uint64_t fault_addr;
union {
/* common record */
@ -318,6 +352,14 @@ struct dsa_completion_record {
uint16_t dif_upd_dest_app_tag;
};
/* DIX generate */
struct {
uint64_t dix_gen_res;
uint32_t dix_ref_tag;
uint16_t dix_app_tag_mask;
uint16_t dix_app_tag;
};
uint8_t op_specific[16];
};
} __attribute__((packed));
@ -329,7 +371,8 @@ struct dsa_raw_completion_record {
struct iax_completion_record {
__volatile__ uint8_t status;
uint8_t error_code;
uint16_t rsvd;
uint8_t fault_info;
uint8_t rsvd;
uint32_t bytes_completed;
uint64_t fault_addr;
uint32_t invalid_flags;

View File

@ -525,6 +525,7 @@ enum {
BRIDGE_VLANDB_ENTRY_MCAST_ROUTER,
BRIDGE_VLANDB_ENTRY_MCAST_N_GROUPS,
BRIDGE_VLANDB_ENTRY_MCAST_MAX_GROUPS,
BRIDGE_VLANDB_ENTRY_NEIGH_SUPPRESS,
__BRIDGE_VLANDB_ENTRY_MAX,
};
#define BRIDGE_VLANDB_ENTRY_MAX (__BRIDGE_VLANDB_ENTRY_MAX - 1)
@ -633,6 +634,11 @@ enum {
MDBA_MDB_EATTR_GROUP_MODE,
MDBA_MDB_EATTR_SOURCE,
MDBA_MDB_EATTR_RTPROT,
MDBA_MDB_EATTR_DST,
MDBA_MDB_EATTR_DST_PORT,
MDBA_MDB_EATTR_VNI,
MDBA_MDB_EATTR_IFINDEX,
MDBA_MDB_EATTR_SRC_VNI,
__MDBA_MDB_EATTR_MAX
};
#define MDBA_MDB_EATTR_MAX (__MDBA_MDB_EATTR_MAX - 1)
@ -717,6 +723,24 @@ enum {
};
#define MDBA_SET_ENTRY_MAX (__MDBA_SET_ENTRY_MAX - 1)
/* [MDBA_GET_ENTRY] = {
* struct br_mdb_entry
* [MDBA_GET_ENTRY_ATTRS] = {
* [MDBE_ATTR_SOURCE]
* struct in_addr / struct in6_addr
* [MDBE_ATTR_SRC_VNI]
* u32
* }
* }
*/
enum {
MDBA_GET_ENTRY_UNSPEC,
MDBA_GET_ENTRY,
MDBA_GET_ENTRY_ATTRS,
__MDBA_GET_ENTRY_MAX,
};
#define MDBA_GET_ENTRY_MAX (__MDBA_GET_ENTRY_MAX - 1)
/* [MDBA_SET_ENTRY_ATTRS] = {
* [MDBE_ATTR_xxx]
* ...
@ -728,6 +752,12 @@ enum {
MDBE_ATTR_SRC_LIST,
MDBE_ATTR_GROUP_MODE,
MDBE_ATTR_RTPROT,
MDBE_ATTR_DST,
MDBE_ATTR_DST_PORT,
MDBE_ATTR_VNI,
MDBE_ATTR_IFINDEX,
MDBE_ATTR_SRC_VNI,
MDBE_ATTR_STATE_MASK,
__MDBE_ATTR_MAX,
};
#define MDBE_ATTR_MAX (__MDBE_ATTR_MAX - 1)

View File

@ -376,7 +376,7 @@ enum {
IFLA_GSO_IPV4_MAX_SIZE,
IFLA_GRO_IPV4_MAX_SIZE,
IFLA_DPLL_PIN,
__IFLA_MAX
};
@ -459,6 +459,286 @@ enum in6_addr_gen_mode {
/* Bridge section */
/**
* DOC: Bridge enum definition
*
* Please *note* that the timer values in the following section are expected
* in clock_t format, which is seconds multiplied by USER_HZ (generally
* defined as 100).
*
* @IFLA_BR_FORWARD_DELAY
* The bridge forwarding delay is the time spent in LISTENING state
* (before moving to LEARNING) and in LEARNING state (before moving
* to FORWARDING). Only relevant if STP is enabled.
*
* The valid values are between (2 * USER_HZ) and (30 * USER_HZ).
* The default value is (15 * USER_HZ).
*
* @IFLA_BR_HELLO_TIME
* The time between hello packets sent by the bridge, when it is a root
* bridge or a designated bridge. Only relevant if STP is enabled.
*
* The valid values are between (1 * USER_HZ) and (10 * USER_HZ).
* The default value is (2 * USER_HZ).
*
* @IFLA_BR_MAX_AGE
* The hello packet timeout is the time until another bridge in the
* spanning tree is assumed to be dead, after reception of its last hello
* message. Only relevant if STP is enabled.
*
* The valid values are between (6 * USER_HZ) and (40 * USER_HZ).
* The default value is (20 * USER_HZ).
*
* @IFLA_BR_AGEING_TIME
* Configure the bridge's FDB entries aging time. It is the time a MAC
* address will be kept in the FDB after a packet has been received from
* that address. After this time has passed, entries are cleaned up.
* Allow values outside the 802.1 standard specification for special cases:
*
* * 0 - entry never ages (all permanent)
* * 1 - entry disappears (no persistence)
*
* The default value is (300 * USER_HZ).
*
* @IFLA_BR_STP_STATE
* Turn spanning tree protocol on (*IFLA_BR_STP_STATE* > 0) or off
* (*IFLA_BR_STP_STATE* == 0) for this bridge.
*
* The default value is 0 (disabled).
*
* @IFLA_BR_PRIORITY
* Set this bridge's spanning tree priority, used during STP root bridge
* election.
*
* The valid values are between 0 and 65535.
*
* @IFLA_BR_VLAN_FILTERING
* Turn VLAN filtering on (*IFLA_BR_VLAN_FILTERING* > 0) or off
* (*IFLA_BR_VLAN_FILTERING* == 0). When disabled, the bridge will not
* consider the VLAN tag when handling packets.
*
* The default value is 0 (disabled).
*
* @IFLA_BR_VLAN_PROTOCOL
* Set the protocol used for VLAN filtering.
*
* The valid values are 0x8100(802.1Q) or 0x88A8(802.1AD). The default value
* is 0x8100(802.1Q).
*
* @IFLA_BR_GROUP_FWD_MASK
* The group forwarding mask. This is the bitmask that is applied to
* decide whether to forward incoming frames destined to link-local
* addresses (of the form 01:80:C2:00:00:0X).
*
* The default value is 0, which means the bridge does not forward any
* link-local frames coming on this port.
*
* @IFLA_BR_ROOT_ID
* The bridge root id, read only.
*
* @IFLA_BR_BRIDGE_ID
* The bridge id, read only.
*
* @IFLA_BR_ROOT_PORT
* The bridge root port, read only.
*
* @IFLA_BR_ROOT_PATH_COST
* The bridge root path cost, read only.
*
* @IFLA_BR_TOPOLOGY_CHANGE
* The bridge topology change, read only.
*
* @IFLA_BR_TOPOLOGY_CHANGE_DETECTED
* The bridge topology change detected, read only.
*
* @IFLA_BR_HELLO_TIMER
* The bridge hello timer, read only.
*
* @IFLA_BR_TCN_TIMER
* The bridge tcn timer, read only.
*
* @IFLA_BR_TOPOLOGY_CHANGE_TIMER
* The bridge topology change timer, read only.
*
* @IFLA_BR_GC_TIMER
* The bridge gc timer, read only.
*
* @IFLA_BR_GROUP_ADDR
* Set the MAC address of the multicast group this bridge uses for STP.
* The address must be a link-local address in standard Ethernet MAC address
* format. It is an address of the form 01:80:C2:00:00:0X, with X in [0, 4..f].
*
* The default value is 0.
*
* @IFLA_BR_FDB_FLUSH
* Flush bridge's fdb dynamic entries.
*
* @IFLA_BR_MCAST_ROUTER
* Set bridge's multicast router if IGMP snooping is enabled.
* The valid values are:
*
* * 0 - disabled.
* * 1 - automatic (queried).
* * 2 - permanently enabled.
*
* The default value is 1.
*
* @IFLA_BR_MCAST_SNOOPING
* Turn multicast snooping on (*IFLA_BR_MCAST_SNOOPING* > 0) or off
* (*IFLA_BR_MCAST_SNOOPING* == 0).
*
* The default value is 1.
*
* @IFLA_BR_MCAST_QUERY_USE_IFADDR
 * If enabled, use the bridge's own IP address as the source address for IGMP
 * queries (*IFLA_BR_MCAST_QUERY_USE_IFADDR* > 0) or the default of 0.0.0.0
 * (*IFLA_BR_MCAST_QUERY_USE_IFADDR* == 0).
*
* The default value is 0 (disabled).
*
* @IFLA_BR_MCAST_QUERIER
 * Enable (*IFLA_BR_MCAST_QUERIER* > 0) or disable
 * (*IFLA_BR_MCAST_QUERIER* == 0) the IGMP querier, i.e. the sending of
 * multicast queries by the bridge.
*
* The default value is 0 (disabled).
*
* @IFLA_BR_MCAST_HASH_ELASTICITY
 * Set the multicast database hash elasticity. It is the maximum chain length
 * in the multicast hash table. This attribute is *deprecated* and the value
 * is always 16.
*
* @IFLA_BR_MCAST_HASH_MAX
 * Set the maximum size of the multicast hash table.
 *
 * The default value is 4096; the value must be a power of 2.
*
* @IFLA_BR_MCAST_LAST_MEMBER_CNT
* The Last Member Query Count is the number of Group-Specific Queries
* sent before the router assumes there are no local members. The Last
* Member Query Count is also the number of Group-and-Source-Specific
* Queries sent before the router assumes there are no listeners for a
* particular source.
*
* The default value is 2.
*
* @IFLA_BR_MCAST_STARTUP_QUERY_CNT
* The Startup Query Count is the number of Queries sent out on startup,
* separated by the Startup Query Interval.
*
* The default value is 2.
*
* @IFLA_BR_MCAST_LAST_MEMBER_INTVL
* The Last Member Query Interval is the Max Response Time inserted into
* Group-Specific Queries sent in response to Leave Group messages, and
* is also the amount of time between Group-Specific Query messages.
*
* The default value is (1 * USER_HZ).
*
* @IFLA_BR_MCAST_MEMBERSHIP_INTVL
* The interval after which the bridge will leave a group, if no membership
* reports for this group are received.
*
* The default value is (260 * USER_HZ).
*
* @IFLA_BR_MCAST_QUERIER_INTVL
 * The interval between queries sent by other routers. If no queries are
 * seen after this delay has passed, the bridge will start to send its own
 * queries (as if *IFLA_BR_MCAST_QUERIER* was enabled).
*
* The default value is (255 * USER_HZ).
*
* @IFLA_BR_MCAST_QUERY_INTVL
* The Query Interval is the interval between General Queries sent by
* the Querier.
*
* The default value is (125 * USER_HZ). The minimum value is (1 * USER_HZ).
*
* @IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
* The Max Response Time used to calculate the Max Resp Code inserted
* into the periodic General Queries.
*
* The default value is (10 * USER_HZ).
*
* @IFLA_BR_MCAST_STARTUP_QUERY_INTVL
* The interval between queries in the startup phase.
*
* The default value is (125 * USER_HZ) / 4. The minimum value is (1 * USER_HZ).
*
* @IFLA_BR_NF_CALL_IPTABLES
* Enable (*NF_CALL_IPTABLES* > 0) or disable (*NF_CALL_IPTABLES* == 0)
* iptables hooks on the bridge.
*
* The default value is 0 (disabled).
*
* @IFLA_BR_NF_CALL_IP6TABLES
* Enable (*NF_CALL_IP6TABLES* > 0) or disable (*NF_CALL_IP6TABLES* == 0)
* ip6tables hooks on the bridge.
*
* The default value is 0 (disabled).
*
* @IFLA_BR_NF_CALL_ARPTABLES
* Enable (*NF_CALL_ARPTABLES* > 0) or disable (*NF_CALL_ARPTABLES* == 0)
* arptables hooks on the bridge.
*
* The default value is 0 (disabled).
*
* @IFLA_BR_VLAN_DEFAULT_PVID
* VLAN ID applied to untagged and priority-tagged incoming packets.
*
* The default value is 1. Setting to the special value 0 makes all ports of
* this bridge not have a PVID by default, which means that they will
* not accept VLAN-untagged traffic.
*
* @IFLA_BR_PAD
* Bridge attribute padding type for netlink message.
*
* @IFLA_BR_VLAN_STATS_ENABLED
* Enable (*IFLA_BR_VLAN_STATS_ENABLED* == 1) or disable
* (*IFLA_BR_VLAN_STATS_ENABLED* == 0) per-VLAN stats accounting.
*
* The default value is 0 (disabled).
*
* @IFLA_BR_MCAST_STATS_ENABLED
* Enable (*IFLA_BR_MCAST_STATS_ENABLED* > 0) or disable
* (*IFLA_BR_MCAST_STATS_ENABLED* == 0) multicast (IGMP/MLD) stats
* accounting.
*
* The default value is 0 (disabled).
*
* @IFLA_BR_MCAST_IGMP_VERSION
* Set the IGMP version.
*
* The valid values are 2 and 3. The default value is 2.
*
* @IFLA_BR_MCAST_MLD_VERSION
* Set the MLD version.
*
* The valid values are 1 and 2. The default value is 1.
*
* @IFLA_BR_VLAN_STATS_PER_PORT
* Enable (*IFLA_BR_VLAN_STATS_PER_PORT* == 1) or disable
* (*IFLA_BR_VLAN_STATS_PER_PORT* == 0) per-VLAN per-port stats accounting.
* Can be changed only when there are no port VLANs configured.
*
* The default value is 0 (disabled).
*
* @IFLA_BR_MULTI_BOOLOPT
* The multi_boolopt is used to control new boolean options to avoid adding
* new netlink attributes. You can look at ``enum br_boolopt_id`` for those
* options.
*
* @IFLA_BR_MCAST_QUERIER_STATE
* Bridge mcast querier states, read only.
*
* @IFLA_BR_FDB_N_LEARNED
* The number of dynamically learned FDB entries for the current bridge,
* read only.
*
* @IFLA_BR_FDB_MAX_LEARNED
* Set the number of max dynamically learned FDB entries for the current
* bridge.
*/
enum {
IFLA_BR_UNSPEC,
IFLA_BR_FORWARD_DELAY,
@ -508,6 +788,8 @@ enum {
IFLA_BR_VLAN_STATS_PER_PORT,
IFLA_BR_MULTI_BOOLOPT,
IFLA_BR_MCAST_QUERIER_STATE,
IFLA_BR_FDB_N_LEARNED,
IFLA_BR_FDB_MAX_LEARNED,
__IFLA_BR_MAX,
};
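The timer attributes above are carried in clock_t units (seconds multiplied by USER_HZ). A minimal sketch of preparing such a value for IFLA_BR_FORWARD_DELAY, assuming USER_HZ is 100; the helper and the BR_USER_HZ constant are illustrative, not part of this header:

#define BR_USER_HZ 100	/* assumed; sysconf(_SC_CLK_TCK) reports the real value */

/* Convert a forwarding delay in seconds to the clock_t value expected by
 * IFLA_BR_FORWARD_DELAY (valid range 2..30 seconds when STP is enabled). */
static inline __u32 br_forward_delay_attr(unsigned int seconds)
{
	return seconds * BR_USER_HZ;
}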
@ -518,11 +800,252 @@ struct ifla_bridge_id {
__u8 addr[6]; /* ETH_ALEN */
};
/**
* DOC: Bridge mode enum definition
*
* @BRIDGE_MODE_HAIRPIN
* Controls whether traffic may be sent back out of the port on which it
* was received. This option is also called reflective relay mode, and is
* used to support basic VEPA (Virtual Ethernet Port Aggregator)
* capabilities. By default, this flag is turned off and the bridge will
* not forward traffic back out of the receiving port.
*/
enum {
BRIDGE_MODE_UNSPEC,
BRIDGE_MODE_HAIRPIN,
};
/**
* DOC: Bridge port enum definition
*
* @IFLA_BRPORT_STATE
* The operation state of the port. Here are the valid values.
*
* * 0 - port is in STP *DISABLED* state. Make this port completely
* inactive for STP. This is also called BPDU filter and could be used
* to disable STP on an untrusted port, like a leaf virtual device.
* The traffic forwarding is also stopped on this port.
* * 1 - port is in STP *LISTENING* state. Only valid if STP is enabled
* on the bridge. In this state the port listens for STP BPDUs and
* drops all other traffic frames.
* * 2 - port is in STP *LEARNING* state. Only valid if STP is enabled on
* the bridge. In this state the port will accept traffic only for the
* purpose of updating MAC address tables.
* * 3 - port is in STP *FORWARDING* state. Port is fully active.
* * 4 - port is in STP *BLOCKING* state. Only valid if STP is enabled on
* the bridge. This state is used during the STP election process.
* In this state, port will only process STP BPDUs.
*
* @IFLA_BRPORT_PRIORITY
* The STP port priority. The valid values are between 0 and 255.
*
* @IFLA_BRPORT_COST
* The STP path cost of the port. The valid values are between 1 and 65535.
*
* @IFLA_BRPORT_MODE
* Set the bridge port mode. See *BRIDGE_MODE_HAIRPIN* for more details.
*
* @IFLA_BRPORT_GUARD
* Controls whether STP BPDUs will be processed by the bridge port. By
* default, the flag is turned off to allow BPDU processing. Turning this
* flag on will disable the bridge port if a STP BPDU packet is received.
*
* If the bridge has Spanning Tree enabled, hostile devices on the network
* may send BPDU on a port and cause network failure. Setting *guard on*
* will detect and stop this by disabling the port. The port will be
* restarted if the link is brought down, or removed and reattached.
*
* @IFLA_BRPORT_PROTECT
* Controls whether a given port is allowed to become a root port or not.
* Only used when STP is enabled on the bridge. By default the flag is off.
*
* This feature is also called root port guard. If BPDU is received from a
* leaf (edge) port, it should not be elected as root port. This could
* be used if using STP on a bridge and the downstream bridges are not fully
* trusted; this prevents a hostile guest from rerouting traffic.
*
* @IFLA_BRPORT_FAST_LEAVE
* This flag allows the bridge to immediately stop multicast traffic
* forwarding on a port that receives an IGMP Leave message. It is only used
* when IGMP snooping is enabled on the bridge. By default the flag is off.
*
* @IFLA_BRPORT_LEARNING
* Controls whether a given port will learn *source* MAC addresses from
* received traffic or not. Also controls whether dynamic FDB entries
* (which can also be added by software) will be refreshed by incoming
* traffic. By default this flag is on.
*
* @IFLA_BRPORT_UNICAST_FLOOD
* Controls whether unicast traffic for which there is no FDB entry will
* be flooded towards this port. By default this flag is on.
*
* @IFLA_BRPORT_PROXYARP
* Enable proxy ARP on this port.
*
* @IFLA_BRPORT_LEARNING_SYNC
 * Controls whether a given port will sync MAC addresses learned on the
 * device port to the bridge FDB.
*
* @IFLA_BRPORT_PROXYARP_WIFI
* Enable proxy ARP on this port which meets extended requirements by
* IEEE 802.11 and Hotspot 2.0 specifications.
*
* @IFLA_BRPORT_ROOT_ID
*
* @IFLA_BRPORT_BRIDGE_ID
*
* @IFLA_BRPORT_DESIGNATED_PORT
*
* @IFLA_BRPORT_DESIGNATED_COST
*
* @IFLA_BRPORT_ID
*
* @IFLA_BRPORT_NO
*
* @IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
*
* @IFLA_BRPORT_CONFIG_PENDING
*
* @IFLA_BRPORT_MESSAGE_AGE_TIMER
*
* @IFLA_BRPORT_FORWARD_DELAY_TIMER
*
* @IFLA_BRPORT_HOLD_TIMER
*
* @IFLA_BRPORT_FLUSH
* Flush bridge ports' fdb dynamic entries.
*
* @IFLA_BRPORT_MULTICAST_ROUTER
* Configure the port's multicast router presence. A port with
* a multicast router will receive all multicast traffic.
* The valid values are:
*
* * 0 disable multicast routers on this port
* * 1 let the system detect the presence of routers (default)
* * 2 permanently enable multicast traffic forwarding on this port
* * 3 enable multicast routers temporarily on this port, not depending
* on incoming queries.
*
* @IFLA_BRPORT_PAD
*
* @IFLA_BRPORT_MCAST_FLOOD
* Controls whether a given port will flood multicast traffic for which
* there is no MDB entry. By default this flag is on.
*
* @IFLA_BRPORT_MCAST_TO_UCAST
* Controls whether a given port will replicate packets using unicast
* instead of multicast. By default this flag is off.
*
* This is done by copying the packet per host and changing the multicast
* destination MAC to a unicast one accordingly.
*
 * *mcast_to_unicast* works on top of the multicast snooping feature of the
 * bridge, which means unicast copies are only delivered to hosts that are
 * interested and have previously signaled this via IGMP/MLD reports.
*
* This feature is intended for interface types which have a more reliable
* and/or efficient way to deliver unicast packets than broadcast ones
* (e.g. WiFi).
*
* However, it should only be enabled on interfaces where no IGMPv2/MLDv1
 * report suppression takes place. The IGMP/MLD report suppression issue is
 * usually overcome by the network daemon (supplicant) enabling AP isolation,
 * thereby separating all STAs.
*
* Delivery of STA-to-STA IP multicast is made possible again by enabling
* and utilizing the bridge hairpin mode, which considers the incoming port
* as a potential outgoing port, too (see *BRIDGE_MODE_HAIRPIN* option).
 * Hairpin mode is performed after multicast snooping, therefore reports
 * are only delivered to STAs running a multicast router.
*
* @IFLA_BRPORT_VLAN_TUNNEL
* Controls whether vlan to tunnel mapping is enabled on the port.
* By default this flag is off.
*
* @IFLA_BRPORT_BCAST_FLOOD
* Controls flooding of broadcast traffic on the given port. By default
* this flag is on.
*
* @IFLA_BRPORT_GROUP_FWD_MASK
* Set the group forward mask. This is a bitmask that is applied to
* decide whether to forward incoming frames destined to link-local
 * addresses, i.e. addresses of the form 01:80:C2:00:00:0X. The mask defaults
 * to 0, which means the bridge does not forward any link-local frames
 * coming on this port.
*
* @IFLA_BRPORT_NEIGH_SUPPRESS
* Controls whether neighbor discovery (arp and nd) proxy and suppression
* is enabled on the port. By default this flag is off.
*
* @IFLA_BRPORT_ISOLATED
* Controls whether a given port will be isolated, which means it will be
* able to communicate with non-isolated ports only. By default this
* flag is off.
*
* @IFLA_BRPORT_BACKUP_PORT
* Set a backup port. If the port loses carrier all traffic will be
* redirected to the configured backup port. Set the value to 0 to disable
* it.
*
* @IFLA_BRPORT_MRP_RING_OPEN
*
* @IFLA_BRPORT_MRP_IN_OPEN
*
* @IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT
 * The per-port multicast EHT hosts limit. The default value is 512.
 * Setting it to 0 is not allowed.
*
* @IFLA_BRPORT_MCAST_EHT_HOSTS_CNT
* The current number of tracked hosts, read only.
*
* @IFLA_BRPORT_LOCKED
* Controls whether a port will be locked, meaning that hosts behind the
* port will not be able to communicate through the port unless an FDB
* entry with the unit's MAC address is in the FDB. The common use case is
* that hosts are allowed access through authentication with the IEEE 802.1X
* protocol or based on whitelists. By default this flag is off.
*
* Please note that secure 802.1X deployments should always use the
* *BR_BOOLOPT_NO_LL_LEARN* flag, to not permit the bridge to populate its
* FDB based on link-local (EAPOL) traffic received on the port.
*
* @IFLA_BRPORT_MAB
* Controls whether a port will use MAC Authentication Bypass (MAB), a
* technique through which select MAC addresses may be allowed on a locked
* port, without using 802.1X authentication. Packets with an unknown source
 * MAC address generate a "locked" FDB entry on the incoming bridge port.
* The common use case is for user space to react to these bridge FDB
* notifications and optionally replace the locked FDB entry with a normal
* one, allowing traffic to pass for whitelisted MAC addresses.
*
* Setting this flag also requires *IFLA_BRPORT_LOCKED* and
* *IFLA_BRPORT_LEARNING*. *IFLA_BRPORT_LOCKED* ensures that unauthorized
* data packets are dropped, and *IFLA_BRPORT_LEARNING* allows the dynamic
* FDB entries installed by user space (as replacements for the locked FDB
* entries) to be refreshed and/or aged out.
*
* @IFLA_BRPORT_MCAST_N_GROUPS
*
* @IFLA_BRPORT_MCAST_MAX_GROUPS
* Sets the maximum number of MDB entries that can be registered for a
* given port. Attempts to register more MDB entries at the port than this
* limit allows will be rejected, whether they are done through netlink
* (e.g. the bridge tool), or IGMP or MLD membership reports. Setting a
* limit of 0 disables the limit. The default value is 0.
*
* @IFLA_BRPORT_NEIGH_VLAN_SUPPRESS
* Controls whether neighbor discovery (arp and nd) proxy and suppression is
* enabled for a given port. By default this flag is off.
*
* Note that this option only takes effect when *IFLA_BRPORT_NEIGH_SUPPRESS*
* is enabled for a given port.
*
* @IFLA_BRPORT_BACKUP_NHID
* The FDB nexthop object ID to attach to packets being redirected to a
* backup port that has VLAN tunnel mapping enabled (via the
* *IFLA_BRPORT_VLAN_TUNNEL* option). Setting a value of 0 (default) has
* the effect of not attaching any ID.
*/
enum {
IFLA_BRPORT_UNSPEC,
IFLA_BRPORT_STATE, /* Spanning tree state */
@ -567,6 +1090,8 @@ enum {
IFLA_BRPORT_MAB,
IFLA_BRPORT_MCAST_N_GROUPS,
IFLA_BRPORT_MCAST_MAX_GROUPS,
IFLA_BRPORT_NEIGH_VLAN_SUPPRESS,
IFLA_BRPORT_BACKUP_NHID,
__IFLA_BRPORT_MAX
};
#define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
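As a reading aid for the IFLA_BRPORT_STATE codes documented above, a small helper sketch (not part of the UAPI) mapping the numeric STP states to names:

static inline const char *br_port_state_name(__u8 state)
{
	switch (state) {
	case 0: return "disabled";
	case 1: return "listening";
	case 2: return "learning";
	case 3: return "forwarding";
	case 4: return "blocking";
	default: return "unknown";
	}
}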
@ -633,6 +1158,7 @@ enum {
IFLA_MACVLAN_MACADDR_COUNT,
IFLA_MACVLAN_BC_QUEUE_LEN,
IFLA_MACVLAN_BC_QUEUE_LEN_USED,
IFLA_MACVLAN_BC_CUTOFF,
__IFLA_MACVLAN_MAX,
};
@ -751,6 +1277,30 @@ struct tunnel_msg {
__u32 ifindex;
};
/* netkit section */
enum netkit_action {
NETKIT_NEXT = -1,
NETKIT_PASS = 0,
NETKIT_DROP = 2,
NETKIT_REDIRECT = 7,
};
enum netkit_mode {
NETKIT_L2,
NETKIT_L3,
};
enum {
IFLA_NETKIT_UNSPEC,
IFLA_NETKIT_PEER_INFO,
IFLA_NETKIT_PRIMARY,
IFLA_NETKIT_POLICY,
IFLA_NETKIT_PEER_POLICY,
IFLA_NETKIT_MODE,
__IFLA_NETKIT_MAX,
};
#define IFLA_NETKIT_MAX (__IFLA_NETKIT_MAX - 1)
/* VXLAN section */
/* include statistics in the dump */
@ -824,6 +1374,8 @@ enum {
IFLA_VXLAN_TTL_INHERIT,
IFLA_VXLAN_DF,
IFLA_VXLAN_VNIFILTER, /* only applicable with COLLECT_METADATA mode */
IFLA_VXLAN_LOCALBYPASS,
IFLA_VXLAN_LABEL_POLICY, /* IPv6 flow label policy; ifla_vxlan_label_policy */
__IFLA_VXLAN_MAX
};
#define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1)
@ -841,6 +1393,13 @@ enum ifla_vxlan_df {
VXLAN_DF_MAX = __VXLAN_DF_END - 1,
};
enum ifla_vxlan_label_policy {
VXLAN_LABEL_FIXED = 0,
VXLAN_LABEL_INHERIT = 1,
__VXLAN_LABEL_END,
VXLAN_LABEL_MAX = __VXLAN_LABEL_END - 1,
};
/* GENEVE section */
enum {
IFLA_GENEVE_UNSPEC,
@ -905,6 +1464,8 @@ enum {
IFLA_GTP_ROLE,
IFLA_GTP_CREATE_SOCKETS,
IFLA_GTP_RESTART_COUNT,
IFLA_GTP_LOCAL,
IFLA_GTP_LOCAL6,
__IFLA_GTP_MAX,
};
#define IFLA_GTP_MAX (__IFLA_GTP_MAX - 1)
@ -944,6 +1505,7 @@ enum {
IFLA_BOND_AD_LACP_ACTIVE,
IFLA_BOND_MISSED_MAX,
IFLA_BOND_NS_IP6_TARGET,
IFLA_BOND_COUPLED_CONTROL,
__IFLA_BOND_MAX,
};
@ -1209,6 +1771,7 @@ enum {
IFLA_HSR_PROTOCOL, /* Indicate different protocol than
* HSR. For example PRP.
*/
IFLA_HSR_INTERLINK, /* HSR interlink network device */
__IFLA_HSR_MAX,
};
@ -1386,7 +1949,9 @@ enum {
enum {
IFLA_DSA_UNSPEC,
IFLA_DSA_MASTER,
IFLA_DSA_CONDUIT,
/* Deprecated, use IFLA_DSA_CONDUIT instead */
IFLA_DSA_MASTER = IFLA_DSA_CONDUIT,
__IFLA_DSA_MAX,
};

View File

@ -59,6 +59,7 @@ struct sockaddr_ll {
#define PACKET_ROLLOVER_STATS 21
#define PACKET_FANOUT_DATA 22
#define PACKET_IGNORE_OUTGOING 23
#define PACKET_VNET_HDR_SZ 24
#define PACKET_FANOUT_HASH 0
#define PACKET_FANOUT_LB 1

View File

@ -1,23 +1,69 @@
/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
/*
* include/linux/if_team.h - Network team device driver header
* Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/team.yaml */
/* YNL-GEN uapi header */
#ifndef _LINUX_IF_TEAM_H_
#define _LINUX_IF_TEAM_H_
#ifndef _LINUX_IF_TEAM_H
#define _LINUX_IF_TEAM_H
#define TEAM_GENL_NAME "team"
#define TEAM_GENL_VERSION 1
#define TEAM_STRING_MAX_LEN 32
#define TEAM_STRING_MAX_LEN 32
#define TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME "change_event"
/**********************************
* NETLINK_GENERIC netlink family.
**********************************/
enum {
TEAM_ATTR_UNSPEC,
TEAM_ATTR_TEAM_IFINDEX,
TEAM_ATTR_LIST_OPTION,
TEAM_ATTR_LIST_PORT,
__TEAM_ATTR_MAX,
TEAM_ATTR_MAX = (__TEAM_ATTR_MAX - 1)
};
enum {
TEAM_ATTR_ITEM_OPTION_UNSPEC,
TEAM_ATTR_ITEM_OPTION,
__TEAM_ATTR_ITEM_OPTION_MAX,
TEAM_ATTR_ITEM_OPTION_MAX = (__TEAM_ATTR_ITEM_OPTION_MAX - 1)
};
enum {
TEAM_ATTR_OPTION_UNSPEC,
TEAM_ATTR_OPTION_NAME,
TEAM_ATTR_OPTION_CHANGED,
TEAM_ATTR_OPTION_TYPE,
TEAM_ATTR_OPTION_DATA,
TEAM_ATTR_OPTION_REMOVED,
TEAM_ATTR_OPTION_PORT_IFINDEX,
TEAM_ATTR_OPTION_ARRAY_INDEX,
__TEAM_ATTR_OPTION_MAX,
TEAM_ATTR_OPTION_MAX = (__TEAM_ATTR_OPTION_MAX - 1)
};
enum {
TEAM_ATTR_ITEM_PORT_UNSPEC,
TEAM_ATTR_ITEM_PORT,
__TEAM_ATTR_ITEM_PORT_MAX,
TEAM_ATTR_ITEM_PORT_MAX = (__TEAM_ATTR_ITEM_PORT_MAX - 1)
};
enum {
TEAM_ATTR_PORT_UNSPEC,
TEAM_ATTR_PORT_IFINDEX,
TEAM_ATTR_PORT_CHANGED,
TEAM_ATTR_PORT_LINKUP,
TEAM_ATTR_PORT_SPEED,
TEAM_ATTR_PORT_DUPLEX,
TEAM_ATTR_PORT_REMOVED,
__TEAM_ATTR_PORT_MAX,
TEAM_ATTR_PORT_MAX = (__TEAM_ATTR_PORT_MAX - 1)
};
enum {
TEAM_CMD_NOOP,
@ -26,83 +72,7 @@ enum {
TEAM_CMD_PORT_LIST_GET,
__TEAM_CMD_MAX,
TEAM_CMD_MAX = (__TEAM_CMD_MAX - 1),
TEAM_CMD_MAX = (__TEAM_CMD_MAX - 1)
};
enum {
TEAM_ATTR_UNSPEC,
TEAM_ATTR_TEAM_IFINDEX, /* u32 */
TEAM_ATTR_LIST_OPTION, /* nest */
TEAM_ATTR_LIST_PORT, /* nest */
__TEAM_ATTR_MAX,
TEAM_ATTR_MAX = __TEAM_ATTR_MAX - 1,
};
/* Nested layout of get/set msg:
*
* [TEAM_ATTR_LIST_OPTION]
* [TEAM_ATTR_ITEM_OPTION]
* [TEAM_ATTR_OPTION_*], ...
* [TEAM_ATTR_ITEM_OPTION]
* [TEAM_ATTR_OPTION_*], ...
* ...
* [TEAM_ATTR_LIST_PORT]
* [TEAM_ATTR_ITEM_PORT]
* [TEAM_ATTR_PORT_*], ...
* [TEAM_ATTR_ITEM_PORT]
* [TEAM_ATTR_PORT_*], ...
* ...
*/
enum {
TEAM_ATTR_ITEM_OPTION_UNSPEC,
TEAM_ATTR_ITEM_OPTION, /* nest */
__TEAM_ATTR_ITEM_OPTION_MAX,
TEAM_ATTR_ITEM_OPTION_MAX = __TEAM_ATTR_ITEM_OPTION_MAX - 1,
};
enum {
TEAM_ATTR_OPTION_UNSPEC,
TEAM_ATTR_OPTION_NAME, /* string */
TEAM_ATTR_OPTION_CHANGED, /* flag */
TEAM_ATTR_OPTION_TYPE, /* u8 */
TEAM_ATTR_OPTION_DATA, /* dynamic */
TEAM_ATTR_OPTION_REMOVED, /* flag */
TEAM_ATTR_OPTION_PORT_IFINDEX, /* u32 */ /* for per-port options */
TEAM_ATTR_OPTION_ARRAY_INDEX, /* u32 */ /* for array options */
__TEAM_ATTR_OPTION_MAX,
TEAM_ATTR_OPTION_MAX = __TEAM_ATTR_OPTION_MAX - 1,
};
enum {
TEAM_ATTR_ITEM_PORT_UNSPEC,
TEAM_ATTR_ITEM_PORT, /* nest */
__TEAM_ATTR_ITEM_PORT_MAX,
TEAM_ATTR_ITEM_PORT_MAX = __TEAM_ATTR_ITEM_PORT_MAX - 1,
};
enum {
TEAM_ATTR_PORT_UNSPEC,
TEAM_ATTR_PORT_IFINDEX, /* u32 */
TEAM_ATTR_PORT_CHANGED, /* flag */
TEAM_ATTR_PORT_LINKUP, /* flag */
TEAM_ATTR_PORT_SPEED, /* u32 */
TEAM_ATTR_PORT_DUPLEX, /* u8 */
TEAM_ATTR_PORT_REMOVED, /* flag */
__TEAM_ATTR_PORT_MAX,
TEAM_ATTR_PORT_MAX = __TEAM_ATTR_PORT_MAX - 1,
};
/*
* NETLINK_GENERIC related info
*/
#define TEAM_GENL_NAME "team"
#define TEAM_GENL_VERSION 0x1
#define TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME "change_event"
#endif /* _LINUX_IF_TEAM_H_ */
#endif /* _LINUX_IF_TEAM_H */

View File

@ -161,6 +161,13 @@ enum {
#define IFLA_VTI_MAX (__IFLA_VTI_MAX - 1)
/* Historically, tunnel flags have been defined as __be16 and now there are
* no free bits left. It is strongly advised to switch the already existing
* userspace code to the new *_BIT definitions from down below, as __be16
 * userspace code to the new *_BIT definitions below, as __be16
* code must use *_BIT only.
*/
#define TUNNEL_CSUM __cpu_to_be16(0x01)
#define TUNNEL_ROUTING __cpu_to_be16(0x02)
#define TUNNEL_KEY __cpu_to_be16(0x04)
@ -182,4 +189,31 @@ enum {
(TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT | TUNNEL_ERSPAN_OPT | \
TUNNEL_GTP_OPT)
enum {
IP_TUNNEL_CSUM_BIT = 0U,
IP_TUNNEL_ROUTING_BIT,
IP_TUNNEL_KEY_BIT,
IP_TUNNEL_SEQ_BIT,
IP_TUNNEL_STRICT_BIT,
IP_TUNNEL_REC_BIT,
IP_TUNNEL_VERSION_BIT,
IP_TUNNEL_NO_KEY_BIT,
IP_TUNNEL_DONT_FRAGMENT_BIT,
IP_TUNNEL_OAM_BIT,
IP_TUNNEL_CRIT_OPT_BIT,
IP_TUNNEL_GENEVE_OPT_BIT, /* OPTIONS_PRESENT */
IP_TUNNEL_VXLAN_OPT_BIT, /* OPTIONS_PRESENT */
IP_TUNNEL_NOCACHE_BIT,
IP_TUNNEL_ERSPAN_OPT_BIT, /* OPTIONS_PRESENT */
IP_TUNNEL_GTP_OPT_BIT, /* OPTIONS_PRESENT */
IP_TUNNEL_VTI_BIT,
IP_TUNNEL_SIT_ISATAP_BIT = IP_TUNNEL_VTI_BIT,
/* Flags starting from here are not available via the old UAPI */
IP_TUNNEL_PFCP_OPT_BIT, /* OPTIONS_PRESENT */
__IP_TUNNEL_FLAG_NUM,
};
#endif /* _IF_TUNNEL_H_ */
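To illustrate the relationship between the legacy __be16 flags and the new *_BIT values: the host-order bit position of each legacy flag equals the corresponding IP_TUNNEL_*_BIT number, e.g. TUNNEL_KEY (0x04) corresponds to IP_TUNNEL_KEY_BIT (2). A hedged sketch with a made-up helper name:

#include <arpa/inet.h>

/* Illustrative only: map one legacy flag such as TUNNEL_KEY to its
 * IP_TUNNEL_*_BIT number by locating the set bit in host byte order. */
static inline int tunnel_flag_to_bit(__be16 flag)
{
	unsigned int v = ntohs(flag);
	int bit = 0;

	while (v > 1) {
		v >>= 1;
		bit++;
	}
	return bit;	/* TUNNEL_KEY (0x04) -> 2 == IP_TUNNEL_KEY_BIT */
}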

View File

@ -25,9 +25,21 @@
* application.
*/
#define XDP_USE_NEED_WAKEUP (1 << 3)
/* By setting this option, the userspace application indicates that it can
 * handle multiple descriptors per packet, thus enabling AF_XDP to split
 * multi-buffer XDP frames into multiple Rx descriptors. Without this set,
 * such frames will be dropped.
 */
#define XDP_USE_SG (1 << 4)
/* Flags for xsk_umem_config flags */
#define XDP_UMEM_UNALIGNED_CHUNK_FLAG (1 << 0)
#define XDP_UMEM_UNALIGNED_CHUNK_FLAG (1 << 0)
/* Force checksum calculation in software. Can be used for testing or
* working around potential HW issues. This option causes performance
* degradation and only works in XDP_COPY mode.
*/
#define XDP_UMEM_TX_SW_CSUM (1 << 1)
struct sockaddr_xdp {
__u16 sxdp_family;
@ -70,6 +82,7 @@ struct xdp_umem_reg {
__u32 chunk_size;
__u32 headroom;
__u32 flags;
__u32 tx_metadata_len;
};
struct xdp_statistics {
@ -99,6 +112,41 @@ struct xdp_options {
#define XSK_UNALIGNED_BUF_ADDR_MASK \
((1ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT) - 1)
/* Request transmit timestamp. Upon completion, put it into tx_timestamp
* field of struct xsk_tx_metadata.
*/
#define XDP_TXMD_FLAGS_TIMESTAMP (1 << 0)
/* Request transmit checksum offload. Checksum start position and offset
* are communicated via csum_start and csum_offset fields of struct
* xsk_tx_metadata.
*/
#define XDP_TXMD_FLAGS_CHECKSUM (1 << 1)
/* AF_XDP offloads request. 'request' union member is consumed by the driver
* when the packet is being transmitted. 'completion' union member is
* filled by the driver when the transmit completion arrives.
*/
struct xsk_tx_metadata {
__u64 flags;
union {
struct {
/* XDP_TXMD_FLAGS_CHECKSUM */
/* Offset from desc->addr where checksumming should start. */
__u16 csum_start;
/* Offset from csum_start where checksum should be stored. */
__u16 csum_offset;
} request;
struct {
/* XDP_TXMD_FLAGS_TIMESTAMP */
__u64 tx_timestamp;
} completion;
};
};
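A minimal sketch of requesting checksum offload for one Tx frame. It assumes the metadata area was reserved via xdp_umem_reg.tx_metadata_len and sits directly in front of the packet data, and that the matching descriptor sets XDP_TX_METADATA in its options field; the function name and the UDP-over-IPv4 layout are illustrative.

#include <stddef.h>
#include <linux/if_ether.h>
#include <linux/udp.h>

static inline void request_udp_csum(struct xsk_tx_metadata *meta,
				    __u16 ip_hdr_len)
{
	meta->flags = XDP_TXMD_FLAGS_CHECKSUM;
	/* Checksumming starts at the UDP header... */
	meta->request.csum_start = sizeof(struct ethhdr) + ip_hdr_len;
	/* ...and the result lands in the UDP checksum field. Software is
	 * still expected to pre-seed the field with the pseudo-header sum. */
	meta->request.csum_offset = offsetof(struct udphdr, check);
}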
/* Rx/Tx descriptor */
struct xdp_desc {
__u64 addr;
@ -108,4 +156,14 @@ struct xdp_desc {
/* UMEM descriptor is __u64 */
/* Flag indicating that the packet continues with the buffer pointed out by the
* next frame in the ring. The end of the packet is signalled by setting this
* bit to zero. For single buffer packets, every descriptor has 'options' set
* to 0 and this maintains backward compatibility.
*/
#define XDP_PKT_CONTD (1 << 0)
/* TX packet carries valid metadata. */
#define XDP_TX_METADATA (1 << 1)
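With XDP_USE_SG enabled, a packet may span several Rx descriptors; fragments are chained until a descriptor without XDP_PKT_CONTD appears. A hedged consumer-side sketch (helper name is made up):

/* Count the descriptors and total byte length of the packet that starts at
 * descs[0]; returns the number of descriptors it occupies. */
static inline unsigned int xdp_packet_span(const struct xdp_desc *descs,
					   unsigned int avail, __u64 *total_len)
{
	unsigned int i;

	*total_len = 0;
	for (i = 0; i < avail; i++) {
		*total_len += descs[i].len;
		if (!(descs[i].options & XDP_PKT_CONTD))
			return i + 1;	/* last fragment of this packet */
	}
	return i;	/* ring ran dry mid-packet */
}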
#endif /* _LINUX_IF_XDP_H */

View File

@ -47,6 +47,10 @@ enum iio_chan_type {
IIO_POSITIONRELATIVE,
IIO_PHASE,
IIO_MASSCONCENTRATION,
IIO_DELTA_ANGL,
IIO_DELTA_VELOCITY,
IIO_COLORTEMP,
IIO_CHROMATICITY,
};
enum iio_modifier {
@ -101,6 +105,8 @@ enum iio_modifier {
IIO_MOD_PITCH,
IIO_MOD_YAW,
IIO_MOD_ROLL,
IIO_MOD_LIGHT_UVA,
IIO_MOD_LIGHT_UVB,
};
enum iio_event_type {

View File

@ -145,7 +145,7 @@ struct in6_flowlabel_req {
#define IPV6_TLV_PADN 1
#define IPV6_TLV_ROUTERALERT 5
#define IPV6_TLV_CALIPSO 7 /* RFC 5570 */
#define IPV6_TLV_IOAM 49 /* TEMPORARY IANA allocation for IOAM */
#define IPV6_TLV_IOAM 49 /* RFC 9486 */
#define IPV6_TLV_JUMBO 194
#define IPV6_TLV_HAO 201 /* home address option */

View File

@ -30,8 +30,8 @@ struct inotify_event {
#define IN_ACCESS 0x00000001 /* File was accessed */
#define IN_MODIFY 0x00000002 /* File was modified */
#define IN_ATTRIB 0x00000004 /* Metadata changed */
#define IN_CLOSE_WRITE 0x00000008 /* Writtable file was closed */
#define IN_CLOSE_NOWRITE 0x00000010 /* Unwrittable file closed */
#define IN_CLOSE_WRITE 0x00000008 /* Writable file was closed */
#define IN_CLOSE_NOWRITE 0x00000010 /* Unwritable file closed */
#define IN_OPEN 0x00000020 /* File was opened */
#define IN_MOVED_FROM 0x00000040 /* File was moved from X */
#define IN_MOVED_TO 0x00000080 /* File was moved to Y */

View File

@ -602,6 +602,7 @@
#define KEY_ALS_TOGGLE 0x230 /* Ambient light sensor */
#define KEY_ROTATE_LOCK_TOGGLE 0x231 /* Display rotation lock */
#define KEY_REFRESH_RATE_TOGGLE 0x232 /* Display refresh rate toggle */
#define KEY_BUTTONCONFIG 0x240 /* AL Button Configuration */
#define KEY_TASKMANAGER 0x241 /* AL Task/Project Manager */
@ -617,6 +618,8 @@
#define KEY_CAMERA_ACCESS_ENABLE 0x24b /* Enables programmatic access to camera devices. (HUTRR72) */
#define KEY_CAMERA_ACCESS_DISABLE 0x24c /* Disables programmatic access to camera devices. (HUTRR72) */
#define KEY_CAMERA_ACCESS_TOGGLE 0x24d /* Toggles the current state of the camera access control. (HUTRR72) */
#define KEY_ACCESSIBILITY 0x24e /* Toggles the system bound accessibility UI/command (HUTRR116) */
#define KEY_DO_NOT_DISTURB 0x24f /* Toggles the system-wide "Do Not Disturb" control (HUTRR94)*/
#define KEY_BRIGHTNESS_MIN 0x250 /* Set Brightness to Minimum */
#define KEY_BRIGHTNESS_MAX 0x251 /* Set Brightness to Maximum */

View File

@ -43,6 +43,10 @@ struct io_uring_sqe {
union {
__u64 addr; /* pointer to buffer or iovecs */
__u64 splice_off_in;
struct {
__u32 level;
__u32 optname;
};
};
__u32 len; /* buffer size or number of iovecs */
union {
@ -65,6 +69,10 @@ struct io_uring_sqe {
__u32 xattr_flags;
__u32 msg_ring_flags;
__u32 uring_cmd_flags;
__u32 waitid_flags;
__u32 futex_flags;
__u32 install_fd_flags;
__u32 nop_flags;
};
__u64 user_data; /* data to be passed back at completion time */
/* pack this to avoid bogus arm OABI complaints */
@ -79,6 +87,7 @@ struct io_uring_sqe {
union {
__s32 splice_fd_in;
__u32 file_index;
__u32 optlen;
struct {
__u16 addr_len;
__u16 __pad3[1];
@ -89,6 +98,7 @@ struct io_uring_sqe {
__u64 addr3;
__u64 __pad2[1];
};
__u64 optval;
/*
* If the ring is initialized with IORING_SETUP_SQE128, then
* this field is used for 80 bytes of arbitrary command data
@ -106,7 +116,7 @@ struct io_uring_sqe {
*/
#define IORING_FILE_INDEX_ALLOC (~0U)
enum {
enum io_uring_sqe_flags_bit {
IOSQE_FIXED_FILE_BIT,
IOSQE_IO_DRAIN_BIT,
IOSQE_IO_LINK_BIT,
@ -173,6 +183,23 @@ enum {
*/
#define IORING_SETUP_DEFER_TASKRUN (1U << 13)
/*
* Application provides the memory for the rings
*/
#define IORING_SETUP_NO_MMAP (1U << 14)
/*
* Register the ring fd in itself for use with
* IORING_REGISTER_USE_REGISTERED_RING; return a registered fd index rather
* than an fd.
*/
#define IORING_SETUP_REGISTERED_FD_ONLY (1U << 15)
/*
* Removes indirection through the SQ index array.
*/
#define IORING_SETUP_NO_SQARRAY (1U << 16)
enum io_uring_op {
IORING_OP_NOP,
IORING_OP_READV,
@ -223,17 +250,25 @@ enum io_uring_op {
IORING_OP_URING_CMD,
IORING_OP_SEND_ZC,
IORING_OP_SENDMSG_ZC,
IORING_OP_READ_MULTISHOT,
IORING_OP_WAITID,
IORING_OP_FUTEX_WAIT,
IORING_OP_FUTEX_WAKE,
IORING_OP_FUTEX_WAITV,
IORING_OP_FIXED_FD_INSTALL,
IORING_OP_FTRUNCATE,
/* this goes last, obviously */
IORING_OP_LAST,
};
/*
* sqe->uring_cmd_flags
 * sqe->uring_cmd_flags	top 8 bits aren't available for userspace
* IORING_URING_CMD_FIXED use registered buffer; pass this flag
* along with setting sqe->buf_index.
*/
#define IORING_URING_CMD_FIXED (1U << 0)
#define IORING_URING_CMD_MASK IORING_URING_CMD_FIXED
/*
@ -250,6 +285,7 @@ enum io_uring_op {
#define IORING_TIMEOUT_REALTIME (1U << 3)
#define IORING_LINK_TIMEOUT_UPDATE (1U << 4)
#define IORING_TIMEOUT_ETIME_SUCCESS (1U << 5)
#define IORING_TIMEOUT_MULTISHOT (1U << 6)
#define IORING_TIMEOUT_CLOCK_MASK (IORING_TIMEOUT_BOOTTIME | IORING_TIMEOUT_REALTIME)
#define IORING_TIMEOUT_UPDATE_MASK (IORING_TIMEOUT_UPDATE | IORING_LINK_TIMEOUT_UPDATE)
/*
@ -284,11 +320,15 @@ enum io_uring_op {
* request 'user_data'
* IORING_ASYNC_CANCEL_ANY Match any request
* IORING_ASYNC_CANCEL_FD_FIXED 'fd' passed in is a fixed descriptor
* IORING_ASYNC_CANCEL_USERDATA Match on user_data, default for no other key
* IORING_ASYNC_CANCEL_OP Match request based on opcode
*/
#define IORING_ASYNC_CANCEL_ALL (1U << 0)
#define IORING_ASYNC_CANCEL_FD (1U << 1)
#define IORING_ASYNC_CANCEL_ANY (1U << 2)
#define IORING_ASYNC_CANCEL_FD_FIXED (1U << 3)
#define IORING_ASYNC_CANCEL_USERDATA (1U << 4)
#define IORING_ASYNC_CANCEL_OP (1U << 5)
/*
* send/sendmsg and recv/recvmsg flags (sqe->ioprio)
@ -312,11 +352,20 @@ enum io_uring_op {
* 0 is reported if zerocopy was actually possible.
* IORING_NOTIF_USAGE_ZC_COPIED if data was copied
* (at least partially).
*
* IORING_RECVSEND_BUNDLE Used with IOSQE_BUFFER_SELECT. If set, send or
* recv will grab as many buffers from the buffer
* group ID given and send them all. The completion
 *				result will be the number of buffers sent, with
 *				the starting buffer ID in cqe->flags as per
 *				usual for provided buffer usage. The buffers
 *				will be contiguous from the starting buffer ID.
*/
#define IORING_RECVSEND_POLL_FIRST (1U << 0)
#define IORING_RECV_MULTISHOT (1U << 1)
#define IORING_RECVSEND_FIXED_BUF (1U << 2)
#define IORING_SEND_ZC_REPORT_USAGE (1U << 3)
#define IORING_RECVSEND_BUNDLE (1U << 4)
/*
* cqe.res for IORING_CQE_F_NOTIF if
@ -331,11 +380,13 @@ enum io_uring_op {
* accept flags stored in sqe->ioprio
*/
#define IORING_ACCEPT_MULTISHOT (1U << 0)
#define IORING_ACCEPT_DONTWAIT (1U << 1)
#define IORING_ACCEPT_POLL_FIRST (1U << 2)
/*
* IORING_OP_MSG_RING command types, stored in sqe->addr
*/
enum {
enum io_uring_msg_ring_flags {
IORING_MSG_DATA, /* pass sqe->len as 'res' and off as user_data */
IORING_MSG_SEND_FD, /* send a registered fd to another ring */
};
@ -350,6 +401,20 @@ enum {
/* Pass through the flags from sqe->file_index to cqe->flags */
#define IORING_MSG_RING_FLAGS_PASS (1U << 1)
/*
* IORING_OP_FIXED_FD_INSTALL flags (sqe->install_fd_flags)
*
* IORING_FIXED_FD_NO_CLOEXEC Don't mark the fd as O_CLOEXEC
*/
#define IORING_FIXED_FD_NO_CLOEXEC (1U << 0)
/*
* IORING_OP_NOP flags (sqe->nop_flags)
*
* IORING_NOP_INJECT_RESULT Inject result from sqe->result
*/
#define IORING_NOP_INJECT_RESULT (1U << 0)
/*
* IO completion data structure (Completion Queue Entry)
*/
@ -379,9 +444,7 @@ struct io_uring_cqe {
#define IORING_CQE_F_SOCK_NONEMPTY (1U << 2)
#define IORING_CQE_F_NOTIF (1U << 3)
enum {
IORING_CQE_BUFFER_SHIFT = 16,
};
#define IORING_CQE_BUFFER_SHIFT 16
/*
* Magic offsets for the application to mmap the data it needs
@ -389,6 +452,9 @@ enum {
#define IORING_OFF_SQ_RING 0ULL
#define IORING_OFF_CQ_RING 0x8000000ULL
#define IORING_OFF_SQES 0x10000000ULL
#define IORING_OFF_PBUF_RING 0x80000000ULL
#define IORING_OFF_PBUF_SHIFT 16
#define IORING_OFF_MMAP_MASK 0xf8000000ULL
/*
* Filled with the offset for mmap(2)
@ -402,7 +468,7 @@ struct io_sqring_offsets {
__u32 dropped;
__u32 array;
__u32 resv1;
__u64 resv2;
__u64 user_addr;
};
/*
@ -421,7 +487,7 @@ struct io_cqring_offsets {
__u32 cqes;
__u32 flags;
__u32 resv1;
__u64 resv2;
__u64 user_addr;
};
/*
@ -473,11 +539,12 @@ struct io_uring_params {
#define IORING_FEAT_CQE_SKIP (1U << 11)
#define IORING_FEAT_LINKED_FILE (1U << 12)
#define IORING_FEAT_REG_REG_RING (1U << 13)
#define IORING_FEAT_RECVSEND_BUNDLE (1U << 14)
/*
* io_uring_register(2) opcodes and arguments
*/
enum {
enum io_uring_register_op {
IORING_REGISTER_BUFFERS = 0,
IORING_UNREGISTER_BUFFERS = 1,
IORING_REGISTER_FILES = 2,
@ -519,6 +586,13 @@ enum {
/* register a range of fixed file slots for automatic slot allocation */
IORING_REGISTER_FILE_ALLOC_RANGE = 25,
/* return status information for a buffer group */
IORING_REGISTER_PBUF_STATUS = 26,
/* set/clear busy poll settings */
IORING_REGISTER_NAPI = 27,
IORING_UNREGISTER_NAPI = 28,
/* this goes last */
IORING_REGISTER_LAST,
@ -527,7 +601,7 @@ enum {
};
/* io-wq worker categories */
enum {
enum io_wq_type {
IO_WQ_BOUND,
IO_WQ_UNBOUND,
};
@ -568,19 +642,6 @@ struct io_uring_rsrc_update2 {
__u32 resv2;
};
struct io_uring_notification_slot {
__u64 tag;
__u64 resv[3];
};
struct io_uring_notification_register {
__u32 nr_slots;
__u32 resv;
__u64 resv2;
__u64 data;
__u64 resv3;
};
/* Skip updating fd indexes set to this value in the fd table */
#define IORING_REGISTER_FILES_SKIP (-2)
@ -635,19 +696,48 @@ struct io_uring_buf_ring {
};
};
/*
* Flags for IORING_REGISTER_PBUF_RING.
*
* IOU_PBUF_RING_MMAP: If set, kernel will allocate the memory for the ring.
* The application must not set a ring_addr in struct
* io_uring_buf_reg, instead it must subsequently call
* mmap(2) with the offset set as:
* IORING_OFF_PBUF_RING | (bgid << IORING_OFF_PBUF_SHIFT)
* to get a virtual mapping for the ring.
*/
enum io_uring_register_pbuf_ring_flags {
IOU_PBUF_RING_MMAP = 1,
};
/* argument for IORING_(UN)REGISTER_PBUF_RING */
struct io_uring_buf_reg {
__u64 ring_addr;
__u32 ring_entries;
__u16 bgid;
__u16 pad;
__u16 flags;
__u64 resv[3];
};
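A minimal sketch of registering a kernel-allocated provided-buffer ring and mapping it with the offset encoding described above. io_uring_register(2) is reached through syscall(2) since glibc has no wrapper; a 64-bit off_t is assumed and error handling is mostly omitted.

#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

static struct io_uring_buf_ring *setup_pbuf_ring(int ring_fd, __u16 bgid,
						 __u32 entries)
{
	struct io_uring_buf_reg reg = {
		.ring_entries = entries,
		.bgid = bgid,
		.flags = IOU_PBUF_RING_MMAP,	/* kernel allocates the memory */
	};
	void *ring;

	if (syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_PBUF_RING,
		    &reg, 1) < 0)
		return NULL;

	ring = mmap(NULL, entries * sizeof(struct io_uring_buf),
		    PROT_READ | PROT_WRITE, MAP_SHARED, ring_fd,
		    IORING_OFF_PBUF_RING | ((__u64)bgid << IORING_OFF_PBUF_SHIFT));
	return ring == MAP_FAILED ? NULL : ring;
}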
/* argument for IORING_REGISTER_PBUF_STATUS */
struct io_uring_buf_status {
__u32 buf_group; /* input */
__u32 head; /* output */
__u32 resv[8];
};
/* argument for IORING_(UN)REGISTER_NAPI */
struct io_uring_napi {
__u32 busy_poll_to;
__u8 prefer_busy_poll;
__u8 pad[3];
__u64 resv;
};
/*
* io_uring_restriction->opcode values
*/
enum {
enum io_uring_register_restriction_op {
/* Allow an io_uring_register(2) opcode */
IORING_RESTRICTION_REGISTER_OP = 0,
@ -678,7 +768,9 @@ struct io_uring_sync_cancel_reg {
__s32 fd;
__u32 flags;
struct __kernel_timespec timeout;
__u64 pad[4];
__u8 opcode;
__u8 pad[7];
__u64 pad2[3];
};
/*
@ -698,6 +790,16 @@ struct io_uring_recvmsg_out {
__u32 flags;
};
/*
* Argument for IORING_OP_URING_CMD when file is a socket
*/
enum io_uring_socket_op {
SOCKET_URING_OP_SIOCINQ = 0,
SOCKET_URING_OP_SIOCOUTQ,
SOCKET_URING_OP_GETSOCKOPT,
SOCKET_URING_OP_SETSOCKOPT,
};
#ifdef __cplusplus
}
#endif

View File

@ -49,4 +49,24 @@ enum {
#define IOAM6_CMD_MAX (__IOAM6_CMD_MAX - 1)
#define IOAM6_GENL_EV_GRP_NAME "ioam6_events"
enum ioam6_event_type {
IOAM6_EVENT_UNSPEC,
IOAM6_EVENT_TRACE,
};
enum ioam6_event_attr {
IOAM6_EVENT_ATTR_UNSPEC,
IOAM6_EVENT_ATTR_TRACE_NAMESPACE, /* u16 */
IOAM6_EVENT_ATTR_TRACE_NODELEN, /* u8 */
IOAM6_EVENT_ATTR_TRACE_TYPE, /* u32 */
IOAM6_EVENT_ATTR_TRACE_DATA, /* Binary */
__IOAM6_EVENT_ATTR_MAX
};
#define IOAM6_EVENT_ATTR_MAX (__IOAM6_EVENT_ATTR_MAX - 1)
#endif /* _LINUX_IOAM6_GENL_H */

View File

@ -1,161 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* IOMMU user API definitions
*/
#ifndef _IOMMU_H
#define _IOMMU_H
#include <linux/types.h>
#define IOMMU_FAULT_PERM_READ (1 << 0) /* read */
#define IOMMU_FAULT_PERM_WRITE (1 << 1) /* write */
#define IOMMU_FAULT_PERM_EXEC (1 << 2) /* exec */
#define IOMMU_FAULT_PERM_PRIV (1 << 3) /* privileged */
/* Generic fault types, can be expanded IRQ remapping fault */
enum iommu_fault_type {
IOMMU_FAULT_DMA_UNRECOV = 1, /* unrecoverable fault */
IOMMU_FAULT_PAGE_REQ, /* page request fault */
};
enum iommu_fault_reason {
IOMMU_FAULT_REASON_UNKNOWN = 0,
/* Could not access the PASID table (fetch caused external abort) */
IOMMU_FAULT_REASON_PASID_FETCH,
/* PASID entry is invalid or has configuration errors */
IOMMU_FAULT_REASON_BAD_PASID_ENTRY,
/*
* PASID is out of range (e.g. exceeds the maximum PASID
* supported by the IOMMU) or disabled.
*/
IOMMU_FAULT_REASON_PASID_INVALID,
/*
* An external abort occurred fetching (or updating) a translation
* table descriptor
*/
IOMMU_FAULT_REASON_WALK_EABT,
/*
* Could not access the page table entry (Bad address),
* actual translation fault
*/
IOMMU_FAULT_REASON_PTE_FETCH,
/* Protection flag check failed */
IOMMU_FAULT_REASON_PERMISSION,
/* access flag check failed */
IOMMU_FAULT_REASON_ACCESS,
/* Output address of a translation stage caused Address Size fault */
IOMMU_FAULT_REASON_OOR_ADDRESS,
};
/**
* struct iommu_fault_unrecoverable - Unrecoverable fault data
* @reason: reason of the fault, from &enum iommu_fault_reason
* @flags: parameters of this fault (IOMMU_FAULT_UNRECOV_* values)
* @pasid: Process Address Space ID
 * @perm: requested permission access used by the incoming transaction
* (IOMMU_FAULT_PERM_* values)
* @addr: offending page address
* @fetch_addr: address that caused a fetch abort, if any
*/
struct iommu_fault_unrecoverable {
__u32 reason;
#define IOMMU_FAULT_UNRECOV_PASID_VALID (1 << 0)
#define IOMMU_FAULT_UNRECOV_ADDR_VALID (1 << 1)
#define IOMMU_FAULT_UNRECOV_FETCH_ADDR_VALID (1 << 2)
__u32 flags;
__u32 pasid;
__u32 perm;
__u64 addr;
__u64 fetch_addr;
};
/**
* struct iommu_fault_page_request - Page Request data
* @flags: encodes whether the corresponding fields are valid and whether this
* is the last page in group (IOMMU_FAULT_PAGE_REQUEST_* values).
* When IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID is set, the page response
* must have the same PASID value as the page request. When it is clear,
* the page response should not have a PASID.
* @pasid: Process Address Space ID
* @grpid: Page Request Group Index
* @perm: requested page permissions (IOMMU_FAULT_PERM_* values)
* @addr: page address
* @private_data: device-specific private information
*/
struct iommu_fault_page_request {
#define IOMMU_FAULT_PAGE_REQUEST_PASID_VALID (1 << 0)
#define IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE (1 << 1)
#define IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA (1 << 2)
#define IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID (1 << 3)
__u32 flags;
__u32 pasid;
__u32 grpid;
__u32 perm;
__u64 addr;
__u64 private_data[2];
};
/**
* struct iommu_fault - Generic fault data
* @type: fault type from &enum iommu_fault_type
* @padding: reserved for future use (should be zero)
* @event: fault event, when @type is %IOMMU_FAULT_DMA_UNRECOV
* @prm: Page Request message, when @type is %IOMMU_FAULT_PAGE_REQ
* @padding2: sets the fault size to allow for future extensions
*/
struct iommu_fault {
__u32 type;
__u32 padding;
union {
struct iommu_fault_unrecoverable event;
struct iommu_fault_page_request prm;
__u8 padding2[56];
};
};
/**
* enum iommu_page_response_code - Return status of fault handlers
* @IOMMU_PAGE_RESP_SUCCESS: Fault has been handled and the page tables
* populated, retry the access. This is "Success" in PCI PRI.
* @IOMMU_PAGE_RESP_FAILURE: General error. Drop all subsequent faults from
* this device if possible. This is "Response Failure" in PCI PRI.
* @IOMMU_PAGE_RESP_INVALID: Could not handle this fault, don't retry the
* access. This is "Invalid Request" in PCI PRI.
*/
enum iommu_page_response_code {
IOMMU_PAGE_RESP_SUCCESS = 0,
IOMMU_PAGE_RESP_INVALID,
IOMMU_PAGE_RESP_FAILURE,
};
/**
* struct iommu_page_response - Generic page response information
* @argsz: User filled size of this data
* @version: API version of this structure
* @flags: encodes whether the corresponding fields are valid
* (IOMMU_FAULT_PAGE_RESPONSE_* values)
* @pasid: Process Address Space ID
* @grpid: Page Request Group Index
* @code: response code from &enum iommu_page_response_code
*/
struct iommu_page_response {
__u32 argsz;
#define IOMMU_PAGE_RESP_VERSION_1 1
__u32 version;
#define IOMMU_PAGE_RESP_PASID_VALID (1 << 0)
__u32 flags;
__u32 pasid;
__u32 grpid;
__u32 code;
};
#endif /* _IOMMU_H */

View File

@ -45,6 +45,11 @@ enum {
IOMMUFD_CMD_IOAS_UNMAP,
IOMMUFD_CMD_OPTION,
IOMMUFD_CMD_VFIO_IOAS,
IOMMUFD_CMD_HWPT_ALLOC,
IOMMUFD_CMD_GET_HW_INFO,
IOMMUFD_CMD_HWPT_SET_DIRTY_TRACKING,
IOMMUFD_CMD_HWPT_GET_DIRTY_BITMAP,
IOMMUFD_CMD_HWPT_INVALIDATE,
};
/**
@ -344,4 +349,347 @@ struct iommu_vfio_ioas {
__u16 __reserved;
};
#define IOMMU_VFIO_IOAS _IO(IOMMUFD_TYPE, IOMMUFD_CMD_VFIO_IOAS)
/**
* enum iommufd_hwpt_alloc_flags - Flags for HWPT allocation
* @IOMMU_HWPT_ALLOC_NEST_PARENT: If set, allocate a HWPT that can serve as
* the parent HWPT in a nesting configuration.
* @IOMMU_HWPT_ALLOC_DIRTY_TRACKING: Dirty tracking support for device IOMMU is
* enforced on device attachment
*/
enum iommufd_hwpt_alloc_flags {
IOMMU_HWPT_ALLOC_NEST_PARENT = 1 << 0,
IOMMU_HWPT_ALLOC_DIRTY_TRACKING = 1 << 1,
};
/**
* enum iommu_hwpt_vtd_s1_flags - Intel VT-d stage-1 page table
* entry attributes
* @IOMMU_VTD_S1_SRE: Supervisor request
* @IOMMU_VTD_S1_EAFE: Extended access enable
* @IOMMU_VTD_S1_WPE: Write protect enable
*/
enum iommu_hwpt_vtd_s1_flags {
IOMMU_VTD_S1_SRE = 1 << 0,
IOMMU_VTD_S1_EAFE = 1 << 1,
IOMMU_VTD_S1_WPE = 1 << 2,
};
/**
* struct iommu_hwpt_vtd_s1 - Intel VT-d stage-1 page table
* info (IOMMU_HWPT_DATA_VTD_S1)
* @flags: Combination of enum iommu_hwpt_vtd_s1_flags
* @pgtbl_addr: The base address of the stage-1 page table.
* @addr_width: The address width of the stage-1 page table
* @__reserved: Must be 0
*/
struct iommu_hwpt_vtd_s1 {
__aligned_u64 flags;
__aligned_u64 pgtbl_addr;
__u32 addr_width;
__u32 __reserved;
};
/**
* enum iommu_hwpt_data_type - IOMMU HWPT Data Type
* @IOMMU_HWPT_DATA_NONE: no data
* @IOMMU_HWPT_DATA_VTD_S1: Intel VT-d stage-1 page table
*/
enum iommu_hwpt_data_type {
IOMMU_HWPT_DATA_NONE,
IOMMU_HWPT_DATA_VTD_S1,
};
/**
* struct iommu_hwpt_alloc - ioctl(IOMMU_HWPT_ALLOC)
* @size: sizeof(struct iommu_hwpt_alloc)
* @flags: Combination of enum iommufd_hwpt_alloc_flags
* @dev_id: The device to allocate this HWPT for
* @pt_id: The IOAS or HWPT to connect this HWPT to
* @out_hwpt_id: The ID of the new HWPT
* @__reserved: Must be 0
* @data_type: One of enum iommu_hwpt_data_type
* @data_len: Length of the type specific data
* @data_uptr: User pointer to the type specific data
*
* Explicitly allocate a hardware page table object. This is the same object
* type that is returned by iommufd_device_attach() and represents the
* underlying iommu driver's iommu_domain kernel object.
*
* A kernel-managed HWPT will be created with the mappings from the given
* IOAS via the @pt_id. The @data_type for this allocation must be set to
* IOMMU_HWPT_DATA_NONE. The HWPT can be allocated as a parent HWPT for a
* nesting configuration by passing IOMMU_HWPT_ALLOC_NEST_PARENT via @flags.
*
* A user-managed nested HWPT will be created from a given parent HWPT via
* @pt_id, in which the parent HWPT must be allocated previously via the
* same ioctl from a given IOAS (@pt_id). In this case, the @data_type
* must be set to a pre-defined type corresponding to an I/O page table
* type supported by the underlying IOMMU hardware.
*
* If the @data_type is set to IOMMU_HWPT_DATA_NONE, @data_len and
* @data_uptr should be zero. Otherwise, both @data_len and @data_uptr
* must be given.
*/
struct iommu_hwpt_alloc {
__u32 size;
__u32 flags;
__u32 dev_id;
__u32 pt_id;
__u32 out_hwpt_id;
__u32 __reserved;
__u32 data_type;
__u32 data_len;
__aligned_u64 data_uptr;
};
#define IOMMU_HWPT_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_HWPT_ALLOC)
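A hedged sketch of the two allocation flavours described above: a kernel-managed nesting parent from an IOAS, then a user-managed VT-d stage-1 nested HWPT on top of it. iommufd, dev_id, ioas_id and the guest stage-1 table address are assumed to come from earlier setup; error handling is omitted.

#include <stdint.h>
#include <sys/ioctl.h>

static __u32 alloc_nested_hwpt(int iommufd, __u32 dev_id, __u32 ioas_id,
			       __u64 s1_pgtbl_addr)
{
	struct iommu_hwpt_alloc parent = {
		.size = sizeof(parent),
		.flags = IOMMU_HWPT_ALLOC_NEST_PARENT,
		.dev_id = dev_id,
		.pt_id = ioas_id,
		.data_type = IOMMU_HWPT_DATA_NONE,
	};
	struct iommu_hwpt_vtd_s1 vtd = {
		.pgtbl_addr = s1_pgtbl_addr,
		.addr_width = 48,	/* assumed 4-level stage-1 table */
	};
	struct iommu_hwpt_alloc nested = {
		.size = sizeof(nested),
		.dev_id = dev_id,
		.data_type = IOMMU_HWPT_DATA_VTD_S1,
		.data_len = sizeof(vtd),
		.data_uptr = (uintptr_t)&vtd,
	};

	ioctl(iommufd, IOMMU_HWPT_ALLOC, &parent);
	nested.pt_id = parent.out_hwpt_id;
	ioctl(iommufd, IOMMU_HWPT_ALLOC, &nested);
	return nested.out_hwpt_id;
}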
/**
* enum iommu_hw_info_vtd_flags - Flags for VT-d hw_info
* @IOMMU_HW_INFO_VTD_ERRATA_772415_SPR17: If set, disallow read-only mappings
* on a nested_parent domain.
* https://www.intel.com/content/www/us/en/content-details/772415/content-details.html
*/
enum iommu_hw_info_vtd_flags {
IOMMU_HW_INFO_VTD_ERRATA_772415_SPR17 = 1 << 0,
};
/**
* struct iommu_hw_info_vtd - Intel VT-d hardware information
*
* @flags: Combination of enum iommu_hw_info_vtd_flags
* @__reserved: Must be 0
*
* @cap_reg: Value of Intel VT-d capability register defined in VT-d spec
* section 11.4.2 Capability Register.
* @ecap_reg: Value of Intel VT-d capability register defined in VT-d spec
* section 11.4.3 Extended Capability Register.
*
* User needs to understand the Intel VT-d specification to decode the
* register value.
*/
struct iommu_hw_info_vtd {
__u32 flags;
__u32 __reserved;
__aligned_u64 cap_reg;
__aligned_u64 ecap_reg;
};
/**
* enum iommu_hw_info_type - IOMMU Hardware Info Types
* @IOMMU_HW_INFO_TYPE_NONE: Used by the drivers that do not report hardware
* info
* @IOMMU_HW_INFO_TYPE_INTEL_VTD: Intel VT-d iommu info type
*/
enum iommu_hw_info_type {
IOMMU_HW_INFO_TYPE_NONE,
IOMMU_HW_INFO_TYPE_INTEL_VTD,
};
/**
* enum iommufd_hw_capabilities
* @IOMMU_HW_CAP_DIRTY_TRACKING: IOMMU hardware support for dirty tracking
* If available, it means the following APIs
* are supported:
*
* IOMMU_HWPT_GET_DIRTY_BITMAP
* IOMMU_HWPT_SET_DIRTY_TRACKING
*
*/
enum iommufd_hw_capabilities {
IOMMU_HW_CAP_DIRTY_TRACKING = 1 << 0,
};
/**
* struct iommu_hw_info - ioctl(IOMMU_GET_HW_INFO)
* @size: sizeof(struct iommu_hw_info)
* @flags: Must be 0
* @dev_id: The device bound to the iommufd
* @data_len: Input the length of a user buffer in bytes. Output the length of
* data that kernel supports
* @data_uptr: User pointer to a user-space buffer used by the kernel to fill
* the iommu type specific hardware information data
* @out_data_type: Output the iommu hardware info type as defined in the enum
* iommu_hw_info_type.
* @out_capabilities: Output the generic iommu capability info type as defined
* in the enum iommu_hw_capabilities.
* @__reserved: Must be 0
*
* Query an iommu type specific hardware information data from an iommu behind
* a given device that has been bound to iommufd. This hardware info data will
* be used to sync capabilities between the virtual iommu and the physical
* iommu, e.g. a nested translation setup needs to check the hardware info, so
* a guest stage-1 page table can be compatible with the physical iommu.
*
* To capture an iommu type specific hardware information data, @data_uptr and
* its length @data_len must be provided. Trailing bytes will be zeroed if the
* user buffer is larger than the data that kernel has. Otherwise, kernel only
* fills the buffer using the given length in @data_len. If the ioctl succeeds,
* @data_len will be updated to the length that kernel actually supports,
* @out_data_type will be filled to decode the data filled in the buffer
* pointed by @data_uptr. Input @data_len == zero is allowed.
*/
struct iommu_hw_info {
__u32 size;
__u32 flags;
__u32 dev_id;
__u32 data_len;
__aligned_u64 data_uptr;
__u32 out_data_type;
__u32 __reserved;
__aligned_u64 out_capabilities;
};
#define IOMMU_GET_HW_INFO _IO(IOMMUFD_TYPE, IOMMUFD_CMD_GET_HW_INFO)
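A minimal usage sketch of the query described above, assuming the device sits behind an Intel VT-d IOMMU; the function name is made up and error handling is minimal.

#include <stdint.h>
#include <sys/ioctl.h>

static int query_vtd_caps(int iommufd, __u32 dev_id,
			  struct iommu_hw_info_vtd *vtd)
{
	struct iommu_hw_info cmd = {
		.size = sizeof(cmd),
		.dev_id = dev_id,
		.data_len = sizeof(*vtd),	/* kernel truncates or zero-fills */
		.data_uptr = (uintptr_t)vtd,
	};

	if (ioctl(iommufd, IOMMU_GET_HW_INFO, &cmd))
		return -1;
	/* Non-Intel hardware reports a different out_data_type. */
	return cmd.out_data_type == IOMMU_HW_INFO_TYPE_INTEL_VTD ? 0 : -1;
}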
/*
* enum iommufd_hwpt_set_dirty_tracking_flags - Flags for steering dirty
* tracking
* @IOMMU_HWPT_DIRTY_TRACKING_ENABLE: Enable dirty tracking
*/
enum iommufd_hwpt_set_dirty_tracking_flags {
IOMMU_HWPT_DIRTY_TRACKING_ENABLE = 1,
};
/**
* struct iommu_hwpt_set_dirty_tracking - ioctl(IOMMU_HWPT_SET_DIRTY_TRACKING)
* @size: sizeof(struct iommu_hwpt_set_dirty_tracking)
* @flags: Combination of enum iommufd_hwpt_set_dirty_tracking_flags
* @hwpt_id: HW pagetable ID that represents the IOMMU domain
* @__reserved: Must be 0
*
* Toggle dirty tracking on an HW pagetable.
*/
struct iommu_hwpt_set_dirty_tracking {
__u32 size;
__u32 flags;
__u32 hwpt_id;
__u32 __reserved;
};
#define IOMMU_HWPT_SET_DIRTY_TRACKING _IO(IOMMUFD_TYPE, \
IOMMUFD_CMD_HWPT_SET_DIRTY_TRACKING)
/**
* enum iommufd_hwpt_get_dirty_bitmap_flags - Flags for getting dirty bits
* @IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR: Just read the PTEs without clearing
* any dirty bits metadata. This flag
* can be passed in the expectation
* where the next operation is an unmap
* of the same IOVA range.
*
*/
enum iommufd_hwpt_get_dirty_bitmap_flags {
IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR = 1,
};
/**
* struct iommu_hwpt_get_dirty_bitmap - ioctl(IOMMU_HWPT_GET_DIRTY_BITMAP)
* @size: sizeof(struct iommu_hwpt_get_dirty_bitmap)
* @hwpt_id: HW pagetable ID that represents the IOMMU domain
* @flags: Combination of enum iommufd_hwpt_get_dirty_bitmap_flags
* @__reserved: Must be 0
* @iova: base IOVA of the bitmap first bit
* @length: IOVA range size
* @page_size: page size granularity of each bit in the bitmap
 * @data: bitmap where to set the dirty bits. Each bit in the bitmap
 *        represents one page_size-sized range, offset from @iova.
*
* Checking a given IOVA is dirty:
*
* data[(iova / page_size) / 64] & (1ULL << ((iova / page_size) % 64))
*
* Walk the IOMMU pagetables for a given IOVA range to return a bitmap
* with the dirty IOVAs. In doing so it will also by default clear any
* dirty bit metadata set in the IOPTE.
*/
struct iommu_hwpt_get_dirty_bitmap {
__u32 size;
__u32 hwpt_id;
__u32 flags;
__u32 __reserved;
__aligned_u64 iova;
__aligned_u64 length;
__aligned_u64 page_size;
__aligned_u64 data;
};
#define IOMMU_HWPT_GET_DIRTY_BITMAP _IO(IOMMUFD_TYPE, \
IOMMUFD_CMD_HWPT_GET_DIRTY_BITMAP)
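The dirty-check formula quoted in the comment above can be wrapped in a small helper; a sketch that treats the tested IOVA as relative to the @iova base passed to the ioctl:

static inline int hwpt_iova_is_dirty(const __u64 *bitmap, __u64 base,
				     __u64 page_size, __u64 iova)
{
	__u64 bit = (iova - base) / page_size;

	return !!(bitmap[bit / 64] & (1ULL << (bit % 64)));
}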
/**
* enum iommu_hwpt_invalidate_data_type - IOMMU HWPT Cache Invalidation
* Data Type
* @IOMMU_HWPT_INVALIDATE_DATA_VTD_S1: Invalidation data for VTD_S1
*/
enum iommu_hwpt_invalidate_data_type {
IOMMU_HWPT_INVALIDATE_DATA_VTD_S1,
};
/**
* enum iommu_hwpt_vtd_s1_invalidate_flags - Flags for Intel VT-d
* stage-1 cache invalidation
* @IOMMU_VTD_INV_FLAGS_LEAF: Indicates whether the invalidation applies
* to all-levels page structure cache or just
* the leaf PTE cache.
*/
enum iommu_hwpt_vtd_s1_invalidate_flags {
IOMMU_VTD_INV_FLAGS_LEAF = 1 << 0,
};
/**
* struct iommu_hwpt_vtd_s1_invalidate - Intel VT-d cache invalidation
* (IOMMU_HWPT_INVALIDATE_DATA_VTD_S1)
* @addr: The start address of the range to be invalidated. It needs to
* be 4KB aligned.
* @npages: Number of contiguous 4K pages to be invalidated.
* @flags: Combination of enum iommu_hwpt_vtd_s1_invalidate_flags
* @__reserved: Must be 0
*
* The Intel VT-d specific invalidation data for user-managed stage-1 cache
* invalidation in nested translation. Userspace uses this structure to
* tell the impacted cache scope after modifying the stage-1 page table.
*
 * Invalidate all the caches related to the page table by setting @addr
 * to 0 and @npages to U64_MAX.
*
* The device TLB will be invalidated automatically if ATS is enabled.
*/
struct iommu_hwpt_vtd_s1_invalidate {
__aligned_u64 addr;
__aligned_u64 npages;
__u32 flags;
__u32 __reserved;
};
/**
* struct iommu_hwpt_invalidate - ioctl(IOMMU_HWPT_INVALIDATE)
* @size: sizeof(struct iommu_hwpt_invalidate)
* @hwpt_id: ID of a nested HWPT for cache invalidation
* @data_uptr: User pointer to an array of driver-specific cache invalidation
* data.
* @data_type: One of enum iommu_hwpt_invalidate_data_type, defining the data
* type of all the entries in the invalidation request array. It
* should be a type supported by the hwpt pointed by @hwpt_id.
* @entry_len: Length (in bytes) of a request entry in the request array
* @entry_num: Input the number of cache invalidation requests in the array.
* Output the number of requests successfully handled by kernel.
* @__reserved: Must be 0.
*
* Invalidate the iommu cache for user-managed page table. Modifications on a
* user-managed page table should be followed by this operation to sync cache.
* Each ioctl can support one or more cache invalidation requests in the array
* that has a total size of @entry_len * @entry_num.
*
* An empty invalidation request array by setting @entry_num==0 is allowed, and
* @entry_len and @data_uptr would be ignored in this case. This can be used to
* check if the given @data_type is supported or not by kernel.
*/
struct iommu_hwpt_invalidate {
__u32 size;
__u32 hwpt_id;
__aligned_u64 data_uptr;
__u32 data_type;
__u32 entry_len;
__u32 entry_num;
__u32 __reserved;
};
#define IOMMU_HWPT_INVALIDATE _IO(IOMMUFD_TYPE, IOMMUFD_CMD_HWPT_INVALIDATE)
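A hedged sketch of issuing a single VT-d stage-1 invalidation after modifying a guest page table; hwpt_id is assumed to name a nested HWPT allocated with IOMMU_HWPT_DATA_VTD_S1.

#include <stdint.h>
#include <sys/ioctl.h>

static int invalidate_s1_range(int iommufd, __u32 hwpt_id,
			       __u64 addr, __u64 npages)
{
	struct iommu_hwpt_vtd_s1_invalidate req = {
		.addr = addr,		/* must be 4KB aligned */
		.npages = npages,	/* addr = 0, npages = U64_MAX flushes everything */
	};
	struct iommu_hwpt_invalidate cmd = {
		.size = sizeof(cmd),
		.hwpt_id = hwpt_id,
		.data_uptr = (uintptr_t)&req,
		.data_type = IOMMU_HWPT_INVALIDATE_DATA_VTD_S1,
		.entry_len = sizeof(req),
		.entry_num = 1,
	};

	return ioctl(iommufd, IOMMU_HWPT_INVALIDATE, &cmd);
}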
#endif

View File

@ -2,22 +2,23 @@
#ifndef _LINUX_IOPRIO_H
#define _LINUX_IOPRIO_H
#include <linux/stddef.h>
#include <linux/types.h>
/*
 * Gives us 8 prio classes with 13 bits of data for each class
*/
#define IOPRIO_CLASS_SHIFT 13
#define IOPRIO_CLASS_MASK 0x07
#define IOPRIO_NR_CLASSES 8
#define IOPRIO_CLASS_MASK (IOPRIO_NR_CLASSES - 1)
#define IOPRIO_PRIO_MASK ((1UL << IOPRIO_CLASS_SHIFT) - 1)
#define IOPRIO_PRIO_CLASS(ioprio) \
(((ioprio) >> IOPRIO_CLASS_SHIFT) & IOPRIO_CLASS_MASK)
#define IOPRIO_PRIO_DATA(ioprio) ((ioprio) & IOPRIO_PRIO_MASK)
#define IOPRIO_PRIO_VALUE(class, data) \
((((class) & IOPRIO_CLASS_MASK) << IOPRIO_CLASS_SHIFT) | \
((data) & IOPRIO_PRIO_MASK))
/*
* These are the io priority groups as implemented by the BFQ and mq-deadline
* These are the io priority classes as implemented by the BFQ and mq-deadline
* schedulers. RT is the realtime class, it always gets premium service. For
* ATA disks supporting NCQ IO priority, RT class IOs will be processed using
* high priority NCQ commands. BE is the best-effort scheduling class, the
@ -25,18 +26,30 @@
* served when no one else is using the disk.
*/
enum {
IOPRIO_CLASS_NONE,
IOPRIO_CLASS_RT,
IOPRIO_CLASS_BE,
IOPRIO_CLASS_IDLE,
IOPRIO_CLASS_NONE = 0,
IOPRIO_CLASS_RT = 1,
IOPRIO_CLASS_BE = 2,
IOPRIO_CLASS_IDLE = 3,
/* Special class to indicate an invalid ioprio value */
IOPRIO_CLASS_INVALID = 7,
};
/*
* The RT and BE priority classes both support up to 8 priority levels.
* The RT and BE priority classes both support up to 8 priority levels that
* can be specified using the lower 3-bits of the priority data.
*/
#define IOPRIO_NR_LEVELS 8
#define IOPRIO_BE_NR IOPRIO_NR_LEVELS
#define IOPRIO_LEVEL_NR_BITS 3
#define IOPRIO_NR_LEVELS (1 << IOPRIO_LEVEL_NR_BITS)
#define IOPRIO_LEVEL_MASK (IOPRIO_NR_LEVELS - 1)
#define IOPRIO_PRIO_LEVEL(ioprio) ((ioprio) & IOPRIO_LEVEL_MASK)
#define IOPRIO_BE_NR IOPRIO_NR_LEVELS
/*
* Possible values for the "which" argument of the ioprio_get() and
* ioprio_set() system calls (see "man ioprio_set").
*/
enum {
IOPRIO_WHO_PROCESS = 1,
IOPRIO_WHO_PGRP,
@ -44,9 +57,71 @@ enum {
};
/*
* Fallback BE priority level.
* Fallback BE class priority level.
*/
#define IOPRIO_NORM 4
#define IOPRIO_BE_NORM IOPRIO_NORM
/*
* The 10 bits between the priority class and the priority level are used to
* optionally define I/O hints for any combination of I/O priority class and
* level. Depending on the kernel configuration, I/O scheduler being used and
* the target I/O device being used, hints can influence how I/Os are processed
* without affecting the I/O scheduling ordering defined by the I/O priority
* class and level.
*/
#define IOPRIO_HINT_SHIFT IOPRIO_LEVEL_NR_BITS
#define IOPRIO_HINT_NR_BITS 10
#define IOPRIO_NR_HINTS (1 << IOPRIO_HINT_NR_BITS)
#define IOPRIO_HINT_MASK (IOPRIO_NR_HINTS - 1)
#define IOPRIO_PRIO_HINT(ioprio) \
(((ioprio) >> IOPRIO_HINT_SHIFT) & IOPRIO_HINT_MASK)
/*
* I/O hints.
*/
enum {
/* No hint */
IOPRIO_HINT_NONE = 0,
/*
* Device command duration limits: indicate to the device a desired
* duration limit for the commands that will be used to process an I/O.
* These will currently only be effective for SCSI and ATA devices that
* support the command duration limits feature. If this feature is
* enabled, then the commands issued to the device to process an I/O with
* one of these hints set will have the duration limit index (dld field)
* set to the value of the hint.
*/
IOPRIO_HINT_DEV_DURATION_LIMIT_1 = 1,
IOPRIO_HINT_DEV_DURATION_LIMIT_2 = 2,
IOPRIO_HINT_DEV_DURATION_LIMIT_3 = 3,
IOPRIO_HINT_DEV_DURATION_LIMIT_4 = 4,
IOPRIO_HINT_DEV_DURATION_LIMIT_5 = 5,
IOPRIO_HINT_DEV_DURATION_LIMIT_6 = 6,
IOPRIO_HINT_DEV_DURATION_LIMIT_7 = 7,
};
#define IOPRIO_BAD_VALUE(val, max) ((val) < 0 || (val) >= (max))
/*
* Return an I/O priority value based on a class, a level and a hint.
*/
static __always_inline __u16 ioprio_value(int prioclass, int priolevel,
int priohint)
{
if (IOPRIO_BAD_VALUE(prioclass, IOPRIO_NR_CLASSES) ||
IOPRIO_BAD_VALUE(priolevel, IOPRIO_NR_LEVELS) ||
IOPRIO_BAD_VALUE(priohint, IOPRIO_NR_HINTS))
return IOPRIO_CLASS_INVALID << IOPRIO_CLASS_SHIFT;
return (prioclass << IOPRIO_CLASS_SHIFT) |
(priohint << IOPRIO_HINT_SHIFT) | priolevel;
}
#define IOPRIO_PRIO_VALUE(prioclass, priolevel) \
ioprio_value(prioclass, priolevel, IOPRIO_HINT_NONE)
#define IOPRIO_PRIO_VALUE_HINT(prioclass, priolevel, priohint) \
ioprio_value(prioclass, priolevel, priohint)
#endif /* _LINUX_IOPRIO_H */
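As a quick illustration of the new hint plumbing, the sketch below composes an I/O priority value from a class, a level, and a command duration limit hint, applies it to the calling process, and reads it back. It assumes the updated <linux/ioprio.h> shown above is installed; ioprio_set()/ioprio_get() have no glibc wrappers, so the raw syscall numbers are used.

#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/ioprio.h>

int main(void)
{
	/* Best-effort class, level 2, with command duration limit hint 3. */
	int prio = IOPRIO_PRIO_VALUE_HINT(IOPRIO_CLASS_BE, 2,
					  IOPRIO_HINT_DEV_DURATION_LIMIT_3);

	/* who == 0 means the calling process for IOPRIO_WHO_PROCESS. */
	if (syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0, prio) < 0)
		perror("ioprio_set");

	prio = syscall(SYS_ioprio_get, IOPRIO_WHO_PROCESS, 0);
	printf("class=%d level=%d hint=%d\n", IOPRIO_PRIO_CLASS(prio),
	       IOPRIO_PRIO_LEVEL(prio), IOPRIO_PRIO_HINT(prio));
	return 0;
}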

View File

@ -81,7 +81,7 @@ struct ipv6_opt_hdr {
struct rt0_hdr {
struct ipv6_rt_hdr rt_hdr;
__u32 reserved;
struct in6_addr addr[0];
struct in6_addr addr[];
#define rt0_type rt_hdr.type
};
@ -198,6 +198,7 @@ enum {
DEVCONF_IOAM6_ID_WIDE,
DEVCONF_NDISC_EVICT_NOCARRIER,
DEVCONF_ACCEPT_UNTRACKED_NA,
DEVCONF_ACCEPT_RA_MIN_LFT,
DEVCONF_MAX
};

View File

@ -163,10 +163,313 @@ struct isst_if_msr_cmds {
struct isst_if_msr_cmd msr_cmd[1];
};
/**
* struct isst_core_power - Structure to get/set core_power feature
* @get_set: 0: Get, 1: Set
* @socket_id: Socket/package id
* @power_domain: Power Domain id
* @enable: Feature enable status
* @priority_type: Priority type for the feature (ordered/proportional)
*
* Structure to get/set core_power feature state using IOCTL
* ISST_IF_CORE_POWER_STATE.
*/
struct isst_core_power {
__u8 get_set;
__u8 socket_id;
__u8 power_domain_id;
__u8 enable;
__u8 supported;
__u8 priority_type;
};
/**
* struct isst_clos_param - Structure to get/set clos param
* @get_set: 0: Get, 1: Set
* @socket_id: Socket/package id
* @power_domain: Power Domain id
* @clos: Clos ID for the parameters
* @min_freq_mhz: Minimum frequency in MHz
* @max_freq_mhz: Maximum frequency in MHz
* @prop_prio: Proportional priority from 0-15
*
* Structure to get/set per clos property using IOCTL
* ISST_IF_CLOS_PARAM.
*/
struct isst_clos_param {
__u8 get_set;
__u8 socket_id;
__u8 power_domain_id;
__u8 clos;
__u16 min_freq_mhz;
__u16 max_freq_mhz;
__u8 prop_prio;
};
/**
* struct isst_if_clos_assoc - Structure to assign clos to a CPU
* @socket_id: Socket/package id
* @power_domain: Power Domain id
* @logical_cpu: CPU number
* @clos: Clos ID to assign to the logical CPU
*
* Structure used to associate a clos with a logical CPU via IOCTL ISST_IF_CLOS_ASSOC.
*/
struct isst_if_clos_assoc {
__u8 socket_id;
__u8 power_domain_id;
__u16 logical_cpu;
__u16 clos;
};
/**
* struct isst_if_clos_assoc_cmds - Structure to assign clos to CPUs
* @cmd_count: Number of cmds (cpus) in this request
* @get_set: Request is for get or set
* @punit_cpu_map: Set to 1 if the CPU numbers use punit numbering rather than
* Linux CPU numbering
*
* Structure used to get/set the clos association of CPUs using IOCTL
* ISST_IF_CLOS_ASSOC.
*/
struct isst_if_clos_assoc_cmds {
__u16 cmd_count;
__u16 get_set;
__u16 punit_cpu_map;
struct isst_if_clos_assoc assoc_info[1];
};
/**
* struct isst_tpmi_instance_count - Get number of TPMI instances per socket
* @socket_id: Socket/package id
* @count: Number of instances
* @valid_mask: Mask of instances as there can be holes
*
* Structure used to get TPMI instances information using
* IOCTL ISST_IF_COUNT_TPMI_INSTANCES.
*/
struct isst_tpmi_instance_count {
__u8 socket_id;
__u8 count;
__u16 valid_mask;
};
/**
* struct isst_perf_level_info - Structure to get information on SST-PP levels
* @socket_id: Socket/package id
* @power_domain: Power Domain id
* @max_level: Maximum performance level supported by the platform
* @feature_rev: The feature revision for SST-PP supported by the platform
* @level_mask: Mask of supported performance levels
* @current_level: Current performance level
* @feature_state: SST-BF and SST-TF (enabled/disabled) status at current level
* @locked: SST-PP performance level change is locked/unlocked
* @enabled: SST-PP feature is enabled or not
* @sst_tf_support: SST-TF support status at this level
* @sst_bf_support: SST-BF support status at this level
*
* Structure to get SST-PP details using IOCTL ISST_IF_PERF_LEVELS.
*/
struct isst_perf_level_info {
__u8 socket_id;
__u8 power_domain_id;
__u8 max_level;
__u8 feature_rev;
__u8 level_mask;
__u8 current_level;
__u8 feature_state;
__u8 locked;
__u8 enabled;
__u8 sst_tf_support;
__u8 sst_bf_support;
};
/**
* struct isst_perf_level_control - Structure to set SST-PP level
* @socket_id: Socket/package id
* @power_domain: Power Domain id
* @level: level to set
*
* Structure used to change the SST-PP level using IOCTL ISST_IF_PERF_SET_LEVEL.
*/
struct isst_perf_level_control {
__u8 socket_id;
__u8 power_domain_id;
__u8 level;
};
/**
* struct isst_perf_feature_control - Structure to activate SST-BF/SST-TF
* @socket_id: Socket/package id
* @power_domain: Power Domain id
* @feature: bit 0 = SST-BF state, bit 1 = SST-TF state
*
* Structure used to enable SST-BF/SST-TF using IOCTL ISST_IF_PERF_SET_FEATURE.
*/
struct isst_perf_feature_control {
__u8 socket_id;
__u8 power_domain_id;
__u8 feature;
};
#define TRL_MAX_BUCKETS 8
#define TRL_MAX_LEVELS 6
/**
* struct isst_perf_level_data_info - Structure to get SST-PP level details
* @socket_id: Socket/package id
* @power_domain: Power Domain id
* @level: SST-PP level for which caller wants to get information
* @tdp_ratio: TDP Ratio
* @base_freq_mhz: Base frequency in MHz
* @base_freq_avx2_mhz: AVX2 Base frequency in MHz
* @base_freq_avx512_mhz: AVX512 base frequency in MHz
* @base_freq_amx_mhz: AMX base frequency in MHz
* @thermal_design_power_w: Thermal design (TDP) power
* @tjunction_max_c: Max junction temperature
* @max_memory_freq_mhz: Max memory frequency in MHz
* @cooling_type: Type of cooling used
* @p0_freq_mhz: core maximum frequency
* @p1_freq_mhz: Core TDP frequency
* @pn_freq_mhz: Core maximum efficiency frequency
* @pm_freq_mhz: Core minimum frequency
* @p0_fabric_freq_mhz: Fabric (Uncore) maximum frequency
* @p1_fabric_freq_mhz: Fabric (Uncore) TDP frequency
* @pn_fabric_freq_mhz: Fabric (Uncore) minimum efficiency frequency
* @pm_fabric_freq_mhz: Fabric (Uncore) minimum frequency
* @max_buckets: Maximum trl buckets
* @max_trl_levels: Maximum trl levels
* @bucket_core_counts[TRL_MAX_BUCKETS]: Number of cores per bucket
* @trl_freq_mhz[TRL_MAX_LEVELS][TRL_MAX_BUCKETS]: maximum frequency
* for a bucket and trl level
*
* Structure used to get information on frequencies and TDP for a SST-PP
* level using ISST_IF_GET_PERF_LEVEL_INFO.
*/
struct isst_perf_level_data_info {
__u8 socket_id;
__u8 power_domain_id;
__u16 level;
__u16 tdp_ratio;
__u16 base_freq_mhz;
__u16 base_freq_avx2_mhz;
__u16 base_freq_avx512_mhz;
__u16 base_freq_amx_mhz;
__u16 thermal_design_power_w;
__u16 tjunction_max_c;
__u16 max_memory_freq_mhz;
__u16 cooling_type;
__u16 p0_freq_mhz;
__u16 p1_freq_mhz;
__u16 pn_freq_mhz;
__u16 pm_freq_mhz;
__u16 p0_fabric_freq_mhz;
__u16 p1_fabric_freq_mhz;
__u16 pn_fabric_freq_mhz;
__u16 pm_fabric_freq_mhz;
__u16 max_buckets;
__u16 max_trl_levels;
__u16 bucket_core_counts[TRL_MAX_BUCKETS];
__u16 trl_freq_mhz[TRL_MAX_LEVELS][TRL_MAX_BUCKETS];
};
/**
* struct isst_perf_level_cpu_mask - Structure to get SST-PP level CPU mask
* @socket_id: Socket/package id
* @power_domain: Power Domain id
* @level: SST-PP level for which caller wants to get information
* @punit_cpu_map: Set to 1 for punit CPU numbering instead of Linux CPU
* numbering. If 0, the CPU mask is copied to the user space
* supplied cpu_buffer of size cpu_buffer_size. If 1, the punit
* cpu mask is copied to the "mask" field.
* @mask: cpu mask for this PP level (punit CPU numbering)
* @cpu_buffer_size: size of cpu_buffer also used to return the copied CPU
* buffer size.
* @cpu_buffer: Buffer to copy CPU mask when punit_cpu_map is 0
*
* Structure used to get cpumask for a SST-PP level using
* IOCTL ISST_IF_GET_PERF_LEVEL_CPU_MASK. Also used to get CPU mask for
* IOCTL ISST_IF_GET_BASE_FREQ_CPU_MASK for SST-BF.
*/
struct isst_perf_level_cpu_mask {
__u8 socket_id;
__u8 power_domain_id;
__u8 level;
__u8 punit_cpu_map;
__u64 mask;
__u16 cpu_buffer_size;
__s8 cpu_buffer[1];
};
/**
* struct isst_base_freq_info - Structure to get SST-BF frequencies
* @socket_id: Socket/package id
* @power_domain: Power Domain id
* @level: SST-PP level for which caller wants to get information
* @high_base_freq_mhz: High priority CPU base frequency
* @low_base_freq_mhz: Low priority CPU base frequency
* @tjunction_max_c: Max junction temperature
* @thermal_design_power_w: Thermal design power in watts
*
* Structure used to get SST-BF information using
* IOCTL ISST_IF_GET_BASE_FREQ_INFO.
*/
struct isst_base_freq_info {
__u8 socket_id;
__u8 power_domain_id;
__u16 level;
__u16 high_base_freq_mhz;
__u16 low_base_freq_mhz;
__u16 tjunction_max_c;
__u16 thermal_design_power_w;
};
/**
* struct isst_turbo_freq_info - Structure to get SST-TF frequencies
* @socket_id: Socket/package id
* @power_domain: Power Domain id
* @level: SST-PP level for which caller wants to get information
* @max_clip_freqs: Maximum number of low priority core clipping frequencies
* @lp_clip_freq_mhz: Clip frequencies per trl level
* @bucket_core_counts: Maximum number of cores for a bucket
* @trl_freq_mhz: Frequencies per trl level for each bucket
*
* Structure used to get SST-TF information using
* IOCTL ISST_IF_GET_TURBO_FREQ_INFO.
*/
struct isst_turbo_freq_info {
__u8 socket_id;
__u8 power_domain_id;
__u16 level;
__u16 max_clip_freqs;
__u16 max_buckets;
__u16 max_trl_levels;
__u16 lp_clip_freq_mhz[TRL_MAX_LEVELS];
__u16 bucket_core_counts[TRL_MAX_BUCKETS];
__u16 trl_freq_mhz[TRL_MAX_LEVELS][TRL_MAX_BUCKETS];
};
#define ISST_IF_MAGIC 0xFE
#define ISST_IF_GET_PLATFORM_INFO _IOR(ISST_IF_MAGIC, 0, struct isst_if_platform_info *)
#define ISST_IF_GET_PHY_ID _IOWR(ISST_IF_MAGIC, 1, struct isst_if_cpu_map *)
#define ISST_IF_IO_CMD _IOW(ISST_IF_MAGIC, 2, struct isst_if_io_regs *)
#define ISST_IF_MBOX_COMMAND _IOWR(ISST_IF_MAGIC, 3, struct isst_if_mbox_cmds *)
#define ISST_IF_MSR_COMMAND _IOWR(ISST_IF_MAGIC, 4, struct isst_if_msr_cmds *)
#define ISST_IF_COUNT_TPMI_INSTANCES _IOR(ISST_IF_MAGIC, 5, struct isst_tpmi_instance_count *)
#define ISST_IF_CORE_POWER_STATE _IOWR(ISST_IF_MAGIC, 6, struct isst_core_power *)
#define ISST_IF_CLOS_PARAM _IOWR(ISST_IF_MAGIC, 7, struct isst_clos_param *)
#define ISST_IF_CLOS_ASSOC _IOWR(ISST_IF_MAGIC, 8, struct isst_if_clos_assoc_cmds *)
#define ISST_IF_PERF_LEVELS _IOWR(ISST_IF_MAGIC, 9, struct isst_perf_level_info *)
#define ISST_IF_PERF_SET_LEVEL _IOW(ISST_IF_MAGIC, 10, struct isst_perf_level_control *)
#define ISST_IF_PERF_SET_FEATURE _IOW(ISST_IF_MAGIC, 11, struct isst_perf_feature_control *)
#define ISST_IF_GET_PERF_LEVEL_INFO _IOR(ISST_IF_MAGIC, 12, struct isst_perf_level_data_info *)
#define ISST_IF_GET_PERF_LEVEL_CPU_MASK _IOR(ISST_IF_MAGIC, 13, struct isst_perf_level_cpu_mask *)
#define ISST_IF_GET_BASE_FREQ_INFO _IOR(ISST_IF_MAGIC, 14, struct isst_base_freq_info *)
#define ISST_IF_GET_BASE_FREQ_CPU_MASK _IOR(ISST_IF_MAGIC, 15, struct isst_perf_level_cpu_mask *)
#define ISST_IF_GET_TURBO_FREQ_INFO _IOR(ISST_IF_MAGIC, 16, struct isst_turbo_freq_info *)
#endif
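A hedged sketch of how these ioctls are driven from user space: open the Speed Select character device and query the SST-PP level information for package 0, power domain 0. The /dev/isst_interface path matches what the intel-speed-select tool uses, but treat the path and the zeroed ids as illustrative assumptions rather than part of this patch.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/isst_if.h>

int main(void)
{
	struct isst_perf_level_info info;
	int fd = open("/dev/isst_interface", O_RDWR);

	if (fd < 0) {
		perror("open /dev/isst_interface");
		return 1;
	}
	memset(&info, 0, sizeof(info));
	info.socket_id = 0;		/* package 0 */
	info.power_domain_id = 0;	/* first power domain */
	if (ioctl(fd, ISST_IF_PERF_LEVELS, &info) == 0)
		printf("current SST-PP level %u of max %u (level_mask 0x%x)\n",
		       (unsigned)info.current_level, (unsigned)info.max_level,
		       (unsigned)info.level_mask);
	else
		perror("ISST_IF_PERF_LEVELS");
	return 0;
}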

View File

@ -12,6 +12,8 @@
/* kexec flags for different usage scenarios */
#define KEXEC_ON_CRASH 0x00000001
#define KEXEC_PRESERVE_CONTEXT 0x00000002
#define KEXEC_UPDATE_ELFCOREHDR 0x00000004
#define KEXEC_CRASH_HOTPLUG_SUPPORT 0x00000008
#define KEXEC_ARCH_MASK 0xffff0000
/*
@ -24,6 +26,7 @@
#define KEXEC_FILE_UNLOAD 0x00000001
#define KEXEC_FILE_ON_CRASH 0x00000002
#define KEXEC_FILE_NO_INITRAMFS 0x00000004
#define KEXEC_FILE_DEBUG 0x00000008
/* These values match the ELF architecture values.
* Unless there is a good reason that should continue to be the case.
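To show how the KEXEC_FILE_* flags are consumed, here is a minimal sketch that loads a crash kernel with kexec_file_load(2) and requests debug output from the in-kernel loader via the new KEXEC_FILE_DEBUG flag. The image paths and command line are placeholders, glibc provides no wrapper so syscall() is used, and the call requires CAP_SYS_BOOT on an architecture where the syscall is available.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/kexec.h>

int main(void)
{
	const char *cmdline = "console=ttyS0 maxcpus=1 reset_devices";
	int kernel_fd = open("/boot/vmlinuz", O_RDONLY);	/* placeholder path */
	int initrd_fd = open("/boot/initrd.img", O_RDONLY);	/* placeholder path */

	if (kernel_fd < 0 || initrd_fd < 0)
		return 1;
	/* cmdline_len must include the terminating NUL byte. */
	if (syscall(SYS_kexec_file_load, kernel_fd, initrd_fd,
		    strlen(cmdline) + 1, cmdline,
		    (unsigned long)(KEXEC_FILE_ON_CRASH | KEXEC_FILE_DEBUG)) < 0)
		perror("kexec_file_load");
	return 0;
}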

View File

@ -37,9 +37,13 @@
* - 1.9 - Add available memory ioctl
* - 1.10 - Add SMI profiler event log
* - 1.11 - Add unified memory for ctx save/restore area
* - 1.12 - Add DMA buf export ioctl
* - 1.13 - Add debugger API
* - 1.14 - Update kfd_event_data
* - 1.15 - Enable managing mappings in compute VMs with GEM_VA ioctl
*/
#define KFD_IOCTL_MAJOR_VERSION 1
#define KFD_IOCTL_MINOR_VERSION 11
#define KFD_IOCTL_MINOR_VERSION 15
struct kfd_ioctl_get_version_args {
__u32 major_version; /* from KFD */
@ -109,6 +113,32 @@ struct kfd_ioctl_get_available_memory_args {
__u32 pad;
};
struct kfd_dbg_device_info_entry {
__u64 exception_status;
__u64 lds_base;
__u64 lds_limit;
__u64 scratch_base;
__u64 scratch_limit;
__u64 gpuvm_base;
__u64 gpuvm_limit;
__u32 gpu_id;
__u32 location_id;
__u32 vendor_id;
__u32 device_id;
__u32 revision_id;
__u32 subsystem_vendor_id;
__u32 subsystem_device_id;
__u32 fw_version;
__u32 gfx_target_version;
__u32 simd_count;
__u32 max_waves_per_simd;
__u32 array_count;
__u32 simd_arrays_per_engine;
__u32 num_xcc;
__u32 capability;
__u32 debug_prop;
};
/* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */
#define KFD_IOC_CACHE_POLICY_COHERENT 0
#define KFD_IOC_CACHE_POLICY_NONCOHERENT 1
@ -292,12 +322,20 @@ struct kfd_hsa_hw_exception_data {
__u32 gpu_id;
};
/* hsa signal event data */
struct kfd_hsa_signal_event_data {
__u64 last_event_age; /* to and from KFD */
};
/* Event data */
struct kfd_event_data {
union {
/* From KFD */
struct kfd_hsa_memory_exception_data memory_exception_data;
struct kfd_hsa_hw_exception_data hw_exception_data;
}; /* From KFD */
/* To and From KFD */
struct kfd_hsa_signal_event_data signal_event_data;
};
__u64 kfd_event_data_ext; /* pointer to an extension structure
for future exception types */
__u32 event_id; /* to KFD */
@ -368,6 +406,7 @@ struct kfd_ioctl_acquire_vm_args {
#define KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM (1 << 27)
#define KFD_IOC_ALLOC_MEM_FLAGS_COHERENT (1 << 26)
#define KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED (1 << 25)
#define KFD_IOC_ALLOC_MEM_FLAGS_EXT_COHERENT (1 << 24)
/* Allocate memory for later SVM (shared virtual memory) mapping.
*
@ -463,6 +502,12 @@ struct kfd_ioctl_import_dmabuf_args {
__u32 dmabuf_fd; /* to KFD */
};
struct kfd_ioctl_export_dmabuf_args {
__u64 handle; /* to KFD */
__u32 flags; /* to KFD */
__u32 dmabuf_fd; /* from KFD */
};
/*
* KFD SMI(System Management Interface) events
*/
@ -616,6 +661,8 @@ enum kfd_mmio_remap {
#define KFD_IOCTL_SVM_FLAG_GPU_READ_MOSTLY 0x00000020
/* Keep GPU memory mapping always valid as if XNACK is disable */
#define KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED 0x00000040
/* Fine grained coherency between all devices using device-scope atomics */
#define KFD_IOCTL_SVM_FLAG_EXT_COHERENT 0x00000080
/**
* kfd_ioctl_svm_op - SVM ioctl operations
@ -766,6 +813,651 @@ struct kfd_ioctl_set_xnack_mode_args {
__s32 xnack_enabled;
};
/* Wave launch override modes */
enum kfd_dbg_trap_override_mode {
KFD_DBG_TRAP_OVERRIDE_OR = 0,
KFD_DBG_TRAP_OVERRIDE_REPLACE = 1
};
/* Wave launch overrides */
enum kfd_dbg_trap_mask {
KFD_DBG_TRAP_MASK_FP_INVALID = 1,
KFD_DBG_TRAP_MASK_FP_INPUT_DENORMAL = 2,
KFD_DBG_TRAP_MASK_FP_DIVIDE_BY_ZERO = 4,
KFD_DBG_TRAP_MASK_FP_OVERFLOW = 8,
KFD_DBG_TRAP_MASK_FP_UNDERFLOW = 16,
KFD_DBG_TRAP_MASK_FP_INEXACT = 32,
KFD_DBG_TRAP_MASK_INT_DIVIDE_BY_ZERO = 64,
KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH = 128,
KFD_DBG_TRAP_MASK_DBG_MEMORY_VIOLATION = 256,
KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START = (1 << 30),
KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END = (1 << 31)
};
/* Wave launch modes */
enum kfd_dbg_trap_wave_launch_mode {
KFD_DBG_TRAP_WAVE_LAUNCH_MODE_NORMAL = 0,
KFD_DBG_TRAP_WAVE_LAUNCH_MODE_HALT = 1,
KFD_DBG_TRAP_WAVE_LAUNCH_MODE_DEBUG = 3
};
/* Address watch modes */
enum kfd_dbg_trap_address_watch_mode {
KFD_DBG_TRAP_ADDRESS_WATCH_MODE_READ = 0,
KFD_DBG_TRAP_ADDRESS_WATCH_MODE_NONREAD = 1,
KFD_DBG_TRAP_ADDRESS_WATCH_MODE_ATOMIC = 2,
KFD_DBG_TRAP_ADDRESS_WATCH_MODE_ALL = 3
};
/* Additional wave settings */
enum kfd_dbg_trap_flags {
KFD_DBG_TRAP_FLAG_SINGLE_MEM_OP = 1,
};
/* Trap exceptions */
enum kfd_dbg_trap_exception_code {
EC_NONE = 0,
/* per queue */
EC_QUEUE_WAVE_ABORT = 1,
EC_QUEUE_WAVE_TRAP = 2,
EC_QUEUE_WAVE_MATH_ERROR = 3,
EC_QUEUE_WAVE_ILLEGAL_INSTRUCTION = 4,
EC_QUEUE_WAVE_MEMORY_VIOLATION = 5,
EC_QUEUE_WAVE_APERTURE_VIOLATION = 6,
EC_QUEUE_PACKET_DISPATCH_DIM_INVALID = 16,
EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID = 17,
EC_QUEUE_PACKET_DISPATCH_CODE_INVALID = 18,
EC_QUEUE_PACKET_RESERVED = 19,
EC_QUEUE_PACKET_UNSUPPORTED = 20,
EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID = 21,
EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID = 22,
EC_QUEUE_PACKET_VENDOR_UNSUPPORTED = 23,
EC_QUEUE_PREEMPTION_ERROR = 30,
EC_QUEUE_NEW = 31,
/* per device */
EC_DEVICE_QUEUE_DELETE = 32,
EC_DEVICE_MEMORY_VIOLATION = 33,
EC_DEVICE_RAS_ERROR = 34,
EC_DEVICE_FATAL_HALT = 35,
EC_DEVICE_NEW = 36,
/* per process */
EC_PROCESS_RUNTIME = 48,
EC_PROCESS_DEVICE_REMOVE = 49,
EC_MAX
};
/* Mask generated by ecode in kfd_dbg_trap_exception_code */
#define KFD_EC_MASK(ecode) (1ULL << (ecode - 1))
/* Masks for exception code type checks below */
#define KFD_EC_MASK_QUEUE (KFD_EC_MASK(EC_QUEUE_WAVE_ABORT) | \
KFD_EC_MASK(EC_QUEUE_WAVE_TRAP) | \
KFD_EC_MASK(EC_QUEUE_WAVE_MATH_ERROR) | \
KFD_EC_MASK(EC_QUEUE_WAVE_ILLEGAL_INSTRUCTION) | \
KFD_EC_MASK(EC_QUEUE_WAVE_MEMORY_VIOLATION) | \
KFD_EC_MASK(EC_QUEUE_WAVE_APERTURE_VIOLATION) | \
KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_DIM_INVALID) | \
KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID) | \
KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_CODE_INVALID) | \
KFD_EC_MASK(EC_QUEUE_PACKET_RESERVED) | \
KFD_EC_MASK(EC_QUEUE_PACKET_UNSUPPORTED) | \
KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID) | \
KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID) | \
KFD_EC_MASK(EC_QUEUE_PACKET_VENDOR_UNSUPPORTED) | \
KFD_EC_MASK(EC_QUEUE_PREEMPTION_ERROR) | \
KFD_EC_MASK(EC_QUEUE_NEW))
#define KFD_EC_MASK_DEVICE (KFD_EC_MASK(EC_DEVICE_QUEUE_DELETE) | \
KFD_EC_MASK(EC_DEVICE_RAS_ERROR) | \
KFD_EC_MASK(EC_DEVICE_FATAL_HALT) | \
KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION) | \
KFD_EC_MASK(EC_DEVICE_NEW))
#define KFD_EC_MASK_PROCESS (KFD_EC_MASK(EC_PROCESS_RUNTIME) | \
KFD_EC_MASK(EC_PROCESS_DEVICE_REMOVE))
#define KFD_EC_MASK_PACKET (KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_DIM_INVALID) | \
KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID) | \
KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_CODE_INVALID) | \
KFD_EC_MASK(EC_QUEUE_PACKET_RESERVED) | \
KFD_EC_MASK(EC_QUEUE_PACKET_UNSUPPORTED) | \
KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID) | \
KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID) | \
KFD_EC_MASK(EC_QUEUE_PACKET_VENDOR_UNSUPPORTED))
/* Checks for exception code types for KFD search */
#define KFD_DBG_EC_IS_VALID(ecode) (ecode > EC_NONE && ecode < EC_MAX)
#define KFD_DBG_EC_TYPE_IS_QUEUE(ecode) \
(KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_QUEUE))
#define KFD_DBG_EC_TYPE_IS_DEVICE(ecode) \
(KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_DEVICE))
#define KFD_DBG_EC_TYPE_IS_PROCESS(ecode) \
(KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_PROCESS))
#define KFD_DBG_EC_TYPE_IS_PACKET(ecode) \
(KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_PACKET))
/* Runtime enable states */
enum kfd_dbg_runtime_state {
DEBUG_RUNTIME_STATE_DISABLED = 0,
DEBUG_RUNTIME_STATE_ENABLED = 1,
DEBUG_RUNTIME_STATE_ENABLED_BUSY = 2,
DEBUG_RUNTIME_STATE_ENABLED_ERROR = 3
};
/* Runtime enable status */
struct kfd_runtime_info {
__u64 r_debug;
__u32 runtime_state;
__u32 ttmp_setup;
};
/* Enable modes for runtime enable */
#define KFD_RUNTIME_ENABLE_MODE_ENABLE_MASK 1
#define KFD_RUNTIME_ENABLE_MODE_TTMP_SAVE_MASK 2
/**
* kfd_ioctl_runtime_enable_args - Arguments for runtime enable
*
* Coordinates debug exception signalling and debug device enablement with runtime.
*
* @r_debug - pointer to user struct for sharing information between ROCr and the debugger
* @mode_mask - mask to set mode
* KFD_RUNTIME_ENABLE_MODE_ENABLE_MASK - enable runtime for debugging, otherwise disable
* KFD_RUNTIME_ENABLE_MODE_TTMP_SAVE_MASK - enable trap temporary setup (ignore on disable)
* @capabilities_mask - mask to notify runtime on what KFD supports
*
* Return - 0 on SUCCESS.
* - EBUSY if runtime enable call already pending.
* - EEXIST if user queues already active prior to call.
* If process is debug enabled, runtime enable will enable debug devices and
* wait for debugger process to send runtime exception EC_PROCESS_RUNTIME
* to unblock - see kfd_ioctl_dbg_trap_args.
*
*/
struct kfd_ioctl_runtime_enable_args {
__u64 r_debug;
__u32 mode_mask;
__u32 capabilities_mask;
};
/* Queue information */
struct kfd_queue_snapshot_entry {
__u64 exception_status;
__u64 ring_base_address;
__u64 write_pointer_address;
__u64 read_pointer_address;
__u64 ctx_save_restore_address;
__u32 queue_id;
__u32 gpu_id;
__u32 ring_size;
__u32 queue_type;
__u32 ctx_save_restore_area_size;
__u32 reserved;
};
/* Queue status return for suspend/resume */
#define KFD_DBG_QUEUE_ERROR_BIT 30
#define KFD_DBG_QUEUE_INVALID_BIT 31
#define KFD_DBG_QUEUE_ERROR_MASK (1 << KFD_DBG_QUEUE_ERROR_BIT)
#define KFD_DBG_QUEUE_INVALID_MASK (1 << KFD_DBG_QUEUE_INVALID_BIT)
/* Context save area header information */
struct kfd_context_save_area_header {
struct {
__u32 control_stack_offset;
__u32 control_stack_size;
__u32 wave_state_offset;
__u32 wave_state_size;
} wave_state;
__u32 debug_offset;
__u32 debug_size;
__u64 err_payload_addr;
__u32 err_event_id;
__u32 reserved1;
};
/*
* Debug operations
*
* For specifics on usage and return values, see documentation per operation
* below. Otherwise, generic error returns apply:
* - ESRCH if the process to debug does not exist.
*
* - EINVAL (with KFD_IOC_DBG_TRAP_ENABLE exempt) if operation
* KFD_IOC_DBG_TRAP_ENABLE has not succeeded prior.
* Also returns this error if GPU hardware scheduling is not supported.
*
* - EPERM (with KFD_IOC_DBG_TRAP_DISABLE exempt) if target process is not
* PTRACE_ATTACHED. KFD_IOC_DBG_TRAP_DISABLE is exempt to allow
* clean up of debug mode as long as process is debug enabled.
*
* - EACCES if any DBG_HW_OP (debug hardware operation) is requested when
* AMDKFD_IOC_RUNTIME_ENABLE has not succeeded prior.
*
* - ENODEV if any GPU does not support debugging on a DBG_HW_OP call.
*
* - Other errors may be returned when a DBG_HW_OP occurs while the GPU
* is in a fatal state.
*
*/
enum kfd_dbg_trap_operations {
KFD_IOC_DBG_TRAP_ENABLE = 0,
KFD_IOC_DBG_TRAP_DISABLE = 1,
KFD_IOC_DBG_TRAP_SEND_RUNTIME_EVENT = 2,
KFD_IOC_DBG_TRAP_SET_EXCEPTIONS_ENABLED = 3,
KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE = 4, /* DBG_HW_OP */
KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE = 5, /* DBG_HW_OP */
KFD_IOC_DBG_TRAP_SUSPEND_QUEUES = 6, /* DBG_HW_OP */
KFD_IOC_DBG_TRAP_RESUME_QUEUES = 7, /* DBG_HW_OP */
KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH = 8, /* DBG_HW_OP */
KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH = 9, /* DBG_HW_OP */
KFD_IOC_DBG_TRAP_SET_FLAGS = 10,
KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT = 11,
KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO = 12,
KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT = 13,
KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT = 14
};
/**
* kfd_ioctl_dbg_trap_enable_args
*
* Arguments for KFD_IOC_DBG_TRAP_ENABLE.
*
* Enables debug session for target process. Call @op KFD_IOC_DBG_TRAP_DISABLE in
* kfd_ioctl_dbg_trap_args to disable debug session.
*
* @exception_mask (IN) - exceptions to raise to the debugger
* @rinfo_ptr (IN) - pointer to runtime info buffer (see kfd_runtime_info)
* @rinfo_size (IN/OUT) - size of runtime info buffer in bytes
* @dbg_fd (IN) - fd that KFD will use to notify the debugger of raised
* exceptions set in exception_mask.
*
* Generic errors apply (see kfd_dbg_trap_operations).
* Return - 0 on SUCCESS.
* Copies KFD saved kfd_runtime_info to @rinfo_ptr on enable.
* Size of kfd_runtime_info saved by KFD is returned in @rinfo_size.
* - EBADF if KFD cannot get a reference to dbg_fd.
* - EFAULT if KFD cannot copy runtime info to rinfo_ptr.
* - EINVAL if target process is already debug enabled.
*
*/
struct kfd_ioctl_dbg_trap_enable_args {
__u64 exception_mask;
__u64 rinfo_ptr;
__u32 rinfo_size;
__u32 dbg_fd;
};
/**
* kfd_ioctl_dbg_trap_send_runtime_event_args
*
* Arguments for KFD_IOC_DBG_TRAP_SEND_RUNTIME_EVENT.
* Raises exceptions to runtime.
*
* @exception_mask (IN) - exceptions to raise to runtime
* @gpu_id (IN) - target device id
* @queue_id (IN) - target queue id
*
* Generic errors apply (see kfd_dbg_trap_operations).
* Return - 0 on SUCCESS.
* - ENODEV if gpu_id not found.
* If exception_mask contains EC_PROCESS_RUNTIME, unblocks pending
* AMDKFD_IOC_RUNTIME_ENABLE call - see kfd_ioctl_runtime_enable_args.
* All other exceptions are raised to runtime through err_payload_addr.
* See kfd_context_save_area_header.
*/
struct kfd_ioctl_dbg_trap_send_runtime_event_args {
__u64 exception_mask;
__u32 gpu_id;
__u32 queue_id;
};
/**
* kfd_ioctl_dbg_trap_set_exceptions_enabled_args
*
* Arguments for KFD_IOC_DBG_TRAP_SET_EXCEPTIONS_ENABLED
* Set new exceptions to be raised to the debugger.
*
* @exception_mask (IN) - new exceptions to raise to the debugger
*
* Generic errors apply (see kfd_dbg_trap_operations).
* Return - 0 on SUCCESS.
*/
struct kfd_ioctl_dbg_trap_set_exceptions_enabled_args {
__u64 exception_mask;
};
/**
* kfd_ioctl_dbg_trap_set_wave_launch_override_args
*
* Arguments for KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE
* Enable HW exceptions to raise trap.
*
* @override_mode (IN) - see kfd_dbg_trap_override_mode
* @enable_mask (IN/OUT) - reference kfd_dbg_trap_mask.
* IN is the override modes requested to be enabled.
* OUT is referenced in Return below.
* @support_request_mask (IN/OUT) - reference kfd_dbg_trap_mask.
* IN is the override modes requested for support check.
* OUT is referenced in Return below.
*
* Generic errors apply (see kfd_dbg_trap_operations).
* Return - 0 on SUCCESS.
* Previous enablement is returned in @enable_mask.
* Actual override support is returned in @support_request_mask.
* - EINVAL if override mode is not supported.
* - EACCES if trap support requested is not actually supported.
* i.e. enable_mask (IN) is not a subset of support_request_mask (OUT).
* Otherwise it is considered a generic error (see kfd_dbg_trap_operations).
*/
struct kfd_ioctl_dbg_trap_set_wave_launch_override_args {
__u32 override_mode;
__u32 enable_mask;
__u32 support_request_mask;
__u32 pad;
};
/**
* kfd_ioctl_dbg_trap_set_wave_launch_mode_args
*
* Arguments for KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE
* Set wave launch mode.
*
* @launch_mode (IN) - see kfd_dbg_trap_wave_launch_mode
*
* Generic errors apply (see kfd_dbg_trap_operations).
* Return - 0 on SUCCESS.
*/
struct kfd_ioctl_dbg_trap_set_wave_launch_mode_args {
__u32 launch_mode;
__u32 pad;
};
/**
* kfd_ioctl_dbg_trap_suspend_queues_args
*
* Arguments for KFD_IOC_DBG_TRAP_SUSPEND_QUEUES
* Suspend queues.
*
* @exception_mask (IN) - raised exceptions to clear
* @queue_array_ptr (IN) - pointer to array of queue ids (u32 per queue id)
* to suspend
* @num_queues (IN) - number of queues to suspend in @queue_array_ptr
* @grace_period (IN) - wave time allowance before preemption
* per 1K GPU clock cycle unit
*
* Generic errors apply (see kfd_dbg_trap_operations).
* Destruction of a suspended queue is blocked until the queue is
* resumed. This allows the debugger to access queue information and
* its context save area without running into a race condition on
* queue destruction.
* Automatically copies per queue context save area header information
* into the save area base
* (see kfd_queue_snapshot_entry and kfd_context_save_area_header).
*
* Return - Number of queues suspended on SUCCESS.
* KFD_DBG_QUEUE_ERROR_MASK and KFD_DBG_QUEUE_INVALID_MASK masked
* for each queue id in @queue_array_ptr array reports unsuccessful
* suspend reason.
* KFD_DBG_QUEUE_ERROR_MASK = HW failure.
* KFD_DBG_QUEUE_INVALID_MASK = queue does not exist, is new or
* is being destroyed.
*/
struct kfd_ioctl_dbg_trap_suspend_queues_args {
__u64 exception_mask;
__u64 queue_array_ptr;
__u32 num_queues;
__u32 grace_period;
};
/**
* kfd_ioctl_dbg_trap_resume_queues_args
*
* Arguments for KFD_IOC_DBG_TRAP_RESUME_QUEUES
* Resume queues.
*
* @queue_array_ptr (IN) - pointer to array of queue ids (u32 per queue id)
* to resume
* @num_queues (IN) - number of queues to resume in @queue_array_ptr
*
* Generic errors apply (see kfd_dbg_trap_operations).
* Return - Number of queues resumed on SUCCESS.
* KFD_DBG_QUEUE_ERROR_MASK and KFD_DBG_QUEUE_INVALID_MASK mask
* for each queue id in @queue_array_ptr array reports unsuccessful
* resume reason.
* KFD_DBG_QUEUE_ERROR_MASK = HW failure.
* KFD_DBG_QUEUE_INVALID_MASK = queue does not exist.
*/
struct kfd_ioctl_dbg_trap_resume_queues_args {
__u64 queue_array_ptr;
__u32 num_queues;
__u32 pad;
};
/**
* kfd_ioctl_dbg_trap_set_node_address_watch_args
*
* Arguments for KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH
* Sets address watch for device.
*
* @address (IN) - watch address to set
* @mode (IN) - see kfd_dbg_trap_address_watch_mode
* @mask (IN) - watch address mask
* @gpu_id (IN) - target gpu to set watch point
* @id (OUT) - watch id allocated
*
* Generic errors apply (see kfd_dbg_trap_operations).
* Return - 0 on SUCCESS.
* Allocated watch ID returned to @id.
* - ENODEV if gpu_id not found.
* - ENOMEM if watch IDs cannot be allocated
*/
struct kfd_ioctl_dbg_trap_set_node_address_watch_args {
__u64 address;
__u32 mode;
__u32 mask;
__u32 gpu_id;
__u32 id;
};
/**
* kfd_ioctl_dbg_trap_clear_node_address_watch_args
*
* Arguments for KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH
* Clear address watch for device.
*
* @gpu_id (IN) - target device to clear watch point
* @id (IN) - allocated watch id to clear
*
* Generic errors apply (see kfd_dbg_trap_operations).
* Return - 0 on SUCCESS.
* - ENODEV if gpu_id not found.
* - EINVAL if watch ID has not been allocated.
*/
struct kfd_ioctl_dbg_trap_clear_node_address_watch_args {
__u32 gpu_id;
__u32 id;
};
/**
* kfd_ioctl_dbg_trap_set_flags_args
*
* Arguments for KFD_IOC_DBG_TRAP_SET_FLAGS
* Sets flags for wave behaviour.
*
* @flags (IN/OUT) - IN = flags to enable, OUT = flags previously enabled
*
* Generic errors apply (see kfd_dbg_trap_operations).
* Return - 0 on SUCCESS.
* - EACCES if any debug device does not allow flag options.
*/
struct kfd_ioctl_dbg_trap_set_flags_args {
__u32 flags;
__u32 pad;
};
/**
* kfd_ioctl_dbg_trap_query_debug_event_args
*
* Arguments for KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT
*
* Find one or more raised exceptions. This function can return multiple
* exceptions from a single queue or a single device with one call. To find
* all raised exceptions, this function must be called repeatedly until it
* returns -EAGAIN. Returned exceptions can optionally be cleared by
* setting the corresponding bit in the @exception_mask input parameter.
* However, clearing an exception prevents retrieving further information
* about it with KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO.
*
* @exception_mask (IN/OUT) - exception to clear (IN) and raised (OUT)
* @gpu_id (OUT) - gpu id of exceptions raised
* @queue_id (OUT) - queue id of exceptions raised
*
* Generic errors apply (see kfd_dbg_trap_operations).
* Return - 0 on raised exception found
* Raised exceptions found are returned in @exception_mask
* with reported source id returned in @gpu_id or @queue_id.
* - EAGAIN if no raised exception has been found
*/
struct kfd_ioctl_dbg_trap_query_debug_event_args {
__u64 exception_mask;
__u32 gpu_id;
__u32 queue_id;
};
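The repeated-call protocol described above can be sketched as a small drain loop: keep issuing KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT, clearing each exception as it is reported, until the ioctl fails with EAGAIN. The kfd file descriptor and target pid are assumed to come from an earlier, already successful KFD_IOC_DBG_TRAP_ENABLE; the chosen exception mask is illustrative only.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

/* Drain all currently raised exceptions for an already debug-enabled process. */
void drain_debug_events(int kfd, __u32 pid)
{
	struct kfd_ioctl_dbg_trap_args args;

	for (;;) {
		memset(&args, 0, sizeof(args));
		args.pid = pid;
		args.op = KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT;
		/* Clear every queue/device exception as it is consumed. */
		args.query_debug_event.exception_mask = KFD_EC_MASK_QUEUE |
							KFD_EC_MASK_DEVICE;
		if (ioctl(kfd, AMDKFD_IOC_DBG_TRAP, &args) < 0) {
			if (errno != EAGAIN)
				perror("QUERY_DEBUG_EVENT");
			break;	/* EAGAIN: no more raised exceptions */
		}
		printf("exceptions 0x%llx on gpu %u queue %u\n",
		       (unsigned long long)args.query_debug_event.exception_mask,
		       args.query_debug_event.gpu_id,
		       args.query_debug_event.queue_id);
	}
}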
/**
* kfd_ioctl_dbg_trap_query_exception_info_args
*
* Arguments for KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO
* Get additional info on raised exception.
*
* @info_ptr (IN) - pointer to exception info buffer to copy to
* @info_size (IN/OUT) - exception info buffer size (bytes)
* @source_id (IN) - target gpu or queue id
* @exception_code (IN) - target exception
* @clear_exception (IN) - clear raised @exception_code exception
* (0 = false, 1 = true)
*
* Generic errors apply (see kfd_dbg_trap_operations).
* Return - 0 on SUCCESS.
* If @exception_code is EC_DEVICE_MEMORY_VIOLATION, copy @info_size(OUT)
* bytes of memory exception data to @info_ptr.
* If @exception_code is EC_PROCESS_RUNTIME, copy saved
* kfd_runtime_info to @info_ptr.
* Actual required @info_ptr size (bytes) is returned in @info_size.
*/
struct kfd_ioctl_dbg_trap_query_exception_info_args {
__u64 info_ptr;
__u32 info_size;
__u32 source_id;
__u32 exception_code;
__u32 clear_exception;
};
/**
* kfd_ioctl_dbg_trap_get_queue_snapshot_args
*
* Arguments for KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT
* Get queue information.
*
* @exception_mask (IN) - exceptions raised to clear
* @snapshot_buf_ptr (IN) - queue snapshot entry buffer (see kfd_queue_snapshot_entry)
* @num_queues (IN/OUT) - number of queue snapshot entries
* The debugger specifies the size of the array allocated in @num_queues.
* KFD returns the number of queues that actually existed. If this is
* larger than the size specified by the debugger, KFD will not overflow
* the array allocated by the debugger.
*
* @entry_size (IN/OUT) - size per entry in bytes
* The debugger specifies sizeof(struct kfd_queue_snapshot_entry) in
* @entry_size. KFD returns the number of bytes actually populated per
* entry. The debugger should use the KFD_IOCTL_MINOR_VERSION to determine
* which fields in struct kfd_queue_snapshot_entry are valid. This allows
* growing the ABI in a backwards compatible manner.
* Note that entry_size(IN) should still be used to stride the snapshot buffer in the
* event that it's larger than actual kfd_queue_snapshot_entry.
*
* Generic errors apply (see kfd_dbg_trap_operations).
* Return - 0 on SUCCESS.
* Copies @num_queues(IN) queue snapshot entries of size @entry_size(IN)
* into @snapshot_buf_ptr if @num_queues(IN) > 0.
* Otherwise return @num_queues(OUT) queue snapshot entries that exist.
*/
struct kfd_ioctl_dbg_trap_queue_snapshot_args {
__u64 exception_mask;
__u64 snapshot_buf_ptr;
__u32 num_queues;
__u32 entry_size;
};
/**
* kfd_ioctl_dbg_trap_get_device_snapshot_args
*
* Arguments for KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT
* Get device information.
*
* @exception_mask (IN) - exceptions raised to clear
* @snapshot_buf_ptr (IN) - pointer to snapshot buffer (see kfd_dbg_device_info_entry)
* @num_devices (IN/OUT) - number of debug devices to snapshot
* The debugger specifies the size of the array allocated in @num_devices.
* KFD returns the number of devices that actually existed. If this is
* larger than the size specified by the debugger, KFD will not overflow
* the array allocated by the debugger.
*
* @entry_size (IN/OUT) - size per entry in bytes
* The debugger specifies sizeof(struct kfd_dbg_device_info_entry) in
* @entry_size. KFD returns the number of bytes actually populated. The
* debugger should use KFD_IOCTL_MINOR_VERSION to determine which fields
* in struct kfd_dbg_device_info_entry are valid. This allows growing the
* ABI in a backwards compatible manner.
* Note that entry_size(IN) should still be used to stride the snapshot buffer in the
* event that it's larger than actual kfd_dbg_device_info_entry.
*
* Generic errors apply (see kfd_dbg_trap_operations).
* Return - 0 on SUCCESS.
* Copies @num_devices(IN) device snapshot entries of size @entry_size(IN)
* into @snapshot_buf_ptr if @num_devices(IN) > 0.
* Otherwise return @num_devices(OUT) device snapshot entries that exist.
*/
struct kfd_ioctl_dbg_trap_device_snapshot_args {
__u64 exception_mask;
__u64 snapshot_buf_ptr;
__u32 num_devices;
__u32 entry_size;
};
/**
* kfd_ioctl_dbg_trap_args
*
* Arguments to debug target process.
*
* @pid - target process to debug
* @op - debug operation (see kfd_dbg_trap_operations)
*
* @op determines which union struct args to use.
* Refer to kern docs for each kfd_ioctl_dbg_trap_*_args struct.
*/
struct kfd_ioctl_dbg_trap_args {
__u32 pid;
__u32 op;
union {
struct kfd_ioctl_dbg_trap_enable_args enable;
struct kfd_ioctl_dbg_trap_send_runtime_event_args send_runtime_event;
struct kfd_ioctl_dbg_trap_set_exceptions_enabled_args set_exceptions_enabled;
struct kfd_ioctl_dbg_trap_set_wave_launch_override_args launch_override;
struct kfd_ioctl_dbg_trap_set_wave_launch_mode_args launch_mode;
struct kfd_ioctl_dbg_trap_suspend_queues_args suspend_queues;
struct kfd_ioctl_dbg_trap_resume_queues_args resume_queues;
struct kfd_ioctl_dbg_trap_set_node_address_watch_args set_node_address_watch;
struct kfd_ioctl_dbg_trap_clear_node_address_watch_args clear_node_address_watch;
struct kfd_ioctl_dbg_trap_set_flags_args set_flags;
struct kfd_ioctl_dbg_trap_query_debug_event_args query_debug_event;
struct kfd_ioctl_dbg_trap_query_exception_info_args query_exception_info;
struct kfd_ioctl_dbg_trap_queue_snapshot_args queue_snapshot;
struct kfd_ioctl_dbg_trap_device_snapshot_args device_snapshot;
};
};
#define AMDKFD_IOCTL_BASE 'K'
#define AMDKFD_IO(nr) _IO(AMDKFD_IOCTL_BASE, nr)
#define AMDKFD_IOR(nr, type) _IOR(AMDKFD_IOCTL_BASE, nr, type)
@ -877,7 +1569,16 @@ struct kfd_ioctl_set_xnack_mode_args {
#define AMDKFD_IOC_AVAILABLE_MEMORY \
AMDKFD_IOWR(0x23, struct kfd_ioctl_get_available_memory_args)
#define AMDKFD_IOC_EXPORT_DMABUF \
AMDKFD_IOWR(0x24, struct kfd_ioctl_export_dmabuf_args)
#define AMDKFD_IOC_RUNTIME_ENABLE \
AMDKFD_IOWR(0x25, struct kfd_ioctl_runtime_enable_args)
#define AMDKFD_IOC_DBG_TRAP \
AMDKFD_IOWR(0x26, struct kfd_ioctl_dbg_trap_args)
#define AMDKFD_COMMAND_START 0x01
#define AMDKFD_COMMAND_END 0x24
#define AMDKFD_COMMAND_END 0x27
#endif
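Putting the pieces together, here is a hedged sketch of a debugger front-end enabling a debug session on a target process: it fills kfd_ioctl_dbg_trap_args with op KFD_IOC_DBG_TRAP_ENABLE and the enable sub-struct documented above. The /dev/kfd node is the standard KFD device, but the pipe used for dbg_fd and the chosen exception mask are illustrative assumptions, and the target must already be ptrace-attached per the generic error rules for these operations.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/kfd_ioctl.h>

int main(int argc, char **argv)
{
	struct kfd_runtime_info rinfo;
	struct kfd_ioctl_dbg_trap_args args;
	int notify[2];				/* placeholder notification fd pair */
	int kfd = open("/dev/kfd", O_RDWR | O_CLOEXEC);

	if (argc < 2 || kfd < 0 || pipe(notify) < 0)
		return 1;

	memset(&rinfo, 0, sizeof(rinfo));
	memset(&args, 0, sizeof(args));
	args.pid = (__u32)atoi(argv[1]);	/* target process to debug */
	args.op = KFD_IOC_DBG_TRAP_ENABLE;
	args.enable.exception_mask = KFD_EC_MASK(EC_QUEUE_WAVE_TRAP) |
				     KFD_EC_MASK(EC_PROCESS_RUNTIME);
	args.enable.rinfo_ptr = (__u64)(uintptr_t)&rinfo;
	args.enable.rinfo_size = sizeof(rinfo);
	args.enable.dbg_fd = notify[1];	/* fd KFD notifies on raised exceptions */

	if (ioctl(kfd, AMDKFD_IOC_DBG_TRAP, &args) < 0)
		perror("AMDKFD_IOC_DBG_TRAP(ENABLE)");
	else
		printf("runtime_state=%u ttmp_setup=%u\n",
		       rinfo.runtime_state, rinfo.ttmp_setup);
	return 0;
}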

View File

@ -43,6 +43,11 @@
#define HSA_CAP_DOORBELL_TYPE_2_0 0x2
#define HSA_CAP_AQL_QUEUE_DOUBLE_MAP 0x00004000
#define HSA_CAP_TRAP_DEBUG_SUPPORT 0x00008000
#define HSA_CAP_TRAP_DEBUG_WAVE_LAUNCH_TRAP_OVERRIDE_SUPPORTED 0x00010000
#define HSA_CAP_TRAP_DEBUG_WAVE_LAUNCH_MODE_SUPPORTED 0x00020000
#define HSA_CAP_TRAP_DEBUG_PRECISE_MEMORY_OPERATIONS_SUPPORTED 0x00040000
/* Old buggy user mode depends on this being 0 */
#define HSA_CAP_RESERVED_WAS_SRAM_EDCSUPPORTED 0x00080000
@ -53,8 +58,18 @@
#define HSA_CAP_SRAM_EDCSUPPORTED 0x04000000
#define HSA_CAP_SVMAPI_SUPPORTED 0x08000000
#define HSA_CAP_FLAGS_COHERENTHOSTACCESS 0x10000000
#define HSA_CAP_TRAP_DEBUG_FIRMWARE_SUPPORTED 0x20000000
#define HSA_CAP_RESERVED 0xe00f8000
/* debug_prop bits in node properties */
#define HSA_DBG_WATCH_ADDR_MASK_LO_BIT_MASK 0x0000000f
#define HSA_DBG_WATCH_ADDR_MASK_LO_BIT_SHIFT 0
#define HSA_DBG_WATCH_ADDR_MASK_HI_BIT_MASK 0x000003f0
#define HSA_DBG_WATCH_ADDR_MASK_HI_BIT_SHIFT 4
#define HSA_DBG_DISPATCH_INFO_ALWAYS_VALID 0x00000400
#define HSA_DBG_WATCHPOINTS_EXCLUSIVE 0x00000800
#define HSA_DBG_RESERVED 0xfffffffffffff000ull
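A small decoding sketch for the new debug_prop bits: given the debug_prop value reported in a node's sysfs properties, extract the supported watch-address mask bit range and the two capability flags. The sample value is made up for illustration, and reading the value from the KFD topology sysfs files is left out and assumed to happen elsewhere.

#include <stdio.h>
#include <linux/kfd_sysfs.h>

void decode_debug_prop(unsigned long long debug_prop)
{
	unsigned int lo = (debug_prop & HSA_DBG_WATCH_ADDR_MASK_LO_BIT_MASK) >>
			  HSA_DBG_WATCH_ADDR_MASK_LO_BIT_SHIFT;
	unsigned int hi = (debug_prop & HSA_DBG_WATCH_ADDR_MASK_HI_BIT_MASK) >>
			  HSA_DBG_WATCH_ADDR_MASK_HI_BIT_SHIFT;

	printf("watch address mask bits: %u..%u\n", lo, hi);
	printf("dispatch info always valid: %s\n",
	       (debug_prop & HSA_DBG_DISPATCH_INFO_ALWAYS_VALID) ? "yes" : "no");
	printf("watchpoints exclusive: %s\n",
	       (debug_prop & HSA_DBG_WATCHPOINTS_EXCLUSIVE) ? "yes" : "no");
}

int main(void)
{
	decode_debug_prop(0x406ull);	/* sample value for illustration */
	return 0;
}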
/* Heap types in memory properties */
#define HSA_MEM_HEAP_TYPE_SYSTEM 0
#define HSA_MEM_HEAP_TYPE_FB_PUBLIC 1

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff