diff --git a/MAINTAINERS b/MAINTAINERS
index 294d2ce29b73..940194dea746 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -10992,7 +10992,6 @@ F:	drivers/iommu/
 F:	include/linux/iommu.h
 F:	include/linux/iova.h
 F:	include/linux/of_iommu.h
-F:	include/uapi/linux/iommu.h
 
 IOMMUFD
 M:	Jason Gunthorpe
diff --git a/arch/Kconfig b/arch/Kconfig
index 09603e0bc2cc..da16dd35fd1a 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -309,6 +309,11 @@ config ARCH_HAS_DMA_CLEAR_UNCACHED
 config ARCH_HAS_CPU_FINALIZE_INIT
 	bool
 
+# The architecture has a per-task state that includes the mm's PASID
+config ARCH_HAS_CPU_PASID
+	bool
+	select IOMMU_MM_DATA
+
 # Select if arch init_task must go in the __init_task_data section
 config ARCH_TASK_STRUCT_ON_STACK
 	bool
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 23fc49f23d25..5dc4416b75d3 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -1073,7 +1073,6 @@ CONFIG_QCOM_IPCC=y
 CONFIG_OMAP_IOMMU=y
 CONFIG_OMAP_IOMMU_DEBUG=y
 CONFIG_ROCKCHIP_IOMMU=y
-CONFIG_TEGRA_IOMMU_GART=y
 CONFIG_TEGRA_IOMMU_SMMU=y
 CONFIG_EXYNOS_IOMMU=y
 CONFIG_QCOM_IOMMU=y
diff --git a/arch/arm/configs/tegra_defconfig b/arch/arm/configs/tegra_defconfig
index 613f07b8ce15..8635b7216bfc 100644
--- a/arch/arm/configs/tegra_defconfig
+++ b/arch/arm/configs/tegra_defconfig
@@ -292,7 +292,6 @@ CONFIG_CHROME_PLATFORMS=y
 CONFIG_CROS_EC=y
 CONFIG_CROS_EC_I2C=m
 CONFIG_CROS_EC_SPI=m
-CONFIG_TEGRA_IOMMU_GART=y
 CONFIG_TEGRA_IOMMU_SMMU=y
 CONFIG_ARCH_TEGRA_2x_SOC=y
 CONFIG_ARCH_TEGRA_3x_SOC=y
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index efaca0c6eff9..e70924500022 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -1280,34 +1280,64 @@ struct iommu_table_group_ops spapr_tce_table_group_ops = {
 /*
  * A simple iommu_ops to allow less cruft in generic VFIO code.
  */
-static int spapr_tce_blocking_iommu_attach_dev(struct iommu_domain *dom,
-					       struct device *dev)
+static int
+spapr_tce_platform_iommu_attach_dev(struct iommu_domain *platform_domain,
+				    struct device *dev)
 {
+	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
 	struct iommu_group *grp = iommu_group_get(dev);
 	struct iommu_table_group *table_group;
-	int ret = -EINVAL;
 
-	if (!grp)
-		return -ENODEV;
+	/* At first attach the ownership is already set */
+	if (!domain)
+		return 0;
 
 	table_group = iommu_group_get_iommudata(grp);
-	ret = table_group->ops->take_ownership(table_group);
+	/*
+	 * The domain is being set to PLATFORM from the earlier
+	 * BLOCKED one. The table_group ownership has to be released.
+	 */
+	table_group->ops->release_ownership(table_group);
 	iommu_group_put(grp);
 
-	return ret;
+	return 0;
 }
 
-static void spapr_tce_blocking_iommu_set_platform_dma(struct device *dev)
+static const struct iommu_domain_ops spapr_tce_platform_domain_ops = {
+	.attach_dev = spapr_tce_platform_iommu_attach_dev,
+};
+
+static struct iommu_domain spapr_tce_platform_domain = {
+	.type = IOMMU_DOMAIN_PLATFORM,
+	.ops = &spapr_tce_platform_domain_ops,
+};
+
+static int
+spapr_tce_blocked_iommu_attach_dev(struct iommu_domain *platform_domain,
+				   struct device *dev)
 {
 	struct iommu_group *grp = iommu_group_get(dev);
 	struct iommu_table_group *table_group;
+	int ret = -EINVAL;
 
+	/*
+	 * FIXME: SPAPR mixes blocked and platform behaviors, the blocked domain
+	 * also sets the dma_api ops
+	 */
 	table_group = iommu_group_get_iommudata(grp);
-	table_group->ops->release_ownership(table_group);
+	ret = table_group->ops->take_ownership(table_group);
 	iommu_group_put(grp);
+
+	return ret;
 }
 
-static const struct iommu_domain_ops spapr_tce_blocking_domain_ops = {
-	.attach_dev = spapr_tce_blocking_iommu_attach_dev,
+static const struct iommu_domain_ops spapr_tce_blocked_domain_ops = {
+	.attach_dev = spapr_tce_blocked_iommu_attach_dev,
+};
+
+static struct iommu_domain spapr_tce_blocked_domain = {
+	.type = IOMMU_DOMAIN_BLOCKED,
+	.ops = &spapr_tce_blocked_domain_ops,
 };
 
 static bool spapr_tce_iommu_capable(struct device *dev, enum iommu_cap cap)
@@ -1322,22 +1352,6 @@ static bool spapr_tce_iommu_capable(struct device *dev, enum iommu_cap cap)
 	return false;
 }
 
-static struct iommu_domain *spapr_tce_iommu_domain_alloc(unsigned int type)
-{
-	struct iommu_domain *dom;
-
-	if (type != IOMMU_DOMAIN_BLOCKED)
-		return NULL;
-
-	dom = kzalloc(sizeof(*dom), GFP_KERNEL);
-	if (!dom)
-		return NULL;
-
-	dom->ops = &spapr_tce_blocking_domain_ops;
-
-	return dom;
-}
-
 static struct iommu_device *spapr_tce_iommu_probe_device(struct device *dev)
 {
 	struct pci_dev *pdev;
@@ -1371,12 +1385,12 @@ static struct iommu_group *spapr_tce_iommu_device_group(struct device *dev)
 }
 
 static const struct iommu_ops spapr_tce_iommu_ops = {
+	.default_domain = &spapr_tce_platform_domain,
+	.blocked_domain = &spapr_tce_blocked_domain,
 	.capable = spapr_tce_iommu_capable,
-	.domain_alloc = spapr_tce_iommu_domain_alloc,
 	.probe_device = spapr_tce_iommu_probe_device,
 	.release_device = spapr_tce_iommu_release_device,
 	.device_group = spapr_tce_iommu_device_group,
-	.set_platform_dma_ops = spapr_tce_blocking_iommu_set_platform_dma,
 };
 
 static struct attribute *spapr_tce_iommu_attrs[] = {
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 2b5b7d9a24e9..3aeb7e76b648 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -73,6 +73,7 @@ config X86
 	select ARCH_HAS_CACHE_LINE_SIZE
 	select ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION
 	select ARCH_HAS_CPU_FINALIZE_INIT
+	select ARCH_HAS_CPU_PASID if IOMMU_SVA
 	select ARCH_HAS_CURRENT_STACK_POINTER
 	select ARCH_HAS_DEBUG_VIRTUAL
 	select ARCH_HAS_DEBUG_VM_PGTABLE if !X86_PAE
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index a52db362a65d..555a7b640437 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -668,7 +668,7 @@ static bool fixup_iopl_exception(struct pt_regs *regs)
  */
 static bool try_fixup_enqcmd_gp(void)
 {
-#ifdef CONFIG_IOMMU_SVA
+#ifdef CONFIG_ARCH_HAS_CPU_PASID
 	u32 pasid;
 
 	/*
@@ -694,7 +694,7 @@ static bool try_fixup_enqcmd_gp(void)
 	if (!mm_valid_pasid(current->mm))
 		return false;
 
-	pasid = current->mm->pasid;
+	pasid = mm_get_enqcmd_pasid(current->mm);
 
 	/*
	 * Did this thread already
have its PASID activated? diff --git a/config-6.6.63 b/config-6.6.63 new file mode 100644 index 000000000000..d4d63c9ae003 --- /dev/null +++ b/config-6.6.63 @@ -0,0 +1,10949 @@ +# +# Automatically generated file; DO NOT EDIT. +# Linux/arm64 6.6.63 Kernel Configuration +# +CONFIG_CC_VERSION_TEXT="gcc (Debian 12.2.0-14+deb12u1) 12.2.0" +CONFIG_CC_IS_GCC=y +CONFIG_GCC_VERSION=120200 +CONFIG_CLANG_VERSION=0 +CONFIG_AS_IS_GNU=y +CONFIG_AS_VERSION=24000 +CONFIG_LD_IS_BFD=y +CONFIG_LD_VERSION=24000 +CONFIG_LLD_VERSION=0 +CONFIG_CC_CAN_LINK=y +CONFIG_CC_CAN_LINK_STATIC=y +CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y +CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT=y +CONFIG_GCC_ASM_GOTO_OUTPUT_WORKAROUND=y +CONFIG_CC_HAS_ASM_INLINE=y +CONFIG_CC_HAS_NO_PROFILE_FN_ATTR=y +CONFIG_PAHOLE_VERSION=124 +CONFIG_IRQ_WORK=y +CONFIG_BUILDTIME_TABLE_SORT=y +CONFIG_THREAD_INFO_IN_TASK=y + +# +# General setup +# +CONFIG_INIT_ENV_ARG_LIMIT=32 +# CONFIG_COMPILE_TEST is not set +# CONFIG_WERROR is not set +CONFIG_LOCALVERSION="" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_BUILD_SALT="6.6.y.bsk.z-64k-arm64" +CONFIG_DEFAULT_INIT="" +CONFIG_DEFAULT_HOSTNAME="(none)" +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_SYSVIPC_COMPAT=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +# CONFIG_WATCH_QUEUE is not set +CONFIG_CROSS_MEMORY_ATTACH=y +# CONFIG_USELIB is not set +CONFIG_AUDIT=y +CONFIG_HAVE_ARCH_AUDITSYSCALL=y +CONFIG_AUDITSYSCALL=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_SHOW_LEVEL=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_IRQ_MIGRATION=y +CONFIG_GENERIC_IRQ_INJECTION=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_GENERIC_IRQ_CHIP=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_DOMAIN_HIERARCHY=y +CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS=y +CONFIG_GENERIC_IRQ_IPI=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_IRQ_MSI_IOMMU=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_SPARSE_IRQ=y +# CONFIG_GENERIC_IRQ_DEBUGFS is not set +# end of IRQ subsystem + +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_ARCH_HAS_TICK_BROADCAST=y +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y +CONFIG_HAVE_POSIX_CPU_TIMERS_TASK_WORK=y +CONFIG_POSIX_CPU_TIMERS_TASK_WORK=y +CONFIG_CONTEXT_TRACKING=y +CONFIG_CONTEXT_TRACKING_IDLE=y + +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ_COMMON=y +# CONFIG_HZ_PERIODIC is not set +CONFIG_NO_HZ_IDLE=y +# CONFIG_NO_HZ_FULL is not set +# CONFIG_NO_HZ is not set +CONFIG_HIGH_RES_TIMERS=y +# end of Timers subsystem + +CONFIG_BPF=y +CONFIG_HAVE_EBPF_JIT=y +CONFIG_ARCH_WANT_DEFAULT_BPF_JIT=y + +# +# BPF subsystem +# +CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT=y +# CONFIG_BPF_JIT_ALWAYS_ON is not set +CONFIG_BPF_JIT_DEFAULT_ON=y +# CONFIG_BPF_UNPRIV_DEFAULT_OFF is not set +# CONFIG_BPF_PRELOAD is not set +CONFIG_BPF_LSM=y +# end of BPF subsystem + +CONFIG_PREEMPT_BUILD=y +CONFIG_PREEMPT_NONE=y +# CONFIG_PREEMPT_VOLUNTARY is not set +# CONFIG_PREEMPT is not set +CONFIG_PREEMPT_COUNT=y +CONFIG_PREEMPTION=y +CONFIG_PREEMPT_DYNAMIC=y +# CONFIG_SCHED_CORE is not set + +# +# CPU/Task time and stats accounting +# +CONFIG_TICK_CPU_ACCOUNTING=y +# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set +# CONFIG_IRQ_TIME_ACCOUNTING is not set +CONFIG_HAVE_SCHED_AVG_IRQ=y +CONFIG_SCHED_THERMAL_PRESSURE=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_PSI=y +# CONFIG_PSI_DEFAULT_DISABLED is not set +# end of CPU/Task time and stats accounting + +CONFIG_CPU_ISOLATION=y + +# +# 
RCU Subsystem +# +CONFIG_TREE_RCU=y +CONFIG_PREEMPT_RCU=y +# CONFIG_RCU_EXPERT is not set +CONFIG_TREE_SRCU=y +CONFIG_TASKS_RCU_GENERIC=y +CONFIG_TASKS_RCU=y +CONFIG_TASKS_RUDE_RCU=y +CONFIG_TASKS_TRACE_RCU=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RCU_NEED_SEGCBLIST=y +# end of RCU Subsystem + +# CONFIG_IKCONFIG is not set +# CONFIG_IKHEADERS is not set +CONFIG_LOG_BUF_SHIFT=17 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +# CONFIG_PRINTK_INDEX is not set +CONFIG_GENERIC_SCHED_CLOCK=y + +# +# Scheduler features +# +# CONFIG_UCLAMP_TASK is not set +# end of Scheduler features + +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y +CONFIG_CC_HAS_INT128=y +CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5" +CONFIG_GCC10_NO_ARRAY_BOUNDS=y +CONFIG_CC_NO_ARRAY_BOUNDS=y +CONFIG_ARCH_SUPPORTS_INT128=y +CONFIG_NUMA_BALANCING=y +# CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set +CONFIG_CGROUPS=y +CONFIG_PAGE_COUNTER=y +# CONFIG_CGROUP_FAVOR_DYNMODS is not set +CONFIG_MEMCG=y +CONFIG_MEMCG_SWAP=y +CONFIG_MEMCG_BGD_RECLAIM=y +CONFIG_MEMCG_KMEM=y +CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_CFS_BANDWIDTH=y +# CONFIG_RT_GROUP_SCHED is not set +CONFIG_SCHED_MM_CID=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_RDMA=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_BPF=y +# CONFIG_CGROUP_MISC is not set +# CONFIG_CGROUP_DEBUG is not set +CONFIG_SOCK_CGROUP_DATA=y +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_TIME_NS=y +CONFIG_IPC_NS=y +CONFIG_USER_NS=y +CONFIG_PID_NS=y +CONFIG_NET_NS=y +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +CONFIG_RD_BZIP2=y +CONFIG_RD_LZMA=y +CONFIG_RD_XZ=y +CONFIG_RD_LZO=y +CONFIG_RD_LZ4=y +CONFIG_RD_ZSTD=y +# CONFIG_BOOT_CONFIG is not set +CONFIG_INITRAMFS_PRESERVE_MTIME=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_LD_ORPHAN_WARN=y +CONFIG_LD_ORPHAN_WARN_LEVEL="warn" +CONFIG_SYSCTL=y +CONFIG_HAVE_UID16=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_EXPERT=y +CONFIG_UID16=y +CONFIG_MULTIUSER=y +# CONFIG_SGETMASK_SYSCALL is not set +# CONFIG_SYSFS_SYSCALL is not set +CONFIG_FHANDLE=y +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_FUTEX_PI=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_IO_URING=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_MEMBARRIER=y +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_SELFTEST is not set +CONFIG_KALLSYMS_ALL=y +CONFIG_KALLSYMS_BASE_RELATIVE=y +CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y +CONFIG_KCMP=y +CONFIG_RSEQ=y +CONFIG_CACHESTAT_SYSCALL=y +# CONFIG_DEBUG_RSEQ is not set +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_GUEST_PERF_EVENTS=y +# CONFIG_PC104 is not set + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +# CONFIG_DEBUG_PERF_USE_VMALLOC is not set +# end of Kernel Performance Events And Counters + +CONFIG_SYSTEM_DATA_VERIFICATION=y +CONFIG_PROFILING=y +CONFIG_TRACEPOINTS=y + +# +# Kexec and crash features +# +CONFIG_CRASH_CORE=y +CONFIG_KEXEC_CORE=y +CONFIG_KEXEC=y +CONFIG_KEXEC_FILE=y +# CONFIG_KEXEC_SIG is not set +CONFIG_CRASH_DUMP=y +# end of Kexec and crash features +# end of General setup + +CONFIG_ARM64=y +CONFIG_GCC_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS=y +CONFIG_64BIT=y 
+CONFIG_MMU=y +CONFIG_ARM64_PAGE_SHIFT=16 +CONFIG_ARM64_CONT_PTE_SHIFT=5 +CONFIG_ARM64_CONT_PMD_SHIFT=5 +CONFIG_ARCH_MMAP_RND_BITS_MIN=14 +CONFIG_ARCH_MMAP_RND_BITS_MAX=29 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=7 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_CSUM=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_SMP=y +CONFIG_KERNEL_MODE_NEON=y +CONFIG_FIX_EARLYCON_MEM=y +CONFIG_PGTABLE_LEVELS=3 +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_ARCH_PROC_KCORE_TEXT=y +CONFIG_BUILTIN_RETURN_ADDRESS_STRIPS_PAC=y + +# +# Platform selection +# +# CONFIG_ARCH_ACTIONS is not set +CONFIG_ARCH_SUNXI=y +# CONFIG_ARCH_ALPINE is not set +# CONFIG_ARCH_APPLE is not set +# CONFIG_ARCH_BCM is not set +# CONFIG_ARCH_BERLIN is not set +# CONFIG_ARCH_BITMAIN is not set +# CONFIG_ARCH_EXYNOS is not set +# CONFIG_ARCH_SPARX5 is not set +# CONFIG_ARCH_K3 is not set +# CONFIG_ARCH_LG1K is not set +CONFIG_ARCH_HISI=y +# CONFIG_ARCH_KEEMBAY is not set +# CONFIG_ARCH_MEDIATEK is not set +CONFIG_ARCH_MESON=y +CONFIG_ARCH_MVEBU=y +# CONFIG_ARCH_NXP is not set +# CONFIG_ARCH_MA35 is not set +# CONFIG_ARCH_NPCM is not set +CONFIG_ARCH_QCOM=y +# CONFIG_ARCH_REALTEK is not set +# CONFIG_ARCH_RENESAS is not set +CONFIG_ARCH_ROCKCHIP=y +CONFIG_ARCH_SEATTLE=y +# CONFIG_ARCH_INTEL_SOCFPGA is not set +# CONFIG_ARCH_STM32 is not set +CONFIG_ARCH_SYNQUACER=y +CONFIG_ARCH_TEGRA=y +# CONFIG_ARCH_SPRD is not set +CONFIG_ARCH_THUNDER=y +CONFIG_ARCH_THUNDER2=y +# CONFIG_ARCH_UNIPHIER is not set +CONFIG_ARCH_VEXPRESS=y +# CONFIG_ARCH_VISCONTI is not set +CONFIG_ARCH_XGENE=y +CONFIG_ARCH_ZYNQMP=y +# end of Platform selection + +# +# Kernel Features +# + +# +# ARM errata workarounds via the alternatives framework +# +CONFIG_AMPERE_ERRATUM_AC03_CPU_38=y +CONFIG_ARM64_WORKAROUND_CLEAN_CACHE=y +CONFIG_ARM64_ERRATUM_826319=y +CONFIG_ARM64_ERRATUM_827319=y +CONFIG_ARM64_ERRATUM_824069=y +CONFIG_ARM64_ERRATUM_819472=y +CONFIG_ARM64_ERRATUM_832075=y +CONFIG_ARM64_ERRATUM_834220=y +CONFIG_ARM64_ERRATUM_1742098=y +CONFIG_ARM64_ERRATUM_845719=y +CONFIG_ARM64_ERRATUM_843419=y +CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419=y +CONFIG_ARM64_ERRATUM_1024718=y +CONFIG_ARM64_ERRATUM_1418040=y +CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT=y +CONFIG_ARM64_ERRATUM_1165522=y +CONFIG_ARM64_ERRATUM_1319367=y +CONFIG_ARM64_ERRATUM_1530923=y +CONFIG_ARM64_WORKAROUND_REPEAT_TLBI=y +CONFIG_ARM64_ERRATUM_2441007=y +CONFIG_ARM64_ERRATUM_1286807=y +CONFIG_ARM64_ERRATUM_1463225=y +CONFIG_ARM64_ERRATUM_1542419=y +CONFIG_ARM64_ERRATUM_1508412=y +CONFIG_ARM64_ERRATUM_2051678=y +CONFIG_ARM64_ERRATUM_2077057=y +CONFIG_ARM64_ERRATUM_2658417=y +CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE=y +CONFIG_ARM64_ERRATUM_2054223=y +CONFIG_ARM64_ERRATUM_2067961=y +CONFIG_ARM64_ERRATUM_2441009=y +CONFIG_ARM64_ERRATUM_2457168=y +CONFIG_ARM64_ERRATUM_2645198=y +CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD=y +CONFIG_ARM64_ERRATUM_2966298=y +CONFIG_ARM64_ERRATUM_3117295=y +CONFIG_ARM64_ERRATUM_3194386=y +CONFIG_CAVIUM_ERRATUM_22375=y +CONFIG_CAVIUM_ERRATUM_23144=y +CONFIG_CAVIUM_ERRATUM_23154=y +CONFIG_CAVIUM_ERRATUM_27456=y +CONFIG_CAVIUM_ERRATUM_30115=y +CONFIG_CAVIUM_TX2_ERRATUM_219=y +CONFIG_FUJITSU_ERRATUM_010001=y +CONFIG_HISILICON_ERRATUM_161600802=y +CONFIG_QCOM_FALKOR_ERRATUM_1003=y +CONFIG_QCOM_FALKOR_ERRATUM_1009=y +CONFIG_QCOM_QDF2400_ERRATUM_0065=y +CONFIG_QCOM_FALKOR_ERRATUM_E1041=y 
+CONFIG_NVIDIA_CARMEL_CNP_ERRATUM=y +CONFIG_ROCKCHIP_ERRATUM_3588001=y +CONFIG_SOCIONEXT_SYNQUACER_PREITS=y +# end of ARM errata workarounds via the alternatives framework + +# CONFIG_ARM64_4K_PAGES is not set +# CONFIG_ARM64_16K_PAGES is not set +CONFIG_ARM64_64K_PAGES=y +# CONFIG_ARM64_VA_BITS_42 is not set +CONFIG_ARM64_VA_BITS_48=y +# CONFIG_ARM64_VA_BITS_52 is not set +CONFIG_ARM64_VA_BITS=48 +CONFIG_ARM64_PA_BITS_48=y +# CONFIG_ARM64_PA_BITS_52 is not set +CONFIG_ARM64_PA_BITS=48 +# CONFIG_CPU_BIG_ENDIAN is not set +CONFIG_CPU_LITTLE_ENDIAN=y +CONFIG_SCHED_MC=y +# CONFIG_SCHED_CLUSTER is not set +CONFIG_SCHED_SMT=y +CONFIG_NR_CPUS=512 +CONFIG_HOTPLUG_CPU=y +CONFIG_NUMA=y +CONFIG_NODES_SHIFT=6 +# CONFIG_HZ_100 is not set +CONFIG_HZ_250=y +# CONFIG_HZ_300 is not set +# CONFIG_HZ_1000 is not set +CONFIG_HZ=250 +CONFIG_SCHED_HRTICK=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_HW_PERF_EVENTS=y +CONFIG_CC_HAVE_SHADOW_CALL_STACK=y +CONFIG_PARAVIRT=y +CONFIG_PARAVIRT_TIME_ACCOUNTING=y +CONFIG_ARCH_SUPPORTS_KEXEC=y +CONFIG_ARCH_SUPPORTS_KEXEC_FILE=y +CONFIG_ARCH_SELECTS_KEXEC_FILE=y +CONFIG_ARCH_SUPPORTS_KEXEC_SIG=y +CONFIG_ARCH_SUPPORTS_KEXEC_IMAGE_VERIFY_SIG=y +CONFIG_ARCH_DEFAULT_KEXEC_IMAGE_VERIFY_SIG=y +CONFIG_ARCH_SUPPORTS_CRASH_DUMP=y +CONFIG_TRANS_TABLE=y +CONFIG_XEN_DOM0=y +CONFIG_XEN=y +CONFIG_ARCH_FORCE_MAX_ORDER=13 +CONFIG_UNMAP_KERNEL_AT_EL0=y +CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY=y +CONFIG_RODATA_FULL_DEFAULT_ENABLED=y +# CONFIG_ARM64_SW_TTBR0_PAN is not set +CONFIG_ARM64_TAGGED_ADDR_ABI=y +CONFIG_COMPAT=y +CONFIG_KUSER_HELPERS=y +# CONFIG_COMPAT_ALIGNMENT_FIXUPS is not set +CONFIG_ARMV8_DEPRECATED=y +CONFIG_SWP_EMULATION=y +CONFIG_CP15_BARRIER_EMULATION=y +CONFIG_SETEND_EMULATION=y + +# +# ARMv8.1 architectural features +# +CONFIG_ARM64_HW_AFDBM=y +CONFIG_ARM64_PAN=y +CONFIG_AS_HAS_LSE_ATOMICS=y +CONFIG_ARM64_LSE_ATOMICS=y +CONFIG_ARM64_USE_LSE_ATOMICS=y +# end of ARMv8.1 architectural features + +# +# ARMv8.2 architectural features +# +CONFIG_AS_HAS_ARMV8_2=y +CONFIG_AS_HAS_SHA3=y +CONFIG_ARM64_PMEM=y +CONFIG_ARM64_RAS_EXTN=y +CONFIG_ARM64_CNP=y +# end of ARMv8.2 architectural features + +# +# ARMv8.3 architectural features +# +CONFIG_ARM64_PTR_AUTH=y +CONFIG_ARM64_PTR_AUTH_KERNEL=y +CONFIG_CC_HAS_BRANCH_PROT_PAC_RET=y +CONFIG_CC_HAS_SIGN_RETURN_ADDRESS=y +CONFIG_AS_HAS_ARMV8_3=y +CONFIG_AS_HAS_CFI_NEGATE_RA_STATE=y +CONFIG_AS_HAS_LDAPR=y +# end of ARMv8.3 architectural features + +# +# ARMv8.4 architectural features +# +CONFIG_ARM64_AMU_EXTN=y +CONFIG_AS_HAS_ARMV8_4=y +CONFIG_ARM64_TLB_RANGE=y +# end of ARMv8.4 architectural features + +# +# ARMv8.5 architectural features +# +CONFIG_AS_HAS_ARMV8_5=y +CONFIG_ARM64_BTI=y +CONFIG_CC_HAS_BRANCH_PROT_PAC_RET_BTI=y +CONFIG_ARM64_E0PD=y +CONFIG_ARM64_AS_HAS_MTE=y +CONFIG_ARM64_MTE=y +# end of ARMv8.5 architectural features + +# +# ARMv8.7 architectural features +# +CONFIG_ARM64_EPAN=y +# end of ARMv8.7 architectural features + +CONFIG_ARM64_SVE=y +CONFIG_ARM64_PSEUDO_NMI=y +# CONFIG_ARM64_DEBUG_PRIORITY_MASKING is not set +CONFIG_RELOCATABLE=y +CONFIG_RANDOMIZE_BASE=y +CONFIG_RANDOMIZE_MODULE_REGION_FULL=y +CONFIG_CC_HAVE_STACKPROTECTOR_SYSREG=y +CONFIG_STACKPROTECTOR_PER_TASK=y +CONFIG_ARM64_CONTPTE=y +CONFIG_HAVE_LIVEPATCH=y +CONFIG_LIVEPATCH=y +# end of Kernel Features + +# +# Boot options +# +CONFIG_ARM64_ACPI_PARKING_PROTOCOL=y +CONFIG_CMDLINE="" +CONFIG_EFI_STUB=y +CONFIG_EFI=y +CONFIG_DMI=y +# end of Boot options + +# +# Power management options +# +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +# CONFIG_SUSPEND_SKIP_SYNC is not 
set +CONFIG_HIBERNATE_CALLBACKS=y +CONFIG_HIBERNATION=y +CONFIG_HIBERNATION_SNAPSHOT_DEV=y +CONFIG_PM_STD_PARTITION="" +CONFIG_PM_SLEEP=y +CONFIG_PM_SLEEP_SMP=y +# CONFIG_PM_AUTOSLEEP is not set +# CONFIG_PM_USERSPACE_AUTOSLEEP is not set +# CONFIG_PM_WAKELOCKS is not set +CONFIG_PM=y +CONFIG_PM_DEBUG=y +CONFIG_PM_ADVANCED_DEBUG=y +# CONFIG_PM_TEST_SUSPEND is not set +CONFIG_PM_SLEEP_DEBUG=y +# CONFIG_DPM_WATCHDOG is not set +CONFIG_PM_CLK=y +CONFIG_PM_GENERIC_DOMAINS=y +# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set +CONFIG_PM_GENERIC_DOMAINS_SLEEP=y +CONFIG_PM_GENERIC_DOMAINS_OF=y +CONFIG_CPU_PM=y +CONFIG_ENERGY_MODEL=y +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_HIBERNATION_HEADER=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +# end of Power management options + +# +# CPU Power Management +# + +# +# CPU Idle +# +CONFIG_CPU_IDLE=y +CONFIG_CPU_IDLE_GOV_LADDER=y +CONFIG_CPU_IDLE_GOV_MENU=y +# CONFIG_CPU_IDLE_GOV_TEO is not set + +# +# ARM CPU Idle Drivers +# +# CONFIG_ARM_PSCI_CPUIDLE is not set +# end of ARM CPU Idle Drivers +# end of CPU Idle + +# +# CPU Frequency scaling +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_ATTR_SET=y +CONFIG_CPU_FREQ_GOV_COMMON=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=m +CONFIG_CPU_FREQ_GOV_USERSPACE=m +CONFIG_CPU_FREQ_GOV_ONDEMAND=m +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m +CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y + +# +# CPU frequency scaling drivers +# +CONFIG_CPUFREQ_DT=m +CONFIG_CPUFREQ_DT_PLATDEV=y +CONFIG_ACPI_CPPC_CPUFREQ=m +CONFIG_ACPI_CPPC_CPUFREQ_FIE=y +CONFIG_ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM=m +CONFIG_ARM_ARMADA_37XX_CPUFREQ=m +# CONFIG_ARM_ARMADA_8K_CPUFREQ is not set +# CONFIG_ARM_QCOM_CPUFREQ_HW is not set +CONFIG_ARM_TEGRA20_CPUFREQ=m +CONFIG_ARM_TEGRA124_CPUFREQ=y +# CONFIG_ARM_TEGRA186_CPUFREQ is not set +CONFIG_ARM_TEGRA194_CPUFREQ=y +# end of CPU Frequency scaling +# end of CPU Power Management + +CONFIG_ARCH_SUPPORTS_ACPI=y +CONFIG_ACPI=y +CONFIG_ACPI_GENERIC_GSI=y +CONFIG_ACPI_CCA_REQUIRED=y +CONFIG_ACPI_THERMAL_LIB=y +# CONFIG_ACPI_DEBUGGER is not set +CONFIG_ACPI_SPCR_TABLE=y +# CONFIG_ACPI_FPDT is not set +# CONFIG_ACPI_EC_DEBUGFS is not set +CONFIG_ACPI_AC=y +CONFIG_ACPI_BATTERY=y +CONFIG_ACPI_BUTTON=y +CONFIG_ACPI_VIDEO=m +CONFIG_ACPI_FAN=y +CONFIG_ACPI_TAD=m +# CONFIG_ACPI_DOCK is not set +CONFIG_ACPI_PROCESSOR_IDLE=y +CONFIG_ACPI_MCFG=y +CONFIG_ACPI_CPPC_LIB=y +CONFIG_ACPI_PROCESSOR=y +CONFIG_ACPI_IPMI=m +CONFIG_ACPI_HOTPLUG_CPU=y +CONFIG_ACPI_THERMAL=y +CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y +CONFIG_ACPI_TABLE_UPGRADE=y +# CONFIG_ACPI_DEBUG is not set +CONFIG_ACPI_PCI_SLOT=y +CONFIG_ACPI_CONTAINER=y +# CONFIG_ACPI_HOTPLUG_MEMORY is not set +CONFIG_ACPI_HED=y +# CONFIG_ACPI_CUSTOM_METHOD is not set +CONFIG_ACPI_BGRT=y +CONFIG_ACPI_REDUCED_HARDWARE_ONLY=y +CONFIG_ACPI_NFIT=m +# CONFIG_NFIT_SECURITY_DEBUG is not set +CONFIG_ACPI_NUMA=y +# CONFIG_ACPI_HMAT is not set +CONFIG_HAVE_ACPI_APEI=y +CONFIG_ACPI_APEI=y +CONFIG_ACPI_APEI_GHES=y +CONFIG_ACPI_APEI_PCIEAER=y +CONFIG_ACPI_APEI_SEA=y +CONFIG_ACPI_APEI_MEMORY_FAILURE=y +CONFIG_ACPI_APEI_EINJ=m +# CONFIG_ACPI_APEI_ERST_DEBUG is not set +CONFIG_ACPI_WATCHDOG=y +# CONFIG_ACPI_CONFIGFS is not set +# CONFIG_ACPI_PFRUT is not set +CONFIG_ACPI_IORT=y 
+CONFIG_ACPI_GTDT=y +# CONFIG_ACPI_AGDI is not set +CONFIG_ACPI_APMT=y +CONFIG_ACPI_PPTT=y +CONFIG_ACPI_PCC=y +# CONFIG_ACPI_FFH is not set +# CONFIG_PMIC_OPREGION is not set +CONFIG_ACPI_PRMT=y +CONFIG_IRQ_BYPASS_MANAGER=y +CONFIG_HAVE_KVM=y +CONFIG_HAVE_KVM_IRQCHIP=y +CONFIG_HAVE_KVM_IRQFD=y +CONFIG_HAVE_KVM_IRQ_ROUTING=y +CONFIG_HAVE_KVM_DIRTY_RING=y +CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL=y +CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP=y +CONFIG_HAVE_KVM_EVENTFD=y +CONFIG_KVM_MMIO=y +CONFIG_HAVE_KVM_MSI=y +CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y +CONFIG_KVM_VFIO=y +CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y +CONFIG_HAVE_KVM_IRQ_BYPASS=y +CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE=y +CONFIG_KVM_XFER_TO_GUEST_WORK=y +CONFIG_KVM_GENERIC_HARDWARE_ENABLING=y +CONFIG_KVM_GENERIC_MMU_NOTIFIER=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=y +# CONFIG_NVHE_EL2_DEBUG is not set +CONFIG_CPU_MITIGATIONS=y + +# +# General architecture-dependent options +# +CONFIG_ARCH_HAS_SUBPAGE_FAULTS=y +CONFIG_HOTPLUG_CORE_SYNC=y +CONFIG_HOTPLUG_CORE_SYNC_DEAD=y +CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y +# CONFIG_STATIC_KEYS_SELFTEST is not set +CONFIG_UPROBES=y +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_KRETPROBES=y +CONFIG_HAVE_IOREMAP_PROT=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE=y +CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y +CONFIG_HAVE_NMI=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_NMI_SUPPORT=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_GENERIC_IDLE_POLL_SETUP=y +CONFIG_ARCH_HAS_FORTIFY_SOURCE=y +CONFIG_ARCH_HAS_KEEPINITRD=y +CONFIG_ARCH_HAS_SET_MEMORY=y +CONFIG_ARCH_HAS_SET_DIRECT_MAP=y +CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y +CONFIG_ARCH_WANTS_NO_INSTR=y +CONFIG_HAVE_ASM_MODVERSIONS=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_RSEQ=y +CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_HAVE_PERF_EVENTS_NMI=y +CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF=y +CONFIG_HAVE_PERF_REGS=y +CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y +CONFIG_MMU_GATHER_TABLE_FREE=y +CONFIG_MMU_GATHER_RCU_TABLE_FREE=y +CONFIG_MMU_LAZY_TLB_REFCOUNT=y +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y +CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS=y +CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y +CONFIG_HAVE_CMPXCHG_LOCAL=y +CONFIG_HAVE_CMPXCHG_DOUBLE=y +CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y +CONFIG_HAVE_ARCH_SECCOMP=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_SECCOMP=y +CONFIG_SECCOMP_FILTER=y +# CONFIG_SECCOMP_CACHE_DEBUG is not set +CONFIG_HAVE_ARCH_STACKLEAK=y +CONFIG_HAVE_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR_STRONG=y +CONFIG_ARCH_SUPPORTS_SHADOW_CALL_STACK=y +# CONFIG_SHADOW_CALL_STACK is not set +CONFIG_ARCH_SUPPORTS_LTO_CLANG=y +CONFIG_ARCH_SUPPORTS_LTO_CLANG_THIN=y +CONFIG_LTO_NONE=y +CONFIG_ARCH_SUPPORTS_CFI_CLANG=y +CONFIG_HAVE_CONTEXT_TRACKING_USER=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_MOVE_PUD=y +CONFIG_HAVE_MOVE_PMD=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_HAVE_ARCH_HUGE_VMAP=y +CONFIG_HAVE_ARCH_HUGE_VMALLOC=y +CONFIG_ARCH_WANT_PMD_MKWRITE=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK=y +CONFIG_SOFTIRQ_ON_OWN_STACK=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_HAVE_ARCH_MMAP_RND_BITS=y +CONFIG_ARCH_MMAP_RND_BITS=18 +CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=11 
+CONFIG_PAGE_SIZE_LESS_THAN_256KB=y +CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT=y +CONFIG_HAVE_RELIABLE_STACKTRACE=y +CONFIG_CLONE_BACKWARDS=y +CONFIG_OLD_SIGSUSPEND3=y +CONFIG_COMPAT_OLD_SIGACTION=y +CONFIG_COMPAT_32BIT_TIME=y +CONFIG_HAVE_ARCH_VMAP_STACK=y +CONFIG_VMAP_STACK=y +CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET=y +CONFIG_RANDOMIZE_KSTACK_OFFSET=y +# CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT is not set +CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y +CONFIG_STRICT_KERNEL_RWX=y +CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y +CONFIG_STRICT_MODULE_RWX=y +CONFIG_HAVE_ARCH_COMPILER_H=y +CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y +CONFIG_ARCH_USE_MEMREMAP_PROT=y +# CONFIG_LOCK_EVENT_COUNTS is not set +CONFIG_ARCH_HAS_RELR=y +CONFIG_HAVE_PREEMPT_DYNAMIC=y +CONFIG_HAVE_PREEMPT_DYNAMIC_KEY=y +CONFIG_ARCH_WANT_LD_ORPHAN_WARN=y +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y +CONFIG_ARCH_SUPPORTS_PAGE_TABLE_CHECK=y +CONFIG_ARCH_HAVE_TRACE_MMIO_ACCESS=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y +# end of GCOV-based kernel profiling + +CONFIG_HAVE_GCC_PLUGINS=y +CONFIG_FUNCTION_ALIGNMENT_4B=y +CONFIG_FUNCTION_ALIGNMENT_8B=y +CONFIG_FUNCTION_ALIGNMENT=8 +# end of General architecture-dependent options + +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULE_SIG_FORMAT=y +CONFIG_MODULES=y +# CONFIG_MODULE_DEBUG is not set +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +# CONFIG_MODULE_UNLOAD_TAINT_TRACKING is not set +CONFIG_MODVERSIONS=y +CONFIG_ASM_MODVERSIONS=y +# CONFIG_MODULE_SRCVERSION_ALL is not set +CONFIG_MODULE_SIG=y +# CONFIG_MODULE_SIG_FORCE is not set +# CONFIG_MODULE_SIG_ALL is not set +# CONFIG_MODULE_SIG_SHA1 is not set +# CONFIG_MODULE_SIG_SHA224 is not set +CONFIG_MODULE_SIG_SHA256=y +# CONFIG_MODULE_SIG_SHA384 is not set +# CONFIG_MODULE_SIG_SHA512 is not set +CONFIG_MODULE_SIG_HASH="sha256" +CONFIG_MODULE_COMPRESS_NONE=y +# CONFIG_MODULE_COMPRESS_GZIP is not set +# CONFIG_MODULE_COMPRESS_XZ is not set +# CONFIG_MODULE_COMPRESS_ZSTD is not set +# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set +CONFIG_MODPROBE_PATH="/sbin/modprobe" +# CONFIG_TRIM_UNUSED_KSYMS is not set +CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_BLOCK=y +CONFIG_BLOCK_LEGACY_AUTOLOAD=y +CONFIG_BLK_RQ_ALLOC_TIME=y +CONFIG_BLK_CGROUP_RWSTAT=y +CONFIG_BLK_CGROUP_PUNT_BIO=y +CONFIG_BLK_DEV_BSG_COMMON=y +CONFIG_BLK_ICQ=y +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_BLK_DEV_INTEGRITY=y +CONFIG_BLK_DEV_INTEGRITY_T10=m +CONFIG_BLK_DEV_ZONED=y +CONFIG_BLK_DEV_THROTTLING=y +# CONFIG_BLK_DEV_THROTTLING_LOW is not set +CONFIG_BLK_WBT=y +CONFIG_BLK_WBT_MQ=y +# CONFIG_BLK_CGROUP_IOLATENCY is not set +# CONFIG_BLK_CGROUP_FC_APPID is not set +CONFIG_BLK_CGROUP_IOCOST=y +# CONFIG_BLK_CGROUP_IOPRIO is not set +CONFIG_BLK_DEBUG_FS=y +CONFIG_BLK_DEBUG_FS_ZONED=y +CONFIG_BLK_SED_OPAL=y +# CONFIG_BLK_INLINE_ENCRYPTION is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_AIX_PARTITION is not set +# CONFIG_OSF_PARTITION is not set +# CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +# CONFIG_MAC_PARTITION is not set +CONFIG_MSDOS_PARTITION=y +# CONFIG_BSD_DISKLABEL is not set +# CONFIG_MINIX_SUBPARTITION is not set +# CONFIG_SOLARIS_X86_PARTITION is not set +# CONFIG_UNIXWARE_DISKLABEL is not set +# CONFIG_LDM_PARTITION is not set +# CONFIG_SGI_PARTITION is not set +# CONFIG_ULTRIX_PARTITION is not set +# CONFIG_SUN_PARTITION is not set +CONFIG_KARMA_PARTITION=y +CONFIG_EFI_PARTITION=y +# 
CONFIG_SYSV68_PARTITION is not set +# CONFIG_CMDLINE_PARTITION is not set +# end of Partition Types + +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_VIRTIO=y +CONFIG_BLK_PM=y +CONFIG_BLOCK_HOLDER_DEPRECATED=y +CONFIG_BLK_MQ_STACKING=y + +# +# IO Schedulers +# +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MQ_IOSCHED_KYBER=m +CONFIG_IOSCHED_BFQ=m +CONFIG_BFQ_GROUP_IOSCHED=y +# CONFIG_BFQ_CGROUP_DEBUG is not set +# end of IO Schedulers + +CONFIG_PREEMPT_NOTIFIERS=y +CONFIG_PADATA=y +CONFIG_ASN1=y +CONFIG_UNINLINE_SPIN_UNLOCK=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_RWSEM_SPIN_ON_OWNER=y +CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y +CONFIG_QUEUED_SPINLOCKS=y +CONFIG_ARCH_USE_QUEUED_RWLOCKS=y +CONFIG_QUEUED_RWLOCKS=y +CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE=y +CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y +CONFIG_FREEZER=y + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +CONFIG_COMPAT_BINFMT_ELF=y +CONFIG_ARCH_BINFMT_ELF_STATE=y +CONFIG_ARCH_BINFMT_ELF_EXTRA_PHDRS=y +CONFIG_ARCH_HAVE_ELF_PROT=y +CONFIG_ARCH_USE_GNU_PROPERTY=y +CONFIG_ELFCORE=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +CONFIG_BINFMT_SCRIPT=y +CONFIG_BINFMT_MISC=y +CONFIG_COREDUMP=y +# end of Executable file formats + +# +# Memory Management options +# +CONFIG_ZPOOL=y +CONFIG_SWAP=y +CONFIG_ZSWAP=y +# CONFIG_ZSWAP_DEFAULT_ON is not set +# CONFIG_ZSWAP_SHRINKER_DEFAULT_ON is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_DEFLATE is not set +CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO=y +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_842 is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4 is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4HC is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD is not set +CONFIG_ZSWAP_COMPRESSOR_DEFAULT="lzo" +CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y +# CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD_DEPRECATED is not set +# CONFIG_ZSWAP_ZPOOL_DEFAULT_ZSMALLOC is not set +CONFIG_ZSWAP_ZPOOL_DEFAULT="zbud" +CONFIG_ZBUD=y +# CONFIG_Z3FOLD_DEPRECATED is not set +CONFIG_ZSMALLOC=m +# CONFIG_ZSMALLOC_STAT is not set +CONFIG_ZSMALLOC_CHAIN_SIZE=8 + +# +# SLAB allocator options +# +# CONFIG_SLAB_DEPRECATED is not set +CONFIG_SLUB=y +# CONFIG_SLUB_TINY is not set +CONFIG_SLAB_MERGE_DEFAULT=y +CONFIG_SLAB_FREELIST_RANDOM=y +CONFIG_SLAB_FREELIST_HARDENED=y +# CONFIG_SLUB_STATS is not set +CONFIG_SLUB_CPU_PARTIAL=y +# CONFIG_RANDOM_KMALLOC_CACHES is not set +# end of SLAB allocator options + +CONFIG_SHUFFLE_PAGE_ALLOCATOR=y +# CONFIG_COMPAT_BRK is not set +CONFIG_SPARSEMEM=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_HAVE_FAST_GUP=y +CONFIG_ARCH_KEEP_MEMBLOCK=y +CONFIG_NUMA_KEEP_MEMINFO=y +CONFIG_MEMORY_ISOLATION=y +CONFIG_EXCLUSIVE_SYSTEM_RAM=y +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y +CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y +CONFIG_MEMORY_HOTPLUG=y +# CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE is not set +CONFIG_MEMORY_HOTREMOVE=y +CONFIG_MHP_MEMMAP_ON_MEMORY=y +CONFIG_ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y +CONFIG_MEMORY_BALLOON=y +CONFIG_BALLOON_COMPACTION=y +CONFIG_COMPACTION=y +CONFIG_COMPACT_UNEVICTABLE_DEFAULT=1 +CONFIG_PAGE_REPORTING=y +CONFIG_MIGRATION=y +CONFIG_DEVICE_MIGRATION=y +CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y +CONFIG_ARCH_ENABLE_THP_MIGRATION=y +CONFIG_CONTIG_ALLOC=y +CONFIG_PCP_BATCH_SCALE_MAX=5 +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_MMU_NOTIFIER=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 +CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y +CONFIG_MEMORY_FAILURE=y +CONFIG_HWPOISON_INJECT=m 
+CONFIG_TRANSPARENT_HUGEPAGE=y +# CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS is not set +CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y +# CONFIG_READ_ONLY_THP_FOR_FS is not set +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y +CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y +CONFIG_USE_PERCPU_NUMA_NODE_ID=y +CONFIG_HAVE_SETUP_PER_CPU_AREA=y +# CONFIG_CMA is not set +CONFIG_GENERIC_EARLY_IOREMAP=y +CONFIG_DEFERRED_STRUCT_PAGE_INIT=y +# CONFIG_IDLE_PAGE_TRACKING is not set +CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y +CONFIG_ARCH_HAS_CURRENT_STACK_POINTER=y +CONFIG_ARCH_HAS_PTE_DEVMAP=y +CONFIG_ARCH_HAS_ZONE_DMA_SET=y +CONFIG_ZONE_DMA=y +CONFIG_ZONE_DMA32=y +CONFIG_ZONE_DEVICE=y +CONFIG_HMM_MIRROR=y +CONFIG_GET_FREE_REGION=y +CONFIG_DEVICE_PRIVATE=y +CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y +CONFIG_ARCH_USES_PG_ARCH_X=y +CONFIG_VM_EVENT_COUNTERS=y +# CONFIG_PERCPU_STATS is not set +# CONFIG_GUP_TEST is not set +# CONFIG_DMAPOOL_TEST is not set +CONFIG_ARCH_HAS_PTE_SPECIAL=y +CONFIG_MEMFD_CREATE=y +CONFIG_SECRETMEM=y +# CONFIG_ANON_VMA_NAME is not set +CONFIG_USERFAULTFD=y +CONFIG_HAVE_ARCH_USERFAULTFD_MINOR=y +# CONFIG_LRU_GEN is not set +CONFIG_ARCH_SUPPORTS_PER_VMA_LOCK=y +CONFIG_PER_VMA_LOCK=y +CONFIG_LOCK_MM_AND_FIND_VMA=y + +# +# Data Access Monitoring +# +# CONFIG_DAMON is not set +# end of Data Access Monitoring +# end of Memory Management options + +CONFIG_NET=y +CONFIG_COMPAT_NETLINK_MESSAGES=y +CONFIG_NET_INGRESS=y +CONFIG_NET_EGRESS=y +CONFIG_NET_XGRESS=y +CONFIG_NET_REDIRECT=y +CONFIG_SKB_EXTENSIONS=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=m +CONFIG_UNIX=y +CONFIG_UNIX_SCM=y +CONFIG_AF_UNIX_OOB=y +CONFIG_UNIX_DIAG=m +# CONFIG_TLS is not set +CONFIG_XFRM=y +CONFIG_XFRM_OFFLOAD=y +CONFIG_XFRM_ALGO=m +CONFIG_XFRM_USER=m +CONFIG_XFRM_INTERFACE=m +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_MIGRATE=y +CONFIG_XFRM_STATISTICS=y +CONFIG_XFRM_AH=m +CONFIG_XFRM_ESP=m +CONFIG_XFRM_IPCOMP=m +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_SMC=m +CONFIG_SMC_DIAG=m +CONFIG_XDP_SOCKETS=y +# CONFIG_XDP_SOCKETS_DIAG is not set +CONFIG_NET_HANDSHAKE=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_ROUTE_CLASSID=y +# CONFIG_IP_PNP is not set +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_NET_IP_TUNNEL=m +CONFIG_NET_IPGRE=m +CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_IP_MROUTE_COMMON=y +CONFIG_IP_MROUTE=y +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_NET_IPVTI=m +CONFIG_NET_UDP_TUNNEL=m +CONFIG_NET_FOU=m +CONFIG_NET_FOU_IP_TUNNELS=y +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m +# CONFIG_INET_ESPINTCP is not set +CONFIG_INET_IPCOMP=m +CONFIG_INET_TABLE_PERTURB_ORDER=16 +CONFIG_INET_XFRM_TUNNEL=m +CONFIG_INET_TUNNEL=m +CONFIG_INET_DIAG=m +CONFIG_INET_TCP_DIAG=m +CONFIG_INET_UDP_DIAG=m +CONFIG_INET_RAW_DIAG=m +CONFIG_INET_DIAG_DESTROY=y +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_BIC=m +CONFIG_TCP_CONG_CUBIC=y +CONFIG_TCP_CONG_WESTWOOD=m +CONFIG_TCP_CONG_HTCP=m +CONFIG_TCP_CONG_HSTCP=m +CONFIG_TCP_CONG_HYBLA=m +CONFIG_TCP_CONG_VEGAS=m +CONFIG_TCP_CONG_NV=m +CONFIG_TCP_CONG_SCALABLE=m +CONFIG_TCP_CONG_LP=m +CONFIG_TCP_CONG_VENO=m +CONFIG_TCP_CONG_YEAH=m +CONFIG_TCP_CONG_ILLINOIS=m +CONFIG_TCP_CONG_DCTCP=m +CONFIG_TCP_CONG_CDG=m +CONFIG_TCP_CONG_BBR=m +CONFIG_DEFAULT_CUBIC=y +# CONFIG_DEFAULT_RENO is not set +CONFIG_DEFAULT_TCP_CONG="cubic" +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=y +CONFIG_IPV6_ROUTER_PREF=y 
+CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +# CONFIG_INET6_ESPINTCP is not set +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=y +CONFIG_IPV6_ILA=m +CONFIG_INET6_XFRM_TUNNEL=m +CONFIG_INET6_TUNNEL=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=m +CONFIG_IPV6_GRE=m +CONFIG_IPV6_FOU=m +CONFIG_IPV6_FOU_TUNNEL=m +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +CONFIG_IPV6_SEG6_LWTUNNEL=y +CONFIG_IPV6_SEG6_HMAC=y +CONFIG_IPV6_SEG6_BPF=y +# CONFIG_IPV6_RPL_LWTUNNEL is not set +# CONFIG_IPV6_IOAM6_LWTUNNEL is not set +# CONFIG_NETLABEL is not set +# CONFIG_MPTCP is not set +CONFIG_NETWORK_SECMARK=y +CONFIG_NET_PTP_CLASSIFY=y +# CONFIG_NETWORK_PHY_TIMESTAMPING is not set +CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=m + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_INGRESS=y +CONFIG_NETFILTER_EGRESS=y +CONFIG_NETFILTER_SKIP_EGRESS=y +CONFIG_NETFILTER_NETLINK=m +CONFIG_NETFILTER_FAMILY_BRIDGE=y +CONFIG_NETFILTER_FAMILY_ARP=y +CONFIG_NETFILTER_BPF_LINK=y +# CONFIG_NETFILTER_NETLINK_HOOK is not set +CONFIG_NETFILTER_NETLINK_ACCT=m +CONFIG_NETFILTER_NETLINK_QUEUE=m +CONFIG_NETFILTER_NETLINK_LOG=m +CONFIG_NETFILTER_NETLINK_OSF=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_LOG_SYSLOG=m +CONFIG_NETFILTER_CONNCOUNT=m +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_PROCFS=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_LABELS=y +CONFIG_NF_CONNTRACK_OVS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_BROADCAST=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_NAT=m +CONFIG_NF_NAT_AMANDA=m +CONFIG_NF_NAT_FTP=m +CONFIG_NF_NAT_IRC=m +CONFIG_NF_NAT_SIP=m +CONFIG_NF_NAT_TFTP=m +CONFIG_NF_NAT_REDIRECT=y +CONFIG_NF_NAT_MASQUERADE=y +CONFIG_NF_NAT_OVS=y +CONFIG_NETFILTER_SYNPROXY=m +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_FLOW_OFFLOAD=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_REJECT_INET=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_FIB=m +CONFIG_NFT_FIB_INET=m +# CONFIG_NFT_XFRM is not set +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m +# CONFIG_NFT_SYNPROXY is not set +CONFIG_NF_DUP_NETDEV=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NFT_FIB_NETDEV=m +# CONFIG_NFT_REJECT_NETDEV is not set +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +# CONFIG_NF_FLOW_TABLE_PROCFS is not set +CONFIG_NETFILTER_XTABLES=m +CONFIG_NETFILTER_XTABLES_COMPAT=y + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=m +CONFIG_NETFILTER_XT_CONNMARK=m +CONFIG_NETFILTER_XT_SET=m + +# +# Xtables targets +# 
+CONFIG_NETFILTER_XT_TARGET_AUDIT=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m +CONFIG_NETFILTER_XT_TARGET_CT=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HL=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LED=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_NAT=m +CONFIG_NETFILTER_XT_TARGET_NETMAP=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +# CONFIG_NETFILTER_XT_TARGET_NOTRACK is not set +CONFIG_NETFILTER_XT_TARGET_RATEEST=m +CONFIG_NETFILTER_XT_TARGET_REDIRECT=m +CONFIG_NETFILTER_XT_TARGET_MASQUERADE=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_SECMARK=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m + +# +# Xtables matches +# +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ECN=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_HL=m +CONFIG_NETFILTER_XT_MATCH_IPCOMP=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_L2TP=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SCTP=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +# end of Core Netfilter Configuration + +CONFIG_IP_SET=m +CONFIG_IP_SET_MAX=256 +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_IPMAC=m +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +# CONFIG_IP_VS_DEBUG is not set +CONFIG_IP_VS_TAB_BITS=12 + +# +# IPVS transport protocol load balancing support +# +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_AH_ESP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y +CONFIG_IP_VS_PROTO_SCTP=y + +# +# IPVS scheduler 
+# +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_FO=m +CONFIG_IP_VS_OVF=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +CONFIG_IP_VS_MH=m +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m +# CONFIG_IP_VS_TWOS is not set + +# +# IPVS SH scheduler +# +CONFIG_IP_VS_SH_TAB_BITS=8 + +# +# IPVS MH scheduler +# +CONFIG_IP_VS_MH_TAB_INDEX=12 + +# +# IPVS application helper +# +CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_NFCT=y +CONFIG_IP_VS_PE_SIP=m + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=m +CONFIG_NF_SOCKET_IPV4=m +CONFIG_NF_TPROXY_IPV4=m +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_DUP_IPV4=m +CONFIG_NF_LOG_ARP=m +CONFIG_NF_LOG_IPV4=m +CONFIG_NF_REJECT_IPV4=m +CONFIG_NF_NAT_SNMP_BASIC=m +CONFIG_NF_NAT_PPTP=m +CONFIG_NF_NAT_H323=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +# end of IP: Netfilter Configuration + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_SOCKET_IPV6=m +CONFIG_NF_TPROXY_IPV6=m +CONFIG_NF_TABLES_IPV6=y +CONFIG_NFT_REJECT_IPV6=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_NF_DUP_IPV6=m +CONFIG_NF_REJECT_IPV6=m +CONFIG_NF_LOG_IPV6=m +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RPFILTER=m +CONFIG_IP6_NF_MATCH_RT=m +# CONFIG_IP6_NF_MATCH_SRH is not set +CONFIG_IP6_NF_TARGET_HL=m +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_RAW=m +CONFIG_IP6_NF_SECURITY=m +CONFIG_IP6_NF_NAT=m +CONFIG_IP6_NF_TARGET_MASQUERADE=m +CONFIG_IP6_NF_TARGET_NPT=m +# end of IPv6: Netfilter Configuration + +CONFIG_NF_DEFRAG_IPV6=m +CONFIG_NF_TABLES_BRIDGE=m +# CONFIG_NFT_BRIDGE_META is not set +CONFIG_NFT_BRIDGE_REJECT=m +# CONFIG_NF_CONNTRACK_BRIDGE is not set +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +# CONFIG_BPFILTER is not set +CONFIG_IP_DCCP=m +CONFIG_INET_DCCP_DIAG=m + +# +# DCCP CCIDs Configuration +# +# CONFIG_IP_DCCP_CCID2_DEBUG is not set +CONFIG_IP_DCCP_CCID3=y +# CONFIG_IP_DCCP_CCID3_DEBUG is not set +CONFIG_IP_DCCP_TFRC_LIB=y +# end of DCCP CCIDs Configuration + +# +# DCCP Kernel Hacking +# +# CONFIG_IP_DCCP_DEBUG is not set +# end of DCCP Kernel Hacking + +CONFIG_IP_SCTP=m +# CONFIG_SCTP_DBG_OBJCNT is not set 
+CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5=y +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1 is not set +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set +CONFIG_SCTP_COOKIE_HMAC_MD5=y +CONFIG_SCTP_COOKIE_HMAC_SHA1=y +CONFIG_INET_SCTP_DIAG=m +CONFIG_RDS=m +CONFIG_RDS_RDMA=m +CONFIG_RDS_TCP=m +# CONFIG_RDS_DEBUG is not set +CONFIG_TIPC=m +CONFIG_TIPC_MEDIA_IB=y +CONFIG_TIPC_MEDIA_UDP=y +CONFIG_TIPC_CRYPTO=y +CONFIG_TIPC_DIAG=m +CONFIG_ATM=m +CONFIG_ATM_CLIP=m +# CONFIG_ATM_CLIP_NO_ICMP is not set +CONFIG_ATM_LANE=m +CONFIG_ATM_MPOA=m +CONFIG_ATM_BR2684=m +# CONFIG_ATM_BR2684_IPFILTER is not set +CONFIG_L2TP=m +CONFIG_L2TP_DEBUGFS=m +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=m +CONFIG_L2TP_ETH=m +CONFIG_STP=m +CONFIG_GARP=m +CONFIG_MRP=m +CONFIG_BRIDGE=m +CONFIG_BRIDGE_IGMP_SNOOPING=y +CONFIG_BRIDGE_VLAN_FILTERING=y +# CONFIG_BRIDGE_MRP is not set +# CONFIG_BRIDGE_CFM is not set +CONFIG_NET_DSA=m +# CONFIG_NET_DSA_TAG_NONE is not set +# CONFIG_NET_DSA_TAG_AR9331 is not set +# CONFIG_NET_DSA_TAG_BRCM is not set +# CONFIG_NET_DSA_TAG_BRCM_LEGACY is not set +# CONFIG_NET_DSA_TAG_BRCM_PREPEND is not set +# CONFIG_NET_DSA_TAG_HELLCREEK is not set +# CONFIG_NET_DSA_TAG_GSWIP is not set +CONFIG_NET_DSA_TAG_DSA_COMMON=m +CONFIG_NET_DSA_TAG_DSA=m +CONFIG_NET_DSA_TAG_EDSA=m +# CONFIG_NET_DSA_TAG_MTK is not set +# CONFIG_NET_DSA_TAG_KSZ is not set +# CONFIG_NET_DSA_TAG_OCELOT is not set +# CONFIG_NET_DSA_TAG_OCELOT_8021Q is not set +# CONFIG_NET_DSA_TAG_QCA is not set +# CONFIG_NET_DSA_TAG_RTL4_A is not set +# CONFIG_NET_DSA_TAG_RTL8_4 is not set +# CONFIG_NET_DSA_TAG_RZN1_A5PSW is not set +# CONFIG_NET_DSA_TAG_LAN9303 is not set +# CONFIG_NET_DSA_TAG_SJA1105 is not set +CONFIG_NET_DSA_TAG_TRAILER=m +# CONFIG_NET_DSA_TAG_XRS700X is not set +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +CONFIG_LLC=m +CONFIG_LLC2=m +CONFIG_ATALK=m +CONFIG_DEV_APPLETALK=m +CONFIG_IPDDP=m +CONFIG_IPDDP_ENCAP=y +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +CONFIG_PHONET=m +CONFIG_6LOWPAN=m +# CONFIG_6LOWPAN_DEBUGFS is not set +CONFIG_6LOWPAN_NHC=m +CONFIG_6LOWPAN_NHC_DEST=m +CONFIG_6LOWPAN_NHC_FRAGMENT=m +CONFIG_6LOWPAN_NHC_HOP=m +CONFIG_6LOWPAN_NHC_IPV6=m +CONFIG_6LOWPAN_NHC_MOBILITY=m +CONFIG_6LOWPAN_NHC_ROUTING=m +CONFIG_6LOWPAN_NHC_UDP=m +CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m +CONFIG_6LOWPAN_GHC_UDP=m +CONFIG_6LOWPAN_GHC_ICMPV6=m +CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m +CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m +CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m +CONFIG_IEEE802154=m +# CONFIG_IEEE802154_NL802154_EXPERIMENTAL is not set +CONFIG_IEEE802154_SOCKET=m +CONFIG_IEEE802154_6LOWPAN=m +CONFIG_MAC802154=m +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_CBS=m +CONFIG_NET_SCH_ETF=m +CONFIG_NET_SCH_MQPRIO_LIB=m +# CONFIG_NET_SCH_TAPRIO is not set +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +CONFIG_NET_SCH_SKBPRIO=m +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=m +CONFIG_NET_SCH_CAKE=m +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +# CONFIG_NET_SCH_FQ_PIE is not set +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +# CONFIG_NET_SCH_ETS is not set +# CONFIG_NET_SCH_DEFAULT is not set + +# +# Classification +# +CONFIG_NET_CLS=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m 
+CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=m +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_EMATCH_CANID=m +CONFIG_NET_EMATCH_IPSET=m +CONFIG_NET_EMATCH_IPT=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m +CONFIG_NET_ACT_IPT=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +# CONFIG_NET_ACT_MPLS is not set +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +CONFIG_NET_ACT_CONNMARK=m +# CONFIG_NET_ACT_CTINFO is not set +CONFIG_NET_ACT_SKBMOD=m +CONFIG_NET_ACT_IFE=m +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_ACT_CT=m +# CONFIG_NET_ACT_GATE is not set +CONFIG_NET_IFE_SKBMARK=m +CONFIG_NET_IFE_SKBPRIO=m +CONFIG_NET_IFE_SKBTCINDEX=m +# CONFIG_NET_TC_SKB_EXT is not set +CONFIG_NET_SCH_FIFO=y +CONFIG_DCB=y +CONFIG_DNS_RESOLVER=m +CONFIG_BATMAN_ADV=m +# CONFIG_BATMAN_ADV_BATMAN_V is not set +CONFIG_BATMAN_ADV_BLA=y +CONFIG_BATMAN_ADV_DAT=y +CONFIG_BATMAN_ADV_NC=y +CONFIG_BATMAN_ADV_MCAST=y +# CONFIG_BATMAN_ADV_DEBUG is not set +# CONFIG_BATMAN_ADV_TRACING is not set +CONFIG_OPENVSWITCH=m +CONFIG_OPENVSWITCH_GRE=m +CONFIG_OPENVSWITCH_VXLAN=m +CONFIG_OPENVSWITCH_GENEVE=m +CONFIG_VSOCKETS=m +CONFIG_VSOCKETS_DIAG=m +CONFIG_VSOCKETS_LOOPBACK=m +CONFIG_VIRTIO_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS_COMMON=m +CONFIG_HYPERV_VSOCKETS=m +CONFIG_NETLINK_DIAG=m +CONFIG_MPLS=y +CONFIG_NET_MPLS_GSO=y +CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m +CONFIG_NET_NSH=m +# CONFIG_HSR is not set +CONFIG_NET_SWITCHDEV=y +CONFIG_NET_L3_MASTER_DEV=y +# CONFIG_QRTR is not set +# CONFIG_NET_NCSI is not set +CONFIG_PCPU_DEV_REFCNT=y +CONFIG_MAX_SKB_FRAGS=17 +CONFIG_RPS=y +CONFIG_RFS_ACCEL=y +CONFIG_SOCK_RX_QUEUE_MAPPING=y +CONFIG_XPS=y +CONFIG_CGROUP_NET_PRIO=y +CONFIG_CGROUP_NET_CLASSID=y +CONFIG_NET_RX_BUSY_POLL=y +CONFIG_BQL=y +CONFIG_BPF_STREAM_PARSER=y +CONFIG_NET_FLOW_LIMIT=y + +# +# Network testing +# +CONFIG_NET_PKTGEN=m +CONFIG_NET_DROP_MONITOR=y +# end of Network testing +# end of Networking options + +CONFIG_HAMRADIO=y + +# +# Packet Radio protocols +# +CONFIG_AX25=m +CONFIG_AX25_DAMA_SLAVE=y +CONFIG_NETROM=m +CONFIG_ROSE=m + +# +# AX.25 network device drivers +# +CONFIG_MKISS=m +CONFIG_6PACK=m +CONFIG_BPQETHER=m +CONFIG_BAYCOM_SER_FDX=m +CONFIG_BAYCOM_SER_HDX=m +CONFIG_BAYCOM_PAR=m +CONFIG_YAM=m +# end of AX.25 network device drivers + +CONFIG_CAN=m +CONFIG_CAN_RAW=m +CONFIG_CAN_BCM=m +CONFIG_CAN_GW=m +# CONFIG_CAN_J1939 is not set +# CONFIG_CAN_ISOTP is not set +CONFIG_BT=m +CONFIG_BT_BREDR=y +CONFIG_BT_RFCOMM=m +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=m +CONFIG_BT_BNEP_MC_FILTER=y +CONFIG_BT_BNEP_PROTO_FILTER=y +CONFIG_BT_HIDP=m +CONFIG_BT_LE=y +CONFIG_BT_LE_L2CAP_ECRED=y +CONFIG_BT_6LOWPAN=m +CONFIG_BT_LEDS=y +# CONFIG_BT_MSFTEXT is not set +# CONFIG_BT_AOSPEXT is not set +CONFIG_BT_DEBUGFS=y +# CONFIG_BT_SELFTEST is not set + +# +# Bluetooth device drivers +# +CONFIG_BT_INTEL=m +CONFIG_BT_BCM=m +CONFIG_BT_RTL=m +CONFIG_BT_QCA=m +CONFIG_BT_MTK=m +CONFIG_BT_HCIBTUSB=m +CONFIG_BT_HCIBTUSB_AUTOSUSPEND=y +CONFIG_BT_HCIBTUSB_POLL_SYNC=y +CONFIG_BT_HCIBTUSB_BCM=y +# CONFIG_BT_HCIBTUSB_MTK is not set +CONFIG_BT_HCIBTUSB_RTL=y +CONFIG_BT_HCIBTSDIO=m +CONFIG_BT_HCIUART=m 
+CONFIG_BT_HCIUART_SERDEV=y +CONFIG_BT_HCIUART_H4=y +CONFIG_BT_HCIUART_NOKIA=m +# CONFIG_BT_HCIUART_BCSP is not set +CONFIG_BT_HCIUART_ATH3K=y +CONFIG_BT_HCIUART_LL=y +CONFIG_BT_HCIUART_3WIRE=y +CONFIG_BT_HCIUART_INTEL=y +CONFIG_BT_HCIUART_RTL=y +CONFIG_BT_HCIUART_QCA=y +CONFIG_BT_HCIUART_AG6XX=y +CONFIG_BT_HCIUART_MRVL=y +# CONFIG_BT_HCIBCM203X is not set +# CONFIG_BT_HCIBCM4377 is not set +# CONFIG_BT_HCIBPA10X is not set +# CONFIG_BT_HCIBFUSB is not set +# CONFIG_BT_HCIVHCI is not set +CONFIG_BT_MRVL=m +CONFIG_BT_MRVL_SDIO=m +CONFIG_BT_ATH3K=m +# CONFIG_BT_MTKSDIO is not set +CONFIG_BT_MTKUART=m +CONFIG_BT_QCOMSMD=m +CONFIG_BT_HCIRSI=m +# CONFIG_BT_VIRTIO is not set +# CONFIG_BT_NXPUART is not set +# end of Bluetooth device drivers + +CONFIG_AF_RXRPC=m +CONFIG_AF_RXRPC_IPV6=y +# CONFIG_AF_RXRPC_INJECT_LOSS is not set +# CONFIG_AF_RXRPC_INJECT_RX_DELAY is not set +# CONFIG_AF_RXRPC_DEBUG is not set +CONFIG_RXKAD=y +# CONFIG_RXPERF is not set +# CONFIG_AF_KCM is not set +CONFIG_STREAM_PARSER=y +# CONFIG_MCTP is not set +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +CONFIG_WIRELESS_EXT=y +CONFIG_WEXT_CORE=y +CONFIG_WEXT_PROC=y +CONFIG_WEXT_SPY=y +CONFIG_WEXT_PRIV=y +CONFIG_CFG80211=m +# CONFIG_NL80211_TESTMODE is not set +# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set +# CONFIG_CFG80211_CERTIFICATION_ONUS is not set +CONFIG_CFG80211_REQUIRE_SIGNED_REGDB=y +CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS=y +CONFIG_CFG80211_DEFAULT_PS=y +# CONFIG_CFG80211_DEBUGFS is not set +CONFIG_CFG80211_CRDA_SUPPORT=y +CONFIG_CFG80211_WEXT=y +CONFIG_CFG80211_WEXT_EXPORT=y +CONFIG_LIB80211=m +CONFIG_LIB80211_CRYPT_WEP=m +CONFIG_LIB80211_CRYPT_CCMP=m +CONFIG_LIB80211_CRYPT_TKIP=m +# CONFIG_LIB80211_DEBUG is not set +CONFIG_MAC80211=m +CONFIG_MAC80211_HAS_RC=y +CONFIG_MAC80211_RC_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" +CONFIG_MAC80211_MESH=y +CONFIG_MAC80211_LEDS=y +# CONFIG_MAC80211_DEBUGFS is not set +# CONFIG_MAC80211_MESSAGE_TRACING is not set +# CONFIG_MAC80211_DEBUG_MENU is not set +CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 +CONFIG_RFKILL=m +CONFIG_RFKILL_LEDS=y +CONFIG_RFKILL_INPUT=y +# CONFIG_RFKILL_GPIO is not set +CONFIG_NET_9P=m +CONFIG_NET_9P_FD=m +CONFIG_NET_9P_VIRTIO=m +CONFIG_NET_9P_XEN=m +CONFIG_NET_9P_RDMA=m +# CONFIG_NET_9P_DEBUG is not set +# CONFIG_CAIF is not set +CONFIG_CEPH_LIB=m +# CONFIG_CEPH_LIB_PRETTYDEBUG is not set +# CONFIG_CEPH_LIB_USE_DNS_RESOLVER is not set +CONFIG_NFC=m +CONFIG_NFC_DIGITAL=m +# CONFIG_NFC_NCI is not set +# CONFIG_NFC_HCI is not set + +# +# Near Field Communication (NFC) devices +# +# CONFIG_NFC_TRF7970A is not set +CONFIG_NFC_SIM=m +CONFIG_NFC_PORT100=m +CONFIG_NFC_PN533=m +CONFIG_NFC_PN533_USB=m +# CONFIG_NFC_PN533_I2C is not set +# CONFIG_NFC_PN532_UART is not set +# CONFIG_NFC_ST95HF is not set +# end of Near Field Communication (NFC) devices + +CONFIG_PSAMPLE=m +CONFIG_NET_IFE=m +CONFIG_LWTUNNEL=y +CONFIG_LWTUNNEL_BPF=y +CONFIG_DST_CACHE=y +CONFIG_GRO_CELLS=y +CONFIG_NET_SELFTESTS=m +CONFIG_NET_SOCK_MSG=y +CONFIG_NET_DEVLINK=y +CONFIG_PAGE_POOL=y +CONFIG_PAGE_POOL_STATS=y +CONFIG_FAILOVER=m +CONFIG_ETHTOOL_NETLINK=y + +# +# Device Drivers +# +CONFIG_ARM_AMBA=y +CONFIG_TEGRA_AHB=y +CONFIG_HAVE_PCI=y +CONFIG_PCI=y +CONFIG_PCI_DOMAINS=y +CONFIG_PCI_DOMAINS_GENERIC=y +CONFIG_PCI_SYSCALL=y +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_PCIEAER=y +CONFIG_PCIEAER_INJECT=m +# CONFIG_PCIE_ECRC is not set +CONFIG_PCIEASPM=y +CONFIG_PCIEASPM_DEFAULT=y +# CONFIG_PCIEASPM_POWERSAVE is not set +# 
CONFIG_PCIEASPM_POWER_SUPERSAVE is not set +# CONFIG_PCIEASPM_PERFORMANCE is not set +CONFIG_PCIE_PME=y +CONFIG_PCIE_DPC=y +CONFIG_PCIE_PTM=y +CONFIG_PCIE_EDR=y +CONFIG_PCI_MSI=y +CONFIG_PCI_QUIRKS=y +# CONFIG_PCI_DEBUG is not set +CONFIG_PCI_REALLOC_ENABLE_AUTO=y +CONFIG_PCI_STUB=m +CONFIG_PCI_PF_STUB=m +CONFIG_PCI_ATS=y +CONFIG_PCI_ECAM=y +CONFIG_PCI_BRIDGE_EMUL=y +CONFIG_PCI_IOV=y +CONFIG_PCI_PRI=y +CONFIG_PCI_PASID=y +CONFIG_PCI_P2PDMA=y +CONFIG_PCI_LABEL=y +CONFIG_PCI_HYPERV=m +# CONFIG_PCI_DYNAMIC_OF_NODES is not set +# CONFIG_PCIE_BUS_TUNE_OFF is not set +CONFIG_PCIE_BUS_DEFAULT=y +# CONFIG_PCIE_BUS_SAFE is not set +# CONFIG_PCIE_BUS_PERFORMANCE is not set +# CONFIG_PCIE_BUS_PEER2PEER is not set +CONFIG_VGA_ARB=y +CONFIG_VGA_ARB_MAX_GPUS=16 +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_ACPI=y +CONFIG_HOTPLUG_PCI_ACPI_IBM=m +CONFIG_HOTPLUG_PCI_CPCI=y +CONFIG_HOTPLUG_PCI_SHPC=y + +# +# PCI controller drivers +# +CONFIG_PCI_AARDVARK=y +# CONFIG_PCIE_ALTERA is not set +CONFIG_PCI_HOST_THUNDER_PEM=y +CONFIG_PCI_HOST_THUNDER_ECAM=y +# CONFIG_PCI_FTPCI100 is not set +CONFIG_PCI_HOST_COMMON=y +CONFIG_PCI_HOST_GENERIC=y +# CONFIG_PCIE_HISI_ERR is not set +# CONFIG_PCIE_MICROCHIP_HOST is not set +CONFIG_PCI_HYPERV_INTERFACE=m +CONFIG_PCI_TEGRA=y +CONFIG_PCIE_ROCKCHIP=y +CONFIG_PCIE_ROCKCHIP_HOST=y +CONFIG_PCI_XGENE=y +CONFIG_PCI_XGENE_MSI=y +# CONFIG_PCIE_XILINX is not set +CONFIG_PCIE_XILINX_NWL=y +# CONFIG_PCIE_XILINX_CPM is not set + +# +# Cadence-based PCIe controllers +# +# CONFIG_PCIE_CADENCE_PLAT_HOST is not set +# CONFIG_PCI_J721E_HOST is not set +# end of Cadence-based PCIe controllers + +# +# DesignWare-based PCIe controllers +# +CONFIG_PCIE_DW=y +CONFIG_PCIE_DW_HOST=y +# CONFIG_PCIE_AL is not set +# CONFIG_PCI_MESON is not set +CONFIG_PCI_HISI=y +CONFIG_PCIE_KIRIN=y +# CONFIG_PCIE_HISI_STB is not set +CONFIG_PCIE_ARMADA_8K=y +# CONFIG_PCIE_TEGRA194_HOST is not set +# CONFIG_PCIE_DW_PLAT_HOST is not set +CONFIG_PCIE_QCOM=y +# CONFIG_PCIE_ROCKCHIP_DW_HOST is not set +# end of DesignWare-based PCIe controllers + +# +# Mobiveil-based PCIe controllers +# +# CONFIG_PCIE_MOBIVEIL_PLAT is not set +# end of Mobiveil-based PCIe controllers +# end of PCI controller drivers + +# +# PCI Endpoint +# +# CONFIG_PCI_ENDPOINT is not set +# end of PCI Endpoint + +# +# PCI switch controller drivers +# +# CONFIG_PCI_SW_SWITCHTEC is not set +# end of PCI switch controller drivers + +# CONFIG_CXL_BUS is not set +# CONFIG_PCCARD is not set +# CONFIG_RAPIDIO is not set + +# +# Generic Driver Options +# +CONFIG_AUXILIARY_BUS=y +# CONFIG_UEVENT_HELPER is not set +CONFIG_DEVTMPFS=y +# CONFIG_DEVTMPFS_MOUNT is not set +# CONFIG_DEVTMPFS_SAFE is not set +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y + +# +# Firmware loader +# +CONFIG_FW_LOADER=y +CONFIG_FW_LOADER_DEBUG=y +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_FW_LOADER_USER_HELPER is not set +# CONFIG_FW_LOADER_COMPRESS is not set +CONFIG_FW_CACHE=y +# CONFIG_FW_UPLOAD is not set +# end of Firmware loader + +CONFIG_WANT_DEV_COREDUMP=y +CONFIG_ALLOW_DEV_COREDUMP=y +CONFIG_DEV_COREDUMP=y +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set +# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set +CONFIG_SYS_HYPERVISOR=y +CONFIG_GENERIC_CPU_AUTOPROBE=y +CONFIG_GENERIC_CPU_VULNERABILITIES=y +CONFIG_SOC_BUS=y +CONFIG_REGMAP=y +CONFIG_REGMAP_I2C=y +CONFIG_REGMAP_SPI=m +CONFIG_REGMAP_SPMI=y +CONFIG_REGMAP_MMIO=y +CONFIG_REGMAP_IRQ=y +CONFIG_DMA_SHARED_BUFFER=y +# CONFIG_DMA_FENCE_TRACE is not set 
+CONFIG_GENERIC_ARCH_TOPOLOGY=y +CONFIG_GENERIC_ARCH_NUMA=y +# CONFIG_FW_DEVLINK_SYNC_STATE_TIMEOUT is not set +# end of Generic Driver Options + +# +# Bus devices +# +CONFIG_ARM_CCI=y +CONFIG_ARM_CCI400_COMMON=y +# CONFIG_BRCMSTB_GISB_ARB is not set +# CONFIG_MOXTET is not set +CONFIG_HISILICON_LPC=y +CONFIG_QCOM_EBI2=y +# CONFIG_QCOM_SSC_BLOCK_BUS is not set +CONFIG_SUN50I_DE2_BUS=y +CONFIG_SUNXI_RSB=y +CONFIG_TEGRA_ACONNECT=y +# CONFIG_TEGRA_GMI is not set +CONFIG_VEXPRESS_CONFIG=y +# CONFIG_MHI_BUS is not set +# CONFIG_MHI_BUS_EP is not set +# end of Bus devices + +# +# Cache Drivers +# +# end of Cache Drivers + +CONFIG_CONNECTOR=y +CONFIG_PROC_EVENTS=y + +# +# Firmware Drivers +# + +# +# ARM System Control and Management Interface Protocol +# +# CONFIG_ARM_SCMI_PROTOCOL is not set +# end of ARM System Control and Management Interface Protocol + +# CONFIG_ARM_SCPI_PROTOCOL is not set +CONFIG_ARM_SDE_INTERFACE=y +# CONFIG_FIRMWARE_MEMMAP is not set +CONFIG_DMIID=y +CONFIG_DMI_SYSFS=y +# CONFIG_ISCSI_IBFT is not set +CONFIG_FW_CFG_SYSFS=m +# CONFIG_FW_CFG_SYSFS_CMDLINE is not set +CONFIG_QCOM_SCM=y +# CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT is not set +CONFIG_SYSFB=y +# CONFIG_SYSFB_SIMPLEFB is not set +CONFIG_TURRIS_MOX_RWTM=m +CONFIG_ARM_FFA_TRANSPORT=y +CONFIG_ARM_FFA_SMCCC=y +CONFIG_GOOGLE_FIRMWARE=y +# CONFIG_GOOGLE_CBMEM is not set +CONFIG_GOOGLE_COREBOOT_TABLE=m +CONFIG_GOOGLE_MEMCONSOLE=m +# CONFIG_GOOGLE_FRAMEBUFFER_COREBOOT is not set +CONFIG_GOOGLE_MEMCONSOLE_COREBOOT=m +# CONFIG_GOOGLE_VPD is not set + +# +# EFI (Extensible Firmware Interface) Support +# +CONFIG_EFI_ESRT=y +CONFIG_EFI_VARS_PSTORE=m +# CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE is not set +CONFIG_EFI_PARAMS_FROM_FDT=y +CONFIG_EFI_RUNTIME_WRAPPERS=y +CONFIG_EFI_GENERIC_STUB=y +# CONFIG_EFI_ZBOOT is not set +CONFIG_EFI_ARMSTUB_DTB_LOADER=y +CONFIG_EFI_BOOTLOADER_CONTROL=m +CONFIG_EFI_CAPSULE_LOADER=m +# CONFIG_EFI_TEST is not set +# CONFIG_RESET_ATTACK_MITIGATION is not set +# CONFIG_EFI_DISABLE_PCI_DMA is not set +CONFIG_EFI_EARLYCON=y +CONFIG_EFI_CUSTOM_SSDT_OVERLAYS=y +# CONFIG_EFI_DISABLE_RUNTIME is not set +# CONFIG_EFI_COCO_SECRET is not set +# end of EFI (Extensible Firmware Interface) Support + +CONFIG_UEFI_CPER=y +CONFIG_UEFI_CPER_ARM=y +CONFIG_ARM_PSCI_FW=y +# CONFIG_ARM_PSCI_CHECKER is not set +CONFIG_HAVE_ARM_SMCCC=y +CONFIG_HAVE_ARM_SMCCC_DISCOVERY=y +CONFIG_ARM_SMCCC_SOC_ID=y + +# +# Tegra firmware driver +# +CONFIG_TEGRA_IVC=y +CONFIG_TEGRA_BPMP=y +# end of Tegra firmware driver + +# +# Zynq MPSoC Firmware Drivers +# +CONFIG_ZYNQMP_FIRMWARE=y +# CONFIG_ZYNQMP_FIRMWARE_DEBUG is not set +# end of Zynq MPSoC Firmware Drivers +# end of Firmware Drivers + +CONFIG_GNSS=m +CONFIG_GNSS_SERIAL=m +# CONFIG_GNSS_MTK_SERIAL is not set +CONFIG_GNSS_SIRF_SERIAL=m +CONFIG_GNSS_UBX_SERIAL=m +# CONFIG_GNSS_USB is not set +CONFIG_MTD=m +# CONFIG_MTD_TESTS is not set + +# +# Partition parsers +# +CONFIG_MTD_AR7_PARTS=m +# CONFIG_MTD_CMDLINE_PARTS is not set +CONFIG_MTD_OF_PARTS=m +# CONFIG_MTD_AFS_PARTS is not set +# CONFIG_MTD_REDBOOT_PARTS is not set +# end of Partition parsers + +# +# User Modules And Translation Layers +# +CONFIG_MTD_BLKDEVS=m +CONFIG_MTD_BLOCK=m +CONFIG_MTD_BLOCK_RO=m + +# +# Note that in some cases UBI block is preferred. See MTD_UBI_BLOCK. 
+# +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +CONFIG_RFD_FTL=m +CONFIG_SSFDC=m +# CONFIG_SM_FTL is not set +CONFIG_MTD_OOPS=m +CONFIG_MTD_SWAP=m +# CONFIG_MTD_PARTITIONED_MASTER is not set + +# +# RAM/ROM/Flash chip drivers +# +# CONFIG_MTD_CFI is not set +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +CONFIG_MTD_RAM=m +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set +# end of RAM/ROM/Flash chip drivers + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +# CONFIG_MTD_PHYSMAP is not set +CONFIG_MTD_INTEL_VR_NOR=m +CONFIG_MTD_PLATRAM=m +# end of Mapping drivers for chip access + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_PMC551 is not set +CONFIG_MTD_DATAFLASH=m +# CONFIG_MTD_DATAFLASH_WRITE_VERIFY is not set +# CONFIG_MTD_DATAFLASH_OTP is not set +# CONFIG_MTD_MCHP23K256 is not set +# CONFIG_MTD_MCHP48L640 is not set +CONFIG_MTD_SST25L=m +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOCG3 is not set +# end of Self-contained MTD device drivers + +# +# NAND +# +CONFIG_MTD_ONENAND=m +CONFIG_MTD_ONENAND_VERIFY_WRITE=y +# CONFIG_MTD_ONENAND_GENERIC is not set +# CONFIG_MTD_ONENAND_OTP is not set +CONFIG_MTD_ONENAND_2X_PROGRAM=y +# CONFIG_MTD_RAW_NAND is not set +# CONFIG_MTD_SPI_NAND is not set + +# +# ECC engine support +# +# CONFIG_MTD_NAND_ECC_SW_HAMMING is not set +# CONFIG_MTD_NAND_ECC_SW_BCH is not set +# CONFIG_MTD_NAND_ECC_MXIC is not set +# end of ECC engine support +# end of NAND + +# +# LPDDR & LPDDR2 PCM memory drivers +# +CONFIG_MTD_LPDDR=m +CONFIG_MTD_QINFO_PROBE=m +# end of LPDDR & LPDDR2 PCM memory drivers + +CONFIG_MTD_SPI_NOR=m +CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y +# CONFIG_MTD_SPI_NOR_SWP_DISABLE is not set +CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE=y +# CONFIG_MTD_SPI_NOR_SWP_KEEP is not set +CONFIG_SPI_HISI_SFC=m +CONFIG_MTD_UBI=m +CONFIG_MTD_UBI_WL_THRESHOLD=4096 +CONFIG_MTD_UBI_BEB_LIMIT=20 +# CONFIG_MTD_UBI_FASTMAP is not set +# CONFIG_MTD_UBI_GLUEBI is not set +CONFIG_MTD_UBI_BLOCK=y +# CONFIG_MTD_HYPERBUS is not set +CONFIG_DTC=y +CONFIG_OF=y +# CONFIG_OF_UNITTEST is not set +CONFIG_OF_FLATTREE=y +CONFIG_OF_EARLY_FLATTREE=y +CONFIG_OF_KOBJ=y +CONFIG_OF_ADDRESS=y +CONFIG_OF_IRQ=y +CONFIG_OF_RESERVED_MEM=y +# CONFIG_OF_OVERLAY is not set +CONFIG_OF_NUMA=y +CONFIG_PARPORT=m +# CONFIG_PARPORT_PC is not set +CONFIG_PARPORT_1284=y +CONFIG_PARPORT_NOT_PC=y +CONFIG_PNP=y +# CONFIG_PNP_DEBUG_MESSAGES is not set + +# +# Protocols +# +CONFIG_PNPACPI=y +CONFIG_BLK_DEV=y +CONFIG_BLK_DEV_NULL_BLK=m +CONFIG_CDROM=m +CONFIG_BLK_DEV_PCIESSD_MTIP32XX=m +CONFIG_ZRAM=m +CONFIG_ZRAM_DEF_COMP_LZORLE=y +# CONFIG_ZRAM_DEF_COMP_ZSTD is not set +# CONFIG_ZRAM_DEF_COMP_LZ4 is not set +# CONFIG_ZRAM_DEF_COMP_LZO is not set +# CONFIG_ZRAM_DEF_COMP_LZ4HC is not set +CONFIG_ZRAM_DEF_COMP="lzo-rle" +CONFIG_ZRAM_WRITEBACK=y +CONFIG_ZRAM_MEMORY_TRACKING=y +# CONFIG_ZRAM_MULTI_COMP is not set +CONFIG_BLK_DEV_LOOP=m +CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 +CONFIG_BLK_DEV_DRBD=m +# CONFIG_DRBD_FAULT_INJECTION is not set +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_RAM=m +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=16384 +# CONFIG_CDROM_PKTCDVD is not set +CONFIG_ATA_OVER_ETH=m +CONFIG_XEN_BLKDEV_FRONTEND=m +CONFIG_XEN_BLKDEV_BACKEND=m +CONFIG_VIRTIO_BLK=m 
+CONFIG_BLK_DEV_RBD=m +# CONFIG_BLK_DEV_UBLK is not set + +# +# NVME Support +# +CONFIG_NVME_CORE=m +CONFIG_BLK_DEV_NVME=m +CONFIG_NVME_MULTIPATH=y +# CONFIG_NVME_VERBOSE_ERRORS is not set +# CONFIG_NVME_HWMON is not set +CONFIG_NVME_FABRICS=m +CONFIG_NVME_RDMA=m +CONFIG_NVME_FC=m +CONFIG_NVME_TCP=m +# CONFIG_NVME_AUTH is not set +CONFIG_NVME_TARGET=m +# CONFIG_NVME_TARGET_PASSTHRU is not set +# CONFIG_NVME_TARGET_LOOP is not set +CONFIG_NVME_TARGET_RDMA=m +CONFIG_NVME_TARGET_FC=m +# CONFIG_NVME_TARGET_FCLOOP is not set +CONFIG_NVME_TARGET_TCP=m +# CONFIG_NVME_TARGET_AUTH is not set +# end of NVME Support + +# +# Misc devices +# +CONFIG_SENSORS_LIS3LV02D=m +CONFIG_AD525X_DPOT=m +CONFIG_AD525X_DPOT_I2C=m +CONFIG_AD525X_DPOT_SPI=m +# CONFIG_DUMMY_IRQ is not set +# CONFIG_PHANTOM is not set +CONFIG_TIFM_CORE=m +CONFIG_TIFM_7XX1=m +CONFIG_ICS932S401=m +CONFIG_ENCLOSURE_SERVICES=m +# CONFIG_HI6421V600_IRQ is not set +# CONFIG_HP_ILO is not set +CONFIG_QCOM_COINCELL=m +# CONFIG_QCOM_FASTRPC is not set +CONFIG_APDS9802ALS=m +CONFIG_ISL29003=m +CONFIG_ISL29020=m +CONFIG_SENSORS_TSL2550=m +CONFIG_SENSORS_BH1770=m +CONFIG_SENSORS_APDS990X=m +CONFIG_HMC6352=m +CONFIG_DS1682=m +# CONFIG_LATTICE_ECP3_CONFIG is not set +CONFIG_SRAM=y +# CONFIG_DW_XDATA_PCIE is not set +# CONFIG_PCI_ENDPOINT_TEST is not set +# CONFIG_XILINX_SDFEC is not set +CONFIG_MISC_RTSX=m +# CONFIG_HISI_HIKEY_USB is not set +# CONFIG_OPEN_DICE is not set +# CONFIG_VCPU_STALL_DETECTOR is not set +CONFIG_C2PORT=m + +# +# EEPROM support +# +CONFIG_EEPROM_AT24=m +CONFIG_EEPROM_AT25=m +CONFIG_EEPROM_LEGACY=m +CONFIG_EEPROM_MAX6875=m +CONFIG_EEPROM_93CX6=m +# CONFIG_EEPROM_93XX46 is not set +# CONFIG_EEPROM_IDT_89HPESX is not set +# CONFIG_EEPROM_EE1004 is not set +# end of EEPROM support + +CONFIG_CB710_CORE=m +# CONFIG_CB710_DEBUG is not set +CONFIG_CB710_DEBUG_ASSUMPTIONS=y + +# +# Texas Instruments shared transport line discipline +# +CONFIG_TI_ST=m +# end of Texas Instruments shared transport line discipline + +CONFIG_SENSORS_LIS3_I2C=m +CONFIG_ALTERA_STAPL=m +# CONFIG_VMWARE_VMCI is not set +# CONFIG_GENWQE is not set +# CONFIG_ECHO is not set +# CONFIG_BCM_VK is not set +# CONFIG_MISC_ALCOR_PCI is not set +CONFIG_MISC_RTSX_PCI=m +CONFIG_MISC_RTSX_USB=m +# CONFIG_UACCE is not set +CONFIG_PVPANIC=y +CONFIG_PVPANIC_MMIO=m +CONFIG_PVPANIC_PCI=m +# CONFIG_GP_PCI1XXXX is not set +# end of Misc devices + +# +# SCSI device support +# +CONFIG_SCSI_MOD=m +CONFIG_RAID_ATTRS=m +CONFIG_SCSI_COMMON=m +CONFIG_SCSI=m +CONFIG_SCSI_DMA=y +CONFIG_SCSI_NETLINK=y +# CONFIG_SCSI_PROC_FS is not set + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=m +CONFIG_CHR_DEV_ST=m +CONFIG_BLK_DEV_SR=m +CONFIG_CHR_DEV_SG=m +CONFIG_BLK_DEV_BSG=y +CONFIG_CHR_DEV_SCH=m +CONFIG_SCSI_ENCLOSURE=m +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y + +# +# SCSI Transports +# +CONFIG_SCSI_SPI_ATTRS=m +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_ISCSI_ATTRS=m +CONFIG_SCSI_SAS_ATTRS=m +CONFIG_SCSI_SAS_LIBSAS=m +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SAS_HOST_SMP=y +CONFIG_SCSI_SRP_ATTRS=m +# end of SCSI Transports + +CONFIG_SCSI_LOWLEVEL=y +CONFIG_ISCSI_TCP=m +CONFIG_ISCSI_BOOT_SYSFS=m +CONFIG_SCSI_CXGB3_ISCSI=m +CONFIG_SCSI_CXGB4_ISCSI=m +CONFIG_SCSI_BNX2_ISCSI=m +CONFIG_SCSI_BNX2X_FCOE=m +CONFIG_BE2ISCSI=m +CONFIG_BLK_DEV_3W_XXXX_RAID=m +CONFIG_SCSI_HPSA=m +CONFIG_SCSI_3W_9XXX=m +CONFIG_SCSI_3W_SAS=m +CONFIG_SCSI_ACARD=m +CONFIG_SCSI_AACRAID=m +CONFIG_SCSI_AIC7XXX=m +CONFIG_AIC7XXX_CMDS_PER_DEVICE=32 +CONFIG_AIC7XXX_RESET_DELAY_MS=15000 
+CONFIG_AIC7XXX_DEBUG_ENABLE=y +CONFIG_AIC7XXX_DEBUG_MASK=0 +CONFIG_AIC7XXX_REG_PRETTY_PRINT=y +CONFIG_SCSI_AIC79XX=m +CONFIG_AIC79XX_CMDS_PER_DEVICE=32 +CONFIG_AIC79XX_RESET_DELAY_MS=15000 +CONFIG_AIC79XX_DEBUG_ENABLE=y +CONFIG_AIC79XX_DEBUG_MASK=0 +CONFIG_AIC79XX_REG_PRETTY_PRINT=y +CONFIG_SCSI_AIC94XX=m +# CONFIG_AIC94XX_DEBUG is not set +CONFIG_SCSI_HISI_SAS=m +CONFIG_SCSI_HISI_SAS_PCI=m +# CONFIG_SCSI_HISI_SAS_DEBUGFS_DEFAULT_ENABLE is not set +CONFIG_SCSI_MVSAS=m +# CONFIG_SCSI_MVSAS_DEBUG is not set +# CONFIG_SCSI_MVSAS_TASKLET is not set +CONFIG_SCSI_MVUMI=m +CONFIG_SCSI_ADVANSYS=m +# CONFIG_SCSI_ARCMSR is not set +CONFIG_SCSI_ESAS2R=m +# CONFIG_MEGARAID_NEWGEN is not set +# CONFIG_MEGARAID_LEGACY is not set +CONFIG_MEGARAID_SAS=m +CONFIG_SCSI_MPT3SAS=m +CONFIG_SCSI_MPT2SAS_MAX_SGE=128 +CONFIG_SCSI_MPT3SAS_MAX_SGE=128 +CONFIG_SCSI_MPT2SAS=m +# CONFIG_SCSI_MPI3MR is not set +CONFIG_SCSI_SMARTPQI=m +CONFIG_SCSI_HPTIOP=m +# CONFIG_SCSI_BUSLOGIC is not set +# CONFIG_SCSI_MYRB is not set +# CONFIG_SCSI_MYRS is not set +CONFIG_XEN_SCSI_FRONTEND=m +CONFIG_HYPERV_STORAGE=m +CONFIG_LIBFC=m +CONFIG_LIBFCOE=m +CONFIG_FCOE=m +CONFIG_SCSI_SNIC=m +# CONFIG_SCSI_SNIC_DEBUG_FS is not set +CONFIG_SCSI_DMX3191D=m +# CONFIG_SCSI_FDOMAIN_PCI is not set +# CONFIG_SCSI_IPS is not set +# CONFIG_SCSI_INITIO is not set +# CONFIG_SCSI_INIA100 is not set +CONFIG_SCSI_STEX=m +CONFIG_SCSI_SYM53C8XX_2=m +CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1 +CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16 +CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64 +CONFIG_SCSI_SYM53C8XX_MMIO=y +# CONFIG_SCSI_IPR is not set +# CONFIG_SCSI_QLOGIC_1280 is not set +CONFIG_SCSI_QLA_FC=m +CONFIG_TCM_QLA2XXX=m +# CONFIG_TCM_QLA2XXX_DEBUG is not set +CONFIG_SCSI_QLA_ISCSI=m +CONFIG_QEDI=m +CONFIG_QEDF=m +CONFIG_SCSI_LPFC=m +# CONFIG_SCSI_LPFC_DEBUG_FS is not set +# CONFIG_SCSI_EFCT is not set +# CONFIG_SCSI_DC395x is not set +# CONFIG_SCSI_AM53C974 is not set +CONFIG_SCSI_WD719X=m +# CONFIG_SCSI_DEBUG is not set +CONFIG_SCSI_PMCRAID=m +CONFIG_SCSI_PM8001=m +CONFIG_SCSI_BFA_FC=m +CONFIG_SCSI_VIRTIO=m +CONFIG_SCSI_CHELSIO_FCOE=m +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=m +CONFIG_SCSI_DH_HP_SW=m +CONFIG_SCSI_DH_EMC=m +CONFIG_SCSI_DH_ALUA=m +# end of SCSI device support + +CONFIG_ATA=m +CONFIG_SATA_HOST=y +CONFIG_PATA_TIMINGS=y +CONFIG_ATA_VERBOSE_ERROR=y +CONFIG_ATA_FORCE=y +CONFIG_ATA_ACPI=y +CONFIG_SATA_ZPODD=y +CONFIG_SATA_PMP=y + +# +# Controllers with non-SFF native interface +# +CONFIG_SATA_AHCI=m +CONFIG_SATA_MOBILE_LPM_POLICY=3 +CONFIG_SATA_AHCI_PLATFORM=m +# CONFIG_AHCI_DWC is not set +CONFIG_AHCI_CEVA=m +CONFIG_AHCI_MVEBU=m +# CONFIG_AHCI_SUNXI is not set +CONFIG_AHCI_TEGRA=m +CONFIG_AHCI_XGENE=m +CONFIG_SATA_AHCI_SEATTLE=m +# CONFIG_SATA_INIC162X is not set +CONFIG_SATA_ACARD_AHCI=m +CONFIG_SATA_SIL24=m +CONFIG_ATA_SFF=y + +# +# SFF controllers with custom DMA interface +# +CONFIG_PDC_ADMA=m +CONFIG_SATA_QSTOR=m +CONFIG_SATA_SX4=m +CONFIG_ATA_BMDMA=y + +# +# SATA SFF controllers with BMDMA +# +CONFIG_ATA_PIIX=m +# CONFIG_SATA_DWC is not set +CONFIG_SATA_MV=m +CONFIG_SATA_NV=m +CONFIG_SATA_PROMISE=m +CONFIG_SATA_SIL=m +CONFIG_SATA_SIS=m +CONFIG_SATA_SVW=m +CONFIG_SATA_ULI=m +CONFIG_SATA_VIA=m +CONFIG_SATA_VITESSE=m + +# +# PATA SFF controllers with BMDMA +# +# CONFIG_PATA_ALI is not set +# CONFIG_PATA_AMD is not set +CONFIG_PATA_ARTOP=m +# CONFIG_PATA_ATIIXP is not set +CONFIG_PATA_ATP867X=m +CONFIG_PATA_CMD64X=m +# CONFIG_PATA_CYPRESS is not set +# CONFIG_PATA_EFAR is not set +# CONFIG_PATA_HPT366 is not set +# CONFIG_PATA_HPT37X is not set +# 
+# CONFIG_PATA_HPT3X2N is not set
+# CONFIG_PATA_HPT3X3 is not set
+CONFIG_PATA_IT8213=m
+CONFIG_PATA_IT821X=m
+CONFIG_PATA_JMICRON=m
+CONFIG_PATA_MARVELL=m
+# CONFIG_PATA_NETCELL is not set
+CONFIG_PATA_NINJA32=m
+# CONFIG_PATA_NS87415 is not set
+# CONFIG_PATA_OLDPIIX is not set
+# CONFIG_PATA_OPTIDMA is not set
+# CONFIG_PATA_PDC2027X is not set
+# CONFIG_PATA_PDC_OLD is not set
+# CONFIG_PATA_RADISYS is not set
+CONFIG_PATA_RDC=m
+CONFIG_PATA_SCH=m
+# CONFIG_PATA_SERVERWORKS is not set
+# CONFIG_PATA_SIL680 is not set
+CONFIG_PATA_SIS=m
+CONFIG_PATA_TOSHIBA=m
+# CONFIG_PATA_TRIFLEX is not set
+# CONFIG_PATA_VIA is not set
+# CONFIG_PATA_WINBOND is not set
+
+#
+# PIO-only SFF controllers
+#
+# CONFIG_PATA_CMD640_PCI is not set
+# CONFIG_PATA_MPIIX is not set
+# CONFIG_PATA_NS87410 is not set
+# CONFIG_PATA_OPTI is not set
+# CONFIG_PATA_OF_PLATFORM is not set
+# CONFIG_PATA_RZ1000 is not set
+
+#
+# Generic fallback / legacy drivers
+#
+# CONFIG_PATA_ACPI is not set
+CONFIG_ATA_GENERIC=m
+# CONFIG_PATA_LEGACY is not set
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_BITMAP_FILE=y
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID10=m
+CONFIG_MD_RAID456=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_MD_FAULTY=m
+# CONFIG_MD_CLUSTER is not set
+CONFIG_BCACHE=m
+# CONFIG_BCACHE_DEBUG is not set
+# CONFIG_BCACHE_CLOSURES_DEBUG is not set
+# CONFIG_BCACHE_ASYNC_REGISTRATION is not set
+CONFIG_BLK_DEV_DM_BUILTIN=y
+CONFIG_BLK_DEV_DM=m
+# CONFIG_DM_DEBUG is not set
+CONFIG_DM_BUFIO=m
+# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set
+CONFIG_DM_BIO_PRISON=m
+CONFIG_DM_PERSISTENT_DATA=m
+CONFIG_DM_UNSTRIPED=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_CACHE=m
+CONFIG_DM_CACHE_SMQ=m
+CONFIG_DM_WRITECACHE=m
+# CONFIG_DM_EBS is not set
+CONFIG_DM_ERA=m
+# CONFIG_DM_CLONE is not set
+CONFIG_DM_MIRROR=m
+CONFIG_DM_LOG_USERSPACE=m
+CONFIG_DM_RAID=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_DM_MULTIPATH_QL=m
+CONFIG_DM_MULTIPATH_ST=m
+# CONFIG_DM_MULTIPATH_HST is not set
+# CONFIG_DM_MULTIPATH_IOA is not set
+CONFIG_DM_DELAY=m
+# CONFIG_DM_DUST is not set
+CONFIG_DM_UEVENT=y
+CONFIG_DM_FLAKEY=m
+CONFIG_DM_VERITY=m
+# CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG is not set
+# CONFIG_DM_VERITY_FEC is not set
+CONFIG_DM_SWITCH=m
+CONFIG_DM_LOG_WRITES=m
+CONFIG_DM_INTEGRITY=m
+CONFIG_DM_ZONED=m
+CONFIG_DM_AUDIT=y
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
+CONFIG_TCM_USER2=m
+CONFIG_LOOPBACK_TARGET=m
+CONFIG_TCM_FC=m
+CONFIG_ISCSI_TARGET=m
+CONFIG_ISCSI_TARGET_CXGB4=m
+CONFIG_SBP_TARGET=m
+# CONFIG_REMOTE_TARGET is not set
+CONFIG_FUSION=y
+CONFIG_FUSION_SPI=m
+CONFIG_FUSION_FC=m
+CONFIG_FUSION_SAS=m
+CONFIG_FUSION_MAX_SGE=128
+CONFIG_FUSION_CTL=m
+# CONFIG_FUSION_LOGGING is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+CONFIG_FIREWIRE=m
+CONFIG_FIREWIRE_OHCI=m
+CONFIG_FIREWIRE_SBP2=m
+CONFIG_FIREWIRE_NET=m
+CONFIG_FIREWIRE_NOSY=m
+# end of IEEE 1394 (FireWire) support
+
+CONFIG_NETDEVICES=y
+CONFIG_MII=m
+CONFIG_NET_CORE=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+# CONFIG_WIREGUARD is not set
+CONFIG_EQUALIZER=m
+# CONFIG_NET_FC is not set
+CONFIG_IFB=m
+CONFIG_NET_TEAM=m
+CONFIG_NET_TEAM_MODE_BROADCAST=m
+CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_RANDOM=m
+CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
+CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_IPVLAN_L3S=y
+CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
+CONFIG_VXLAN=m
+CONFIG_GENEVE=m
+# CONFIG_BAREUDP is not set
+CONFIG_GTP=m
+# CONFIG_AMT is not set
+CONFIG_MACSEC=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
+# CONFIG_NETCONSOLE_EXTENDED_LOG is not set
+CONFIG_NETPOLL=y
+CONFIG_NET_POLL_CONTROLLER=y
+CONFIG_TUN=m
+CONFIG_TAP=m
+# CONFIG_TUN_VNET_CROSS_LE is not set
+CONFIG_VETH=m
+CONFIG_VIRTIO_NET=m
+CONFIG_NLMON=m
+CONFIG_NETKIT=y
+CONFIG_NET_VRF=m
+CONFIG_VSOCKMON=m
+# CONFIG_ARCNET is not set
+CONFIG_ATM_DRIVERS=y
+CONFIG_ATM_DUMMY=m
+# CONFIG_ATM_TCP is not set
+# CONFIG_ATM_LANAI is not set
+# CONFIG_ATM_ENI is not set
+CONFIG_ATM_NICSTAR=m
+CONFIG_ATM_NICSTAR_USE_SUNI=y
+CONFIG_ATM_NICSTAR_USE_IDT77105=y
+# CONFIG_ATM_IDT77252 is not set
+CONFIG_ATM_IA=m
+# CONFIG_ATM_IA_DEBUG is not set
+CONFIG_ATM_FORE200E=m
+# CONFIG_ATM_FORE200E_USE_TASKLET is not set
+CONFIG_ATM_FORE200E_TX_RETRY=16
+CONFIG_ATM_FORE200E_DEBUG=0
+# CONFIG_ATM_HE is not set
+CONFIG_ATM_SOLOS=m
+
+#
+# Distributed Switch Architecture drivers
+#
+# CONFIG_B53 is not set
+# CONFIG_NET_DSA_BCM_SF2 is not set
+# CONFIG_NET_DSA_LOOP is not set
+# CONFIG_NET_DSA_LANTIQ_GSWIP is not set
+# CONFIG_NET_DSA_MT7530 is not set
+CONFIG_NET_DSA_MV88E6060=m
+# CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON is not set
+CONFIG_NET_DSA_MV88E6XXX=m
+# CONFIG_NET_DSA_MV88E6XXX_PTP is not set
+# CONFIG_NET_DSA_MSCC_OCELOT_EXT is not set
+# CONFIG_NET_DSA_MSCC_SEVILLE is not set
+# CONFIG_NET_DSA_AR9331 is not set
+# CONFIG_NET_DSA_QCA8K is not set
+# CONFIG_NET_DSA_SJA1105 is not set
+# CONFIG_NET_DSA_XRS700X_I2C is not set
+# CONFIG_NET_DSA_XRS700X_MDIO is not set
+# CONFIG_NET_DSA_REALTEK is not set
+# CONFIG_NET_DSA_SMSC_LAN9303_I2C is not set
+# CONFIG_NET_DSA_SMSC_LAN9303_MDIO is not set
+# CONFIG_NET_DSA_VITESSE_VSC73XX_SPI is not set
+# CONFIG_NET_DSA_VITESSE_VSC73XX_PLATFORM is not set
+# end of Distributed Switch Architecture drivers
+
+CONFIG_ETHERNET=y
+CONFIG_MDIO=m
+CONFIG_NET_VENDOR_3COM=y
+CONFIG_VORTEX=m
+CONFIG_TYPHOON=m
+CONFIG_NET_VENDOR_ADAPTEC=y
+CONFIG_ADAPTEC_STARFIRE=m
+CONFIG_NET_VENDOR_AGERE=y
+CONFIG_ET131X=m
+CONFIG_NET_VENDOR_ALACRITECH=y
+# CONFIG_SLICOSS is not set
+CONFIG_NET_VENDOR_ALLWINNER=y
+# CONFIG_SUN4I_EMAC is not set
+CONFIG_NET_VENDOR_ALTEON=y
+CONFIG_ACENIC=m
+# CONFIG_ACENIC_OMIT_TIGON_I is not set
+# CONFIG_ALTERA_TSE is not set
+CONFIG_NET_VENDOR_AMAZON=y
+CONFIG_ENA_ETHERNET=m
+CONFIG_NET_VENDOR_AMD=y
+# CONFIG_AMD8111_ETH is not set
+CONFIG_PCNET32=m
+CONFIG_AMD_XGBE=m
+CONFIG_AMD_XGBE_DCB=y
+# CONFIG_PDS_CORE is not set
+CONFIG_NET_XGENE=m
+CONFIG_NET_XGENE_V2=m
+CONFIG_NET_VENDOR_AQUANTIA=y
+CONFIG_AQTION=m
+# CONFIG_NET_VENDOR_ARC is not set
+CONFIG_NET_VENDOR_ASIX=y
+# CONFIG_SPI_AX88796C is not set
+CONFIG_NET_VENDOR_ATHEROS=y
+CONFIG_ATL2=m
+CONFIG_ATL1=m
+CONFIG_ATL1E=m
+CONFIG_ATL1C=m
+CONFIG_ALX=m
+CONFIG_NET_VENDOR_BROADCOM=y
+# CONFIG_B44 is not set
+# CONFIG_BCMGENET is not set
+CONFIG_BNX2=m
+CONFIG_CNIC=m
+CONFIG_TIGON3=m
+CONFIG_TIGON3_HWMON=y
+CONFIG_BNX2X=m
+CONFIG_BNX2X_SRIOV=y
+# CONFIG_SYSTEMPORT is not set
+CONFIG_BNXT=m
+CONFIG_BNXT_SRIOV=y
+CONFIG_BNXT_FLOWER_OFFLOAD=y
+CONFIG_BNXT_DCB=y
+CONFIG_BNXT_HWMON=y
+CONFIG_NET_VENDOR_CADENCE=y
+CONFIG_MACB=m
+CONFIG_MACB_USE_HWSTAMP=y
+# CONFIG_MACB_PCI is not set
+CONFIG_NET_VENDOR_CAVIUM=y
+CONFIG_THUNDER_NIC_PF=m
+CONFIG_THUNDER_NIC_VF=m
+CONFIG_THUNDER_NIC_BGX=m
+CONFIG_THUNDER_NIC_RGX=m
+CONFIG_CAVIUM_PTP=m
+CONFIG_LIQUIDIO_CORE=m
+CONFIG_LIQUIDIO=m
+CONFIG_LIQUIDIO_VF=m
+CONFIG_NET_VENDOR_CHELSIO=y
+CONFIG_CHELSIO_T1=m
+CONFIG_CHELSIO_T1_1G=y
+CONFIG_CHELSIO_T3=m
+CONFIG_CHELSIO_T4=m
+CONFIG_CHELSIO_T4_DCB=y
+CONFIG_CHELSIO_T4_FCOE=y
+CONFIG_CHELSIO_T4VF=m
+CONFIG_CHELSIO_LIB=m +CONFIG_CHELSIO_INLINE_CRYPTO=y +# CONFIG_CHELSIO_IPSEC_INLINE is not set +CONFIG_NET_VENDOR_CISCO=y +CONFIG_ENIC=m +CONFIG_NET_VENDOR_CORTINA=y +# CONFIG_GEMINI_ETHERNET is not set +CONFIG_NET_VENDOR_DAVICOM=y +# CONFIG_DM9051 is not set +# CONFIG_DNET is not set +CONFIG_NET_VENDOR_DEC=y +CONFIG_NET_TULIP=y +CONFIG_DE2104X=m +CONFIG_DE2104X_DSL=0 +CONFIG_TULIP=m +# CONFIG_TULIP_MWI is not set +# CONFIG_TULIP_MMIO is not set +CONFIG_TULIP_NAPI=y +CONFIG_TULIP_NAPI_HW_MITIGATION=y +CONFIG_WINBOND_840=m +CONFIG_DM9102=m +CONFIG_ULI526X=m +CONFIG_NET_VENDOR_DLINK=y +CONFIG_DL2K=m +CONFIG_SUNDANCE=m +# CONFIG_SUNDANCE_MMIO is not set +CONFIG_NET_VENDOR_EMULEX=y +CONFIG_BE2NET=m +CONFIG_BE2NET_HWMON=y +CONFIG_BE2NET_BE2=y +CONFIG_BE2NET_BE3=y +CONFIG_BE2NET_LANCER=y +CONFIG_BE2NET_SKYHAWK=y +CONFIG_NET_VENDOR_ENGLEDER=y +# CONFIG_TSNEP is not set +CONFIG_NET_VENDOR_EZCHIP=y +# CONFIG_EZCHIP_NPS_MANAGEMENT_ENET is not set +CONFIG_NET_VENDOR_FUNGIBLE=y +# CONFIG_FUN_ETH is not set +CONFIG_NET_VENDOR_GOOGLE=y +# CONFIG_GVE is not set +CONFIG_NET_VENDOR_HISILICON=y +CONFIG_HIX5HD2_GMAC=m +CONFIG_HISI_FEMAC=m +CONFIG_HIP04_ETH=m +# CONFIG_HI13X1_GMAC is not set +CONFIG_HNS_MDIO=m +# CONFIG_HNS_DSAF is not set +# CONFIG_HNS_ENET is not set +# CONFIG_HNS3 is not set +CONFIG_NET_VENDOR_HUAWEI=y +CONFIG_HINIC=m +CONFIG_NET_VENDOR_I825XX=y +CONFIG_NET_VENDOR_INTEL=y +CONFIG_E100=m +CONFIG_E1000=m +CONFIG_E1000E=m +CONFIG_IGB=m +CONFIG_IGB_HWMON=y +CONFIG_IGBVF=m +CONFIG_IXGBE=m +CONFIG_IXGBE_HWMON=y +CONFIG_IXGBE_DCB=y +CONFIG_IXGBE_IPSEC=y +CONFIG_IXGBEVF=m +CONFIG_IXGBEVF_IPSEC=y +CONFIG_I40E=m +CONFIG_I40E_DCB=y +CONFIG_IAVF=m +CONFIG_I40EVF=m +CONFIG_ICE=m +CONFIG_ICE_SWITCHDEV=y +# CONFIG_FM10K is not set +# CONFIG_IGC is not set +CONFIG_JME=m +CONFIG_NET_VENDOR_ADI=y +# CONFIG_ADIN1110 is not set +CONFIG_NET_VENDOR_LITEX=y +# CONFIG_LITEX_LITEETH is not set +CONFIG_NET_VENDOR_MARVELL=y +CONFIG_MVMDIO=m +CONFIG_MVNETA=m +CONFIG_MVPP2=m +# CONFIG_MVPP2_PTP is not set +CONFIG_SKGE=m +# CONFIG_SKGE_DEBUG is not set +CONFIG_SKGE_GENESIS=y +CONFIG_SKY2=m +# CONFIG_SKY2_DEBUG is not set +# CONFIG_OCTEONTX2_AF is not set +# CONFIG_OCTEONTX2_PF is not set +# CONFIG_OCTEON_EP is not set +# CONFIG_PRESTERA is not set +CONFIG_NET_VENDOR_MELLANOX=y +CONFIG_MLX4_EN=m +CONFIG_MLX4_EN_DCB=y +CONFIG_MLX4_CORE=m +CONFIG_MLX4_DEBUG=y +CONFIG_MLX4_CORE_GEN2=y +CONFIG_MLX5_CORE=m +CONFIG_MLX5_FPGA=y +CONFIG_MLX5_CORE_EN=y +CONFIG_MLX5_EN_ARFS=y +CONFIG_MLX5_EN_RXNFC=y +CONFIG_MLX5_MPFS=y +CONFIG_MLX5_ESWITCH=y +CONFIG_MLX5_BRIDGE=y +CONFIG_MLX5_CORE_EN_DCB=y +CONFIG_MLX5_CORE_IPOIB=y +# CONFIG_MLX5_MACSEC is not set +# CONFIG_MLX5_EN_IPSEC is not set +CONFIG_MLX5_SW_STEERING=y +# CONFIG_MLX5_SF is not set +# CONFIG_MLXSW_CORE is not set +CONFIG_MLXFW=m +# CONFIG_MLXBF_GIGE is not set +CONFIG_NET_VENDOR_MICREL=y +# CONFIG_KS8842 is not set +# CONFIG_KS8851 is not set +# CONFIG_KS8851_MLL is not set +CONFIG_KSZ884X_PCI=m +CONFIG_NET_VENDOR_MICROCHIP=y +# CONFIG_ENC28J60 is not set +# CONFIG_ENCX24J600 is not set +CONFIG_LAN743X=m +# CONFIG_LAN966X_SWITCH is not set +# CONFIG_VCAP is not set +CONFIG_NET_VENDOR_MICROSEMI=y +# CONFIG_MSCC_OCELOT_SWITCH is not set +CONFIG_NET_VENDOR_MICROSOFT=y +# CONFIG_MICROSOFT_MANA is not set +CONFIG_NET_VENDOR_MYRI=y +CONFIG_MYRI10GE=m +CONFIG_FEALNX=m +CONFIG_NET_VENDOR_NI=y +# CONFIG_NI_XGE_MANAGEMENT_ENET is not set +CONFIG_NET_VENDOR_NATSEMI=y +CONFIG_NATSEMI=m +CONFIG_NS83820=m +CONFIG_NET_VENDOR_NETERION=y +CONFIG_S2IO=m +CONFIG_NET_VENDOR_NETRONOME=y 
+CONFIG_NFP=m +CONFIG_NFP_APP_FLOWER=y +CONFIG_NFP_APP_ABM_NIC=y +CONFIG_NFP_NET_IPSEC=y +# CONFIG_NFP_DEBUG is not set +CONFIG_NET_VENDOR_8390=y +CONFIG_NE2K_PCI=m +CONFIG_NET_VENDOR_NVIDIA=y +# CONFIG_FORCEDETH is not set +CONFIG_NET_VENDOR_OKI=y +# CONFIG_ETHOC is not set +CONFIG_NET_VENDOR_PACKET_ENGINES=y +CONFIG_HAMACHI=m +CONFIG_YELLOWFIN=m +CONFIG_NET_VENDOR_PENSANDO=y +# CONFIG_IONIC is not set +CONFIG_NET_VENDOR_QLOGIC=y +CONFIG_QLA3XXX=m +CONFIG_QLCNIC=m +CONFIG_QLCNIC_SRIOV=y +CONFIG_QLCNIC_DCB=y +CONFIG_QLCNIC_HWMON=y +CONFIG_NETXEN_NIC=m +CONFIG_QED=m +CONFIG_QED_LL2=y +CONFIG_QED_SRIOV=y +CONFIG_QEDE=m +CONFIG_QED_RDMA=y +CONFIG_QED_ISCSI=y +CONFIG_QED_FCOE=y +CONFIG_QED_OOO=y +CONFIG_NET_VENDOR_BROCADE=y +CONFIG_BNA=m +CONFIG_NET_VENDOR_QUALCOMM=y +# CONFIG_QCA7000_SPI is not set +# CONFIG_QCA7000_UART is not set +CONFIG_QCOM_EMAC=m +# CONFIG_RMNET is not set +CONFIG_NET_VENDOR_RDC=y +CONFIG_R6040=m +CONFIG_NET_VENDOR_REALTEK=y +CONFIG_8139CP=m +CONFIG_8139TOO=m +# CONFIG_8139TOO_PIO is not set +CONFIG_8139TOO_TUNE_TWISTER=y +CONFIG_8139TOO_8129=y +# CONFIG_8139_OLD_RX_RESET is not set +CONFIG_R8169=m +CONFIG_NET_VENDOR_RENESAS=y +CONFIG_NET_VENDOR_ROCKER=y +# CONFIG_ROCKER is not set +CONFIG_NET_VENDOR_SAMSUNG=y +# CONFIG_SXGBE_ETH is not set +# CONFIG_NET_VENDOR_SEEQ is not set +CONFIG_NET_VENDOR_SILAN=y +CONFIG_SC92031=m +CONFIG_NET_VENDOR_SIS=y +# CONFIG_SIS900 is not set +CONFIG_SIS190=m +CONFIG_NET_VENDOR_SOLARFLARE=y +CONFIG_SFC=m +CONFIG_SFC_MTD=y +CONFIG_SFC_MCDI_MON=y +CONFIG_SFC_SRIOV=y +CONFIG_SFC_MCDI_LOGGING=y +CONFIG_SFC_FALCON=m +CONFIG_SFC_FALCON_MTD=y +# CONFIG_SFC_SIENA is not set +CONFIG_NET_VENDOR_SMSC=y +CONFIG_SMC91X=m +CONFIG_EPIC100=m +CONFIG_SMSC911X=m +CONFIG_SMSC9420=m +CONFIG_NET_VENDOR_SOCIONEXT=y +CONFIG_SNI_NETSEC=m +CONFIG_NET_VENDOR_STMICRO=y +CONFIG_STMMAC_ETH=m +# CONFIG_STMMAC_SELFTESTS is not set +CONFIG_STMMAC_PLATFORM=m +# CONFIG_DWMAC_DWC_QOS_ETH is not set +CONFIG_DWMAC_GENERIC=m +CONFIG_DWMAC_IPQ806X=m +CONFIG_DWMAC_MESON=m +CONFIG_DWMAC_QCOM_ETHQOS=m +CONFIG_DWMAC_ROCKCHIP=m +CONFIG_DWMAC_SUNXI=m +CONFIG_DWMAC_SUN8I=m +# CONFIG_DWMAC_INTEL_PLAT is not set +# CONFIG_DWMAC_TEGRA is not set +# CONFIG_STMMAC_PCI is not set +CONFIG_NET_VENDOR_SUN=y +# CONFIG_HAPPYMEAL is not set +# CONFIG_SUNGEM is not set +CONFIG_CASSINI=m +CONFIG_NIU=m +CONFIG_NET_VENDOR_SYNOPSYS=y +# CONFIG_DWC_XLGMAC is not set +CONFIG_NET_VENDOR_TEHUTI=y +CONFIG_TEHUTI=m +CONFIG_NET_VENDOR_TI=y +# CONFIG_TI_CPSW_PHY_SEL is not set +CONFIG_TLAN=m +CONFIG_NET_VENDOR_VERTEXCOM=y +# CONFIG_MSE102X is not set +CONFIG_NET_VENDOR_VIA=y +# CONFIG_VIA_RHINE is not set +CONFIG_VIA_VELOCITY=m +CONFIG_NET_VENDOR_WANGXUN=y +# CONFIG_NGBE is not set +# CONFIG_TXGBE is not set +CONFIG_NET_VENDOR_WIZNET=y +# CONFIG_WIZNET_W5100 is not set +# CONFIG_WIZNET_W5300 is not set +CONFIG_NET_VENDOR_XILINX=y +# CONFIG_XILINX_EMACLITE is not set +# CONFIG_XILINX_AXI_EMAC is not set +# CONFIG_XILINX_LL_TEMAC is not set +CONFIG_FDDI=y +CONFIG_DEFXX=m +CONFIG_SKFP=m +# CONFIG_HIPPI is not set +# CONFIG_NET_SB1000 is not set +CONFIG_PHYLINK=m +CONFIG_PHYLIB=m +CONFIG_SWPHY=y +CONFIG_LED_TRIGGER_PHY=y +CONFIG_PHYLIB_LEDS=y +CONFIG_FIXED_PHY=m +CONFIG_SFP=m + +# +# MII PHY device drivers +# +CONFIG_AMD_PHY=m +CONFIG_MESON_GXL_PHY=m +CONFIG_ADIN_PHY=m +# CONFIG_ADIN1100_PHY is not set +CONFIG_AQUANTIA_PHY=m +CONFIG_AX88796B_PHY=m +CONFIG_BROADCOM_PHY=m +# CONFIG_BCM54140_PHY is not set +# CONFIG_BCM7XXX_PHY is not set +# CONFIG_BCM84881_PHY is not set +CONFIG_BCM87XX_PHY=m 
+CONFIG_BCM_NET_PHYLIB=m +CONFIG_CICADA_PHY=m +CONFIG_CORTINA_PHY=m +CONFIG_DAVICOM_PHY=m +CONFIG_ICPLUS_PHY=m +CONFIG_LXT_PHY=m +# CONFIG_INTEL_XWAY_PHY is not set +CONFIG_LSI_ET1011C_PHY=m +CONFIG_MARVELL_PHY=m +CONFIG_MARVELL_10G_PHY=m +# CONFIG_MARVELL_88Q2XXX_PHY is not set +# CONFIG_MARVELL_88X2222_PHY is not set +# CONFIG_MAXLINEAR_GPHY is not set +# CONFIG_MEDIATEK_GE_PHY is not set +CONFIG_MICREL_PHY=m +# CONFIG_MICROCHIP_T1S_PHY is not set +CONFIG_MICROCHIP_PHY=m +CONFIG_MICROCHIP_T1_PHY=m +CONFIG_MICROSEMI_PHY=m +# CONFIG_MOTORCOMM_PHY is not set +CONFIG_NATIONAL_PHY=m +# CONFIG_NXP_CBTX_PHY is not set +# CONFIG_NXP_C45_TJA11XX_PHY is not set +# CONFIG_NXP_TJA11XX_PHY is not set +# CONFIG_NCN26000_PHY is not set +CONFIG_AT803X_PHY=m +CONFIG_QSEMI_PHY=m +CONFIG_REALTEK_PHY=m +CONFIG_RENESAS_PHY=m +CONFIG_ROCKCHIP_PHY=m +CONFIG_SMSC_PHY=m +CONFIG_STE10XP=m +CONFIG_TERANETICS_PHY=m +CONFIG_DP83822_PHY=m +CONFIG_DP83TC811_PHY=m +CONFIG_DP83848_PHY=m +CONFIG_DP83867_PHY=m +# CONFIG_DP83869_PHY is not set +# CONFIG_DP83TD510_PHY is not set +CONFIG_VITESSE_PHY=m +# CONFIG_XILINX_GMII2RGMII is not set +# CONFIG_MICREL_KS8995MA is not set +# CONFIG_PSE_CONTROLLER is not set +CONFIG_CAN_DEV=m +CONFIG_CAN_VCAN=m +CONFIG_CAN_VXCAN=m +CONFIG_CAN_NETLINK=y +CONFIG_CAN_CALC_BITTIMING=y +CONFIG_CAN_RX_OFFLOAD=y +# CONFIG_CAN_CAN327 is not set +# CONFIG_CAN_FLEXCAN is not set +# CONFIG_CAN_GRCAN is not set +# CONFIG_CAN_KVASER_PCIEFD is not set +CONFIG_CAN_SLCAN=m +# CONFIG_CAN_XILINXCAN is not set +# CONFIG_CAN_C_CAN is not set +# CONFIG_CAN_CC770 is not set +# CONFIG_CAN_CTUCANFD_PCI is not set +# CONFIG_CAN_CTUCANFD_PLATFORM is not set +# CONFIG_CAN_IFI_CANFD is not set +# CONFIG_CAN_M_CAN is not set +CONFIG_CAN_PEAK_PCIEFD=m +CONFIG_CAN_SJA1000=m +CONFIG_CAN_EMS_PCI=m +# CONFIG_CAN_F81601 is not set +CONFIG_CAN_KVASER_PCI=m +CONFIG_CAN_PEAK_PCI=m +CONFIG_CAN_PEAK_PCIEC=y +CONFIG_CAN_PLX_PCI=m +CONFIG_CAN_SJA1000_ISA=m +# CONFIG_CAN_SJA1000_PLATFORM is not set +CONFIG_CAN_SOFTING=m + +# +# CAN SPI interfaces +# +# CONFIG_CAN_HI311X is not set +# CONFIG_CAN_MCP251X is not set +# CONFIG_CAN_MCP251XFD is not set +# end of CAN SPI interfaces + +# +# CAN USB interfaces +# +CONFIG_CAN_8DEV_USB=m +CONFIG_CAN_EMS_USB=m +# CONFIG_CAN_ESD_USB is not set +# CONFIG_CAN_ETAS_ES58X is not set +# CONFIG_CAN_F81604 is not set +CONFIG_CAN_GS_USB=m +CONFIG_CAN_KVASER_USB=m +CONFIG_CAN_MCBA_USB=m +CONFIG_CAN_PEAK_USB=m +CONFIG_CAN_UCAN=m +# end of CAN USB interfaces + +# CONFIG_CAN_DEBUG_DEVICES is not set +CONFIG_MDIO_DEVICE=m +CONFIG_MDIO_BUS=m +CONFIG_FWNODE_MDIO=m +CONFIG_OF_MDIO=m +CONFIG_ACPI_MDIO=m +CONFIG_MDIO_DEVRES=m +# CONFIG_MDIO_SUN4I is not set +CONFIG_MDIO_XGENE=m +# CONFIG_MDIO_BITBANG is not set +# CONFIG_MDIO_BCM_UNIMAC is not set +CONFIG_MDIO_CAVIUM=m +CONFIG_MDIO_HISI_FEMAC=m +CONFIG_MDIO_I2C=m +# CONFIG_MDIO_MVUSB is not set +# CONFIG_MDIO_MSCC_MIIM is not set +# CONFIG_MDIO_OCTEON is not set +# CONFIG_MDIO_IPQ4019 is not set +# CONFIG_MDIO_IPQ8064 is not set +CONFIG_MDIO_THUNDER=m + +# +# MDIO Multiplexers +# +CONFIG_MDIO_BUS_MUX=m +CONFIG_MDIO_BUS_MUX_MESON_G12A=m +CONFIG_MDIO_BUS_MUX_MESON_GXL=m +# CONFIG_MDIO_BUS_MUX_GPIO is not set +# CONFIG_MDIO_BUS_MUX_MULTIPLEXER is not set +CONFIG_MDIO_BUS_MUX_MMIOREG=m + +# +# PCS device drivers +# +CONFIG_PCS_XPCS=m +# end of PCS device drivers + +# CONFIG_PLIP is not set +CONFIG_PPP=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=m +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOATM=m +CONFIG_PPPOE=m +# 
CONFIG_PPPOE_HASH_BITS_1 is not set +# CONFIG_PPPOE_HASH_BITS_2 is not set +CONFIG_PPPOE_HASH_BITS_4=y +# CONFIG_PPPOE_HASH_BITS_8 is not set +CONFIG_PPPOE_HASH_BITS=4 +CONFIG_PPTP=m +CONFIG_PPPOL2TP=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m +CONFIG_SLIP=m +CONFIG_SLHC=m +CONFIG_SLIP_COMPRESSED=y +CONFIG_SLIP_SMART=y +CONFIG_SLIP_MODE_SLIP6=y + +# +# Host-side USB support is needed for USB Network Adapter support +# +CONFIG_USB_NET_DRIVERS=m +CONFIG_USB_CATC=m +CONFIG_USB_KAWETH=m +CONFIG_USB_PEGASUS=m +CONFIG_USB_RTL8150=m +CONFIG_USB_RTL8152=m +CONFIG_USB_LAN78XX=m +CONFIG_USB_USBNET=m +CONFIG_USB_NET_AX8817X=m +CONFIG_USB_NET_AX88179_178A=m +CONFIG_USB_NET_CDCETHER=m +CONFIG_USB_NET_CDC_EEM=m +CONFIG_USB_NET_CDC_NCM=m +CONFIG_USB_NET_HUAWEI_CDC_NCM=m +CONFIG_USB_NET_CDC_MBIM=m +CONFIG_USB_NET_DM9601=m +CONFIG_USB_NET_SR9700=m +CONFIG_USB_NET_SR9800=m +CONFIG_USB_NET_SMSC75XX=m +CONFIG_USB_NET_SMSC95XX=m +CONFIG_USB_NET_GL620A=m +CONFIG_USB_NET_NET1080=m +CONFIG_USB_NET_PLUSB=m +CONFIG_USB_NET_MCS7830=m +CONFIG_USB_NET_RNDIS_HOST=m +CONFIG_USB_NET_CDC_SUBSET_ENABLE=m +CONFIG_USB_NET_CDC_SUBSET=m +CONFIG_USB_ALI_M5632=y +CONFIG_USB_AN2720=y +CONFIG_USB_BELKIN=y +CONFIG_USB_ARMLINUX=y +CONFIG_USB_EPSON2888=y +CONFIG_USB_KC2190=y +CONFIG_USB_NET_ZAURUS=m +CONFIG_USB_NET_CX82310_ETH=m +CONFIG_USB_NET_KALMIA=m +CONFIG_USB_NET_QMI_WWAN=m +CONFIG_USB_HSO=m +CONFIG_USB_NET_INT51X1=m +CONFIG_USB_CDC_PHONET=m +CONFIG_USB_IPHETH=m +CONFIG_USB_SIERRA_NET=m +CONFIG_USB_VL600=m +CONFIG_USB_NET_CH9200=m +# CONFIG_USB_NET_AQC111 is not set +CONFIG_USB_RTL8153_ECM=m +CONFIG_WLAN=y +CONFIG_WLAN_VENDOR_ADMTEK=y +CONFIG_ADM8211=m +CONFIG_ATH_COMMON=m +CONFIG_WLAN_VENDOR_ATH=y +# CONFIG_ATH_DEBUG is not set +CONFIG_ATH5K=m +# CONFIG_ATH5K_DEBUG is not set +# CONFIG_ATH5K_TRACER is not set +CONFIG_ATH5K_PCI=y +CONFIG_ATH9K_HW=m +CONFIG_ATH9K_COMMON=m +CONFIG_ATH9K_BTCOEX_SUPPORT=y +CONFIG_ATH9K=m +CONFIG_ATH9K_PCI=y +# CONFIG_ATH9K_AHB is not set +# CONFIG_ATH9K_DEBUGFS is not set +# CONFIG_ATH9K_DYNACK is not set +# CONFIG_ATH9K_WOW is not set +CONFIG_ATH9K_RFKILL=y +CONFIG_ATH9K_CHANNEL_CONTEXT=y +CONFIG_ATH9K_PCOEM=y +CONFIG_ATH9K_PCI_NO_EEPROM=m +CONFIG_ATH9K_HTC=m +# CONFIG_ATH9K_HTC_DEBUGFS is not set +# CONFIG_ATH9K_HWRNG is not set +CONFIG_CARL9170=m +CONFIG_CARL9170_LEDS=y +CONFIG_CARL9170_WPC=y +# CONFIG_CARL9170_HWRNG is not set +CONFIG_ATH6KL=m +CONFIG_ATH6KL_SDIO=m +CONFIG_ATH6KL_USB=m +# CONFIG_ATH6KL_DEBUG is not set +# CONFIG_ATH6KL_TRACING is not set +CONFIG_AR5523=m +CONFIG_WIL6210=m +CONFIG_WIL6210_ISR_COR=y +CONFIG_WIL6210_TRACING=y +CONFIG_WIL6210_DEBUGFS=y +CONFIG_ATH10K=m +CONFIG_ATH10K_CE=y +CONFIG_ATH10K_PCI=m +# CONFIG_ATH10K_AHB is not set +# CONFIG_ATH10K_SDIO is not set +CONFIG_ATH10K_USB=m +# CONFIG_ATH10K_DEBUG is not set +# CONFIG_ATH10K_DEBUGFS is not set +# CONFIG_ATH10K_TRACING is not set +CONFIG_WCN36XX=m +# CONFIG_WCN36XX_DEBUGFS is not set +# CONFIG_ATH11K is not set +# CONFIG_ATH12K is not set +CONFIG_WLAN_VENDOR_ATMEL=y +# CONFIG_ATMEL is not set +CONFIG_AT76C50X_USB=m +CONFIG_WLAN_VENDOR_BROADCOM=y +CONFIG_B43=m +CONFIG_B43_BCMA=y +CONFIG_B43_SSB=y +CONFIG_B43_BUSES_BCMA_AND_SSB=y +# CONFIG_B43_BUSES_BCMA is not set +# CONFIG_B43_BUSES_SSB is not set +CONFIG_B43_PCI_AUTOSELECT=y +CONFIG_B43_PCICORE_AUTOSELECT=y +CONFIG_B43_SDIO=y +CONFIG_B43_BCMA_PIO=y +CONFIG_B43_PIO=y +CONFIG_B43_PHY_G=y +CONFIG_B43_PHY_N=y +CONFIG_B43_PHY_LP=y +CONFIG_B43_PHY_HT=y +CONFIG_B43_LEDS=y +CONFIG_B43_HWRNG=y +# CONFIG_B43_DEBUG is not set +CONFIG_B43LEGACY=m 
+CONFIG_B43LEGACY_PCI_AUTOSELECT=y +CONFIG_B43LEGACY_PCICORE_AUTOSELECT=y +CONFIG_B43LEGACY_LEDS=y +CONFIG_B43LEGACY_HWRNG=y +CONFIG_B43LEGACY_DEBUG=y +CONFIG_B43LEGACY_DMA=y +CONFIG_B43LEGACY_PIO=y +CONFIG_B43LEGACY_DMA_AND_PIO_MODE=y +# CONFIG_B43LEGACY_DMA_MODE is not set +# CONFIG_B43LEGACY_PIO_MODE is not set +CONFIG_BRCMUTIL=m +CONFIG_BRCMSMAC=m +CONFIG_BRCMFMAC=m +CONFIG_BRCMFMAC_PROTO_BCDC=y +CONFIG_BRCMFMAC_PROTO_MSGBUF=y +CONFIG_BRCMFMAC_SDIO=y +CONFIG_BRCMFMAC_USB=y +CONFIG_BRCMFMAC_PCIE=y +# CONFIG_BRCM_TRACING is not set +# CONFIG_BRCMDBG is not set +CONFIG_WLAN_VENDOR_CISCO=y +# CONFIG_AIRO is not set +CONFIG_WLAN_VENDOR_INTEL=y +# CONFIG_IPW2100 is not set +CONFIG_IPW2200=m +CONFIG_IPW2200_MONITOR=y +CONFIG_IPW2200_RADIOTAP=y +CONFIG_IPW2200_PROMISCUOUS=y +CONFIG_IPW2200_QOS=y +# CONFIG_IPW2200_DEBUG is not set +CONFIG_LIBIPW=m +# CONFIG_LIBIPW_DEBUG is not set +CONFIG_IWLEGACY=m +CONFIG_IWL4965=m +CONFIG_IWL3945=m + +# +# iwl3945 / iwl4965 Debugging Options +# +# CONFIG_IWLEGACY_DEBUG is not set +# end of iwl3945 / iwl4965 Debugging Options + +CONFIG_IWLWIFI=m +CONFIG_IWLWIFI_LEDS=y +CONFIG_IWLDVM=m +CONFIG_IWLMVM=m +CONFIG_IWLWIFI_OPMODE_MODULAR=y + +# +# Debugging Options +# +# CONFIG_IWLWIFI_DEBUG is not set +# CONFIG_IWLWIFI_DEVICE_TRACING is not set +# end of Debugging Options + +CONFIG_WLAN_VENDOR_INTERSIL=y +CONFIG_HOSTAP=m +CONFIG_HOSTAP_FIRMWARE=y +# CONFIG_HOSTAP_FIRMWARE_NVRAM is not set +CONFIG_HOSTAP_PLX=m +CONFIG_HOSTAP_PCI=m +# CONFIG_HERMES is not set +CONFIG_P54_COMMON=m +CONFIG_P54_USB=m +CONFIG_P54_PCI=m +# CONFIG_P54_SPI is not set +CONFIG_P54_LEDS=y +CONFIG_WLAN_VENDOR_MARVELL=y +CONFIG_LIBERTAS=m +CONFIG_LIBERTAS_USB=m +CONFIG_LIBERTAS_SDIO=m +# CONFIG_LIBERTAS_SPI is not set +# CONFIG_LIBERTAS_DEBUG is not set +CONFIG_LIBERTAS_MESH=y +CONFIG_LIBERTAS_THINFIRM=m +# CONFIG_LIBERTAS_THINFIRM_DEBUG is not set +CONFIG_LIBERTAS_THINFIRM_USB=m +CONFIG_MWIFIEX=m +# CONFIG_MWIFIEX_SDIO is not set +CONFIG_MWIFIEX_PCIE=m +# CONFIG_MWIFIEX_USB is not set +CONFIG_MWL8K=m +CONFIG_WLAN_VENDOR_MEDIATEK=y +CONFIG_MT7601U=m +CONFIG_MT76_CORE=m +CONFIG_MT76_LEDS=y +CONFIG_MT76_USB=m +CONFIG_MT76x02_LIB=m +CONFIG_MT76x02_USB=m +CONFIG_MT76x0_COMMON=m +CONFIG_MT76x0U=m +# CONFIG_MT76x0E is not set +CONFIG_MT76x2_COMMON=m +CONFIG_MT76x2E=m +CONFIG_MT76x2U=m +# CONFIG_MT7603E is not set +# CONFIG_MT7615E is not set +# CONFIG_MT7663U is not set +# CONFIG_MT7663S is not set +# CONFIG_MT7915E is not set +# CONFIG_MT7921E is not set +# CONFIG_MT7921S is not set +# CONFIG_MT7921U is not set +# CONFIG_MT7996E is not set +CONFIG_WLAN_VENDOR_MICROCHIP=y +# CONFIG_WILC1000_SDIO is not set +# CONFIG_WILC1000_SPI is not set +CONFIG_WLAN_VENDOR_PURELIFI=y +# CONFIG_PLFXLC is not set +CONFIG_WLAN_VENDOR_RALINK=y +CONFIG_RT2X00=m +CONFIG_RT2400PCI=m +CONFIG_RT2500PCI=m +CONFIG_RT61PCI=m +CONFIG_RT2800PCI=m +CONFIG_RT2800PCI_RT33XX=y +CONFIG_RT2800PCI_RT35XX=y +CONFIG_RT2800PCI_RT53XX=y +CONFIG_RT2800PCI_RT3290=y +CONFIG_RT2500USB=m +CONFIG_RT73USB=m +CONFIG_RT2800USB=m +CONFIG_RT2800USB_RT33XX=y +CONFIG_RT2800USB_RT35XX=y +CONFIG_RT2800USB_RT3573=y +CONFIG_RT2800USB_RT53XX=y +CONFIG_RT2800USB_RT55XX=y +# CONFIG_RT2800USB_UNKNOWN is not set +CONFIG_RT2800_LIB=m +CONFIG_RT2800_LIB_MMIO=m +CONFIG_RT2X00_LIB_MMIO=m +CONFIG_RT2X00_LIB_PCI=m +CONFIG_RT2X00_LIB_USB=m +CONFIG_RT2X00_LIB=m +CONFIG_RT2X00_LIB_FIRMWARE=y +CONFIG_RT2X00_LIB_CRYPTO=y +CONFIG_RT2X00_LIB_LEDS=y +# CONFIG_RT2X00_DEBUG is not set +CONFIG_WLAN_VENDOR_REALTEK=y +CONFIG_RTL8180=m +CONFIG_RTL8187=m +CONFIG_RTL8187_LEDS=y 
+CONFIG_RTL_CARDS=m +CONFIG_RTL8192CE=m +CONFIG_RTL8192SE=m +CONFIG_RTL8192DE=m +CONFIG_RTL8723AE=m +CONFIG_RTL8723BE=m +CONFIG_RTL8188EE=m +CONFIG_RTL8192EE=m +CONFIG_RTL8821AE=m +CONFIG_RTL8192CU=m +CONFIG_RTLWIFI=m +CONFIG_RTLWIFI_PCI=m +CONFIG_RTLWIFI_USB=m +# CONFIG_RTLWIFI_DEBUG is not set +CONFIG_RTL8192C_COMMON=m +CONFIG_RTL8723_COMMON=m +CONFIG_RTLBTCOEXIST=m +CONFIG_RTL8XXXU=m +# CONFIG_RTL8XXXU_UNTESTED is not set +CONFIG_RTW88=m +CONFIG_RTW88_CORE=m +CONFIG_RTW88_PCI=m +CONFIG_RTW88_8822B=m +CONFIG_RTW88_8822C=m +CONFIG_RTW88_8822BE=m +# CONFIG_RTW88_8822BS is not set +# CONFIG_RTW88_8822BU is not set +CONFIG_RTW88_8822CE=m +# CONFIG_RTW88_8822CS is not set +# CONFIG_RTW88_8822CU is not set +# CONFIG_RTW88_8723DE is not set +# CONFIG_RTW88_8723DS is not set +# CONFIG_RTW88_8723DU is not set +# CONFIG_RTW88_8821CE is not set +# CONFIG_RTW88_8821CS is not set +# CONFIG_RTW88_8821CU is not set +# CONFIG_RTW88_DEBUG is not set +# CONFIG_RTW88_DEBUGFS is not set +# CONFIG_RTW89 is not set +CONFIG_WLAN_VENDOR_RSI=y +CONFIG_RSI_91X=m +CONFIG_RSI_DEBUGFS=y +# CONFIG_RSI_SDIO is not set +CONFIG_RSI_USB=m +CONFIG_RSI_COEX=y +CONFIG_WLAN_VENDOR_SILABS=y +# CONFIG_WFX is not set +CONFIG_WLAN_VENDOR_ST=y +# CONFIG_CW1200 is not set +CONFIG_WLAN_VENDOR_TI=y +CONFIG_WL1251=m +CONFIG_WL1251_SPI=m +CONFIG_WL1251_SDIO=m +CONFIG_WL12XX=m +CONFIG_WL18XX=m +CONFIG_WLCORE=m +CONFIG_WLCORE_SPI=m +CONFIG_WLCORE_SDIO=m +CONFIG_WLAN_VENDOR_ZYDAS=y +# CONFIG_USB_ZD1201 is not set +CONFIG_ZD1211RW=m +# CONFIG_ZD1211RW_DEBUG is not set +CONFIG_WLAN_VENDOR_QUANTENNA=y +# CONFIG_QTNFMAC_PCIE is not set +CONFIG_USB_NET_RNDIS_WLAN=m +CONFIG_MAC80211_HWSIM=m +# CONFIG_VIRT_WIFI is not set +# CONFIG_WAN is not set +CONFIG_IEEE802154_DRIVERS=m +CONFIG_IEEE802154_FAKELB=m +CONFIG_IEEE802154_AT86RF230=m +CONFIG_IEEE802154_MRF24J40=m +CONFIG_IEEE802154_CC2520=m +CONFIG_IEEE802154_ATUSB=m +CONFIG_IEEE802154_ADF7242=m +# CONFIG_IEEE802154_CA8210 is not set +# CONFIG_IEEE802154_MCR20A is not set +CONFIG_IEEE802154_HWSIM=m + +# +# Wireless WAN +# +# CONFIG_WWAN is not set +# end of Wireless WAN + +CONFIG_XEN_NETDEV_FRONTEND=m +CONFIG_XEN_NETDEV_BACKEND=m +# CONFIG_FUJITSU_ES is not set +CONFIG_HYPERV_NET=m +# CONFIG_NETDEVSIM is not set +CONFIG_NET_FAILOVER=m +# CONFIG_ISDN is not set + +# +# Input device support +# +CONFIG_INPUT=y +CONFIG_INPUT_LEDS=y +CONFIG_INPUT_FF_MEMLESS=m +CONFIG_INPUT_SPARSEKMAP=m +CONFIG_INPUT_MATRIXKMAP=m +CONFIG_INPUT_VIVALDIFMAP=y + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +CONFIG_INPUT_JOYDEV=m +CONFIG_INPUT_EVDEV=m +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ADC is not set +CONFIG_KEYBOARD_ADP5588=m +# CONFIG_KEYBOARD_ADP5589 is not set +CONFIG_KEYBOARD_ATKBD=y +# CONFIG_KEYBOARD_QT1050 is not set +# CONFIG_KEYBOARD_QT1070 is not set +CONFIG_KEYBOARD_QT2160=m +# CONFIG_KEYBOARD_DLINK_DIR685 is not set +# CONFIG_KEYBOARD_LKKBD is not set +CONFIG_KEYBOARD_GPIO=m +# CONFIG_KEYBOARD_GPIO_POLLED is not set +# CONFIG_KEYBOARD_TCA6416 is not set +# CONFIG_KEYBOARD_TCA8418 is not set +# CONFIG_KEYBOARD_MATRIX is not set +CONFIG_KEYBOARD_LM8323=m +# CONFIG_KEYBOARD_LM8333 is not set +CONFIG_KEYBOARD_MAX7359=m +# CONFIG_KEYBOARD_MCS is not set +# CONFIG_KEYBOARD_MPR121 is not set +# CONFIG_KEYBOARD_NEWTON is not set +CONFIG_KEYBOARD_TEGRA=m +CONFIG_KEYBOARD_OPENCORES=m +# CONFIG_KEYBOARD_PINEPHONE is not set +# 
CONFIG_KEYBOARD_SAMSUNG is not set +CONFIG_KEYBOARD_STOWAWAY=m +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_SUN4I_LRADC is not set +# CONFIG_KEYBOARD_OMAP4 is not set +# CONFIG_KEYBOARD_TM2_TOUCHKEY is not set +# CONFIG_KEYBOARD_XTKBD is not set +CONFIG_KEYBOARD_CROS_EC=m +# CONFIG_KEYBOARD_CAP11XX is not set +# CONFIG_KEYBOARD_BCM is not set +# CONFIG_KEYBOARD_CYPRESS_SF is not set +CONFIG_INPUT_MOUSE=y +CONFIG_MOUSE_PS2=y +CONFIG_MOUSE_PS2_ALPS=y +CONFIG_MOUSE_PS2_BYD=y +CONFIG_MOUSE_PS2_LOGIPS2PP=y +CONFIG_MOUSE_PS2_SYNAPTICS=y +CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS=y +CONFIG_MOUSE_PS2_CYPRESS=y +CONFIG_MOUSE_PS2_TRACKPOINT=y +CONFIG_MOUSE_PS2_ELANTECH=y +CONFIG_MOUSE_PS2_ELANTECH_SMBUS=y +CONFIG_MOUSE_PS2_SENTELIC=y +# CONFIG_MOUSE_PS2_TOUCHKIT is not set +CONFIG_MOUSE_PS2_FOCALTECH=y +CONFIG_MOUSE_PS2_SMBUS=y +# CONFIG_MOUSE_SERIAL is not set +# CONFIG_MOUSE_APPLETOUCH is not set +# CONFIG_MOUSE_BCM5974 is not set +# CONFIG_MOUSE_CYAPA is not set +CONFIG_MOUSE_ELAN_I2C=m +CONFIG_MOUSE_ELAN_I2C_I2C=y +# CONFIG_MOUSE_ELAN_I2C_SMBUS is not set +# CONFIG_MOUSE_VSXXXAA is not set +# CONFIG_MOUSE_GPIO is not set +CONFIG_MOUSE_SYNAPTICS_I2C=m +CONFIG_MOUSE_SYNAPTICS_USB=m +# CONFIG_INPUT_JOYSTICK is not set +CONFIG_INPUT_TABLET=y +CONFIG_TABLET_USB_ACECAD=m +CONFIG_TABLET_USB_AIPTEK=m +CONFIG_TABLET_USB_HANWANG=m +CONFIG_TABLET_USB_KBTAB=m +CONFIG_TABLET_USB_PEGASUS=m +CONFIG_TABLET_SERIAL_WACOM4=m +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_TOUCHSCREEN_ADS7846=m +CONFIG_TOUCHSCREEN_AD7877=m +CONFIG_TOUCHSCREEN_AD7879=m +CONFIG_TOUCHSCREEN_AD7879_I2C=m +# CONFIG_TOUCHSCREEN_AD7879_SPI is not set +# CONFIG_TOUCHSCREEN_ADC is not set +# CONFIG_TOUCHSCREEN_AR1021_I2C is not set +CONFIG_TOUCHSCREEN_ATMEL_MXT=m +# CONFIG_TOUCHSCREEN_ATMEL_MXT_T37 is not set +# CONFIG_TOUCHSCREEN_AUO_PIXCIR is not set +# CONFIG_TOUCHSCREEN_BU21013 is not set +# CONFIG_TOUCHSCREEN_BU21029 is not set +# CONFIG_TOUCHSCREEN_CHIPONE_ICN8318 is not set +# CONFIG_TOUCHSCREEN_CHIPONE_ICN8505 is not set +# CONFIG_TOUCHSCREEN_CY8CTMA140 is not set +# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set +# CONFIG_TOUCHSCREEN_CYTTSP_CORE is not set +# CONFIG_TOUCHSCREEN_CYTTSP4_CORE is not set +# CONFIG_TOUCHSCREEN_CYTTSP5 is not set +CONFIG_TOUCHSCREEN_DYNAPRO=m +CONFIG_TOUCHSCREEN_HAMPSHIRE=m +CONFIG_TOUCHSCREEN_EETI=m +# CONFIG_TOUCHSCREEN_EGALAX is not set +# CONFIG_TOUCHSCREEN_EGALAX_SERIAL is not set +# CONFIG_TOUCHSCREEN_EXC3000 is not set +CONFIG_TOUCHSCREEN_FUJITSU=m +CONFIG_TOUCHSCREEN_GOODIX=m +# CONFIG_TOUCHSCREEN_HIDEEP is not set +# CONFIG_TOUCHSCREEN_HYCON_HY46XX is not set +# CONFIG_TOUCHSCREEN_HYNITRON_CSTXXX is not set +# CONFIG_TOUCHSCREEN_ILI210X is not set +# CONFIG_TOUCHSCREEN_ILITEK is not set +# CONFIG_TOUCHSCREEN_S6SY761 is not set +CONFIG_TOUCHSCREEN_GUNZE=m +# CONFIG_TOUCHSCREEN_EKTF2127 is not set +CONFIG_TOUCHSCREEN_ELAN=m +CONFIG_TOUCHSCREEN_ELO=m +CONFIG_TOUCHSCREEN_WACOM_W8001=m +# CONFIG_TOUCHSCREEN_WACOM_I2C is not set +# CONFIG_TOUCHSCREEN_MAX11801 is not set +CONFIG_TOUCHSCREEN_MCS5000=m +# CONFIG_TOUCHSCREEN_MMS114 is not set +# CONFIG_TOUCHSCREEN_MELFAS_MIP4 is not set +# CONFIG_TOUCHSCREEN_MSG2638 is not set +CONFIG_TOUCHSCREEN_MTOUCH=m +# CONFIG_TOUCHSCREEN_NOVATEK_NVT_TS is not set +# CONFIG_TOUCHSCREEN_IMAGIS is not set +# CONFIG_TOUCHSCREEN_IMX6UL_TSC is not set +CONFIG_TOUCHSCREEN_INEXIO=m +CONFIG_TOUCHSCREEN_PENMOUNT=m +# CONFIG_TOUCHSCREEN_EDT_FT5X06 is not set +CONFIG_TOUCHSCREEN_TOUCHRIGHT=m +CONFIG_TOUCHSCREEN_TOUCHWIN=m +# CONFIG_TOUCHSCREEN_PIXCIR is not set +# 
+# CONFIG_TOUCHSCREEN_WDT87XX_I2C is not set
+CONFIG_TOUCHSCREEN_WM97XX=m
+CONFIG_TOUCHSCREEN_WM9705=y
+CONFIG_TOUCHSCREEN_WM9712=y
+CONFIG_TOUCHSCREEN_WM9713=y
+CONFIG_TOUCHSCREEN_USB_COMPOSITE=m
+CONFIG_TOUCHSCREEN_USB_EGALAX=y
+CONFIG_TOUCHSCREEN_USB_PANJIT=y
+CONFIG_TOUCHSCREEN_USB_3M=y
+CONFIG_TOUCHSCREEN_USB_ITM=y
+CONFIG_TOUCHSCREEN_USB_ETURBO=y
+CONFIG_TOUCHSCREEN_USB_GUNZE=y
+CONFIG_TOUCHSCREEN_USB_DMC_TSC10=y
+CONFIG_TOUCHSCREEN_USB_IRTOUCH=y
+CONFIG_TOUCHSCREEN_USB_IDEALTEK=y
+CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH=y
+CONFIG_TOUCHSCREEN_USB_GOTOP=y
+CONFIG_TOUCHSCREEN_USB_JASTEC=y
+CONFIG_TOUCHSCREEN_USB_ELO=y
+CONFIG_TOUCHSCREEN_USB_E2I=y
+CONFIG_TOUCHSCREEN_USB_ZYTRONIC=y
+CONFIG_TOUCHSCREEN_USB_ETT_TC45USB=y
+CONFIG_TOUCHSCREEN_USB_NEXIO=y
+CONFIG_TOUCHSCREEN_USB_EASYTOUCH=y
+CONFIG_TOUCHSCREEN_TOUCHIT213=m
+# CONFIG_TOUCHSCREEN_TSC_SERIO is not set
+# CONFIG_TOUCHSCREEN_TSC2004 is not set
+# CONFIG_TOUCHSCREEN_TSC2005 is not set
+CONFIG_TOUCHSCREEN_TSC2007=m
+# CONFIG_TOUCHSCREEN_TSC2007_IIO is not set
+# CONFIG_TOUCHSCREEN_RM_TS is not set
+# CONFIG_TOUCHSCREEN_SILEAD is not set
+# CONFIG_TOUCHSCREEN_SIS_I2C is not set
+# CONFIG_TOUCHSCREEN_ST1232 is not set
+# CONFIG_TOUCHSCREEN_STMFTS is not set
+# CONFIG_TOUCHSCREEN_SUN4I is not set
+CONFIG_TOUCHSCREEN_SUR40=m
+# CONFIG_TOUCHSCREEN_SURFACE3_SPI is not set
+# CONFIG_TOUCHSCREEN_SX8654 is not set
+CONFIG_TOUCHSCREEN_TPS6507X=m
+# CONFIG_TOUCHSCREEN_ZET6223 is not set
+# CONFIG_TOUCHSCREEN_ZFORCE is not set
+# CONFIG_TOUCHSCREEN_COLIBRI_VF50 is not set
+# CONFIG_TOUCHSCREEN_ROHM_BU21023 is not set
+# CONFIG_TOUCHSCREEN_IQS5XX is not set
+# CONFIG_TOUCHSCREEN_IQS7211 is not set
+# CONFIG_TOUCHSCREEN_ZINITIX is not set
+# CONFIG_TOUCHSCREEN_HIMAX_HX83112B is not set
+CONFIG_INPUT_MISC=y
+# CONFIG_INPUT_AD714X is not set
+# CONFIG_INPUT_ATMEL_CAPTOUCH is not set
+# CONFIG_INPUT_BMA150 is not set
+# CONFIG_INPUT_E3X0_BUTTON is not set
+CONFIG_INPUT_PM8941_PWRKEY=m
+# CONFIG_INPUT_PM8XXX_VIBRATOR is not set
+# CONFIG_INPUT_MMA8450 is not set
+# CONFIG_INPUT_GPIO_BEEPER is not set
+# CONFIG_INPUT_GPIO_DECODER is not set
+# CONFIG_INPUT_GPIO_VIBRA is not set
+CONFIG_INPUT_ATI_REMOTE2=m
+CONFIG_INPUT_KEYSPAN_REMOTE=m
+# CONFIG_INPUT_KXTJ9 is not set
+CONFIG_INPUT_POWERMATE=m
+CONFIG_INPUT_YEALINK=m
+CONFIG_INPUT_CM109=m
+# CONFIG_INPUT_REGULATOR_HAPTIC is not set
+CONFIG_INPUT_AXP20X_PEK=m
+CONFIG_INPUT_UINPUT=m
+# CONFIG_INPUT_PCF8574 is not set
+# CONFIG_INPUT_PWM_BEEPER is not set
+# CONFIG_INPUT_PWM_VIBRA is not set
+# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set
+# CONFIG_INPUT_DA7280_HAPTICS is not set
+# CONFIG_INPUT_ADXL34X is not set
+# CONFIG_INPUT_IBM_PANEL is not set
+# CONFIG_INPUT_IMS_PCU is not set
+# CONFIG_INPUT_IQS269A is not set
+# CONFIG_INPUT_IQS626A is not set
+# CONFIG_INPUT_IQS7222 is not set
+# CONFIG_INPUT_CMA3000 is not set
+CONFIG_INPUT_XEN_KBDDEV_FRONTEND=y
+# CONFIG_INPUT_SOC_BUTTON_ARRAY is not set
+# CONFIG_INPUT_DRV260X_HAPTICS is not set
+# CONFIG_INPUT_DRV2665_HAPTICS is not set
+# CONFIG_INPUT_DRV2667_HAPTICS is not set
+CONFIG_INPUT_HISI_POWERKEY=m
+CONFIG_RMI4_CORE=m
+# CONFIG_RMI4_I2C is not set
+# CONFIG_RMI4_SPI is not set
+# CONFIG_RMI4_SMB is not set
+CONFIG_RMI4_F03=y
+CONFIG_RMI4_F03_SERIO=m
+CONFIG_RMI4_2D_SENSOR=y
+CONFIG_RMI4_F11=y
+CONFIG_RMI4_F12=y
+CONFIG_RMI4_F30=y
+CONFIG_RMI4_F34=y
+# CONFIG_RMI4_F3A is not set
+# CONFIG_RMI4_F54 is not set
+CONFIG_RMI4_F55=y
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+CONFIG_SERIO_SERPORT=y
+# CONFIG_SERIO_PARKBD is not set
+# CONFIG_SERIO_AMBAKMI is not set
+# CONFIG_SERIO_PCIPS2 is not set
+CONFIG_SERIO_LIBPS2=y
+# CONFIG_SERIO_RAW is not set
+CONFIG_SERIO_ALTERA_PS2=m
+# CONFIG_SERIO_PS2MULT is not set
+# CONFIG_SERIO_ARC_PS2 is not set
+# CONFIG_SERIO_APBPS2 is not set
+CONFIG_HYPERV_KEYBOARD=m
+# CONFIG_SERIO_SUN4I_PS2 is not set
+# CONFIG_SERIO_GPIO_PS2 is not set
+# CONFIG_USERIO is not set
+# CONFIG_GAMEPORT is not set
+# end of Hardware I/O ports
+# end of Input device support
+
+#
+# Character devices
+#
+CONFIG_TTY=y
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_VT_CONSOLE_SLEEP=y
+CONFIG_HW_CONSOLE=y
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_LEGACY_TIOCSTI=y
+CONFIG_LDISC_AUTOLOAD=y
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_EARLYCON=y
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_PNP=y
+CONFIG_SERIAL_8250_16550A_VARIANTS=y
+# CONFIG_SERIAL_8250_FINTEK is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_DMA=y
+CONFIG_SERIAL_8250_PCILIB=y
+CONFIG_SERIAL_8250_PCI=y
+CONFIG_SERIAL_8250_EXAR=m
+CONFIG_SERIAL_8250_NR_UARTS=4
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+CONFIG_SERIAL_8250_EXTENDED=y
+# CONFIG_SERIAL_8250_MANY_PORTS is not set
+# CONFIG_SERIAL_8250_PCI1XXXX is not set
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+# CONFIG_SERIAL_8250_DETECT_IRQ is not set
+# CONFIG_SERIAL_8250_RSA is not set
+CONFIG_SERIAL_8250_DWLIB=y
+CONFIG_SERIAL_8250_FSL=y
+CONFIG_SERIAL_8250_DW=y
+# CONFIG_SERIAL_8250_RT288X is not set
+CONFIG_SERIAL_8250_PERICOM=y
+CONFIG_SERIAL_8250_TEGRA=y
+CONFIG_SERIAL_OF_PLATFORM=y
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_AMBA_PL010=y
+CONFIG_SERIAL_AMBA_PL010_CONSOLE=y
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+# CONFIG_SERIAL_EARLYCON_SEMIHOST is not set
+CONFIG_SERIAL_MESON=y
+CONFIG_SERIAL_MESON_CONSOLE=y
+CONFIG_SERIAL_TEGRA=y
+# CONFIG_SERIAL_TEGRA_TCU is not set
+# CONFIG_SERIAL_MAX3100 is not set
+# CONFIG_SERIAL_MAX310X is not set
+# CONFIG_SERIAL_UARTLITE is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_JSM is not set
+CONFIG_SERIAL_MSM=y
+CONFIG_SERIAL_MSM_CONSOLE=y
+# CONFIG_SERIAL_SIFIVE is not set
+# CONFIG_SERIAL_SCCNXP is not set
+# CONFIG_SERIAL_SC16IS7XX is not set
+# CONFIG_SERIAL_ALTERA_JTAGUART is not set
+# CONFIG_SERIAL_ALTERA_UART is not set
+CONFIG_SERIAL_XILINX_PS_UART=y
+CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
+# CONFIG_SERIAL_ARC is not set
+CONFIG_SERIAL_RP2=m
+CONFIG_SERIAL_RP2_NR_UARTS=32
+# CONFIG_SERIAL_FSL_LPUART is not set
+# CONFIG_SERIAL_FSL_LINFLEXUART is not set
+# CONFIG_SERIAL_CONEXANT_DIGICOLOR is not set
+# CONFIG_SERIAL_SPRD is not set
+CONFIG_SERIAL_MVEBU_UART=y
+CONFIG_SERIAL_MVEBU_CONSOLE=y
+# end of Serial drivers
+
+CONFIG_SERIAL_MCTRL_GPIO=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+CONFIG_N_GSM=m
+CONFIG_NOZOMI=m
+# CONFIG_NULL_TTY is not set
+CONFIG_HVC_DRIVER=y
+CONFIG_HVC_IRQ=y
+CONFIG_HVC_XEN=y
+CONFIG_HVC_XEN_FRONTEND=y
+# CONFIG_HVC_DCC is not set
+# CONFIG_RPMSG_TTY is not set
+CONFIG_SERIAL_DEV_BUS=m
+CONFIG_TTY_PRINTK=m
+CONFIG_TTY_PRINTK_LEVEL=6
+# CONFIG_PRINTER is not set
+# CONFIG_PPDEV is not set
+CONFIG_VIRTIO_CONSOLE=m
+CONFIG_IPMI_HANDLER=m
+CONFIG_IPMI_DMI_DECODE=y
+CONFIG_IPMI_PLAT_DATA=y
+# CONFIG_IPMI_PANIC_EVENT is not set
+CONFIG_IPMI_DEVICE_INTERFACE=m
+CONFIG_IPMI_SI=m
+CONFIG_IPMI_SSIF=m
+# CONFIG_IPMI_IPMB is not set
+CONFIG_IPMI_WATCHDOG=m
+CONFIG_IPMI_POWEROFF=m
+# CONFIG_SSIF_IPMI_BMC is not set
+# CONFIG_IPMB_DEVICE_INTERFACE is not set
+CONFIG_HW_RANDOM=m +# CONFIG_HW_RANDOM_TIMERIOMEM is not set +# CONFIG_HW_RANDOM_BA431 is not set +CONFIG_HW_RANDOM_OMAP=m +CONFIG_HW_RANDOM_VIRTIO=m +CONFIG_HW_RANDOM_HISI=m +CONFIG_HW_RANDOM_HISTB=m +CONFIG_HW_RANDOM_XGENE=m +CONFIG_HW_RANDOM_MESON=m +CONFIG_HW_RANDOM_CAVIUM=m +CONFIG_HW_RANDOM_OPTEE=m +# CONFIG_HW_RANDOM_CCTRNG is not set +# CONFIG_HW_RANDOM_XIPHERA is not set +CONFIG_HW_RANDOM_ARM_SMCCC_TRNG=m +CONFIG_HW_RANDOM_CN10K=m +# CONFIG_APPLICOM is not set +CONFIG_DEVMEM=y +CONFIG_DEVPORT=y +CONFIG_TCG_TPM=m +CONFIG_HW_RANDOM_TPM=y +CONFIG_TCG_TIS_CORE=m +CONFIG_TCG_TIS=m +CONFIG_TCG_TIS_SPI=m +# CONFIG_TCG_TIS_SPI_CR50 is not set +# CONFIG_TCG_TIS_I2C is not set +# CONFIG_TCG_TIS_SYNQUACER is not set +# CONFIG_TCG_TIS_I2C_CR50 is not set +# CONFIG_TCG_TIS_I2C_ATMEL is not set +CONFIG_TCG_TIS_I2C_INFINEON=m +# CONFIG_TCG_TIS_I2C_NUVOTON is not set +CONFIG_TCG_ATMEL=m +# CONFIG_TCG_INFINEON is not set +# CONFIG_TCG_XEN is not set +CONFIG_TCG_CRB=m +CONFIG_TCG_VTPM_PROXY=m +# CONFIG_TCG_FTPM_TEE is not set +# CONFIG_TCG_TIS_ST33ZP24_I2C is not set +# CONFIG_TCG_TIS_ST33ZP24_SPI is not set +# CONFIG_XILLYBUS is not set +# CONFIG_XILLYUSB is not set +# end of Character devices + +# +# I2C support +# +CONFIG_I2C=y +CONFIG_ACPI_I2C_OPREGION=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +CONFIG_I2C_CHARDEV=m +CONFIG_I2C_MUX=m + +# +# Multiplexer I2C Chip support +# +# CONFIG_I2C_ARB_GPIO_CHALLENGE is not set +# CONFIG_I2C_MUX_GPIO is not set +# CONFIG_I2C_MUX_GPMUX is not set +# CONFIG_I2C_MUX_LTC4306 is not set +# CONFIG_I2C_MUX_PCA9541 is not set +# CONFIG_I2C_MUX_PCA954x is not set +# CONFIG_I2C_MUX_PINCTRL is not set +# CONFIG_I2C_MUX_REG is not set +# CONFIG_I2C_DEMUX_PINCTRL is not set +# CONFIG_I2C_MUX_MLXCPLD is not set +# end of Multiplexer I2C Chip support + +CONFIG_I2C_HELPER_AUTO=y +CONFIG_I2C_SMBUS=m +CONFIG_I2C_ALGOBIT=m +CONFIG_I2C_ALGOPCA=m + +# +# I2C Hardware Bus support +# + +# +# PC SMBus host controller drivers +# +# CONFIG_I2C_ALI1535 is not set +# CONFIG_I2C_ALI1563 is not set +# CONFIG_I2C_ALI15X3 is not set +# CONFIG_I2C_AMD756 is not set +# CONFIG_I2C_AMD8111 is not set +# CONFIG_I2C_AMD_MP2 is not set +# CONFIG_I2C_HIX5HD2 is not set +# CONFIG_I2C_I801 is not set +CONFIG_I2C_ISCH=m +# CONFIG_I2C_PIIX4 is not set +# CONFIG_I2C_NFORCE2 is not set +# CONFIG_I2C_NVIDIA_GPU is not set +# CONFIG_I2C_SIS5595 is not set +# CONFIG_I2C_SIS630 is not set +# CONFIG_I2C_SIS96X is not set +# CONFIG_I2C_VIA is not set +# CONFIG_I2C_VIAPRO is not set + +# +# ACPI drivers +# +# CONFIG_I2C_SCMI is not set + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_CADENCE is not set +# CONFIG_I2C_CBUS_GPIO is not set +CONFIG_I2C_DESIGNWARE_CORE=m +# CONFIG_I2C_DESIGNWARE_SLAVE is not set +CONFIG_I2C_DESIGNWARE_PLATFORM=m +# CONFIG_I2C_DESIGNWARE_PCI is not set +# CONFIG_I2C_EMEV2 is not set +CONFIG_I2C_GPIO=m +# CONFIG_I2C_GPIO_FAULT_INJECTOR is not set +# CONFIG_I2C_HISI is not set +CONFIG_I2C_MESON=m +CONFIG_I2C_MV64XXX=m +# CONFIG_I2C_NOMADIK is not set +CONFIG_I2C_OCORES=m +CONFIG_I2C_PCA_PLATFORM=m +CONFIG_I2C_PXA=m +# CONFIG_I2C_PXA_SLAVE is not set +# CONFIG_I2C_QCOM_CCI is not set +CONFIG_I2C_QUP=m +CONFIG_I2C_RK3X=m +CONFIG_I2C_SIMTEC=m +# CONFIG_I2C_SYNQUACER is not set +CONFIG_I2C_TEGRA=m +CONFIG_I2C_TEGRA_BPMP=y +# CONFIG_I2C_VERSATILE is not set +CONFIG_I2C_THUNDERX=m +# CONFIG_I2C_XILINX is not set +CONFIG_I2C_XLP9XX=m + +# +# External I2C/SMBus adapter drivers +# +CONFIG_I2C_DIOLAN_U2C=m +# CONFIG_I2C_CP2615 is not set +# 
CONFIG_I2C_PARPORT is not set +# CONFIG_I2C_PCI1XXXX is not set +CONFIG_I2C_ROBOTFUZZ_OSIF=m +CONFIG_I2C_TAOS_EVM=m +CONFIG_I2C_TINY_USB=m +CONFIG_I2C_VIPERBOARD=m + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_MLXCPLD is not set +CONFIG_I2C_CROS_EC_TUNNEL=m +CONFIG_I2C_XGENE_SLIMPRO=m +# CONFIG_I2C_VIRTIO is not set +# end of I2C Hardware Bus support + +# CONFIG_I2C_STUB is not set +CONFIG_I2C_SLAVE=y +# CONFIG_I2C_SLAVE_EEPROM is not set +# CONFIG_I2C_SLAVE_TESTUNIT is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# end of I2C support + +# CONFIG_I3C is not set +CONFIG_SPI=y +# CONFIG_SPI_DEBUG is not set +CONFIG_SPI_MASTER=y +CONFIG_SPI_MEM=y + +# +# SPI Master Controller Drivers +# +# CONFIG_SPI_ALTERA is not set +# CONFIG_SPI_AMLOGIC_SPIFC_A1 is not set +CONFIG_SPI_ARMADA_3700=m +# CONFIG_SPI_AXI_SPI_ENGINE is not set +CONFIG_SPI_BITBANG=m +CONFIG_SPI_BUTTERFLY=m +# CONFIG_SPI_CADENCE is not set +# CONFIG_SPI_CADENCE_QUADSPI is not set +# CONFIG_SPI_CADENCE_XSPI is not set +# CONFIG_SPI_DESIGNWARE is not set +# CONFIG_SPI_HISI_KUNPENG is not set +# CONFIG_SPI_HISI_SFC_V3XX is not set +# CONFIG_SPI_GPIO is not set +CONFIG_SPI_LM70_LLP=m +# CONFIG_SPI_FSL_SPI is not set +# CONFIG_SPI_MESON_SPICC is not set +CONFIG_SPI_MESON_SPIFC=m +# CONFIG_SPI_MICROCHIP_CORE is not set +# CONFIG_SPI_MICROCHIP_CORE_QSPI is not set +# CONFIG_SPI_OC_TINY is not set +# CONFIG_SPI_ORION is not set +# CONFIG_SPI_PCI1XXXX is not set +# CONFIG_SPI_PL022 is not set +# CONFIG_SPI_PXA2XX is not set +CONFIG_SPI_ROCKCHIP=m +# CONFIG_SPI_ROCKCHIP_SFC is not set +# CONFIG_SPI_QCOM_QSPI is not set +CONFIG_SPI_QUP=m +# CONFIG_SPI_SC18IS602 is not set +# CONFIG_SPI_SIFIVE is not set +# CONFIG_SPI_SN_F_OSPI is not set +# CONFIG_SPI_SUN4I is not set +# CONFIG_SPI_SUN6I is not set +# CONFIG_SPI_SYNQUACER is not set +# CONFIG_SPI_MXIC is not set +CONFIG_SPI_TEGRA210_QUAD=y +CONFIG_SPI_TEGRA114=m +CONFIG_SPI_TEGRA20_SFLASH=m +CONFIG_SPI_TEGRA20_SLINK=m +CONFIG_SPI_THUNDERX=m +# CONFIG_SPI_XCOMM is not set +# CONFIG_SPI_XILINX is not set +CONFIG_SPI_XLP=m +# CONFIG_SPI_ZYNQMP_GQSPI is not set +# CONFIG_SPI_AMD is not set + +# +# SPI Multiplexer support +# +# CONFIG_SPI_MUX is not set + +# +# SPI Protocol Masters +# +CONFIG_SPI_SPIDEV=y +# CONFIG_SPI_LOOPBACK_TEST is not set +# CONFIG_SPI_TLE62X0 is not set +# CONFIG_SPI_SLAVE is not set +CONFIG_SPI_DYNAMIC=y +CONFIG_SPMI=y +# CONFIG_SPMI_HISI3670 is not set +CONFIG_SPMI_MSM_PMIC_ARB=y +# CONFIG_HSI is not set +CONFIG_PPS=m +# CONFIG_PPS_DEBUG is not set + +# +# PPS clients support +# +# CONFIG_PPS_CLIENT_KTIMER is not set +CONFIG_PPS_CLIENT_LDISC=m +CONFIG_PPS_CLIENT_PARPORT=m +# CONFIG_PPS_CLIENT_GPIO is not set + +# +# PPS generators support +# + +# +# PTP clock support +# +CONFIG_PTP_1588_CLOCK=m +CONFIG_PTP_1588_CLOCK_OPTIONAL=m + +# +# Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks. 
+# +CONFIG_PTP_1588_CLOCK_KVM=m +# CONFIG_PTP_1588_CLOCK_IDT82P33 is not set +# CONFIG_PTP_1588_CLOCK_IDTCM is not set +# CONFIG_PTP_1588_CLOCK_MOCK is not set +# CONFIG_PTP_1588_CLOCK_OCP is not set +# end of PTP clock support + +CONFIG_PINCTRL=y +CONFIG_GENERIC_PINCTRL_GROUPS=y +CONFIG_PINMUX=y +CONFIG_GENERIC_PINMUX_FUNCTIONS=y +CONFIG_PINCONF=y +CONFIG_GENERIC_PINCONF=y +# CONFIG_DEBUG_PINCTRL is not set +CONFIG_PINCTRL_AMD=y +CONFIG_PINCTRL_AXP209=m +# CONFIG_PINCTRL_CY8C95X0 is not set +CONFIG_PINCTRL_MAX77620=y +# CONFIG_PINCTRL_MCP23S08 is not set +# CONFIG_PINCTRL_MICROCHIP_SGPIO is not set +# CONFIG_PINCTRL_OCELOT is not set +CONFIG_PINCTRL_ROCKCHIP=y +CONFIG_PINCTRL_SINGLE=y +# CONFIG_PINCTRL_STMFX is not set +# CONFIG_PINCTRL_SX150X is not set +CONFIG_PINCTRL_ZYNQMP=y +CONFIG_PINCTRL_MESON=y +CONFIG_PINCTRL_MESON_GXBB=y +CONFIG_PINCTRL_MESON_GXL=y +CONFIG_PINCTRL_MESON8_PMX=y +CONFIG_PINCTRL_MESON_AXG=y +CONFIG_PINCTRL_MESON_AXG_PMX=y +CONFIG_PINCTRL_MESON_G12A=y +CONFIG_PINCTRL_MESON_A1=y +CONFIG_PINCTRL_MESON_S4=y +CONFIG_PINCTRL_AMLOGIC_C3=y +CONFIG_PINCTRL_MVEBU=y +CONFIG_PINCTRL_ARMADA_AP806=y +CONFIG_PINCTRL_ARMADA_CP110=y +CONFIG_PINCTRL_AC5=y +CONFIG_PINCTRL_ARMADA_37XX=y +CONFIG_PINCTRL_MSM=y +# CONFIG_PINCTRL_IPQ5018 is not set +# CONFIG_PINCTRL_IPQ5332 is not set +# CONFIG_PINCTRL_IPQ8074 is not set +# CONFIG_PINCTRL_IPQ6018 is not set +# CONFIG_PINCTRL_IPQ9574 is not set +# CONFIG_PINCTRL_MDM9607 is not set +CONFIG_PINCTRL_MSM8916=y +# CONFIG_PINCTRL_MSM8953 is not set +# CONFIG_PINCTRL_MSM8976 is not set +# CONFIG_PINCTRL_MSM8994 is not set +CONFIG_PINCTRL_MSM8996=y +# CONFIG_PINCTRL_MSM8998 is not set +# CONFIG_PINCTRL_QCM2290 is not set +# CONFIG_PINCTRL_QCS404 is not set +# CONFIG_PINCTRL_QDF2XXX is not set +# CONFIG_PINCTRL_QDU1000 is not set +# CONFIG_PINCTRL_SA8775P is not set +# CONFIG_PINCTRL_SC7180 is not set +# CONFIG_PINCTRL_SC7280 is not set +# CONFIG_PINCTRL_SC8180X is not set +# CONFIG_PINCTRL_SC8280XP is not set +# CONFIG_PINCTRL_SDM660 is not set +# CONFIG_PINCTRL_SDM670 is not set +# CONFIG_PINCTRL_SDM845 is not set +# CONFIG_PINCTRL_SDX75 is not set +# CONFIG_PINCTRL_SM6115 is not set +# CONFIG_PINCTRL_SM6125 is not set +# CONFIG_PINCTRL_SM6350 is not set +# CONFIG_PINCTRL_SM6375 is not set +# CONFIG_PINCTRL_SM7150 is not set +# CONFIG_PINCTRL_SM8150 is not set +# CONFIG_PINCTRL_SM8250 is not set +# CONFIG_PINCTRL_SM8350 is not set +# CONFIG_PINCTRL_SM8450 is not set +# CONFIG_PINCTRL_SM8550 is not set +CONFIG_PINCTRL_QCOM_SPMI_PMIC=y +CONFIG_PINCTRL_QCOM_SSBI_PMIC=y +# CONFIG_PINCTRL_LPASS_LPI is not set + +# +# Renesas pinctrl drivers +# +# end of Renesas pinctrl drivers + +CONFIG_PINCTRL_SUNXI=y +# CONFIG_PINCTRL_SUN4I_A10 is not set +# CONFIG_PINCTRL_SUN5I is not set +# CONFIG_PINCTRL_SUN6I_A31 is not set +# CONFIG_PINCTRL_SUN6I_A31_R is not set +# CONFIG_PINCTRL_SUN8I_A23 is not set +# CONFIG_PINCTRL_SUN8I_A33 is not set +# CONFIG_PINCTRL_SUN8I_A83T is not set +# CONFIG_PINCTRL_SUN8I_A83T_R is not set +# CONFIG_PINCTRL_SUN8I_A23_R is not set +# CONFIG_PINCTRL_SUN8I_H3 is not set +CONFIG_PINCTRL_SUN8I_H3_R=y +# CONFIG_PINCTRL_SUN8I_V3S is not set +# CONFIG_PINCTRL_SUN9I_A80 is not set +# CONFIG_PINCTRL_SUN9I_A80_R is not set +# CONFIG_PINCTRL_SUN20I_D1 is not set +CONFIG_PINCTRL_SUN50I_A64=y +CONFIG_PINCTRL_SUN50I_A64_R=y +CONFIG_PINCTRL_SUN50I_A100=y +CONFIG_PINCTRL_SUN50I_A100_R=y +CONFIG_PINCTRL_SUN50I_H5=y +CONFIG_PINCTRL_SUN50I_H6=y +CONFIG_PINCTRL_SUN50I_H6_R=y +CONFIG_PINCTRL_SUN50I_H616=y +CONFIG_PINCTRL_SUN50I_H616_R=y 
+CONFIG_PINCTRL_TEGRA=y +CONFIG_PINCTRL_TEGRA124=y +CONFIG_PINCTRL_TEGRA210=y +CONFIG_PINCTRL_TEGRA194=y +CONFIG_PINCTRL_TEGRA_XUSB=y +CONFIG_GPIOLIB=y +CONFIG_GPIOLIB_FASTPATH_LIMIT=512 +CONFIG_OF_GPIO=y +CONFIG_GPIO_ACPI=y +CONFIG_GPIOLIB_IRQCHIP=y +# CONFIG_DEBUG_GPIO is not set +CONFIG_GPIO_SYSFS=y +CONFIG_GPIO_CDEV=y +CONFIG_GPIO_CDEV_V1=y +CONFIG_GPIO_GENERIC=y +CONFIG_GPIO_REGMAP=m +CONFIG_GPIO_IDIO_16=m + +# +# Memory mapped GPIO drivers +# +# CONFIG_GPIO_74XX_MMIO is not set +# CONFIG_GPIO_ALTERA is not set +# CONFIG_GPIO_AMDPT is not set +# CONFIG_GPIO_CADENCE is not set +# CONFIG_GPIO_DWAPB is not set +CONFIG_GPIO_EXAR=m +# CONFIG_GPIO_FTGPIO010 is not set +CONFIG_GPIO_GENERIC_PLATFORM=y +# CONFIG_GPIO_GRGPIO is not set +# CONFIG_GPIO_HISI is not set +# CONFIG_GPIO_HLWD is not set +# CONFIG_GPIO_LOGICVC is not set +CONFIG_GPIO_MB86S7X=m +CONFIG_GPIO_MVEBU=y +CONFIG_GPIO_PL061=y +CONFIG_GPIO_ROCKCHIP=y +# CONFIG_GPIO_SIFIVE is not set +# CONFIG_GPIO_SYSCON is not set +CONFIG_GPIO_TEGRA=y +CONFIG_GPIO_TEGRA186=y +# CONFIG_GPIO_THUNDERX is not set +CONFIG_GPIO_XGENE=y +CONFIG_GPIO_XGENE_SB=m +# CONFIG_GPIO_XILINX is not set +CONFIG_GPIO_XLP=y +CONFIG_GPIO_ZYNQ=m +CONFIG_GPIO_ZYNQMP_MODEPIN=y +# CONFIG_GPIO_AMD_FCH is not set +# end of Memory mapped GPIO drivers + +# +# I2C GPIO expanders +# +# CONFIG_GPIO_ADNP is not set +# CONFIG_GPIO_FXL6408 is not set +# CONFIG_GPIO_DS4520 is not set +# CONFIG_GPIO_GW_PLD is not set +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +CONFIG_GPIO_PCA953X=y +CONFIG_GPIO_PCA953X_IRQ=y +# CONFIG_GPIO_PCA9570 is not set +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_TPIC2810 is not set +# end of I2C GPIO expanders + +# +# MFD GPIO expanders +# +CONFIG_GPIO_MAX77620=y +# end of MFD GPIO expanders + +# +# PCI GPIO expanders +# +CONFIG_GPIO_PCI_IDIO_16=m +CONFIG_GPIO_PCIE_IDIO_24=m +# CONFIG_GPIO_RDC321X is not set +# end of PCI GPIO expanders + +# +# SPI GPIO expanders +# +# CONFIG_GPIO_74X164 is not set +# CONFIG_GPIO_MAX3191X is not set +# CONFIG_GPIO_MAX7301 is not set +# CONFIG_GPIO_MC33880 is not set +# CONFIG_GPIO_PISOSR is not set +# CONFIG_GPIO_XRA1403 is not set +# end of SPI GPIO expanders + +# +# USB GPIO expanders +# +CONFIG_GPIO_VIPERBOARD=m +# end of USB GPIO expanders + +# +# Virtual GPIO drivers +# +# CONFIG_GPIO_AGGREGATOR is not set +# CONFIG_GPIO_LATCH is not set +# CONFIG_GPIO_MOCKUP is not set +# CONFIG_GPIO_VIRTIO is not set +# CONFIG_GPIO_SIM is not set +# end of Virtual GPIO drivers + +CONFIG_W1=m +CONFIG_W1_CON=y + +# +# 1-wire Bus Masters +# +# CONFIG_W1_MASTER_MATROX is not set +CONFIG_W1_MASTER_DS2490=m +CONFIG_W1_MASTER_DS2482=m +CONFIG_W1_MASTER_GPIO=m +# CONFIG_W1_MASTER_SGI is not set +# end of 1-wire Bus Masters + +# +# 1-wire Slaves +# +CONFIG_W1_SLAVE_THERM=m +CONFIG_W1_SLAVE_SMEM=m +CONFIG_W1_SLAVE_DS2405=m +CONFIG_W1_SLAVE_DS2408=m +CONFIG_W1_SLAVE_DS2408_READBACK=y +CONFIG_W1_SLAVE_DS2413=m +CONFIG_W1_SLAVE_DS2406=m +CONFIG_W1_SLAVE_DS2423=m +CONFIG_W1_SLAVE_DS2805=m +# CONFIG_W1_SLAVE_DS2430 is not set +CONFIG_W1_SLAVE_DS2431=m +CONFIG_W1_SLAVE_DS2433=m +# CONFIG_W1_SLAVE_DS2433_CRC is not set +CONFIG_W1_SLAVE_DS2438=m +# CONFIG_W1_SLAVE_DS250X is not set +CONFIG_W1_SLAVE_DS2780=m +CONFIG_W1_SLAVE_DS2781=m +CONFIG_W1_SLAVE_DS28E04=m +CONFIG_W1_SLAVE_DS28E17=m +# end of 1-wire Slaves + +CONFIG_POWER_RESET=y +# CONFIG_POWER_RESET_BRCMSTB is not set +# CONFIG_POWER_RESET_GPIO is not set +# CONFIG_POWER_RESET_GPIO_RESTART is not set +CONFIG_POWER_RESET_HISI=y +# CONFIG_POWER_RESET_LINKSTATION is not 
set +CONFIG_POWER_RESET_MSM=y +# CONFIG_POWER_RESET_QCOM_PON is not set +# CONFIG_POWER_RESET_ODROID_GO_ULTRA_POWEROFF is not set +# CONFIG_POWER_RESET_LTC2952 is not set +# CONFIG_POWER_RESET_REGULATOR is not set +# CONFIG_POWER_RESET_RESTART is not set +CONFIG_POWER_RESET_VEXPRESS=y +CONFIG_POWER_RESET_XGENE=y +CONFIG_POWER_RESET_SYSCON=y +CONFIG_POWER_RESET_SYSCON_POWEROFF=y +# CONFIG_SYSCON_REBOOT_MODE is not set +# CONFIG_NVMEM_REBOOT_MODE is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +CONFIG_POWER_SUPPLY_HWMON=y +# CONFIG_GENERIC_ADC_BATTERY is not set +# CONFIG_IP5XXX_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_CHARGER_ADP5061 is not set +# CONFIG_BATTERY_CW2015 is not set +CONFIG_BATTERY_DS2760=m +# CONFIG_BATTERY_DS2780 is not set +# CONFIG_BATTERY_DS2781 is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_SAMSUNG_SDI is not set +CONFIG_BATTERY_SBS=m +# CONFIG_CHARGER_SBS is not set +# CONFIG_MANAGER_SBS is not set +CONFIG_BATTERY_BQ27XXX=m +# CONFIG_BATTERY_BQ27XXX_I2C is not set +CONFIG_BATTERY_BQ27XXX_HDQ=m +CONFIG_CHARGER_AXP20X=m +CONFIG_BATTERY_AXP20X=m +CONFIG_AXP20X_POWER=m +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_BATTERY_MAX1721X is not set +# CONFIG_CHARGER_ISP1704 is not set +# CONFIG_CHARGER_MAX8903 is not set +# CONFIG_CHARGER_LP8727 is not set +# CONFIG_CHARGER_GPIO is not set +# CONFIG_CHARGER_MANAGER is not set +# CONFIG_CHARGER_LT3651 is not set +# CONFIG_CHARGER_LTC4162L is not set +# CONFIG_CHARGER_DETECTOR_MAX14656 is not set +# CONFIG_CHARGER_MAX77976 is not set +CONFIG_CHARGER_QCOM_SMBB=m +# CONFIG_CHARGER_BQ2415X is not set +# CONFIG_CHARGER_BQ24190 is not set +# CONFIG_CHARGER_BQ24257 is not set +# CONFIG_CHARGER_BQ24735 is not set +# CONFIG_CHARGER_BQ2515X is not set +# CONFIG_CHARGER_BQ25890 is not set +# CONFIG_CHARGER_BQ25980 is not set +# CONFIG_CHARGER_BQ256XX is not set +# CONFIG_CHARGER_SMB347 is not set +# CONFIG_BATTERY_GAUGE_LTC2941 is not set +# CONFIG_BATTERY_GOLDFISH is not set +# CONFIG_BATTERY_RT5033 is not set +# CONFIG_CHARGER_RT9455 is not set +# CONFIG_CHARGER_RT9467 is not set +# CONFIG_CHARGER_RT9471 is not set +CONFIG_CHARGER_CROS_USBPD=m +CONFIG_CHARGER_CROS_PCHG=y +# CONFIG_CHARGER_UCS1002 is not set +# CONFIG_CHARGER_BD99954 is not set +# CONFIG_BATTERY_UG3105 is not set +# CONFIG_CHARGER_QCOM_SMB2 is not set +CONFIG_HWMON=y +CONFIG_HWMON_VID=m +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Native drivers +# +# CONFIG_SENSORS_AD7314 is not set +CONFIG_SENSORS_AD7414=m +CONFIG_SENSORS_AD7418=m +# CONFIG_SENSORS_ADM1021 is not set +# CONFIG_SENSORS_ADM1025 is not set +# CONFIG_SENSORS_ADM1026 is not set +CONFIG_SENSORS_ADM1029=m +# CONFIG_SENSORS_ADM1031 is not set +# CONFIG_SENSORS_ADM1177 is not set +CONFIG_SENSORS_ADM9240=m +# CONFIG_SENSORS_ADT7310 is not set +# CONFIG_SENSORS_ADT7410 is not set +CONFIG_SENSORS_ADT7411=m +CONFIG_SENSORS_ADT7462=m +CONFIG_SENSORS_ADT7470=m +CONFIG_SENSORS_ADT7475=m +# CONFIG_SENSORS_AHT10 is not set +# CONFIG_SENSORS_AQUACOMPUTER_D5NEXT is not set +# CONFIG_SENSORS_AS370 is not set +CONFIG_SENSORS_ASC7621=m +# CONFIG_SENSORS_AXI_FAN_CONTROL is not set +CONFIG_SENSORS_ATXP1=m +# CONFIG_SENSORS_CORSAIR_CPRO is not set +# CONFIG_SENSORS_CORSAIR_PSU is not set +# CONFIG_SENSORS_DRIVETEMP is not set +CONFIG_SENSORS_DS620=m +# CONFIG_SENSORS_DS1621 is not set +CONFIG_SENSORS_I5K_AMB=m +# CONFIG_SENSORS_F71805F is not set +CONFIG_SENSORS_F71882FG=m +CONFIG_SENSORS_F75375S=m +CONFIG_SENSORS_FTSTEUTATES=m +# 
CONFIG_SENSORS_GL518SM is not set +# CONFIG_SENSORS_GL520SM is not set +CONFIG_SENSORS_G760A=m +# CONFIG_SENSORS_G762 is not set +# CONFIG_SENSORS_GPIO_FAN is not set +# CONFIG_SENSORS_HIH6130 is not set +# CONFIG_SENSORS_HS3001 is not set +CONFIG_SENSORS_IBMAEM=m +CONFIG_SENSORS_IBMPEX=m +# CONFIG_SENSORS_IIO_HWMON is not set +# CONFIG_SENSORS_IT87 is not set +CONFIG_SENSORS_JC42=m +# CONFIG_SENSORS_POWR1220 is not set +CONFIG_SENSORS_LINEAGE=m +# CONFIG_SENSORS_LTC2945 is not set +# CONFIG_SENSORS_LTC2947_I2C is not set +# CONFIG_SENSORS_LTC2947_SPI is not set +# CONFIG_SENSORS_LTC2990 is not set +# CONFIG_SENSORS_LTC2992 is not set +CONFIG_SENSORS_LTC4151=m +CONFIG_SENSORS_LTC4215=m +# CONFIG_SENSORS_LTC4222 is not set +CONFIG_SENSORS_LTC4245=m +# CONFIG_SENSORS_LTC4260 is not set +CONFIG_SENSORS_LTC4261=m +CONFIG_SENSORS_MAX1111=m +# CONFIG_SENSORS_MAX127 is not set +CONFIG_SENSORS_MAX16065=m +# CONFIG_SENSORS_MAX1619 is not set +CONFIG_SENSORS_MAX1668=m +# CONFIG_SENSORS_MAX197 is not set +# CONFIG_SENSORS_MAX31722 is not set +# CONFIG_SENSORS_MAX31730 is not set +# CONFIG_SENSORS_MAX31760 is not set +# CONFIG_MAX31827 is not set +# CONFIG_SENSORS_MAX6620 is not set +# CONFIG_SENSORS_MAX6621 is not set +CONFIG_SENSORS_MAX6639=m +CONFIG_SENSORS_MAX6642=m +CONFIG_SENSORS_MAX6650=m +# CONFIG_SENSORS_MAX6697 is not set +# CONFIG_SENSORS_MAX31790 is not set +# CONFIG_SENSORS_MC34VR500 is not set +# CONFIG_SENSORS_MCP3021 is not set +# CONFIG_SENSORS_TC654 is not set +# CONFIG_SENSORS_TPS23861 is not set +# CONFIG_SENSORS_MR75203 is not set +CONFIG_SENSORS_ADCXX=m +# CONFIG_SENSORS_LM63 is not set +CONFIG_SENSORS_LM70=m +CONFIG_SENSORS_LM73=m +# CONFIG_SENSORS_LM75 is not set +# CONFIG_SENSORS_LM77 is not set +# CONFIG_SENSORS_LM78 is not set +# CONFIG_SENSORS_LM80 is not set +# CONFIG_SENSORS_LM83 is not set +# CONFIG_SENSORS_LM85 is not set +# CONFIG_SENSORS_LM87 is not set +# CONFIG_SENSORS_LM90 is not set +# CONFIG_SENSORS_LM92 is not set +CONFIG_SENSORS_LM93=m +# CONFIG_SENSORS_LM95234 is not set +CONFIG_SENSORS_LM95241=m +CONFIG_SENSORS_LM95245=m +# CONFIG_SENSORS_PC87360 is not set +CONFIG_SENSORS_PC87427=m +CONFIG_SENSORS_NTC_THERMISTOR=m +CONFIG_SENSORS_NCT6683=m +CONFIG_SENSORS_NCT6775_CORE=m +CONFIG_SENSORS_NCT6775=m +# CONFIG_SENSORS_NCT6775_I2C is not set +CONFIG_SENSORS_NCT7802=m +CONFIG_SENSORS_NCT7904=m +CONFIG_SENSORS_NPCM7XX=m +# CONFIG_SENSORS_NZXT_KRAKEN2 is not set +# CONFIG_SENSORS_NZXT_SMART2 is not set +# CONFIG_SENSORS_OCC_P8_I2C is not set +# CONFIG_SENSORS_PCF8591 is not set +# CONFIG_PMBUS is not set +# CONFIG_SENSORS_PWM_FAN is not set +# CONFIG_SENSORS_SBTSI is not set +# CONFIG_SENSORS_SBRMI is not set +# CONFIG_SENSORS_SHT15 is not set +CONFIG_SENSORS_SHT21=m +# CONFIG_SENSORS_SHT3x is not set +# CONFIG_SENSORS_SHT4x is not set +# CONFIG_SENSORS_SHTC1 is not set +# CONFIG_SENSORS_SIS5595 is not set +CONFIG_SENSORS_DME1737=m +CONFIG_SENSORS_EMC1403=m +CONFIG_SENSORS_EMC2103=m +# CONFIG_SENSORS_EMC2305 is not set +CONFIG_SENSORS_EMC6W201=m +# CONFIG_SENSORS_SMSC47M1 is not set +CONFIG_SENSORS_SMSC47M192=m +# CONFIG_SENSORS_SMSC47B397 is not set +CONFIG_SENSORS_SCH56XX_COMMON=m +CONFIG_SENSORS_SCH5627=m +# CONFIG_SENSORS_SCH5636 is not set +# CONFIG_SENSORS_STTS751 is not set +# CONFIG_SENSORS_ADC128D818 is not set +CONFIG_SENSORS_ADS7828=m +CONFIG_SENSORS_ADS7871=m +CONFIG_SENSORS_AMC6821=m +# CONFIG_SENSORS_INA209 is not set +# CONFIG_SENSORS_INA2XX is not set +# CONFIG_SENSORS_INA238 is not set +# CONFIG_SENSORS_INA3221 is not set +# CONFIG_SENSORS_TC74 is 
not set +CONFIG_SENSORS_THMC50=m +CONFIG_SENSORS_TMP102=m +# CONFIG_SENSORS_TMP103 is not set +# CONFIG_SENSORS_TMP108 is not set +CONFIG_SENSORS_TMP401=m +CONFIG_SENSORS_TMP421=m +# CONFIG_SENSORS_TMP464 is not set +# CONFIG_SENSORS_TMP513 is not set +# CONFIG_SENSORS_VEXPRESS is not set +# CONFIG_SENSORS_VIA686A is not set +CONFIG_SENSORS_VT1211=m +CONFIG_SENSORS_VT8231=m +CONFIG_SENSORS_W83773G=m +# CONFIG_SENSORS_W83781D is not set +CONFIG_SENSORS_W83791D=m +CONFIG_SENSORS_W83792D=m +CONFIG_SENSORS_W83793=m +CONFIG_SENSORS_W83795=m +# CONFIG_SENSORS_W83795_FANCTRL is not set +# CONFIG_SENSORS_W83L785TS is not set +CONFIG_SENSORS_W83L786NG=m +# CONFIG_SENSORS_W83627HF is not set +CONFIG_SENSORS_W83627EHF=m +CONFIG_SENSORS_XGENE=m + +# +# ACPI drivers +# +CONFIG_SENSORS_ACPI_POWER=m +CONFIG_THERMAL=y +# CONFIG_THERMAL_NETLINK is not set +CONFIG_THERMAL_STATISTICS=y +CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 +CONFIG_THERMAL_HWMON=y +CONFIG_THERMAL_OF=y +# CONFIG_THERMAL_WRITABLE_TRIPS is not set +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y +# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set +# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set +CONFIG_THERMAL_GOV_FAIR_SHARE=y +CONFIG_THERMAL_GOV_STEP_WISE=y +# CONFIG_THERMAL_GOV_BANG_BANG is not set +# CONFIG_THERMAL_GOV_USER_SPACE is not set +# CONFIG_THERMAL_GOV_POWER_ALLOCATOR is not set +CONFIG_CPU_THERMAL=y +CONFIG_CPU_FREQ_THERMAL=y +CONFIG_DEVFREQ_THERMAL=y +# CONFIG_THERMAL_EMULATION is not set +# CONFIG_THERMAL_MMIO is not set +CONFIG_HISI_THERMAL=m +# CONFIG_MAX77620_THERMAL is not set +# CONFIG_SUN8I_THERMAL is not set +CONFIG_ROCKCHIP_THERMAL=m +CONFIG_ARMADA_THERMAL=m +CONFIG_AMLOGIC_THERMAL=y + +# +# NVIDIA Tegra thermal drivers +# +CONFIG_TEGRA_SOCTHERM=y +# CONFIG_TEGRA_BPMP_THERMAL is not set +# end of NVIDIA Tegra thermal drivers + +# CONFIG_GENERIC_ADC_THERMAL is not set + +# +# Qualcomm thermal drivers +# +# CONFIG_QCOM_SPMI_ADC_TM5 is not set +CONFIG_QCOM_SPMI_TEMP_ALARM=m +# CONFIG_QCOM_LMH is not set +# end of Qualcomm thermal drivers + +CONFIG_WATCHDOG=y +CONFIG_WATCHDOG_CORE=y +# CONFIG_WATCHDOG_NOWAYOUT is not set +CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y +CONFIG_WATCHDOG_OPEN_TIMEOUT=0 +CONFIG_WATCHDOG_SYSFS=y +# CONFIG_WATCHDOG_HRTIMER_PRETIMEOUT is not set + +# +# Watchdog Pretimeout Governors +# +CONFIG_WATCHDOG_PRETIMEOUT_GOV=y +CONFIG_WATCHDOG_PRETIMEOUT_GOV_SEL=m +CONFIG_WATCHDOG_PRETIMEOUT_GOV_NOOP=m +CONFIG_WATCHDOG_PRETIMEOUT_GOV_PANIC=m +CONFIG_WATCHDOG_PRETIMEOUT_DEFAULT_GOV_NOOP=y +# CONFIG_WATCHDOG_PRETIMEOUT_DEFAULT_GOV_PANIC is not set + +# +# Watchdog Device Drivers +# +CONFIG_SOFT_WATCHDOG=m +# CONFIG_SOFT_WATCHDOG_PRETIMEOUT is not set +CONFIG_GPIO_WATCHDOG=m +CONFIG_WDAT_WDT=m +# CONFIG_XILINX_WATCHDOG is not set +# CONFIG_XILINX_WINDOW_WATCHDOG is not set +# CONFIG_ZIIRAVE_WATCHDOG is not set +CONFIG_ARM_SP805_WATCHDOG=m +CONFIG_ARM_SBSA_WATCHDOG=m +# CONFIG_ARMADA_37XX_WATCHDOG is not set +# CONFIG_CADENCE_WATCHDOG is not set +CONFIG_DW_WATCHDOG=m +CONFIG_SUNXI_WATCHDOG=m +# CONFIG_MAX63XX_WATCHDOG is not set +# CONFIG_MAX77620_WATCHDOG is not set +CONFIG_TEGRA_WATCHDOG=m +CONFIG_QCOM_WDT=m +CONFIG_MESON_GXBB_WATCHDOG=m +CONFIG_MESON_WATCHDOG=m +# CONFIG_ARM_SMC_WATCHDOG is not set +# CONFIG_PM8916_WATCHDOG is not set +# CONFIG_ALIM7101_WDT is not set +# CONFIG_I6300ESB_WDT is not set +# CONFIG_HP_WATCHDOG is not set +CONFIG_MARVELL_GTI_WDT=y +# CONFIG_MEN_A21_WDT is not set +CONFIG_XEN_WDT=m + +# +# PCI-based Watchdog Cards +# +# CONFIG_PCIPCWATCHDOG is not set +# CONFIG_WDTPCI is not set + +# +# 
USB-based Watchdog Cards +# +# CONFIG_USBPCWATCHDOG is not set +CONFIG_SSB_POSSIBLE=y +CONFIG_SSB=m +CONFIG_SSB_SPROM=y +CONFIG_SSB_BLOCKIO=y +CONFIG_SSB_PCIHOST_POSSIBLE=y +CONFIG_SSB_PCIHOST=y +CONFIG_SSB_B43_PCI_BRIDGE=y +CONFIG_SSB_SDIOHOST_POSSIBLE=y +CONFIG_SSB_SDIOHOST=y +CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y +CONFIG_SSB_DRIVER_PCICORE=y +# CONFIG_SSB_DRIVER_GPIO is not set +CONFIG_BCMA_POSSIBLE=y +CONFIG_BCMA=m +CONFIG_BCMA_BLOCKIO=y +CONFIG_BCMA_HOST_PCI_POSSIBLE=y +CONFIG_BCMA_HOST_PCI=y +# CONFIG_BCMA_HOST_SOC is not set +CONFIG_BCMA_DRIVER_PCI=y +# CONFIG_BCMA_DRIVER_GMAC_CMN is not set +# CONFIG_BCMA_DRIVER_GPIO is not set +# CONFIG_BCMA_DEBUG is not set + +# +# Multifunction device drivers +# +CONFIG_MFD_CORE=y +# CONFIG_MFD_ACT8945A is not set +# CONFIG_MFD_SUN4I_GPADC is not set +# CONFIG_MFD_AS3711 is not set +# CONFIG_MFD_SMPRO is not set +# CONFIG_MFD_AS3722 is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_AAT2870_CORE is not set +# CONFIG_MFD_ATMEL_FLEXCOM is not set +# CONFIG_MFD_ATMEL_HLCDC is not set +# CONFIG_MFD_BCM590XX is not set +# CONFIG_MFD_BD9571MWV is not set +# CONFIG_MFD_AC100 is not set +CONFIG_MFD_AXP20X=m +# CONFIG_MFD_AXP20X_I2C is not set +CONFIG_MFD_AXP20X_RSB=m +CONFIG_MFD_CROS_EC_DEV=y +# CONFIG_MFD_CS42L43_I2C is not set +# CONFIG_MFD_MADERA is not set +# CONFIG_MFD_MAX5970 is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_MFD_DA9052_SPI is not set +# CONFIG_MFD_DA9052_I2C is not set +# CONFIG_MFD_DA9055 is not set +# CONFIG_MFD_DA9062 is not set +# CONFIG_MFD_DA9063 is not set +# CONFIG_MFD_DA9150 is not set +# CONFIG_MFD_DLN2 is not set +# CONFIG_MFD_GATEWORKS_GSC is not set +# CONFIG_MFD_MC13XXX_SPI is not set +# CONFIG_MFD_MC13XXX_I2C is not set +# CONFIG_MFD_MP2629 is not set +# CONFIG_MFD_HI6421_PMIC is not set +# CONFIG_MFD_HI6421_SPMI is not set +CONFIG_MFD_HI655X_PMIC=m +# CONFIG_LPC_ICH is not set +CONFIG_LPC_SCH=m +# CONFIG_MFD_IQS62X is not set +# CONFIG_MFD_JANZ_CMODIO is not set +# CONFIG_MFD_KEMPLD is not set +# CONFIG_MFD_88PM800 is not set +# CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_88PM860X is not set +# CONFIG_MFD_MAX14577 is not set +# CONFIG_MFD_MAX77541 is not set +CONFIG_MFD_MAX77620=y +# CONFIG_MFD_MAX77650 is not set +# CONFIG_MFD_MAX77686 is not set +# CONFIG_MFD_MAX77693 is not set +# CONFIG_MFD_MAX77714 is not set +# CONFIG_MFD_MAX77843 is not set +# CONFIG_MFD_MAX8907 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8997 is not set +# CONFIG_MFD_MAX8998 is not set +# CONFIG_MFD_MT6360 is not set +# CONFIG_MFD_MT6370 is not set +# CONFIG_MFD_MT6397 is not set +# CONFIG_MFD_MENF21BMC is not set +# CONFIG_MFD_OCELOT is not set +# CONFIG_EZX_PCAP is not set +# CONFIG_MFD_CPCAP is not set +CONFIG_MFD_VIPERBOARD=m +# CONFIG_MFD_NTXEC is not set +# CONFIG_MFD_RETU is not set +# CONFIG_MFD_PCF50633 is not set +CONFIG_MFD_QCOM_RPM=m +CONFIG_MFD_SPMI_PMIC=m +# CONFIG_MFD_SY7636A is not set +# CONFIG_MFD_RDC321X is not set +# CONFIG_MFD_RT4831 is not set +# CONFIG_MFD_RT5033 is not set +# CONFIG_MFD_RT5120 is not set +# CONFIG_MFD_RC5T583 is not set +# CONFIG_MFD_RK8XX_I2C is not set +# CONFIG_MFD_RK8XX_SPI is not set +# CONFIG_MFD_RN5T618 is not set +# CONFIG_MFD_SEC_CORE is not set +# CONFIG_MFD_SI476X_CORE is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_MFD_SKY81452 is not set +# CONFIG_MFD_STMPE is not set +CONFIG_MFD_SUN6I_PRCM=y +CONFIG_MFD_SYSCON=y +# CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_LP8788 is not set +# CONFIG_MFD_TI_LMU is not set +# CONFIG_MFD_PALMAS is not set +# 
CONFIG_TPS6105X is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is not set +# CONFIG_MFD_TPS65086 is not set +# CONFIG_MFD_TPS65090 is not set +# CONFIG_MFD_TPS65217 is not set +# CONFIG_MFD_TI_LP873X is not set +# CONFIG_MFD_TI_LP87565 is not set +# CONFIG_MFD_TPS65218 is not set +# CONFIG_MFD_TPS65219 is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_TPS65910 is not set +# CONFIG_MFD_TPS65912_I2C is not set +# CONFIG_MFD_TPS65912_SPI is not set +# CONFIG_MFD_TPS6594_I2C is not set +# CONFIG_MFD_TPS6594_SPI is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_TWL6040_CORE is not set +# CONFIG_MFD_WL1273_CORE is not set +# CONFIG_MFD_LM3533 is not set +# CONFIG_MFD_TC3589X is not set +# CONFIG_MFD_TQMX86 is not set +# CONFIG_MFD_VX855 is not set +# CONFIG_MFD_LOCHNAGAR is not set +# CONFIG_MFD_ARIZONA_I2C is not set +# CONFIG_MFD_ARIZONA_SPI is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set +# CONFIG_MFD_WM831X_SPI is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_MFD_ROHM_BD718XX is not set +# CONFIG_MFD_ROHM_BD71828 is not set +# CONFIG_MFD_ROHM_BD957XMUF is not set +# CONFIG_MFD_STPMIC1 is not set +# CONFIG_MFD_STMFX is not set +# CONFIG_MFD_ATC260X_I2C is not set +# CONFIG_MFD_KHADAS_MCU is not set +# CONFIG_MFD_QCOM_PM8008 is not set +CONFIG_MFD_VEXPRESS_SYSREG=y +# CONFIG_RAVE_SP_CORE is not set +# CONFIG_MFD_INTEL_M10_BMC_SPI is not set +# CONFIG_MFD_RSMU_I2C is not set +# CONFIG_MFD_RSMU_SPI is not set +# end of Multifunction device drivers + +CONFIG_REGULATOR=y +# CONFIG_REGULATOR_DEBUG is not set +CONFIG_REGULATOR_FIXED_VOLTAGE=m +# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set +# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set +# CONFIG_REGULATOR_88PG86X is not set +# CONFIG_REGULATOR_ACT8865 is not set +# CONFIG_REGULATOR_AD5398 is not set +# CONFIG_REGULATOR_AW37503 is not set +CONFIG_REGULATOR_AXP20X=m +# CONFIG_REGULATOR_CROS_EC is not set +# CONFIG_REGULATOR_DA9121 is not set +# CONFIG_REGULATOR_DA9210 is not set +# CONFIG_REGULATOR_DA9211 is not set +CONFIG_REGULATOR_FAN53555=m +# CONFIG_REGULATOR_FAN53880 is not set +CONFIG_REGULATOR_GPIO=m +CONFIG_REGULATOR_HI655X=m +# CONFIG_REGULATOR_ISL9305 is not set +# CONFIG_REGULATOR_ISL6271A is not set +# CONFIG_REGULATOR_LP3971 is not set +# CONFIG_REGULATOR_LP3972 is not set +# CONFIG_REGULATOR_LP872X is not set +# CONFIG_REGULATOR_LP8755 is not set +# CONFIG_REGULATOR_LTC3589 is not set +# CONFIG_REGULATOR_LTC3676 is not set +# CONFIG_REGULATOR_MAX1586 is not set +CONFIG_REGULATOR_MAX77620=m +# CONFIG_REGULATOR_MAX77857 is not set +# CONFIG_REGULATOR_MAX8649 is not set +# CONFIG_REGULATOR_MAX8660 is not set +# CONFIG_REGULATOR_MAX8893 is not set +# CONFIG_REGULATOR_MAX8952 is not set +# CONFIG_REGULATOR_MAX8973 is not set +# CONFIG_REGULATOR_MAX20086 is not set +# CONFIG_REGULATOR_MAX20411 is not set +# CONFIG_REGULATOR_MAX77826 is not set +# CONFIG_REGULATOR_MCP16502 is not set +# CONFIG_REGULATOR_MP5416 is not set +# CONFIG_REGULATOR_MP8859 is not set +# CONFIG_REGULATOR_MP886X is not set +# CONFIG_REGULATOR_MPQ7920 is not set +# CONFIG_REGULATOR_MT6311 is not set +# CONFIG_REGULATOR_MT6315 is not set +# CONFIG_REGULATOR_PCA9450 is not set +# CONFIG_REGULATOR_PF8X00 is not set +# CONFIG_REGULATOR_PFUZE100 is not set +# CONFIG_REGULATOR_PV88060 is not set +# CONFIG_REGULATOR_PV88080 is not set +# CONFIG_REGULATOR_PV88090 is not set +CONFIG_REGULATOR_PWM=m +# CONFIG_REGULATOR_QCOM_REFGEN is not set +CONFIG_REGULATOR_QCOM_RPM=m 
+CONFIG_REGULATOR_QCOM_SMD_RPM=m +CONFIG_REGULATOR_QCOM_SPMI=m +# CONFIG_REGULATOR_QCOM_USB_VBUS is not set +# CONFIG_REGULATOR_RAA215300 is not set +# CONFIG_REGULATOR_RASPBERRYPI_TOUCHSCREEN_ATTINY is not set +# CONFIG_REGULATOR_RT4801 is not set +# CONFIG_REGULATOR_RT4803 is not set +# CONFIG_REGULATOR_RT5190A is not set +# CONFIG_REGULATOR_RT5739 is not set +# CONFIG_REGULATOR_RT5759 is not set +# CONFIG_REGULATOR_RT6160 is not set +# CONFIG_REGULATOR_RT6190 is not set +# CONFIG_REGULATOR_RT6245 is not set +# CONFIG_REGULATOR_RTQ2134 is not set +# CONFIG_REGULATOR_RTMV20 is not set +# CONFIG_REGULATOR_RTQ6752 is not set +# CONFIG_REGULATOR_RTQ2208 is not set +# CONFIG_REGULATOR_SLG51000 is not set +# CONFIG_REGULATOR_SY8106A is not set +# CONFIG_REGULATOR_SY8824X is not set +# CONFIG_REGULATOR_SY8827N is not set +# CONFIG_REGULATOR_TPS51632 is not set +# CONFIG_REGULATOR_TPS62360 is not set +# CONFIG_REGULATOR_TPS6286X is not set +# CONFIG_REGULATOR_TPS6287X is not set +# CONFIG_REGULATOR_TPS65023 is not set +# CONFIG_REGULATOR_TPS6507X is not set +# CONFIG_REGULATOR_TPS65132 is not set +# CONFIG_REGULATOR_TPS6524X is not set +CONFIG_REGULATOR_VCTRL=m +# CONFIG_REGULATOR_VEXPRESS is not set +# CONFIG_REGULATOR_VQMMC_IPQ4019 is not set +# CONFIG_REGULATOR_QCOM_LABIBB is not set +CONFIG_RC_CORE=m +CONFIG_LIRC=y +CONFIG_RC_MAP=m +CONFIG_RC_DECODERS=y +CONFIG_IR_IMON_DECODER=m +CONFIG_IR_JVC_DECODER=m +CONFIG_IR_MCE_KBD_DECODER=m +CONFIG_IR_NEC_DECODER=m +CONFIG_IR_RC5_DECODER=m +CONFIG_IR_RC6_DECODER=m +# CONFIG_IR_RCMM_DECODER is not set +CONFIG_IR_SANYO_DECODER=m +CONFIG_IR_SHARP_DECODER=m +CONFIG_IR_SONY_DECODER=m +CONFIG_IR_XMP_DECODER=m +CONFIG_RC_DEVICES=y +CONFIG_IR_ENE=m +# CONFIG_IR_FINTEK is not set +# CONFIG_IR_GPIO_CIR is not set +# CONFIG_IR_GPIO_TX is not set +# CONFIG_IR_HIX5HD2 is not set +CONFIG_IR_IGORPLUGUSB=m +CONFIG_IR_IGUANA=m +CONFIG_IR_IMON=m +CONFIG_IR_IMON_RAW=m +# CONFIG_IR_ITE_CIR is not set +CONFIG_IR_MCEUSB=m +# CONFIG_IR_MESON is not set +# CONFIG_IR_MESON_TX is not set +# CONFIG_IR_NUVOTON is not set +# CONFIG_IR_PWM_TX is not set +CONFIG_IR_REDRAT3=m +# CONFIG_IR_SERIAL is not set +# CONFIG_IR_SPI is not set +CONFIG_IR_STREAMZAP=m +# CONFIG_IR_SUNXI is not set +# CONFIG_IR_TOY is not set +CONFIG_IR_TTUSBIR=m +CONFIG_RC_ATI_REMOTE=m +CONFIG_RC_LOOPBACK=m +# CONFIG_RC_XBOX_DVD is not set +CONFIG_CEC_CORE=m + +# +# CEC support +# +# CONFIG_MEDIA_CEC_RC is not set +CONFIG_MEDIA_CEC_SUPPORT=y +# CONFIG_CEC_CH7322 is not set +# CONFIG_CEC_CROS_EC is not set +# CONFIG_CEC_MESON_AO is not set +# CONFIG_CEC_MESON_G12A_AO is not set +# CONFIG_CEC_GPIO is not set +# CONFIG_CEC_TEGRA is not set +CONFIG_USB_PULSE8_CEC=m +CONFIG_USB_RAINSHADOW_CEC=m +# end of CEC support + +CONFIG_MEDIA_SUPPORT=m +# CONFIG_MEDIA_SUPPORT_FILTER is not set +CONFIG_MEDIA_SUBDRV_AUTOSELECT=y + +# +# Media device types +# +CONFIG_MEDIA_CAMERA_SUPPORT=y +CONFIG_MEDIA_ANALOG_TV_SUPPORT=y +CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y +CONFIG_MEDIA_RADIO_SUPPORT=y +CONFIG_MEDIA_SDR_SUPPORT=y +CONFIG_MEDIA_PLATFORM_SUPPORT=y +CONFIG_MEDIA_TEST_SUPPORT=y +# end of Media device types + +# +# Media core support +# +CONFIG_VIDEO_DEV=m +CONFIG_MEDIA_CONTROLLER=y +CONFIG_DVB_CORE=m +# end of Media core support + +# +# Video4Linux options +# +CONFIG_VIDEO_V4L2_I2C=y +CONFIG_VIDEO_V4L2_SUBDEV_API=y +# CONFIG_VIDEO_ADV_DEBUG is not set +# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set +CONFIG_VIDEO_TUNER=m +CONFIG_V4L2_FWNODE=m +CONFIG_V4L2_ASYNC=m +# end of Video4Linux options + +# +# Media controller options +# 
+CONFIG_MEDIA_CONTROLLER_DVB=y +CONFIG_MEDIA_CONTROLLER_REQUEST_API=y +# end of Media controller options + +# +# Digital TV options +# +# CONFIG_DVB_MMAP is not set +CONFIG_DVB_NET=y +CONFIG_DVB_MAX_ADAPTERS=16 +CONFIG_DVB_DYNAMIC_MINORS=y +# CONFIG_DVB_DEMUX_SECTION_LOSS_LOG is not set +# CONFIG_DVB_ULE_DEBUG is not set +# end of Digital TV options + +# +# Media drivers +# + +# +# Media drivers +# +CONFIG_MEDIA_USB_SUPPORT=y + +# +# Webcam devices +# +CONFIG_USB_GSPCA=m +CONFIG_USB_GSPCA_BENQ=m +CONFIG_USB_GSPCA_CONEX=m +CONFIG_USB_GSPCA_CPIA1=m +CONFIG_USB_GSPCA_DTCS033=m +CONFIG_USB_GSPCA_ETOMS=m +CONFIG_USB_GSPCA_FINEPIX=m +CONFIG_USB_GSPCA_JEILINJ=m +CONFIG_USB_GSPCA_JL2005BCD=m +CONFIG_USB_GSPCA_KINECT=m +CONFIG_USB_GSPCA_KONICA=m +CONFIG_USB_GSPCA_MARS=m +CONFIG_USB_GSPCA_MR97310A=m +CONFIG_USB_GSPCA_NW80X=m +CONFIG_USB_GSPCA_OV519=m +CONFIG_USB_GSPCA_OV534=m +CONFIG_USB_GSPCA_OV534_9=m +CONFIG_USB_GSPCA_PAC207=m +CONFIG_USB_GSPCA_PAC7302=m +CONFIG_USB_GSPCA_PAC7311=m +CONFIG_USB_GSPCA_SE401=m +CONFIG_USB_GSPCA_SN9C2028=m +CONFIG_USB_GSPCA_SN9C20X=m +CONFIG_USB_GSPCA_SONIXB=m +CONFIG_USB_GSPCA_SONIXJ=m +CONFIG_USB_GSPCA_SPCA1528=m +CONFIG_USB_GSPCA_SPCA500=m +CONFIG_USB_GSPCA_SPCA501=m +CONFIG_USB_GSPCA_SPCA505=m +CONFIG_USB_GSPCA_SPCA506=m +CONFIG_USB_GSPCA_SPCA508=m +CONFIG_USB_GSPCA_SPCA561=m +CONFIG_USB_GSPCA_SQ905=m +CONFIG_USB_GSPCA_SQ905C=m +CONFIG_USB_GSPCA_SQ930X=m +CONFIG_USB_GSPCA_STK014=m +CONFIG_USB_GSPCA_STK1135=m +CONFIG_USB_GSPCA_STV0680=m +CONFIG_USB_GSPCA_SUNPLUS=m +CONFIG_USB_GSPCA_T613=m +CONFIG_USB_GSPCA_TOPRO=m +CONFIG_USB_GSPCA_TOUPTEK=m +CONFIG_USB_GSPCA_TV8532=m +CONFIG_USB_GSPCA_VC032X=m +CONFIG_USB_GSPCA_VICAM=m +CONFIG_USB_GSPCA_XIRLINK_CIT=m +CONFIG_USB_GSPCA_ZC3XX=m +CONFIG_USB_GL860=m +CONFIG_USB_M5602=m +CONFIG_USB_STV06XX=m +CONFIG_USB_PWC=m +# CONFIG_USB_PWC_DEBUG is not set +CONFIG_USB_PWC_INPUT_EVDEV=y +CONFIG_USB_S2255=m +CONFIG_VIDEO_USBTV=m +CONFIG_USB_VIDEO_CLASS=m +CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y + +# +# Analog TV USB devices +# +CONFIG_VIDEO_GO7007=m +CONFIG_VIDEO_GO7007_USB=m +CONFIG_VIDEO_GO7007_LOADER=m +CONFIG_VIDEO_GO7007_USB_S2250_BOARD=m +CONFIG_VIDEO_HDPVR=m +CONFIG_VIDEO_PVRUSB2=m +CONFIG_VIDEO_PVRUSB2_SYSFS=y +CONFIG_VIDEO_PVRUSB2_DVB=y +# CONFIG_VIDEO_PVRUSB2_DEBUGIFC is not set +CONFIG_VIDEO_STK1160=m + +# +# Analog/digital TV USB devices +# +CONFIG_VIDEO_AU0828=m +CONFIG_VIDEO_AU0828_V4L2=y +CONFIG_VIDEO_AU0828_RC=y +CONFIG_VIDEO_CX231XX=m +CONFIG_VIDEO_CX231XX_RC=y +CONFIG_VIDEO_CX231XX_ALSA=m +CONFIG_VIDEO_CX231XX_DVB=m + +# +# Digital TV USB devices +# +CONFIG_DVB_AS102=m +CONFIG_DVB_B2C2_FLEXCOP_USB=m +# CONFIG_DVB_B2C2_FLEXCOP_USB_DEBUG is not set +CONFIG_DVB_USB_V2=m +CONFIG_DVB_USB_AF9015=m +CONFIG_DVB_USB_AF9035=m +CONFIG_DVB_USB_ANYSEE=m +CONFIG_DVB_USB_AU6610=m +CONFIG_DVB_USB_AZ6007=m +CONFIG_DVB_USB_CE6230=m +CONFIG_DVB_USB_DVBSKY=m +CONFIG_DVB_USB_EC168=m +CONFIG_DVB_USB_GL861=m +CONFIG_DVB_USB_LME2510=m +CONFIG_DVB_USB_MXL111SF=m +CONFIG_DVB_USB_RTL28XXU=m +CONFIG_DVB_USB_ZD1301=m +CONFIG_DVB_USB=m +# CONFIG_DVB_USB_DEBUG is not set +CONFIG_DVB_USB_A800=m +CONFIG_DVB_USB_AF9005=m +CONFIG_DVB_USB_AF9005_REMOTE=m +CONFIG_DVB_USB_AZ6027=m +CONFIG_DVB_USB_CINERGY_T2=m +CONFIG_DVB_USB_CXUSB=m +# CONFIG_DVB_USB_CXUSB_ANALOG is not set +CONFIG_DVB_USB_DIB0700=m +CONFIG_DVB_USB_DIB3000MC=m +CONFIG_DVB_USB_DIBUSB_MB=m +CONFIG_DVB_USB_DIBUSB_MB_FAULTY=y +CONFIG_DVB_USB_DIBUSB_MC=m +CONFIG_DVB_USB_DIGITV=m +CONFIG_DVB_USB_DTT200U=m +CONFIG_DVB_USB_DTV5100=m +CONFIG_DVB_USB_DW2102=m +CONFIG_DVB_USB_GP8PSK=m 
+CONFIG_DVB_USB_M920X=m +CONFIG_DVB_USB_NOVA_T_USB2=m +CONFIG_DVB_USB_OPERA1=m +CONFIG_DVB_USB_PCTV452E=m +CONFIG_DVB_USB_TECHNISAT_USB2=m +CONFIG_DVB_USB_TTUSB2=m +CONFIG_DVB_USB_UMT_010=m +CONFIG_DVB_USB_VP702X=m +CONFIG_DVB_USB_VP7045=m +CONFIG_SMS_USB_DRV=m +CONFIG_DVB_TTUSB_BUDGET=m +CONFIG_DVB_TTUSB_DEC=m + +# +# Webcam, TV (analog/digital) USB devices +# +CONFIG_VIDEO_EM28XX=m +CONFIG_VIDEO_EM28XX_V4L2=m +CONFIG_VIDEO_EM28XX_ALSA=m +CONFIG_VIDEO_EM28XX_DVB=m +CONFIG_VIDEO_EM28XX_RC=m + +# +# Software defined radio USB devices +# +CONFIG_USB_AIRSPY=m +CONFIG_USB_HACKRF=m +CONFIG_USB_MSI2500=m +CONFIG_MEDIA_PCI_SUPPORT=y + +# +# Media capture support +# +CONFIG_VIDEO_SOLO6X10=m +CONFIG_VIDEO_TW5864=m +CONFIG_VIDEO_TW68=m +CONFIG_VIDEO_TW686X=m +# CONFIG_VIDEO_ZORAN is not set + +# +# Media capture/analog TV support +# +CONFIG_VIDEO_DT3155=m +CONFIG_VIDEO_IVTV=m +CONFIG_VIDEO_IVTV_ALSA=m +CONFIG_VIDEO_FB_IVTV=m +CONFIG_VIDEO_HEXIUM_GEMINI=m +CONFIG_VIDEO_HEXIUM_ORION=m +CONFIG_VIDEO_MXB=m + +# +# Media capture/analog/hybrid TV support +# +CONFIG_VIDEO_BT848=m +CONFIG_DVB_BT8XX=m +CONFIG_VIDEO_CX18=m +CONFIG_VIDEO_CX18_ALSA=m +CONFIG_VIDEO_CX23885=m +CONFIG_MEDIA_ALTERA_CI=m +# CONFIG_VIDEO_CX25821 is not set +CONFIG_VIDEO_CX88=m +CONFIG_VIDEO_CX88_ALSA=m +CONFIG_VIDEO_CX88_BLACKBIRD=m +CONFIG_VIDEO_CX88_DVB=m +CONFIG_VIDEO_CX88_ENABLE_VP3054=y +CONFIG_VIDEO_CX88_VP3054=m +CONFIG_VIDEO_CX88_MPEG=m +CONFIG_VIDEO_SAA7134=m +CONFIG_VIDEO_SAA7134_ALSA=m +CONFIG_VIDEO_SAA7134_RC=y +CONFIG_VIDEO_SAA7134_DVB=m +# CONFIG_VIDEO_SAA7134_GO7007 is not set +CONFIG_VIDEO_SAA7164=m + +# +# Media digital TV PCI Adapters +# +CONFIG_DVB_B2C2_FLEXCOP_PCI=m +# CONFIG_DVB_B2C2_FLEXCOP_PCI_DEBUG is not set +CONFIG_DVB_DDBRIDGE=m +# CONFIG_DVB_DDBRIDGE_MSIENABLE is not set +CONFIG_DVB_DM1105=m +CONFIG_MANTIS_CORE=m +CONFIG_DVB_MANTIS=m +CONFIG_DVB_HOPPER=m +CONFIG_DVB_NETUP_UNIDVB=m +CONFIG_DVB_NGENE=m +CONFIG_DVB_PLUTO2=m +CONFIG_DVB_PT1=m +CONFIG_DVB_PT3=m +CONFIG_DVB_SMIPCIE=m +CONFIG_DVB_BUDGET_CORE=m +CONFIG_DVB_BUDGET=m +CONFIG_DVB_BUDGET_CI=m +CONFIG_DVB_BUDGET_AV=m +# CONFIG_IPU_BRIDGE is not set +CONFIG_RADIO_ADAPTERS=m +# CONFIG_RADIO_MAXIRADIO is not set +# CONFIG_RADIO_SAA7706H is not set +CONFIG_RADIO_SHARK=m +CONFIG_RADIO_SHARK2=m +# CONFIG_RADIO_SI4713 is not set +CONFIG_RADIO_TEA575X=m +# CONFIG_RADIO_TEA5764 is not set +# CONFIG_RADIO_TEF6862 is not set +# CONFIG_RADIO_WL1273 is not set +# CONFIG_USB_DSBR is not set +CONFIG_USB_KEENE=m +CONFIG_USB_MA901=m +CONFIG_USB_MR800=m +CONFIG_USB_RAREMONO=m +CONFIG_RADIO_SI470X=m +CONFIG_USB_SI470X=m +# CONFIG_I2C_SI470X is not set +# CONFIG_RADIO_WL128X is not set +CONFIG_MEDIA_PLATFORM_DRIVERS=y +CONFIG_V4L_PLATFORM_DRIVERS=y +# CONFIG_SDR_PLATFORM_DRIVERS is not set +# CONFIG_DVB_PLATFORM_DRIVERS is not set +CONFIG_V4L_MEM2MEM_DRIVERS=y +# CONFIG_VIDEO_MEM2MEM_DEINTERLACE is not set +# CONFIG_VIDEO_MUX is not set + +# +# Allegro DVT media platform drivers +# +# CONFIG_VIDEO_ALLEGRO_DVT is not set + +# +# Amlogic media platform drivers +# +# CONFIG_VIDEO_MESON_GE2D is not set + +# +# Amphion drivers +# + +# +# Aspeed media platform drivers +# + +# +# Atmel media platform drivers +# + +# +# Cadence media platform drivers +# +# CONFIG_VIDEO_CADENCE_CSI2RX is not set +# CONFIG_VIDEO_CADENCE_CSI2TX is not set + +# +# Chips&Media media platform drivers +# + +# +# Intel media platform drivers +# + +# +# Marvell media platform drivers +# +CONFIG_VIDEO_CAFE_CCIC=m + +# +# Mediatek media platform drivers +# + +# +# Microchip Technology, Inc. 
media platform drivers +# + +# +# NVidia media platform drivers +# +# CONFIG_VIDEO_TEGRA_VDE is not set + +# +# NXP media platform drivers +# + +# +# Qualcomm media platform drivers +# +# CONFIG_VIDEO_QCOM_CAMSS is not set + +# +# Renesas media platform drivers +# + +# +# Rockchip media platform drivers +# +# CONFIG_VIDEO_ROCKCHIP_RGA is not set +# CONFIG_VIDEO_ROCKCHIP_ISP1 is not set + +# +# Samsung media platform drivers +# + +# +# STMicroelectronics media platform drivers +# + +# +# Sunxi media platform drivers +# +# CONFIG_VIDEO_SUN4I_CSI is not set +# CONFIG_VIDEO_SUN6I_CSI is not set +# CONFIG_VIDEO_SUN8I_A83T_MIPI_CSI2 is not set +# CONFIG_VIDEO_SUN8I_DEINTERLACE is not set +# CONFIG_VIDEO_SUN8I_ROTATE is not set + +# +# Texas Instruments drivers +# + +# +# Verisilicon media platform drivers +# +# CONFIG_VIDEO_HANTRO is not set + +# +# VIA media platform drivers +# + +# +# Xilinx media platform drivers +# +# CONFIG_VIDEO_XILINX is not set + +# +# MMC/SDIO DVB adapters +# +CONFIG_SMS_SDIO_DRV=m +CONFIG_V4L_TEST_DRIVERS=y +# CONFIG_VIDEO_VIM2M is not set +# CONFIG_VIDEO_VICODEC is not set +# CONFIG_VIDEO_VIMC is not set +CONFIG_VIDEO_VIVID=m +CONFIG_VIDEO_VIVID_CEC=y +CONFIG_VIDEO_VIVID_MAX_DEVS=64 +# CONFIG_VIDEO_VISL is not set +# CONFIG_DVB_TEST_DRIVERS is not set + +# +# FireWire (IEEE 1394) Adapters +# +CONFIG_DVB_FIREDTV=m +CONFIG_DVB_FIREDTV_INPUT=y +CONFIG_MEDIA_COMMON_OPTIONS=y + +# +# common driver options +# +CONFIG_CYPRESS_FIRMWARE=m +CONFIG_TTPCI_EEPROM=m +CONFIG_UVC_COMMON=m +CONFIG_VIDEO_CX2341X=m +CONFIG_VIDEO_TVEEPROM=m +CONFIG_DVB_B2C2_FLEXCOP=m +CONFIG_VIDEO_SAA7146=m +CONFIG_VIDEO_SAA7146_VV=m +CONFIG_SMS_SIANO_MDTV=m +CONFIG_SMS_SIANO_RC=y +# CONFIG_SMS_SIANO_DEBUGFS is not set +CONFIG_VIDEO_V4L2_TPG=m +CONFIG_VIDEOBUF2_CORE=m +CONFIG_VIDEOBUF2_V4L2=m +CONFIG_VIDEOBUF2_MEMOPS=m +CONFIG_VIDEOBUF2_DMA_CONTIG=m +CONFIG_VIDEOBUF2_VMALLOC=m +CONFIG_VIDEOBUF2_DMA_SG=m +CONFIG_VIDEOBUF2_DVB=m +# end of Media drivers + +# +# Media ancillary drivers +# +CONFIG_MEDIA_ATTACH=y + +# +# IR I2C driver auto-selected by 'Autoselect ancillary drivers' +# +CONFIG_VIDEO_IR_I2C=m +CONFIG_VIDEO_CAMERA_SENSOR=y +# CONFIG_VIDEO_AR0521 is not set +# CONFIG_VIDEO_HI556 is not set +# CONFIG_VIDEO_HI846 is not set +# CONFIG_VIDEO_HI847 is not set +# CONFIG_VIDEO_IMX208 is not set +# CONFIG_VIDEO_IMX214 is not set +# CONFIG_VIDEO_IMX219 is not set +# CONFIG_VIDEO_IMX258 is not set +# CONFIG_VIDEO_IMX274 is not set +# CONFIG_VIDEO_IMX290 is not set +# CONFIG_VIDEO_IMX296 is not set +# CONFIG_VIDEO_IMX319 is not set +# CONFIG_VIDEO_IMX334 is not set +# CONFIG_VIDEO_IMX335 is not set +# CONFIG_VIDEO_IMX355 is not set +# CONFIG_VIDEO_IMX412 is not set +# CONFIG_VIDEO_IMX415 is not set +# CONFIG_VIDEO_MT9M001 is not set +# CONFIG_VIDEO_MT9M111 is not set +# CONFIG_VIDEO_MT9P031 is not set +# CONFIG_VIDEO_MT9T112 is not set +CONFIG_VIDEO_MT9V011=m +# CONFIG_VIDEO_MT9V032 is not set +# CONFIG_VIDEO_MT9V111 is not set +# CONFIG_VIDEO_OG01A1B is not set +# CONFIG_VIDEO_OV01A10 is not set +# CONFIG_VIDEO_OV02A10 is not set +# CONFIG_VIDEO_OV08D10 is not set +# CONFIG_VIDEO_OV08X40 is not set +# CONFIG_VIDEO_OV13858 is not set +# CONFIG_VIDEO_OV13B10 is not set +CONFIG_VIDEO_OV2640=m +# CONFIG_VIDEO_OV2659 is not set +# CONFIG_VIDEO_OV2680 is not set +# CONFIG_VIDEO_OV2685 is not set +# CONFIG_VIDEO_OV2740 is not set +# CONFIG_VIDEO_OV4689 is not set +# CONFIG_VIDEO_OV5640 is not set +# CONFIG_VIDEO_OV5645 is not set +# CONFIG_VIDEO_OV5647 is not set +# CONFIG_VIDEO_OV5648 is not set +# 
CONFIG_VIDEO_OV5670 is not set +# CONFIG_VIDEO_OV5675 is not set +# CONFIG_VIDEO_OV5693 is not set +# CONFIG_VIDEO_OV5695 is not set +# CONFIG_VIDEO_OV6650 is not set +# CONFIG_VIDEO_OV7251 is not set +CONFIG_VIDEO_OV7640=m +CONFIG_VIDEO_OV7670=m +# CONFIG_VIDEO_OV772X is not set +# CONFIG_VIDEO_OV7740 is not set +# CONFIG_VIDEO_OV8856 is not set +# CONFIG_VIDEO_OV8858 is not set +# CONFIG_VIDEO_OV8865 is not set +# CONFIG_VIDEO_OV9282 is not set +# CONFIG_VIDEO_OV9640 is not set +# CONFIG_VIDEO_OV9650 is not set +# CONFIG_VIDEO_OV9734 is not set +# CONFIG_VIDEO_RDACM20 is not set +# CONFIG_VIDEO_RDACM21 is not set +# CONFIG_VIDEO_RJ54N1 is not set +# CONFIG_VIDEO_S5C73M3 is not set +# CONFIG_VIDEO_S5K5BAF is not set +# CONFIG_VIDEO_S5K6A3 is not set +# CONFIG_VIDEO_ST_VGXY61 is not set +# CONFIG_VIDEO_CCS is not set +# CONFIG_VIDEO_ET8EK8 is not set + +# +# Lens drivers +# +# CONFIG_VIDEO_AD5820 is not set +# CONFIG_VIDEO_AK7375 is not set +# CONFIG_VIDEO_DW9714 is not set +# CONFIG_VIDEO_DW9719 is not set +# CONFIG_VIDEO_DW9768 is not set +# CONFIG_VIDEO_DW9807_VCM is not set +# end of Lens drivers + +# +# Flash devices +# +# CONFIG_VIDEO_ADP1653 is not set +# CONFIG_VIDEO_LM3560 is not set +# CONFIG_VIDEO_LM3646 is not set +# end of Flash devices + +# +# Audio decoders, processors and mixers +# +CONFIG_VIDEO_CS3308=m +CONFIG_VIDEO_CS5345=m +CONFIG_VIDEO_CS53L32A=m +CONFIG_VIDEO_MSP3400=m +CONFIG_VIDEO_SONY_BTF_MPX=m +# CONFIG_VIDEO_TDA1997X is not set +CONFIG_VIDEO_TDA7432=m +CONFIG_VIDEO_TDA9840=m +CONFIG_VIDEO_TEA6415C=m +CONFIG_VIDEO_TEA6420=m +CONFIG_VIDEO_TLV320AIC23B=m +CONFIG_VIDEO_TVAUDIO=m +CONFIG_VIDEO_UDA1342=m +CONFIG_VIDEO_VP27SMPX=m +CONFIG_VIDEO_WM8739=m +CONFIG_VIDEO_WM8775=m +# end of Audio decoders, processors and mixers + +# +# RDS decoders +# +CONFIG_VIDEO_SAA6588=m +# end of RDS decoders + +# +# Video decoders +# +# CONFIG_VIDEO_ADV7180 is not set +# CONFIG_VIDEO_ADV7183 is not set +# CONFIG_VIDEO_ADV748X is not set +# CONFIG_VIDEO_ADV7604 is not set +# CONFIG_VIDEO_ADV7842 is not set +CONFIG_VIDEO_BT819=m +CONFIG_VIDEO_BT856=m +# CONFIG_VIDEO_BT866 is not set +# CONFIG_VIDEO_ISL7998X is not set +CONFIG_VIDEO_KS0127=m +# CONFIG_VIDEO_MAX9286 is not set +# CONFIG_VIDEO_ML86V7667 is not set +CONFIG_VIDEO_SAA7110=m +CONFIG_VIDEO_SAA711X=m +# CONFIG_VIDEO_TC358743 is not set +# CONFIG_VIDEO_TC358746 is not set +# CONFIG_VIDEO_TVP514X is not set +CONFIG_VIDEO_TVP5150=m +# CONFIG_VIDEO_TVP7002 is not set +CONFIG_VIDEO_TW2804=m +CONFIG_VIDEO_TW9903=m +CONFIG_VIDEO_TW9906=m +# CONFIG_VIDEO_TW9910 is not set +CONFIG_VIDEO_VPX3220=m + +# +# Video and audio decoders +# +CONFIG_VIDEO_SAA717X=m +CONFIG_VIDEO_CX25840=m +# end of Video decoders + +# +# Video encoders +# +CONFIG_VIDEO_ADV7170=m +CONFIG_VIDEO_ADV7175=m +# CONFIG_VIDEO_ADV7343 is not set +# CONFIG_VIDEO_ADV7393 is not set +# CONFIG_VIDEO_AK881X is not set +CONFIG_VIDEO_SAA7127=m +CONFIG_VIDEO_SAA7185=m +# CONFIG_VIDEO_THS8200 is not set +# end of Video encoders + +# +# Video improvement chips +# +CONFIG_VIDEO_UPD64031A=m +CONFIG_VIDEO_UPD64083=m +# end of Video improvement chips + +# +# Audio/Video compression chips +# +CONFIG_VIDEO_SAA6752HS=m +# end of Audio/Video compression chips + +# +# SDR tuner chips +# +# CONFIG_SDR_MAX2175 is not set +# end of SDR tuner chips + +# +# Miscellaneous helper chips +# +# CONFIG_VIDEO_I2C is not set +CONFIG_VIDEO_M52790=m +# CONFIG_VIDEO_ST_MIPID02 is not set +# CONFIG_VIDEO_THS7303 is not set +# end of Miscellaneous helper chips + +# +# Video serializers and deserializers +# +# 
CONFIG_VIDEO_DS90UB913 is not set +# CONFIG_VIDEO_DS90UB953 is not set +# CONFIG_VIDEO_DS90UB960 is not set +# end of Video serializers and deserializers + +# +# Media SPI Adapters +# +# CONFIG_CXD2880_SPI_DRV is not set +# CONFIG_VIDEO_GS1662 is not set +# end of Media SPI Adapters + +CONFIG_MEDIA_TUNER=m + +# +# Customize TV tuners +# +CONFIG_MEDIA_TUNER_E4000=m +CONFIG_MEDIA_TUNER_FC0011=m +CONFIG_MEDIA_TUNER_FC0012=m +CONFIG_MEDIA_TUNER_FC0013=m +CONFIG_MEDIA_TUNER_FC2580=m +CONFIG_MEDIA_TUNER_IT913X=m +CONFIG_MEDIA_TUNER_M88RS6000T=m +CONFIG_MEDIA_TUNER_MAX2165=m +CONFIG_MEDIA_TUNER_MC44S803=m +CONFIG_MEDIA_TUNER_MSI001=m +CONFIG_MEDIA_TUNER_MT2060=m +CONFIG_MEDIA_TUNER_MT2063=m +CONFIG_MEDIA_TUNER_MT20XX=m +CONFIG_MEDIA_TUNER_MT2131=m +CONFIG_MEDIA_TUNER_MT2266=m +CONFIG_MEDIA_TUNER_MXL301RF=m +CONFIG_MEDIA_TUNER_MXL5005S=m +CONFIG_MEDIA_TUNER_MXL5007T=m +CONFIG_MEDIA_TUNER_QM1D1B0004=m +CONFIG_MEDIA_TUNER_QM1D1C0042=m +CONFIG_MEDIA_TUNER_QT1010=m +CONFIG_MEDIA_TUNER_R820T=m +CONFIG_MEDIA_TUNER_SI2157=m +CONFIG_MEDIA_TUNER_SIMPLE=m +CONFIG_MEDIA_TUNER_TDA18212=m +CONFIG_MEDIA_TUNER_TDA18218=m +CONFIG_MEDIA_TUNER_TDA18250=m +CONFIG_MEDIA_TUNER_TDA18271=m +CONFIG_MEDIA_TUNER_TDA827X=m +CONFIG_MEDIA_TUNER_TDA8290=m +CONFIG_MEDIA_TUNER_TDA9887=m +CONFIG_MEDIA_TUNER_TEA5761=m +CONFIG_MEDIA_TUNER_TEA5767=m +CONFIG_MEDIA_TUNER_TUA9001=m +CONFIG_MEDIA_TUNER_XC2028=m +CONFIG_MEDIA_TUNER_XC4000=m +CONFIG_MEDIA_TUNER_XC5000=m +# end of Customize TV tuners + +# +# Customise DVB Frontends +# + +# +# Multistandard (satellite) frontends +# +CONFIG_DVB_M88DS3103=m +CONFIG_DVB_MXL5XX=m +CONFIG_DVB_STB0899=m +CONFIG_DVB_STB6100=m +CONFIG_DVB_STV090x=m +CONFIG_DVB_STV0910=m +CONFIG_DVB_STV6110x=m +CONFIG_DVB_STV6111=m + +# +# Multistandard (cable + terrestrial) frontends +# +CONFIG_DVB_DRXK=m +CONFIG_DVB_MN88472=m +CONFIG_DVB_MN88473=m +CONFIG_DVB_SI2165=m +CONFIG_DVB_TDA18271C2DD=m + +# +# DVB-S (satellite) frontends +# +CONFIG_DVB_CX24110=m +CONFIG_DVB_CX24116=m +CONFIG_DVB_CX24117=m +CONFIG_DVB_CX24120=m +CONFIG_DVB_CX24123=m +CONFIG_DVB_DS3000=m +CONFIG_DVB_MB86A16=m +CONFIG_DVB_MT312=m +CONFIG_DVB_S5H1420=m +CONFIG_DVB_SI21XX=m +CONFIG_DVB_STB6000=m +CONFIG_DVB_STV0288=m +CONFIG_DVB_STV0299=m +CONFIG_DVB_STV0900=m +CONFIG_DVB_STV6110=m +CONFIG_DVB_TDA10071=m +CONFIG_DVB_TDA10086=m +CONFIG_DVB_TDA8083=m +CONFIG_DVB_TDA8261=m +CONFIG_DVB_TDA826X=m +CONFIG_DVB_TS2020=m +CONFIG_DVB_TUA6100=m +CONFIG_DVB_TUNER_CX24113=m +CONFIG_DVB_TUNER_ITD1000=m +CONFIG_DVB_VES1X93=m +CONFIG_DVB_ZL10036=m +CONFIG_DVB_ZL10039=m + +# +# DVB-T (terrestrial) frontends +# +CONFIG_DVB_AF9013=m +CONFIG_DVB_AS102_FE=m +CONFIG_DVB_CX22700=m +CONFIG_DVB_CX22702=m +CONFIG_DVB_CXD2820R=m +CONFIG_DVB_CXD2841ER=m +CONFIG_DVB_DIB3000MB=m +CONFIG_DVB_DIB3000MC=m +CONFIG_DVB_DIB7000M=m +CONFIG_DVB_DIB7000P=m +# CONFIG_DVB_DIB9000 is not set +CONFIG_DVB_DRXD=m +CONFIG_DVB_EC100=m +CONFIG_DVB_GP8PSK_FE=m +CONFIG_DVB_L64781=m +CONFIG_DVB_MT352=m +CONFIG_DVB_NXT6000=m +CONFIG_DVB_RTL2830=m +CONFIG_DVB_RTL2832=m +CONFIG_DVB_RTL2832_SDR=m +# CONFIG_DVB_S5H1432 is not set +CONFIG_DVB_SI2168=m +CONFIG_DVB_SP887X=m +CONFIG_DVB_STV0367=m +CONFIG_DVB_TDA10048=m +CONFIG_DVB_TDA1004X=m +CONFIG_DVB_ZD1301_DEMOD=m +CONFIG_DVB_ZL10353=m +# CONFIG_DVB_CXD2880 is not set + +# +# DVB-C (cable) frontends +# +CONFIG_DVB_STV0297=m +CONFIG_DVB_TDA10021=m +CONFIG_DVB_TDA10023=m +CONFIG_DVB_VES1820=m + +# +# ATSC (North American/Korean Terrestrial/Cable DTV) frontends +# +CONFIG_DVB_AU8522=m +CONFIG_DVB_AU8522_DTV=m +CONFIG_DVB_AU8522_V4L=m 
+CONFIG_DVB_BCM3510=m +CONFIG_DVB_LG2160=m +CONFIG_DVB_LGDT3305=m +CONFIG_DVB_LGDT3306A=m +CONFIG_DVB_LGDT330X=m +CONFIG_DVB_MXL692=m +CONFIG_DVB_NXT200X=m +CONFIG_DVB_OR51132=m +CONFIG_DVB_OR51211=m +CONFIG_DVB_S5H1409=m +CONFIG_DVB_S5H1411=m + +# +# ISDB-T (terrestrial) frontends +# +CONFIG_DVB_DIB8000=m +CONFIG_DVB_MB86A20S=m +CONFIG_DVB_S921=m + +# +# ISDB-S (satellite) & ISDB-T (terrestrial) frontends +# +# CONFIG_DVB_MN88443X is not set +CONFIG_DVB_TC90522=m + +# +# Digital terrestrial only tuners/PLL +# +CONFIG_DVB_PLL=m +CONFIG_DVB_TUNER_DIB0070=m +CONFIG_DVB_TUNER_DIB0090=m + +# +# SEC control devices for DVB-S +# +CONFIG_DVB_A8293=m +CONFIG_DVB_AF9033=m +CONFIG_DVB_ASCOT2E=m +CONFIG_DVB_ATBM8830=m +CONFIG_DVB_HELENE=m +CONFIG_DVB_HORUS3A=m +CONFIG_DVB_ISL6405=m +CONFIG_DVB_ISL6421=m +CONFIG_DVB_ISL6423=m +CONFIG_DVB_IX2505V=m +# CONFIG_DVB_LGS8GL5 is not set +CONFIG_DVB_LGS8GXX=m +CONFIG_DVB_LNBH25=m +# CONFIG_DVB_LNBH29 is not set +CONFIG_DVB_LNBP21=m +CONFIG_DVB_LNBP22=m +CONFIG_DVB_M88RS2000=m +CONFIG_DVB_TDA665x=m +CONFIG_DVB_DRX39XYJ=m + +# +# Common Interface (EN50221) controller drivers +# +CONFIG_DVB_CXD2099=m +CONFIG_DVB_SP2=m +# end of Customise DVB Frontends + +# +# Tools to develop new frontends +# +CONFIG_DVB_DUMMY_FE=m +# end of Media ancillary drivers + +# +# Graphics support +# +CONFIG_APERTURE_HELPERS=y +CONFIG_SCREEN_INFO=y +CONFIG_VIDEO_CMDLINE=y +CONFIG_VIDEO_NOMODESET=y +# CONFIG_AUXDISPLAY is not set +# CONFIG_PANEL is not set +CONFIG_TEGRA_HOST1X_CONTEXT_BUS=y +CONFIG_TEGRA_HOST1X=m +CONFIG_TEGRA_HOST1X_FIREWALL=y +CONFIG_DRM=m +CONFIG_DRM_MIPI_DSI=y +CONFIG_DRM_KMS_HELPER=m +# CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS is not set +# CONFIG_DRM_DEBUG_MODESET_LOCK is not set +CONFIG_DRM_FBDEV_EMULATION=y +CONFIG_DRM_FBDEV_OVERALLOC=100 +# CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM is not set +CONFIG_DRM_LOAD_EDID_FIRMWARE=y +CONFIG_DRM_DP_AUX_BUS=m +CONFIG_DRM_DISPLAY_HELPER=m +CONFIG_DRM_DISPLAY_DP_HELPER=y +CONFIG_DRM_DISPLAY_HDCP_HELPER=y +CONFIG_DRM_DISPLAY_HDMI_HELPER=y +CONFIG_DRM_DP_AUX_CHARDEV=y +# CONFIG_DRM_DP_CEC is not set +CONFIG_DRM_TTM=m +CONFIG_DRM_EXEC=m +CONFIG_DRM_BUDDY=m +CONFIG_DRM_VRAM_HELPER=m +CONFIG_DRM_TTM_HELPER=m +CONFIG_DRM_GEM_DMA_HELPER=m +CONFIG_DRM_GEM_SHMEM_HELPER=m +CONFIG_DRM_SUBALLOC_HELPER=m +CONFIG_DRM_SCHED=m + +# +# I2C encoder or helper chips +# +# CONFIG_DRM_I2C_CH7006 is not set +# CONFIG_DRM_I2C_SIL164 is not set +# CONFIG_DRM_I2C_NXP_TDA998X is not set +# CONFIG_DRM_I2C_NXP_TDA9950 is not set +# end of I2C encoder or helper chips + +# +# ARM devices +# +CONFIG_DRM_HDLCD=m +# CONFIG_DRM_HDLCD_SHOW_UNDERRUN is not set +CONFIG_DRM_MALI_DISPLAY=m +# CONFIG_DRM_KOMEDA is not set +# end of ARM devices + +CONFIG_DRM_RADEON=m +# CONFIG_DRM_RADEON_USERPTR is not set +CONFIG_DRM_AMDGPU=m +CONFIG_DRM_AMDGPU_SI=y +CONFIG_DRM_AMDGPU_CIK=y +# CONFIG_DRM_AMDGPU_USERPTR is not set +# CONFIG_DRM_AMDGPU_WERROR is not set + +# +# ACP (Audio CoProcessor) Configuration +# +# CONFIG_DRM_AMD_ACP is not set +# end of ACP (Audio CoProcessor) Configuration + +# +# Display Engine Configuration +# +CONFIG_DRM_AMD_DC=y +CONFIG_DRM_AMD_DC_FP=y +# CONFIG_DRM_AMD_DC_SI is not set +# CONFIG_DRM_AMD_SECURE_DISPLAY is not set +# end of Display Engine Configuration + +# CONFIG_HSA_AMD is not set +CONFIG_DRM_NOUVEAU=m +CONFIG_NOUVEAU_PLATFORM_DRIVER=y +CONFIG_NOUVEAU_DEBUG=5 +CONFIG_NOUVEAU_DEBUG_DEFAULT=3 +# CONFIG_NOUVEAU_DEBUG_MMU is not set +# CONFIG_NOUVEAU_DEBUG_PUSH is not set +CONFIG_DRM_NOUVEAU_BACKLIGHT=y +# CONFIG_DRM_NOUVEAU_SVM is not set 
+CONFIG_DRM_VGEM=m +# CONFIG_DRM_VKMS is not set +CONFIG_DRM_ROCKCHIP=m +CONFIG_ROCKCHIP_VOP=y +# CONFIG_ROCKCHIP_VOP2 is not set +CONFIG_ROCKCHIP_ANALOGIX_DP=y +CONFIG_ROCKCHIP_CDN_DP=y +CONFIG_ROCKCHIP_DW_HDMI=y +CONFIG_ROCKCHIP_DW_MIPI_DSI=y +# CONFIG_ROCKCHIP_INNO_HDMI is not set +# CONFIG_ROCKCHIP_LVDS is not set +# CONFIG_ROCKCHIP_RGB is not set +# CONFIG_ROCKCHIP_RK3066_HDMI is not set +# CONFIG_DRM_VMWGFX is not set +CONFIG_DRM_UDL=m +CONFIG_DRM_AST=m +# CONFIG_DRM_MGAG200 is not set +CONFIG_DRM_SUN4I=m +# CONFIG_DRM_SUN6I_DSI is not set +CONFIG_DRM_SUN8I_DW_HDMI=m +# CONFIG_DRM_SUN8I_MIXER is not set +CONFIG_DRM_QXL=m +CONFIG_DRM_VIRTIO_GPU=m +CONFIG_DRM_VIRTIO_GPU_KMS=y +CONFIG_DRM_MSM=m +CONFIG_DRM_MSM_GPU_STATE=y +# CONFIG_DRM_MSM_GPU_SUDO is not set +CONFIG_DRM_MSM_MDSS=y +CONFIG_DRM_MSM_MDP4=y +CONFIG_DRM_MSM_MDP5=y +CONFIG_DRM_MSM_DPU=y +CONFIG_DRM_MSM_DP=y +CONFIG_DRM_MSM_DSI=y +CONFIG_DRM_MSM_DSI_28NM_PHY=y +CONFIG_DRM_MSM_DSI_20NM_PHY=y +CONFIG_DRM_MSM_DSI_28NM_8960_PHY=y +CONFIG_DRM_MSM_DSI_14NM_PHY=y +CONFIG_DRM_MSM_DSI_10NM_PHY=y +CONFIG_DRM_MSM_DSI_7NM_PHY=y +CONFIG_DRM_MSM_HDMI=y +CONFIG_DRM_MSM_HDMI_HDCP=y +CONFIG_DRM_TEGRA=m +# CONFIG_DRM_TEGRA_DEBUG is not set +CONFIG_DRM_TEGRA_STAGING=y +CONFIG_DRM_PANEL=y + +# +# Display Panels +# +# CONFIG_DRM_PANEL_ABT_Y030XX067A is not set +# CONFIG_DRM_PANEL_ARM_VERSATILE is not set +# CONFIG_DRM_PANEL_ASUS_Z00T_TM5P5_NT35596 is not set +# CONFIG_DRM_PANEL_AUO_A030JTN01 is not set +# CONFIG_DRM_PANEL_BOE_BF060Y8M_AJ0 is not set +# CONFIG_DRM_PANEL_BOE_HIMAX8279D is not set +# CONFIG_DRM_PANEL_BOE_TV101WUM_NL6 is not set +# CONFIG_DRM_PANEL_DSI_CM is not set +# CONFIG_DRM_PANEL_LVDS is not set +CONFIG_DRM_PANEL_SIMPLE=m +# CONFIG_DRM_PANEL_EDP is not set +# CONFIG_DRM_PANEL_EBBG_FT8719 is not set +# CONFIG_DRM_PANEL_ELIDA_KD35T133 is not set +# CONFIG_DRM_PANEL_FEIXIN_K101_IM2BA02 is not set +# CONFIG_DRM_PANEL_FEIYANG_FY07024DI26A30D is not set +# CONFIG_DRM_PANEL_HIMAX_HX8394 is not set +# CONFIG_DRM_PANEL_ILITEK_IL9322 is not set +# CONFIG_DRM_PANEL_ILITEK_ILI9341 is not set +# CONFIG_DRM_PANEL_ILITEK_ILI9881C is not set +# CONFIG_DRM_PANEL_INNOLUX_EJ030NA is not set +# CONFIG_DRM_PANEL_INNOLUX_P079ZCA is not set +# CONFIG_DRM_PANEL_JADARD_JD9365DA_H3 is not set +# CONFIG_DRM_PANEL_JDI_LT070ME05000 is not set +# CONFIG_DRM_PANEL_JDI_R63452 is not set +# CONFIG_DRM_PANEL_KHADAS_TS050 is not set +# CONFIG_DRM_PANEL_KINGDISPLAY_KD097D04 is not set +# CONFIG_DRM_PANEL_LEADTEK_LTK050H3146W is not set +# CONFIG_DRM_PANEL_LEADTEK_LTK500HD1829 is not set +# CONFIG_DRM_PANEL_SAMSUNG_LD9040 is not set +# CONFIG_DRM_PANEL_LG_LB035Q02 is not set +# CONFIG_DRM_PANEL_LG_LG4573 is not set +# CONFIG_DRM_PANEL_MAGNACHIP_D53E6EA8966 is not set +# CONFIG_DRM_PANEL_NEC_NL8048HL11 is not set +# CONFIG_DRM_PANEL_NEWVISION_NV3051D is not set +# CONFIG_DRM_PANEL_NEWVISION_NV3052C is not set +# CONFIG_DRM_PANEL_NOVATEK_NT35510 is not set +# CONFIG_DRM_PANEL_NOVATEK_NT35560 is not set +# CONFIG_DRM_PANEL_NOVATEK_NT35950 is not set +# CONFIG_DRM_PANEL_NOVATEK_NT36523 is not set +# CONFIG_DRM_PANEL_NOVATEK_NT36672A is not set +# CONFIG_DRM_PANEL_NOVATEK_NT39016 is not set +# CONFIG_DRM_PANEL_MANTIX_MLAF057WE51 is not set +# CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO is not set +# CONFIG_DRM_PANEL_ORISETECH_OTA5601A is not set +# CONFIG_DRM_PANEL_ORISETECH_OTM8009A is not set +# CONFIG_DRM_PANEL_OSD_OSD101T2587_53TS is not set +# CONFIG_DRM_PANEL_PANASONIC_VVX10F034N00 is not set +CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN=m +# 
CONFIG_DRM_PANEL_RAYDIUM_RM67191 is not set +# CONFIG_DRM_PANEL_RAYDIUM_RM68200 is not set +# CONFIG_DRM_PANEL_RONBO_RB070D30 is not set +# CONFIG_DRM_PANEL_SAMSUNG_ATNA33XC20 is not set +# CONFIG_DRM_PANEL_SAMSUNG_DB7430 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6D16D0 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6D27A1 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6D7AA0 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6E63M0 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0 is not set +# CONFIG_DRM_PANEL_SAMSUNG_SOFEF00 is not set +# CONFIG_DRM_PANEL_SEIKO_43WVF1G is not set +# CONFIG_DRM_PANEL_SHARP_LQ101R1SX01 is not set +# CONFIG_DRM_PANEL_SHARP_LS037V7DW01 is not set +# CONFIG_DRM_PANEL_SHARP_LS043T1LE01 is not set +# CONFIG_DRM_PANEL_SHARP_LS060T1SX01 is not set +# CONFIG_DRM_PANEL_SITRONIX_ST7701 is not set +# CONFIG_DRM_PANEL_SITRONIX_ST7703 is not set +# CONFIG_DRM_PANEL_SITRONIX_ST7789V is not set +# CONFIG_DRM_PANEL_SONY_ACX565AKM is not set +# CONFIG_DRM_PANEL_SONY_TD4353_JDI is not set +# CONFIG_DRM_PANEL_SONY_TULIP_TRULY_NT35521 is not set +# CONFIG_DRM_PANEL_STARTEK_KD070FHFID015 is not set +# CONFIG_DRM_PANEL_TDO_TL070WSH30 is not set +# CONFIG_DRM_PANEL_TPO_TD028TTEC1 is not set +# CONFIG_DRM_PANEL_TPO_TD043MTEA1 is not set +# CONFIG_DRM_PANEL_TPO_TPG110 is not set +# CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA is not set +# CONFIG_DRM_PANEL_VISIONOX_RM69299 is not set +# CONFIG_DRM_PANEL_VISIONOX_VTDR6130 is not set +# CONFIG_DRM_PANEL_VISIONOX_R66451 is not set +# CONFIG_DRM_PANEL_WIDECHIPS_WS2401 is not set +# CONFIG_DRM_PANEL_XINPENG_XPP055C272 is not set +# end of Display Panels + +CONFIG_DRM_BRIDGE=y +CONFIG_DRM_PANEL_BRIDGE=y + +# +# Display Interface Bridges +# +# CONFIG_DRM_CHIPONE_ICN6211 is not set +# CONFIG_DRM_CHRONTEL_CH7033 is not set +# CONFIG_DRM_CROS_EC_ANX7688 is not set +CONFIG_DRM_DISPLAY_CONNECTOR=m +# CONFIG_DRM_ITE_IT6505 is not set +# CONFIG_DRM_LONTIUM_LT8912B is not set +# CONFIG_DRM_LONTIUM_LT9211 is not set +# CONFIG_DRM_LONTIUM_LT9611 is not set +# CONFIG_DRM_LONTIUM_LT9611UXC is not set +# CONFIG_DRM_ITE_IT66121 is not set +# CONFIG_DRM_LVDS_CODEC is not set +# CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW is not set +# CONFIG_DRM_NWL_MIPI_DSI is not set +# CONFIG_DRM_NXP_PTN3460 is not set +# CONFIG_DRM_PARADE_PS8622 is not set +# CONFIG_DRM_PARADE_PS8640 is not set +# CONFIG_DRM_SAMSUNG_DSIM is not set +# CONFIG_DRM_SIL_SII8620 is not set +# CONFIG_DRM_SII902X is not set +# CONFIG_DRM_SII9234 is not set +# CONFIG_DRM_SIMPLE_BRIDGE is not set +# CONFIG_DRM_THINE_THC63LVD1024 is not set +# CONFIG_DRM_TOSHIBA_TC358762 is not set +# CONFIG_DRM_TOSHIBA_TC358764 is not set +# CONFIG_DRM_TOSHIBA_TC358767 is not set +# CONFIG_DRM_TOSHIBA_TC358768 is not set +# CONFIG_DRM_TOSHIBA_TC358775 is not set +# CONFIG_DRM_TI_DLPC3433 is not set +# CONFIG_DRM_TI_TFP410 is not set +# CONFIG_DRM_TI_SN65DSI83 is not set +# CONFIG_DRM_TI_SN65DSI86 is not set +# CONFIG_DRM_TI_TPD12S015 is not set +# CONFIG_DRM_ANALOGIX_ANX6345 is not set +# CONFIG_DRM_ANALOGIX_ANX78XX is not set +CONFIG_DRM_ANALOGIX_DP=m +# CONFIG_DRM_ANALOGIX_ANX7625 is not set +CONFIG_DRM_I2C_ADV7511=m +CONFIG_DRM_I2C_ADV7511_AUDIO=y +CONFIG_DRM_I2C_ADV7511_CEC=y +# CONFIG_DRM_CDNS_DSI is not set +# CONFIG_DRM_CDNS_MHDP8546 is not set +CONFIG_DRM_DW_HDMI=m +# CONFIG_DRM_DW_HDMI_AHB_AUDIO is not set +CONFIG_DRM_DW_HDMI_I2S_AUDIO=m +# CONFIG_DRM_DW_HDMI_GP_AUDIO is not set +# 
CONFIG_DRM_DW_HDMI_CEC is not set +CONFIG_DRM_DW_MIPI_DSI=m +# end of Display Interface Bridges + +# CONFIG_DRM_LOONGSON is not set +# CONFIG_DRM_ETNAVIV is not set +CONFIG_DRM_HISI_HIBMC=m +CONFIG_DRM_HISI_KIRIN=m +# CONFIG_DRM_LOGICVC is not set +CONFIG_DRM_MESON=m +CONFIG_DRM_MESON_DW_HDMI=m +CONFIG_DRM_MESON_DW_MIPI_DSI=m +# CONFIG_DRM_ARCPGU is not set +CONFIG_DRM_BOCHS=m +CONFIG_DRM_CIRRUS_QEMU=m +# CONFIG_DRM_GM12U320 is not set +# CONFIG_DRM_PANEL_MIPI_DBI is not set +# CONFIG_DRM_SIMPLEDRM is not set +# CONFIG_TINYDRM_HX8357D is not set +# CONFIG_TINYDRM_ILI9163 is not set +# CONFIG_TINYDRM_ILI9225 is not set +# CONFIG_TINYDRM_ILI9341 is not set +# CONFIG_TINYDRM_ILI9486 is not set +# CONFIG_TINYDRM_MI0283QT is not set +# CONFIG_TINYDRM_REPAPER is not set +# CONFIG_TINYDRM_ST7586 is not set +# CONFIG_TINYDRM_ST7735R is not set +# CONFIG_DRM_PL111 is not set +# CONFIG_DRM_XEN_FRONTEND is not set +CONFIG_DRM_LIMA=m +CONFIG_DRM_PANFROST=m +# CONFIG_DRM_TIDSS is not set +# CONFIG_DRM_GUD is not set +# CONFIG_DRM_SSD130X is not set +# CONFIG_DRM_HYPERV is not set +CONFIG_DRM_LEGACY=y +CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y + +# +# Frame buffer Devices +# +CONFIG_FB=y +CONFIG_FB_SVGALIB=m +# CONFIG_FB_CIRRUS is not set +# CONFIG_FB_PM2 is not set +CONFIG_FB_ARMCLCD=y +# CONFIG_FB_CYBER2000 is not set +# CONFIG_FB_ASILIANT is not set +# CONFIG_FB_IMSTT is not set +# CONFIG_FB_UVESA is not set +CONFIG_FB_EFI=y +# CONFIG_FB_OPENCORES is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_NVIDIA is not set +# CONFIG_FB_RIVA is not set +# CONFIG_FB_I740 is not set +# CONFIG_FB_MATROX is not set +# CONFIG_FB_RADEON is not set +# CONFIG_FB_ATY128 is not set +# CONFIG_FB_ATY is not set +CONFIG_FB_S3=m +CONFIG_FB_S3_DDC=y +# CONFIG_FB_SAVAGE is not set +# CONFIG_FB_SIS is not set +# CONFIG_FB_NEOMAGIC is not set +# CONFIG_FB_KYRO is not set +CONFIG_FB_3DFX=m +# CONFIG_FB_3DFX_ACCEL is not set +CONFIG_FB_3DFX_I2C=y +# CONFIG_FB_VOODOO1 is not set +CONFIG_FB_VT8623=m +# CONFIG_FB_TRIDENT is not set +CONFIG_FB_ARK=m +CONFIG_FB_PM3=m +# CONFIG_FB_CARMINE is not set +CONFIG_FB_SMSCUFX=m +CONFIG_FB_UDL=m +# CONFIG_FB_IBM_GXT4500 is not set +# CONFIG_FB_XILINX is not set +# CONFIG_FB_VIRTUAL is not set +CONFIG_XEN_FBDEV_FRONTEND=y +# CONFIG_FB_METRONOME is not set +CONFIG_FB_MB862XX=m +CONFIG_FB_MB862XX_PCI_GDC=y +CONFIG_FB_MB862XX_I2C=y +CONFIG_FB_HYPERV=m +CONFIG_FB_SIMPLE=y +# CONFIG_FB_SSD1307 is not set +# CONFIG_FB_SM712 is not set +CONFIG_FB_CORE=y +CONFIG_FB_NOTIFY=y +CONFIG_FIRMWARE_EDID=y +CONFIG_FB_DEVICE=y +CONFIG_FB_DDC=m +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +CONFIG_FB_SYS_FILLRECT=y +CONFIG_FB_SYS_COPYAREA=y +CONFIG_FB_SYS_IMAGEBLIT=y +# CONFIG_FB_FOREIGN_ENDIAN is not set +CONFIG_FB_SYS_FOPS=y +CONFIG_FB_DEFERRED_IO=y +CONFIG_FB_DMAMEM_HELPERS=y +CONFIG_FB_IOMEM_FOPS=y +CONFIG_FB_IOMEM_HELPERS=y +CONFIG_FB_SYSMEM_HELPERS=y +CONFIG_FB_SYSMEM_HELPERS_DEFERRED=y +CONFIG_FB_MODE_HELPERS=y +CONFIG_FB_TILEBLITTING=y +# end of Frame buffer Devices + +# +# Backlight & LCD device support +# +# CONFIG_LCD_CLASS_DEVICE is not set +CONFIG_BACKLIGHT_CLASS_DEVICE=y +# CONFIG_BACKLIGHT_KTD253 is not set +# CONFIG_BACKLIGHT_KTZ8866 is not set +CONFIG_BACKLIGHT_PWM=m +# CONFIG_BACKLIGHT_QCOM_WLED is not set +# CONFIG_BACKLIGHT_ADP8860 is not set +# CONFIG_BACKLIGHT_ADP8870 is not set +# CONFIG_BACKLIGHT_LM3630A is not set +# CONFIG_BACKLIGHT_LM3639 is not set +CONFIG_BACKLIGHT_LP855X=m +# CONFIG_BACKLIGHT_GPIO is not set +# CONFIG_BACKLIGHT_LV5207LP is not set 
+# CONFIG_BACKLIGHT_BD6107 is not set +# CONFIG_BACKLIGHT_ARCXCNN is not set +# CONFIG_BACKLIGHT_LED is not set +# end of Backlight & LCD device support + +CONFIG_VGASTATE=m +CONFIG_VIDEOMODE_HELPERS=y +CONFIG_HDMI=y + +# +# Console display driver support +# +CONFIG_DUMMY_CONSOLE=y +CONFIG_DUMMY_CONSOLE_COLUMNS=80 +CONFIG_DUMMY_CONSOLE_ROWS=25 +CONFIG_FRAMEBUFFER_CONSOLE=y +# CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION is not set +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set +# end of Console display driver support + +# CONFIG_LOGO is not set +# end of Graphics support + +# CONFIG_DRM_ACCEL is not set +CONFIG_SOUND=m +CONFIG_SOUND_OSS_CORE=y +# CONFIG_SOUND_OSS_CORE_PRECLAIM is not set +CONFIG_SND=m +CONFIG_SND_TIMER=m +CONFIG_SND_PCM=m +CONFIG_SND_PCM_ELD=y +CONFIG_SND_PCM_IEC958=y +CONFIG_SND_DMAENGINE_PCM=m +CONFIG_SND_HWDEP=m +CONFIG_SND_SEQ_DEVICE=m +CONFIG_SND_RAWMIDI=m +CONFIG_SND_JACK=y +CONFIG_SND_JACK_INPUT_DEV=y +CONFIG_SND_OSSEMUL=y +CONFIG_SND_MIXER_OSS=m +CONFIG_SND_PCM_OSS=m +CONFIG_SND_PCM_OSS_PLUGINS=y +CONFIG_SND_PCM_TIMER=y +CONFIG_SND_HRTIMER=m +CONFIG_SND_DYNAMIC_MINORS=y +CONFIG_SND_MAX_CARDS=32 +CONFIG_SND_SUPPORT_OLD_API=y +CONFIG_SND_PROC_FS=y +CONFIG_SND_VERBOSE_PROCFS=y +# CONFIG_SND_VERBOSE_PRINTK is not set +CONFIG_SND_CTL_FAST_LOOKUP=y +# CONFIG_SND_DEBUG is not set +# CONFIG_SND_CTL_INPUT_VALIDATION is not set +CONFIG_SND_VMASTER=y +CONFIG_SND_CTL_LED=m +CONFIG_SND_SEQUENCER=m +CONFIG_SND_SEQ_DUMMY=m +# CONFIG_SND_SEQUENCER_OSS is not set +CONFIG_SND_SEQ_HRTIMER_DEFAULT=y +CONFIG_SND_SEQ_MIDI_EVENT=m +CONFIG_SND_SEQ_MIDI=m +CONFIG_SND_SEQ_MIDI_EMUL=m +# CONFIG_SND_SEQ_UMP is not set +CONFIG_SND_MPU401_UART=m +CONFIG_SND_OPL3_LIB=m +CONFIG_SND_OPL3_LIB_SEQ=m +CONFIG_SND_AC97_CODEC=m +CONFIG_SND_DRIVERS=y +# CONFIG_SND_DUMMY is not set +CONFIG_SND_ALOOP=m +# CONFIG_SND_PCMTEST is not set +# CONFIG_SND_VIRMIDI is not set +# CONFIG_SND_MTPAV is not set +CONFIG_SND_MTS64=m +# CONFIG_SND_SERIAL_U16550 is not set +# CONFIG_SND_SERIAL_GENERIC is not set +# CONFIG_SND_MPU401 is not set +CONFIG_SND_PORTMAN2X4=m +CONFIG_SND_AC97_POWER_SAVE=y +CONFIG_SND_AC97_POWER_SAVE_DEFAULT=0 +CONFIG_SND_PCI=y +CONFIG_SND_AD1889=m +# CONFIG_SND_ALS300 is not set +# CONFIG_SND_ALI5451 is not set +# CONFIG_SND_ATIIXP is not set +# CONFIG_SND_ATIIXP_MODEM is not set +# CONFIG_SND_AU8810 is not set +# CONFIG_SND_AU8820 is not set +# CONFIG_SND_AU8830 is not set +# CONFIG_SND_AW2 is not set +# CONFIG_SND_AZT3328 is not set +# CONFIG_SND_BT87X is not set +# CONFIG_SND_CA0106 is not set +# CONFIG_SND_CMIPCI is not set +CONFIG_SND_OXYGEN_LIB=m +CONFIG_SND_OXYGEN=m +# CONFIG_SND_CS4281 is not set +# CONFIG_SND_CS46XX is not set +CONFIG_SND_CTXFI=m +CONFIG_SND_DARLA20=m +CONFIG_SND_GINA20=m +CONFIG_SND_LAYLA20=m +CONFIG_SND_DARLA24=m +CONFIG_SND_GINA24=m +CONFIG_SND_LAYLA24=m +CONFIG_SND_MONA=m +CONFIG_SND_MIA=m +CONFIG_SND_ECHO3G=m +CONFIG_SND_INDIGO=m +CONFIG_SND_INDIGOIO=m +CONFIG_SND_INDIGODJ=m +CONFIG_SND_INDIGOIOX=m +CONFIG_SND_INDIGODJX=m +# CONFIG_SND_EMU10K1 is not set +# CONFIG_SND_EMU10K1X is not set +# CONFIG_SND_ENS1370 is not set +# CONFIG_SND_ENS1371 is not set +# CONFIG_SND_ES1938 is not set +# CONFIG_SND_ES1968 is not set +# CONFIG_SND_FM801 is not set +# CONFIG_SND_HDSP is not set +CONFIG_SND_HDSPM=m +# CONFIG_SND_ICE1712 is not set +# CONFIG_SND_ICE1724 is not set +# CONFIG_SND_INTEL8X0 is not set +# CONFIG_SND_INTEL8X0M is not set +# CONFIG_SND_KORG1212 is not set 
+CONFIG_SND_LOLA=m +CONFIG_SND_LX6464ES=m +# CONFIG_SND_MAESTRO3 is not set +# CONFIG_SND_MIXART is not set +# CONFIG_SND_NM256 is not set +CONFIG_SND_PCXHR=m +CONFIG_SND_RIPTIDE=m +# CONFIG_SND_RME32 is not set +# CONFIG_SND_RME96 is not set +# CONFIG_SND_RME9652 is not set +# CONFIG_SND_SONICVIBES is not set +# CONFIG_SND_TRIDENT is not set +# CONFIG_SND_VIA82XX is not set +# CONFIG_SND_VIA82XX_MODEM is not set +CONFIG_SND_VIRTUOSO=m +# CONFIG_SND_VX222 is not set +# CONFIG_SND_YMFPCI is not set + +# +# HD-Audio +# +CONFIG_SND_HDA=m +CONFIG_SND_HDA_GENERIC_LEDS=y +CONFIG_SND_HDA_INTEL=m +CONFIG_SND_HDA_TEGRA=m +CONFIG_SND_HDA_HWDEP=y +CONFIG_SND_HDA_RECONFIG=y +CONFIG_SND_HDA_INPUT_BEEP=y +CONFIG_SND_HDA_INPUT_BEEP_MODE=1 +CONFIG_SND_HDA_PATCH_LOADER=y +# CONFIG_SND_HDA_SCODEC_CS35L41_I2C is not set +# CONFIG_SND_HDA_SCODEC_CS35L41_SPI is not set +# CONFIG_SND_HDA_SCODEC_CS35L56_I2C is not set +# CONFIG_SND_HDA_SCODEC_CS35L56_SPI is not set +# CONFIG_SND_HDA_SCODEC_TAS2781_I2C is not set +CONFIG_SND_HDA_CODEC_REALTEK=m +CONFIG_SND_HDA_CODEC_ANALOG=m +CONFIG_SND_HDA_CODEC_SIGMATEL=m +CONFIG_SND_HDA_CODEC_VIA=m +CONFIG_SND_HDA_CODEC_HDMI=m +CONFIG_SND_HDA_CODEC_CIRRUS=m +# CONFIG_SND_HDA_CODEC_CS8409 is not set +CONFIG_SND_HDA_CODEC_CONEXANT=m +CONFIG_SND_HDA_CODEC_CA0110=m +CONFIG_SND_HDA_CODEC_CA0132=m +CONFIG_SND_HDA_CODEC_CA0132_DSP=y +CONFIG_SND_HDA_CODEC_CMEDIA=m +CONFIG_SND_HDA_CODEC_SI3054=m +CONFIG_SND_HDA_GENERIC=m +CONFIG_SND_HDA_POWER_SAVE_DEFAULT=1 +# CONFIG_SND_HDA_INTEL_HDMI_SILENT_STREAM is not set +# CONFIG_SND_HDA_CTL_DEV_ID is not set +# end of HD-Audio + +CONFIG_SND_HDA_CORE=m +CONFIG_SND_HDA_DSP_LOADER=y +CONFIG_SND_HDA_ALIGNED_MMIO=y +CONFIG_SND_HDA_COMPONENT=y +CONFIG_SND_HDA_PREALLOC_SIZE=2048 +CONFIG_SND_INTEL_NHLT=y +CONFIG_SND_INTEL_DSP_CONFIG=m +CONFIG_SND_INTEL_SOUNDWIRE_ACPI=m +CONFIG_SND_SPI=y +CONFIG_SND_USB=y +CONFIG_SND_USB_AUDIO=m +# CONFIG_SND_USB_AUDIO_MIDI_V2 is not set +CONFIG_SND_USB_AUDIO_USE_MEDIA_CONTROLLER=y +CONFIG_SND_USB_UA101=m +CONFIG_SND_USB_CAIAQ=m +CONFIG_SND_USB_CAIAQ_INPUT=y +CONFIG_SND_USB_6FIRE=m +CONFIG_SND_USB_HIFACE=m +CONFIG_SND_BCD2000=m +CONFIG_SND_USB_LINE6=m +CONFIG_SND_USB_POD=m +CONFIG_SND_USB_PODHD=m +CONFIG_SND_USB_TONEPORT=m +CONFIG_SND_USB_VARIAX=m +CONFIG_SND_FIREWIRE=y +CONFIG_SND_FIREWIRE_LIB=m +CONFIG_SND_DICE=m +CONFIG_SND_OXFW=m +CONFIG_SND_ISIGHT=m +CONFIG_SND_FIREWORKS=m +CONFIG_SND_BEBOB=m +CONFIG_SND_FIREWIRE_DIGI00X=m +CONFIG_SND_FIREWIRE_TASCAM=m +CONFIG_SND_FIREWIRE_MOTU=m +CONFIG_SND_FIREFACE=m +CONFIG_SND_SOC=m +CONFIG_SND_SOC_GENERIC_DMAENGINE_PCM=y +# CONFIG_SND_SOC_ADI is not set +# CONFIG_SND_SOC_AMD_ACP is not set +# CONFIG_SND_AMD_ACP_CONFIG is not set +# CONFIG_SND_ATMEL_SOC is not set +# CONFIG_SND_BCM63XX_I2S_WHISTLER is not set +# CONFIG_SND_DESIGNWARE_I2S is not set + +# +# SoC Audio for Freescale CPUs +# + +# +# Common SoC Audio options for Freescale CPUs: +# +# CONFIG_SND_SOC_FSL_ASRC is not set +# CONFIG_SND_SOC_FSL_SAI is not set +# CONFIG_SND_SOC_FSL_AUDMIX is not set +# CONFIG_SND_SOC_FSL_SSI is not set +# CONFIG_SND_SOC_FSL_SPDIF is not set +# CONFIG_SND_SOC_FSL_ESAI is not set +# CONFIG_SND_SOC_FSL_MICFIL is not set +# CONFIG_SND_SOC_FSL_XCVR is not set +# CONFIG_SND_SOC_FSL_RPMSG is not set +# CONFIG_SND_SOC_IMX_AUDMUX is not set +# end of SoC Audio for Freescale CPUs + +# CONFIG_SND_SOC_CHV3_I2S is not set +CONFIG_SND_I2S_HI6210_I2S=m +# CONFIG_SND_KIRKWOOD_SOC is not set +# CONFIG_SND_SOC_IMG is not set +# CONFIG_SND_SOC_MTK_BTCVSD is not set + +# +# ASoC support for Amlogic platforms 
+# +# CONFIG_SND_MESON_AIU is not set +# CONFIG_SND_MESON_AXG_FRDDR is not set +# CONFIG_SND_MESON_AXG_TODDR is not set +# CONFIG_SND_MESON_AXG_TDMIN is not set +# CONFIG_SND_MESON_AXG_TDMOUT is not set +# CONFIG_SND_MESON_AXG_SOUND_CARD is not set +# CONFIG_SND_MESON_AXG_SPDIFOUT is not set +# CONFIG_SND_MESON_AXG_SPDIFIN is not set +# CONFIG_SND_MESON_AXG_PDM is not set +# CONFIG_SND_MESON_GX_SOUND_CARD is not set +# CONFIG_SND_MESON_G12A_TOACODEC is not set +# CONFIG_SND_MESON_G12A_TOHDMITX is not set +# CONFIG_SND_SOC_MESON_T9015 is not set +# end of ASoC support for Amlogic platforms + +CONFIG_SND_SOC_QCOM=m +CONFIG_SND_SOC_LPASS_CPU=m +CONFIG_SND_SOC_LPASS_PLATFORM=m +CONFIG_SND_SOC_LPASS_APQ8016=m +# CONFIG_SND_SOC_STORM is not set +CONFIG_SND_SOC_APQ8016_SBC=m +CONFIG_SND_SOC_QCOM_COMMON=m +# CONFIG_SND_SOC_SC7180 is not set +CONFIG_SND_SOC_ROCKCHIP=m +CONFIG_SND_SOC_ROCKCHIP_I2S=m +# CONFIG_SND_SOC_ROCKCHIP_I2S_TDM is not set +# CONFIG_SND_SOC_ROCKCHIP_PDM is not set +CONFIG_SND_SOC_ROCKCHIP_SPDIF=m +# CONFIG_SND_SOC_ROCKCHIP_MAX98090 is not set +CONFIG_SND_SOC_ROCKCHIP_RT5645=m +# CONFIG_SND_SOC_RK3288_HDMI_ANALOG is not set +CONFIG_SND_SOC_RK3399_GRU_SOUND=m +# CONFIG_SND_SOC_SOF_TOPLEVEL is not set + +# +# STMicroelectronics STM32 SOC audio support +# +# end of STMicroelectronics STM32 SOC audio support + +# +# Allwinner SoC Audio support +# +# CONFIG_SND_SUN4I_CODEC is not set +CONFIG_SND_SUN8I_CODEC=m +# CONFIG_SND_SUN8I_CODEC_ANALOG is not set +CONFIG_SND_SUN50I_CODEC_ANALOG=m +CONFIG_SND_SUN4I_I2S=m +# CONFIG_SND_SUN4I_SPDIF is not set +# CONFIG_SND_SUN50I_DMIC is not set +CONFIG_SND_SUN8I_ADDA_PR_REGMAP=m +# end of Allwinner SoC Audio support + +CONFIG_SND_SOC_TEGRA=m +# CONFIG_SND_SOC_TEGRA20_AC97 is not set +# CONFIG_SND_SOC_TEGRA20_DAS is not set +# CONFIG_SND_SOC_TEGRA20_I2S is not set +CONFIG_SND_SOC_TEGRA20_SPDIF=m +# CONFIG_SND_SOC_TEGRA30_AHUB is not set +# CONFIG_SND_SOC_TEGRA30_I2S is not set +# CONFIG_SND_SOC_TEGRA210_AHUB is not set +# CONFIG_SND_SOC_TEGRA210_DMIC is not set +# CONFIG_SND_SOC_TEGRA210_I2S is not set +# CONFIG_SND_SOC_TEGRA210_OPE is not set +# CONFIG_SND_SOC_TEGRA186_ASRC is not set +# CONFIG_SND_SOC_TEGRA186_DSPK is not set +# CONFIG_SND_SOC_TEGRA210_ADMAIF is not set +# CONFIG_SND_SOC_TEGRA210_MVC is not set +# CONFIG_SND_SOC_TEGRA210_SFC is not set +# CONFIG_SND_SOC_TEGRA210_AMX is not set +# CONFIG_SND_SOC_TEGRA210_ADX is not set +# CONFIG_SND_SOC_TEGRA210_MIXER is not set +# CONFIG_SND_SOC_TEGRA_AUDIO_GRAPH_CARD is not set +CONFIG_SND_SOC_TEGRA_MACHINE_DRV=m +# CONFIG_SND_SOC_TEGRA_RT5631 is not set +CONFIG_SND_SOC_TEGRA_RT5640=m +CONFIG_SND_SOC_TEGRA_WM8753=m +CONFIG_SND_SOC_TEGRA_WM8903=m +# CONFIG_SND_SOC_TEGRA_WM9712 is not set +CONFIG_SND_SOC_TEGRA_TRIMSLICE=m +CONFIG_SND_SOC_TEGRA_ALC5632=m +CONFIG_SND_SOC_TEGRA_MAX98090=m +# CONFIG_SND_SOC_TEGRA_MAX98088 is not set +CONFIG_SND_SOC_TEGRA_RT5677=m +# CONFIG_SND_SOC_TEGRA_SGTL5000 is not set +# CONFIG_SND_SOC_XILINX_I2S is not set +# CONFIG_SND_SOC_XILINX_AUDIO_FORMATTER is not set +# CONFIG_SND_SOC_XILINX_SPDIF is not set +# CONFIG_SND_SOC_XTFPGA_I2S is not set +CONFIG_SND_SOC_I2C_AND_SPI=m + +# +# CODEC drivers +# +# CONFIG_SND_SOC_AC97_CODEC is not set +# CONFIG_SND_SOC_ADAU1372_I2C is not set +# CONFIG_SND_SOC_ADAU1372_SPI is not set +# CONFIG_SND_SOC_ADAU1701 is not set +# CONFIG_SND_SOC_ADAU1761_I2C is not set +# CONFIG_SND_SOC_ADAU1761_SPI is not set +# CONFIG_SND_SOC_ADAU7002 is not set +# CONFIG_SND_SOC_ADAU7118_HW is not set +# CONFIG_SND_SOC_ADAU7118_I2C is not set +# 
CONFIG_SND_SOC_AK4104 is not set +# CONFIG_SND_SOC_AK4118 is not set +# CONFIG_SND_SOC_AK4375 is not set +# CONFIG_SND_SOC_AK4458 is not set +# CONFIG_SND_SOC_AK4554 is not set +# CONFIG_SND_SOC_AK4613 is not set +# CONFIG_SND_SOC_AK4642 is not set +# CONFIG_SND_SOC_AK5386 is not set +# CONFIG_SND_SOC_AK5558 is not set +# CONFIG_SND_SOC_ALC5623 is not set +CONFIG_SND_SOC_ALC5632=m +# CONFIG_SND_SOC_AUDIO_IIO_AUX is not set +# CONFIG_SND_SOC_AW8738 is not set +# CONFIG_SND_SOC_AW88395 is not set +# CONFIG_SND_SOC_AW88261 is not set +# CONFIG_SND_SOC_BD28623 is not set +# CONFIG_SND_SOC_BT_SCO is not set +# CONFIG_SND_SOC_CHV3_CODEC is not set +# CONFIG_SND_SOC_CROS_EC_CODEC is not set +# CONFIG_SND_SOC_CS35L32 is not set +# CONFIG_SND_SOC_CS35L33 is not set +# CONFIG_SND_SOC_CS35L34 is not set +# CONFIG_SND_SOC_CS35L35 is not set +# CONFIG_SND_SOC_CS35L36 is not set +# CONFIG_SND_SOC_CS35L41_SPI is not set +# CONFIG_SND_SOC_CS35L41_I2C is not set +# CONFIG_SND_SOC_CS35L45_SPI is not set +# CONFIG_SND_SOC_CS35L45_I2C is not set +# CONFIG_SND_SOC_CS35L56_I2C is not set +# CONFIG_SND_SOC_CS35L56_SPI is not set +# CONFIG_SND_SOC_CS42L42 is not set +# CONFIG_SND_SOC_CS42L51_I2C is not set +# CONFIG_SND_SOC_CS42L52 is not set +# CONFIG_SND_SOC_CS42L56 is not set +# CONFIG_SND_SOC_CS42L73 is not set +# CONFIG_SND_SOC_CS42L83 is not set +# CONFIG_SND_SOC_CS4234 is not set +# CONFIG_SND_SOC_CS4265 is not set +# CONFIG_SND_SOC_CS4270 is not set +# CONFIG_SND_SOC_CS4271_I2C is not set +# CONFIG_SND_SOC_CS4271_SPI is not set +# CONFIG_SND_SOC_CS42XX8_I2C is not set +# CONFIG_SND_SOC_CS43130 is not set +# CONFIG_SND_SOC_CS4341 is not set +# CONFIG_SND_SOC_CS4349 is not set +# CONFIG_SND_SOC_CS53L30 is not set +# CONFIG_SND_SOC_CX2072X is not set +# CONFIG_SND_SOC_DA7213 is not set +CONFIG_SND_SOC_DA7219=m +CONFIG_SND_SOC_DMIC=m +CONFIG_SND_SOC_HDMI_CODEC=m +# CONFIG_SND_SOC_ES7134 is not set +# CONFIG_SND_SOC_ES7241 is not set +# CONFIG_SND_SOC_ES8316 is not set +# CONFIG_SND_SOC_ES8326 is not set +# CONFIG_SND_SOC_ES8328_I2C is not set +# CONFIG_SND_SOC_ES8328_SPI is not set +# CONFIG_SND_SOC_GTM601 is not set +# CONFIG_SND_SOC_HDA is not set +# CONFIG_SND_SOC_ICS43432 is not set +# CONFIG_SND_SOC_IDT821034 is not set +# CONFIG_SND_SOC_INNO_RK3036 is not set +# CONFIG_SND_SOC_MAX98088 is not set +CONFIG_SND_SOC_MAX98090=m +CONFIG_SND_SOC_MAX98357A=m +# CONFIG_SND_SOC_MAX98504 is not set +# CONFIG_SND_SOC_MAX9867 is not set +# CONFIG_SND_SOC_MAX98927 is not set +# CONFIG_SND_SOC_MAX98520 is not set +# CONFIG_SND_SOC_MAX98373_I2C is not set +# CONFIG_SND_SOC_MAX98388 is not set +# CONFIG_SND_SOC_MAX98390 is not set +# CONFIG_SND_SOC_MAX98396 is not set +# CONFIG_SND_SOC_MAX9860 is not set +# CONFIG_SND_SOC_MSM8916_WCD_ANALOG is not set +# CONFIG_SND_SOC_MSM8916_WCD_DIGITAL is not set +# CONFIG_SND_SOC_PCM1681 is not set +# CONFIG_SND_SOC_PCM1789_I2C is not set +# CONFIG_SND_SOC_PCM179X_I2C is not set +# CONFIG_SND_SOC_PCM179X_SPI is not set +# CONFIG_SND_SOC_PCM186X_I2C is not set +# CONFIG_SND_SOC_PCM186X_SPI is not set +# CONFIG_SND_SOC_PCM3060_I2C is not set +# CONFIG_SND_SOC_PCM3060_SPI is not set +# CONFIG_SND_SOC_PCM3168A_I2C is not set +# CONFIG_SND_SOC_PCM3168A_SPI is not set +# CONFIG_SND_SOC_PCM5102A is not set +# CONFIG_SND_SOC_PCM512x_I2C is not set +# CONFIG_SND_SOC_PCM512x_SPI is not set +# CONFIG_SND_SOC_PEB2466 is not set +# CONFIG_SND_SOC_RK3328 is not set +CONFIG_SND_SOC_RL6231=m +CONFIG_SND_SOC_RT5514=m +CONFIG_SND_SOC_RT5514_SPI=m +# CONFIG_SND_SOC_RT5616 is not set +# 
CONFIG_SND_SOC_RT5631 is not set +CONFIG_SND_SOC_RT5640=m +CONFIG_SND_SOC_RT5645=m +# CONFIG_SND_SOC_RT5659 is not set +CONFIG_SND_SOC_RT5677=m +CONFIG_SND_SOC_RT5677_SPI=m +# CONFIG_SND_SOC_RT9120 is not set +# CONFIG_SND_SOC_SGTL5000 is not set +CONFIG_SND_SOC_SIMPLE_AMPLIFIER=m +# CONFIG_SND_SOC_SIMPLE_MUX is not set +# CONFIG_SND_SOC_SMA1303 is not set +# CONFIG_SND_SOC_SPDIF is not set +# CONFIG_SND_SOC_SRC4XXX_I2C is not set +# CONFIG_SND_SOC_SSM2305 is not set +# CONFIG_SND_SOC_SSM2518 is not set +# CONFIG_SND_SOC_SSM2602_SPI is not set +# CONFIG_SND_SOC_SSM2602_I2C is not set +# CONFIG_SND_SOC_SSM3515 is not set +# CONFIG_SND_SOC_SSM4567 is not set +# CONFIG_SND_SOC_STA32X is not set +# CONFIG_SND_SOC_STA350 is not set +# CONFIG_SND_SOC_STI_SAS is not set +# CONFIG_SND_SOC_TAS2552 is not set +# CONFIG_SND_SOC_TAS2562 is not set +# CONFIG_SND_SOC_TAS2764 is not set +# CONFIG_SND_SOC_TAS2770 is not set +# CONFIG_SND_SOC_TAS2780 is not set +# CONFIG_SND_SOC_TAS2781_I2C is not set +# CONFIG_SND_SOC_TAS5086 is not set +# CONFIG_SND_SOC_TAS571X is not set +# CONFIG_SND_SOC_TAS5720 is not set +# CONFIG_SND_SOC_TAS5805M is not set +# CONFIG_SND_SOC_TAS6424 is not set +# CONFIG_SND_SOC_TDA7419 is not set +# CONFIG_SND_SOC_TFA9879 is not set +# CONFIG_SND_SOC_TFA989X is not set +# CONFIG_SND_SOC_TLV320ADC3XXX is not set +CONFIG_SND_SOC_TLV320AIC23=m +CONFIG_SND_SOC_TLV320AIC23_I2C=m +# CONFIG_SND_SOC_TLV320AIC23_SPI is not set +# CONFIG_SND_SOC_TLV320AIC31XX is not set +# CONFIG_SND_SOC_TLV320AIC32X4_I2C is not set +# CONFIG_SND_SOC_TLV320AIC32X4_SPI is not set +# CONFIG_SND_SOC_TLV320AIC3X_I2C is not set +# CONFIG_SND_SOC_TLV320AIC3X_SPI is not set +# CONFIG_SND_SOC_TLV320ADCX140 is not set +# CONFIG_SND_SOC_TS3A227E is not set +# CONFIG_SND_SOC_TSCS42XX is not set +# CONFIG_SND_SOC_TSCS454 is not set +# CONFIG_SND_SOC_UDA1334 is not set +# CONFIG_SND_SOC_WM8510 is not set +# CONFIG_SND_SOC_WM8523 is not set +# CONFIG_SND_SOC_WM8524 is not set +# CONFIG_SND_SOC_WM8580 is not set +# CONFIG_SND_SOC_WM8711 is not set +# CONFIG_SND_SOC_WM8728 is not set +# CONFIG_SND_SOC_WM8731_I2C is not set +# CONFIG_SND_SOC_WM8731_SPI is not set +# CONFIG_SND_SOC_WM8737 is not set +# CONFIG_SND_SOC_WM8741 is not set +# CONFIG_SND_SOC_WM8750 is not set +CONFIG_SND_SOC_WM8753=m +# CONFIG_SND_SOC_WM8770 is not set +# CONFIG_SND_SOC_WM8776 is not set +# CONFIG_SND_SOC_WM8782 is not set +# CONFIG_SND_SOC_WM8804_I2C is not set +# CONFIG_SND_SOC_WM8804_SPI is not set +CONFIG_SND_SOC_WM8903=m +# CONFIG_SND_SOC_WM8904 is not set +# CONFIG_SND_SOC_WM8940 is not set +# CONFIG_SND_SOC_WM8960 is not set +# CONFIG_SND_SOC_WM8961 is not set +# CONFIG_SND_SOC_WM8962 is not set +# CONFIG_SND_SOC_WM8974 is not set +# CONFIG_SND_SOC_WM8978 is not set +# CONFIG_SND_SOC_WM8985 is not set +# CONFIG_SND_SOC_ZL38060 is not set +# CONFIG_SND_SOC_MAX9759 is not set +# CONFIG_SND_SOC_MT6351 is not set +# CONFIG_SND_SOC_MT6358 is not set +# CONFIG_SND_SOC_MT6660 is not set +# CONFIG_SND_SOC_NAU8315 is not set +# CONFIG_SND_SOC_NAU8540 is not set +# CONFIG_SND_SOC_NAU8810 is not set +# CONFIG_SND_SOC_NAU8821 is not set +# CONFIG_SND_SOC_NAU8822 is not set +# CONFIG_SND_SOC_NAU8824 is not set +# CONFIG_SND_SOC_TPA6130A2 is not set +# CONFIG_SND_SOC_LPASS_WSA_MACRO is not set +# CONFIG_SND_SOC_LPASS_VA_MACRO is not set +# CONFIG_SND_SOC_LPASS_RX_MACRO is not set +# CONFIG_SND_SOC_LPASS_TX_MACRO is not set +# end of CODEC drivers + +CONFIG_SND_SIMPLE_CARD_UTILS=m +CONFIG_SND_SIMPLE_CARD=m +CONFIG_SND_AUDIO_GRAPH_CARD=m +# 
CONFIG_SND_AUDIO_GRAPH_CARD2 is not set +# CONFIG_SND_TEST_COMPONENT is not set +CONFIG_SND_XEN_FRONTEND=m +# CONFIG_SND_VIRTIO is not set +CONFIG_AC97_BUS=m +CONFIG_HID_SUPPORT=y +CONFIG_HID=m +CONFIG_HID_BATTERY_STRENGTH=y +CONFIG_HIDRAW=y +CONFIG_UHID=m +CONFIG_HID_GENERIC=m + +# +# Special HID drivers +# +CONFIG_HID_A4TECH=m +CONFIG_HID_ACCUTOUCH=m +CONFIG_HID_ACRUX=m +CONFIG_HID_ACRUX_FF=y +CONFIG_HID_APPLE=m +# CONFIG_HID_APPLEIR is not set +CONFIG_HID_ASUS=m +CONFIG_HID_AUREAL=m +CONFIG_HID_BELKIN=m +CONFIG_HID_BETOP_FF=m +CONFIG_HID_BIGBEN_FF=m +CONFIG_HID_CHERRY=m +CONFIG_HID_CHICONY=m +CONFIG_HID_CORSAIR=m +CONFIG_HID_COUGAR=m +CONFIG_HID_MACALLY=m +CONFIG_HID_PRODIKEYS=m +CONFIG_HID_CMEDIA=m +CONFIG_HID_CP2112=m +# CONFIG_HID_CREATIVE_SB0540 is not set +CONFIG_HID_CYPRESS=m +CONFIG_HID_DRAGONRISE=m +CONFIG_DRAGONRISE_FF=y +CONFIG_HID_EMS_FF=m +CONFIG_HID_ELAN=m +CONFIG_HID_ELECOM=m +CONFIG_HID_ELO=m +# CONFIG_HID_EVISION is not set +CONFIG_HID_EZKEY=m +# CONFIG_HID_FT260 is not set +CONFIG_HID_GEMBIRD=m +CONFIG_HID_GFRM=m +# CONFIG_HID_GLORIOUS is not set +CONFIG_HID_HOLTEK=m +CONFIG_HOLTEK_FF=y +# CONFIG_HID_GOOGLE_HAMMER is not set +# CONFIG_HID_GOOGLE_STADIA_FF is not set +# CONFIG_HID_VIVALDI is not set +CONFIG_HID_GT683R=m +CONFIG_HID_KEYTOUCH=m +CONFIG_HID_KYE=m +CONFIG_HID_UCLOGIC=m +CONFIG_HID_WALTOP=m +CONFIG_HID_VIEWSONIC=m +# CONFIG_HID_VRC2 is not set +# CONFIG_HID_XIAOMI is not set +CONFIG_HID_GYRATION=m +CONFIG_HID_ICADE=m +CONFIG_HID_ITE=m +CONFIG_HID_JABRA=m +CONFIG_HID_TWINHAN=m +CONFIG_HID_KENSINGTON=m +CONFIG_HID_LCPOWER=m +CONFIG_HID_LED=m +CONFIG_HID_LENOVO=m +# CONFIG_HID_LETSKETCH is not set +CONFIG_HID_LOGITECH=m +CONFIG_HID_LOGITECH_DJ=m +CONFIG_HID_LOGITECH_HIDPP=m +CONFIG_LOGITECH_FF=y +CONFIG_LOGIRUMBLEPAD2_FF=y +CONFIG_LOGIG940_FF=y +CONFIG_LOGIWHEELS_FF=y +CONFIG_HID_MAGICMOUSE=m +CONFIG_HID_MALTRON=m +CONFIG_HID_MAYFLASH=m +# CONFIG_HID_MEGAWORLD_FF is not set +CONFIG_HID_REDRAGON=m +CONFIG_HID_MICROSOFT=m +CONFIG_HID_MONTEREY=m +CONFIG_HID_MULTITOUCH=m +# CONFIG_HID_NINTENDO is not set +CONFIG_HID_NTI=m +CONFIG_HID_NTRIG=m +# CONFIG_HID_NVIDIA_SHIELD is not set +CONFIG_HID_ORTEK=m +CONFIG_HID_PANTHERLORD=m +CONFIG_PANTHERLORD_FF=y +CONFIG_HID_PENMOUNT=m +CONFIG_HID_PETALYNX=m +CONFIG_HID_PICOLCD=m +CONFIG_HID_PICOLCD_FB=y +CONFIG_HID_PICOLCD_BACKLIGHT=y +CONFIG_HID_PICOLCD_LEDS=y +CONFIG_HID_PICOLCD_CIR=y +CONFIG_HID_PLANTRONICS=m +# CONFIG_HID_PXRC is not set +# CONFIG_HID_RAZER is not set +CONFIG_HID_PRIMAX=m +CONFIG_HID_RETRODE=m +CONFIG_HID_ROCCAT=m +CONFIG_HID_SAITEK=m +CONFIG_HID_SAMSUNG=m +# CONFIG_HID_SEMITEK is not set +# CONFIG_HID_SIGMAMICRO is not set +CONFIG_HID_SONY=m +CONFIG_SONY_FF=y +CONFIG_HID_SPEEDLINK=m +CONFIG_HID_STEAM=m +# CONFIG_STEAM_FF is not set +CONFIG_HID_STEELSERIES=m +CONFIG_HID_SUNPLUS=m +CONFIG_HID_RMI=m +CONFIG_HID_GREENASIA=m +CONFIG_GREENASIA_FF=y +# CONFIG_HID_HYPERV_MOUSE is not set +CONFIG_HID_SMARTJOYPLUS=m +CONFIG_SMARTJOYPLUS_FF=y +CONFIG_HID_TIVO=m +CONFIG_HID_TOPSEED=m +# CONFIG_HID_TOPRE is not set +CONFIG_HID_THINGM=m +CONFIG_HID_THRUSTMASTER=m +CONFIG_THRUSTMASTER_FF=y +CONFIG_HID_UDRAW_PS3=m +CONFIG_HID_U2FZERO=m +CONFIG_HID_WACOM=m +CONFIG_HID_WIIMOTE=m +CONFIG_HID_XINMO=m +CONFIG_HID_ZEROPLUS=m +CONFIG_ZEROPLUS_FF=y +CONFIG_HID_ZYDACRON=m +CONFIG_HID_SENSOR_HUB=m +CONFIG_HID_SENSOR_CUSTOM_SENSOR=m +CONFIG_HID_ALPS=m +# CONFIG_HID_MCP2200 is not set +# CONFIG_HID_MCP2221 is not set +# end of Special HID drivers + +# +# HID-BPF support +# +# CONFIG_HID_BPF is not set +# end of HID-BPF support + +# +# 
USB HID support +# +CONFIG_USB_HID=m +CONFIG_HID_PID=y +CONFIG_USB_HIDDEV=y + +# +# USB HID Boot Protocol drivers +# +# CONFIG_USB_KBD is not set +# CONFIG_USB_MOUSE is not set +# end of USB HID Boot Protocol drivers +# end of USB HID support + +CONFIG_I2C_HID=m +# CONFIG_I2C_HID_ACPI is not set +# CONFIG_I2C_HID_OF is not set +# CONFIG_I2C_HID_OF_ELAN is not set +# CONFIG_I2C_HID_OF_GOODIX is not set +CONFIG_USB_OHCI_LITTLE_ENDIAN=y +CONFIG_USB_SUPPORT=y +CONFIG_USB_COMMON=m +CONFIG_USB_LED_TRIG=y +CONFIG_USB_ULPI_BUS=m +CONFIG_USB_CONN_GPIO=m +CONFIG_USB_ARCH_HAS_HCD=y +CONFIG_USB=m +CONFIG_USB_PCI=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y + +# +# Miscellaneous USB options +# +CONFIG_USB_DEFAULT_PERSIST=y +# CONFIG_USB_FEW_INIT_RETRIES is not set +CONFIG_USB_DYNAMIC_MINORS=y +# CONFIG_USB_OTG is not set +# CONFIG_USB_OTG_PRODUCTLIST is not set +# CONFIG_USB_OTG_DISABLE_EXTERNAL_HUB is not set +CONFIG_USB_LEDS_TRIGGER_USBPORT=m +CONFIG_USB_AUTOSUSPEND_DELAY=2 +CONFIG_USB_MON=m + +# +# USB Host Controller Drivers +# +# CONFIG_USB_C67X00_HCD is not set +CONFIG_USB_XHCI_HCD=m +# CONFIG_USB_XHCI_DBGCAP is not set +CONFIG_USB_XHCI_PCI=m +CONFIG_USB_XHCI_PCI_RENESAS=m +CONFIG_USB_XHCI_PLATFORM=m +# CONFIG_USB_XHCI_HISTB is not set +# CONFIG_USB_XHCI_MVEBU is not set +CONFIG_USB_XHCI_TEGRA=m +CONFIG_USB_EHCI_HCD=m +CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_EHCI_TT_NEWSCHED=y +CONFIG_USB_EHCI_PCI=m +# CONFIG_USB_EHCI_FSL is not set +CONFIG_USB_EHCI_HCD_ORION=m +CONFIG_USB_EHCI_TEGRA=m +CONFIG_USB_EHCI_HCD_PLATFORM=m +# CONFIG_USB_OXU210HP_HCD is not set +# CONFIG_USB_ISP116X_HCD is not set +# CONFIG_USB_MAX3421_HCD is not set +CONFIG_USB_OHCI_HCD=m +CONFIG_USB_OHCI_HCD_PCI=m +# CONFIG_USB_OHCI_HCD_SSB is not set +CONFIG_USB_OHCI_HCD_PLATFORM=m +# CONFIG_USB_UHCI_HCD is not set +# CONFIG_USB_SL811_HCD is not set +# CONFIG_USB_R8A66597_HCD is not set +# CONFIG_USB_HCD_BCMA is not set +# CONFIG_USB_HCD_SSB is not set +# CONFIG_USB_HCD_TEST_MODE is not set +# CONFIG_USB_XEN_HCD is not set + +# +# USB Device Class drivers +# +CONFIG_USB_ACM=m +CONFIG_USB_PRINTER=m +CONFIG_USB_WDM=m +CONFIG_USB_TMC=m + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# + +# +# also be needed; see USB_STORAGE Help for more info +# +CONFIG_USB_STORAGE=m +# CONFIG_USB_STORAGE_DEBUG is not set +CONFIG_USB_STORAGE_REALTEK=m +CONFIG_REALTEK_AUTOPM=y +CONFIG_USB_STORAGE_DATAFAB=m +CONFIG_USB_STORAGE_FREECOM=m +CONFIG_USB_STORAGE_ISD200=m +CONFIG_USB_STORAGE_USBAT=m +CONFIG_USB_STORAGE_SDDR09=m +CONFIG_USB_STORAGE_SDDR55=m +CONFIG_USB_STORAGE_JUMPSHOT=m +CONFIG_USB_STORAGE_ALAUDA=m +CONFIG_USB_STORAGE_ONETOUCH=m +CONFIG_USB_STORAGE_KARMA=m +CONFIG_USB_STORAGE_CYPRESS_ATACB=m +CONFIG_USB_STORAGE_ENE_UB6250=m +CONFIG_USB_UAS=m + +# +# USB Imaging devices +# +CONFIG_USB_MDC800=m +CONFIG_USB_MICROTEK=m +CONFIG_USBIP_CORE=m +CONFIG_USBIP_VHCI_HCD=m +CONFIG_USBIP_VHCI_HC_PORTS=15 +CONFIG_USBIP_VHCI_NR_HCS=8 +CONFIG_USBIP_HOST=m +CONFIG_USBIP_VUDC=m +# CONFIG_USBIP_DEBUG is not set + +# +# USB dual-mode controller drivers +# +# CONFIG_USB_CDNS_SUPPORT is not set +CONFIG_USB_MUSB_HDRC=m +# CONFIG_USB_MUSB_HOST is not set +# CONFIG_USB_MUSB_GADGET is not set +CONFIG_USB_MUSB_DUAL_ROLE=y + +# +# Platform Glue Layer +# +CONFIG_USB_MUSB_SUNXI=m + +# +# MUSB DMA mode +# +# CONFIG_MUSB_PIO_ONLY is not set +CONFIG_USB_DWC3=m +CONFIG_USB_DWC3_ULPI=y +# CONFIG_USB_DWC3_HOST is not set +# CONFIG_USB_DWC3_GADGET is not set +CONFIG_USB_DWC3_DUAL_ROLE=y + +# +# Platform Glue Driver Support +# +CONFIG_USB_DWC3_PCI=m +CONFIG_USB_DWC3_HAPS=m 
+CONFIG_USB_DWC3_MESON_G12A=m +CONFIG_USB_DWC3_OF_SIMPLE=m +CONFIG_USB_DWC3_QCOM=m +CONFIG_USB_DWC3_XILINX=m +CONFIG_USB_DWC2=m +# CONFIG_USB_DWC2_HOST is not set + +# +# Gadget/Dual-role mode requires USB Gadget support to be enabled +# +# CONFIG_USB_DWC2_PERIPHERAL is not set +CONFIG_USB_DWC2_DUAL_ROLE=y +# CONFIG_USB_DWC2_PCI is not set +# CONFIG_USB_DWC2_DEBUG is not set +# CONFIG_USB_DWC2_TRACK_MISSED_SOFS is not set +CONFIG_USB_CHIPIDEA=m +CONFIG_USB_CHIPIDEA_UDC=y +CONFIG_USB_CHIPIDEA_HOST=y +CONFIG_USB_CHIPIDEA_PCI=m +CONFIG_USB_CHIPIDEA_MSM=m +CONFIG_USB_CHIPIDEA_IMX=m +CONFIG_USB_CHIPIDEA_GENERIC=m +CONFIG_USB_CHIPIDEA_TEGRA=m +CONFIG_USB_ISP1760=m +CONFIG_USB_ISP1760_HCD=y +CONFIG_USB_ISP1761_UDC=y +# CONFIG_USB_ISP1760_HOST_ROLE is not set +# CONFIG_USB_ISP1760_GADGET_ROLE is not set +CONFIG_USB_ISP1760_DUAL_ROLE=y + +# +# USB port drivers +# +CONFIG_USB_SERIAL=m +CONFIG_USB_SERIAL_GENERIC=y +CONFIG_USB_SERIAL_SIMPLE=m +CONFIG_USB_SERIAL_AIRCABLE=m +CONFIG_USB_SERIAL_ARK3116=m +CONFIG_USB_SERIAL_BELKIN=m +CONFIG_USB_SERIAL_CH341=m +CONFIG_USB_SERIAL_WHITEHEAT=m +CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m +CONFIG_USB_SERIAL_CP210X=m +CONFIG_USB_SERIAL_CYPRESS_M8=m +CONFIG_USB_SERIAL_EMPEG=m +CONFIG_USB_SERIAL_FTDI_SIO=m +CONFIG_USB_SERIAL_VISOR=m +CONFIG_USB_SERIAL_IPAQ=m +CONFIG_USB_SERIAL_IR=m +CONFIG_USB_SERIAL_EDGEPORT=m +CONFIG_USB_SERIAL_EDGEPORT_TI=m +CONFIG_USB_SERIAL_F81232=m +CONFIG_USB_SERIAL_F8153X=m +CONFIG_USB_SERIAL_GARMIN=m +CONFIG_USB_SERIAL_IPW=m +CONFIG_USB_SERIAL_IUU=m +CONFIG_USB_SERIAL_KEYSPAN_PDA=m +CONFIG_USB_SERIAL_KEYSPAN=m +CONFIG_USB_SERIAL_KLSI=m +CONFIG_USB_SERIAL_KOBIL_SCT=m +CONFIG_USB_SERIAL_MCT_U232=m +CONFIG_USB_SERIAL_METRO=m +CONFIG_USB_SERIAL_MOS7720=m +CONFIG_USB_SERIAL_MOS7715_PARPORT=y +CONFIG_USB_SERIAL_MOS7840=m +CONFIG_USB_SERIAL_MXUPORT=m +CONFIG_USB_SERIAL_NAVMAN=m +CONFIG_USB_SERIAL_PL2303=m +CONFIG_USB_SERIAL_OTI6858=m +CONFIG_USB_SERIAL_QCAUX=m +CONFIG_USB_SERIAL_QUALCOMM=m +CONFIG_USB_SERIAL_SPCP8X5=m +CONFIG_USB_SERIAL_SAFE=m +# CONFIG_USB_SERIAL_SAFE_PADDED is not set +CONFIG_USB_SERIAL_SIERRAWIRELESS=m +CONFIG_USB_SERIAL_SYMBOL=m +CONFIG_USB_SERIAL_TI=m +CONFIG_USB_SERIAL_CYBERJACK=m +CONFIG_USB_SERIAL_WWAN=m +CONFIG_USB_SERIAL_OPTION=m +CONFIG_USB_SERIAL_OMNINET=m +CONFIG_USB_SERIAL_OPTICON=m +CONFIG_USB_SERIAL_XSENS_MT=m +CONFIG_USB_SERIAL_WISHBONE=m +CONFIG_USB_SERIAL_SSU100=m +CONFIG_USB_SERIAL_QT2=m +CONFIG_USB_SERIAL_UPD78F0730=m +# CONFIG_USB_SERIAL_XR is not set +CONFIG_USB_SERIAL_DEBUG=m + +# +# USB Miscellaneous drivers +# +# CONFIG_USB_USS720 is not set +CONFIG_USB_EMI62=m +CONFIG_USB_EMI26=m +CONFIG_USB_ADUTUX=m +CONFIG_USB_SEVSEG=m +CONFIG_USB_LEGOTOWER=m +CONFIG_USB_LCD=m +CONFIG_USB_CYPRESS_CY7C63=m +CONFIG_USB_CYTHERM=m +CONFIG_USB_IDMOUSE=m +CONFIG_USB_APPLEDISPLAY=m +# CONFIG_USB_QCOM_EUD is not set +# CONFIG_APPLE_MFI_FASTCHARGE is not set +CONFIG_USB_SISUSBVGA=m +CONFIG_USB_LD=m +CONFIG_USB_TRANCEVIBRATOR=m +CONFIG_USB_IOWARRIOR=m +CONFIG_USB_TEST=m +CONFIG_USB_EHSET_TEST_FIXTURE=m +CONFIG_USB_ISIGHTFW=m +CONFIG_USB_YUREX=m +CONFIG_USB_EZUSB_FX2=m +# CONFIG_USB_HUB_USB251XB is not set +CONFIG_USB_HSIC_USB3503=m +# CONFIG_USB_HSIC_USB4604 is not set +# CONFIG_USB_LINK_LAYER_TEST is not set +CONFIG_USB_CHAOSKEY=m +# CONFIG_USB_ONBOARD_HUB is not set +# CONFIG_USB_ATM is not set + +# +# USB Physical Layer drivers +# +CONFIG_USB_PHY=y +CONFIG_NOP_USB_XCEIV=m +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_USB_ISP1301 is not set +CONFIG_USB_TEGRA_PHY=m +CONFIG_USB_ULPI=y +CONFIG_USB_ULPI_VIEWPORT=y +# end of USB Physical 
Layer drivers + +CONFIG_USB_GADGET=m +# CONFIG_USB_GADGET_DEBUG is not set +# CONFIG_USB_GADGET_DEBUG_FILES is not set +# CONFIG_USB_GADGET_DEBUG_FS is not set +CONFIG_USB_GADGET_VBUS_DRAW=2 +CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2 +# CONFIG_U_SERIAL_CONSOLE is not set + +# +# USB Peripheral Controller +# +# CONFIG_USB_GR_UDC is not set +# CONFIG_USB_R8A66597 is not set +# CONFIG_USB_PXA27X is not set +# CONFIG_USB_MV_UDC is not set +# CONFIG_USB_MV_U3D is not set +# CONFIG_USB_SNP_UDC_PLAT is not set +# CONFIG_USB_M66592 is not set +# CONFIG_USB_BDC_UDC is not set +# CONFIG_USB_AMD5536UDC is not set +# CONFIG_USB_NET2272 is not set +CONFIG_USB_NET2280=m +# CONFIG_USB_GOKU is not set +# CONFIG_USB_EG20T is not set +# CONFIG_USB_GADGET_XILINX is not set +# CONFIG_USB_MAX3420_UDC is not set +# CONFIG_USB_TEGRA_XUDC is not set +# CONFIG_USB_CDNS2_UDC is not set +# CONFIG_USB_DUMMY_HCD is not set +# end of USB Peripheral Controller + +CONFIG_USB_LIBCOMPOSITE=m +CONFIG_USB_F_ACM=m +CONFIG_USB_F_SS_LB=m +CONFIG_USB_U_SERIAL=m +CONFIG_USB_U_ETHER=m +CONFIG_USB_U_AUDIO=m +CONFIG_USB_F_SERIAL=m +CONFIG_USB_F_OBEX=m +CONFIG_USB_F_NCM=m +CONFIG_USB_F_ECM=m +CONFIG_USB_F_PHONET=m +CONFIG_USB_F_EEM=m +CONFIG_USB_F_SUBSET=m +CONFIG_USB_F_RNDIS=m +CONFIG_USB_F_MASS_STORAGE=m +CONFIG_USB_F_FS=m +CONFIG_USB_F_UAC1=m +CONFIG_USB_F_UAC2=m +CONFIG_USB_F_UVC=m +CONFIG_USB_F_MIDI=m +CONFIG_USB_F_HID=m +CONFIG_USB_F_PRINTER=m +CONFIG_USB_CONFIGFS=m +CONFIG_USB_CONFIGFS_SERIAL=y +CONFIG_USB_CONFIGFS_ACM=y +CONFIG_USB_CONFIGFS_OBEX=y +CONFIG_USB_CONFIGFS_NCM=y +CONFIG_USB_CONFIGFS_ECM=y +CONFIG_USB_CONFIGFS_ECM_SUBSET=y +CONFIG_USB_CONFIGFS_RNDIS=y +CONFIG_USB_CONFIGFS_EEM=y +CONFIG_USB_CONFIGFS_PHONET=y +CONFIG_USB_CONFIGFS_MASS_STORAGE=y +CONFIG_USB_CONFIGFS_F_LB_SS=y +CONFIG_USB_CONFIGFS_F_FS=y +CONFIG_USB_CONFIGFS_F_UAC1=y +# CONFIG_USB_CONFIGFS_F_UAC1_LEGACY is not set +CONFIG_USB_CONFIGFS_F_UAC2=y +CONFIG_USB_CONFIGFS_F_MIDI=y +# CONFIG_USB_CONFIGFS_F_MIDI2 is not set +CONFIG_USB_CONFIGFS_F_HID=y +CONFIG_USB_CONFIGFS_F_UVC=y +CONFIG_USB_CONFIGFS_F_PRINTER=y +# CONFIG_USB_CONFIGFS_F_TCM is not set + +# +# USB Gadget precomposed configurations +# +# CONFIG_USB_ZERO is not set +# CONFIG_USB_AUDIO is not set +CONFIG_USB_ETH=m +CONFIG_USB_ETH_RNDIS=y +# CONFIG_USB_ETH_EEM is not set +# CONFIG_USB_G_NCM is not set +CONFIG_USB_GADGETFS=m +CONFIG_USB_FUNCTIONFS=m +CONFIG_USB_FUNCTIONFS_ETH=y +CONFIG_USB_FUNCTIONFS_RNDIS=y +CONFIG_USB_FUNCTIONFS_GENERIC=y +# CONFIG_USB_MASS_STORAGE is not set +# CONFIG_USB_GADGET_TARGET is not set +CONFIG_USB_G_SERIAL=m +# CONFIG_USB_MIDI_GADGET is not set +# CONFIG_USB_G_PRINTER is not set +# CONFIG_USB_CDC_COMPOSITE is not set +# CONFIG_USB_G_NOKIA is not set +# CONFIG_USB_G_ACM_MS is not set +# CONFIG_USB_G_MULTI is not set +# CONFIG_USB_G_HID is not set +# CONFIG_USB_G_DBGP is not set +# CONFIG_USB_G_WEBCAM is not set +# CONFIG_USB_RAW_GADGET is not set +# end of USB Gadget precomposed configurations + +# CONFIG_TYPEC is not set +CONFIG_USB_ROLE_SWITCH=m +CONFIG_MMC=y +CONFIG_PWRSEQ_EMMC=y +# CONFIG_PWRSEQ_SD8787 is not set +CONFIG_PWRSEQ_SIMPLE=y +CONFIG_MMC_BLOCK=y +CONFIG_MMC_BLOCK_MINORS=256 +CONFIG_SDIO_UART=m +# CONFIG_MMC_TEST is not set + +# +# MMC/SD/SDIO Host Controller Drivers +# +# CONFIG_MMC_DEBUG is not set +CONFIG_MMC_ARMMMCI=m +CONFIG_MMC_QCOM_DML=y +CONFIG_MMC_STM32_SDMMC=y +CONFIG_MMC_SDHCI=m +CONFIG_MMC_SDHCI_IO_ACCESSORS=y +CONFIG_MMC_SDHCI_PCI=m +CONFIG_MMC_RICOH_MMC=y +CONFIG_MMC_SDHCI_ACPI=m +CONFIG_MMC_SDHCI_PLTFM=m +CONFIG_MMC_SDHCI_OF_ARASAN=m +# 
CONFIG_MMC_SDHCI_OF_AT91 is not set +# CONFIG_MMC_SDHCI_OF_DWCMSHC is not set +# CONFIG_MMC_SDHCI_CADENCE is not set +CONFIG_MMC_SDHCI_TEGRA=m +# CONFIG_MMC_SDHCI_PXAV3 is not set +CONFIG_MMC_SDHCI_F_SDH30=m +# CONFIG_MMC_SDHCI_MILBEAUT is not set +CONFIG_MMC_MESON_GX=m +# CONFIG_MMC_MESON_MX_SDIO is not set +CONFIG_MMC_SDHCI_MSM=m +CONFIG_MMC_TIFM_SD=m +CONFIG_MMC_SPI=m +CONFIG_MMC_CB710=m +CONFIG_MMC_VIA_SDMMC=m +CONFIG_MMC_DW=m +CONFIG_MMC_DW_PLTFM=m +# CONFIG_MMC_DW_BLUEFIELD is not set +# CONFIG_MMC_DW_EXYNOS is not set +# CONFIG_MMC_DW_HI3798CV200 is not set +CONFIG_MMC_DW_K3=m +# CONFIG_MMC_DW_PCI is not set +CONFIG_MMC_DW_ROCKCHIP=m +CONFIG_MMC_VUB300=m +CONFIG_MMC_USHC=m +# CONFIG_MMC_USDHI6ROL0 is not set +CONFIG_MMC_REALTEK_PCI=m +CONFIG_MMC_REALTEK_USB=m +CONFIG_MMC_SUNXI=m +CONFIG_MMC_CQHCI=m +# CONFIG_MMC_HSQ is not set +CONFIG_MMC_TOSHIBA_PCI=m +# CONFIG_MMC_MTK is not set +CONFIG_MMC_SDHCI_XENON=m +CONFIG_SCSI_UFSHCD=m +# CONFIG_SCSI_UFS_BSG is not set +# CONFIG_SCSI_UFS_FAULT_INJECTION is not set +# CONFIG_SCSI_UFS_HWMON is not set +CONFIG_SCSI_UFSHCD_PCI=m +# CONFIG_SCSI_UFS_DWC_TC_PCI is not set +# CONFIG_SCSI_UFSHCD_PLATFORM is not set +CONFIG_MEMSTICK=m +# CONFIG_MEMSTICK_DEBUG is not set + +# +# MemoryStick drivers +# +# CONFIG_MEMSTICK_UNSAFE_RESUME is not set +CONFIG_MSPRO_BLOCK=m +# CONFIG_MS_BLOCK is not set + +# +# MemoryStick Host Controller Drivers +# +CONFIG_MEMSTICK_TIFM_MS=m +CONFIG_MEMSTICK_JMICRON_38X=m +CONFIG_MEMSTICK_R592=m +CONFIG_MEMSTICK_REALTEK_PCI=m +CONFIG_MEMSTICK_REALTEK_USB=m +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +# CONFIG_LEDS_CLASS_FLASH is not set +# CONFIG_LEDS_CLASS_MULTICOLOR is not set +CONFIG_LEDS_BRIGHTNESS_HW_CHANGED=y + +# +# LED drivers +# +# CONFIG_LEDS_AN30259A is not set +# CONFIG_LEDS_AW200XX is not set +# CONFIG_LEDS_AW2013 is not set +# CONFIG_LEDS_BCM6328 is not set +# CONFIG_LEDS_BCM6358 is not set +# CONFIG_LEDS_CR0014114 is not set +# CONFIG_LEDS_EL15203000 is not set +# CONFIG_LEDS_LM3530 is not set +# CONFIG_LEDS_LM3532 is not set +# CONFIG_LEDS_LM3642 is not set +# CONFIG_LEDS_LM3692X is not set +# CONFIG_LEDS_PCA9532 is not set +CONFIG_LEDS_GPIO=m +CONFIG_LEDS_LP3944=m +# CONFIG_LEDS_LP3952 is not set +# CONFIG_LEDS_LP50XX is not set +# CONFIG_LEDS_LP55XX_COMMON is not set +# CONFIG_LEDS_LP8860 is not set +CONFIG_LEDS_PCA955X=m +# CONFIG_LEDS_PCA955X_GPIO is not set +# CONFIG_LEDS_PCA963X is not set +# CONFIG_LEDS_PCA995X is not set +CONFIG_LEDS_DAC124S085=m +CONFIG_LEDS_PWM=m +CONFIG_LEDS_REGULATOR=m +# CONFIG_LEDS_BD2606MVV is not set +CONFIG_LEDS_BD2802=m +CONFIG_LEDS_LT3593=m +# CONFIG_LEDS_TCA6507 is not set +# CONFIG_LEDS_TLC591XX is not set +# CONFIG_LEDS_LM355x is not set +# CONFIG_LEDS_IS31FL319X is not set +# CONFIG_LEDS_IS31FL32XX is not set + +# +# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM) +# +# CONFIG_LEDS_BLINKM is not set +# CONFIG_LEDS_SYSCON is not set +# CONFIG_LEDS_MLXREG is not set +# CONFIG_LEDS_USER is not set +# CONFIG_LEDS_SPI_BYTE is not set +# CONFIG_LEDS_LM3697 is not set + +# +# Flash and Torch LED drivers +# + +# +# RGB LED drivers +# + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_TIMER=m +CONFIG_LEDS_TRIGGER_ONESHOT=m +CONFIG_LEDS_TRIGGER_DISK=y +CONFIG_LEDS_TRIGGER_MTD=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=m +CONFIG_LEDS_TRIGGER_BACKLIGHT=m +CONFIG_LEDS_TRIGGER_CPU=y +# CONFIG_LEDS_TRIGGER_ACTIVITY is not set +CONFIG_LEDS_TRIGGER_DEFAULT_ON=m + +# +# iptables trigger is under Netfilter config (LED target) +# 
+CONFIG_LEDS_TRIGGER_TRANSIENT=m +CONFIG_LEDS_TRIGGER_CAMERA=m +CONFIG_LEDS_TRIGGER_PANIC=y +# CONFIG_LEDS_TRIGGER_NETDEV is not set +# CONFIG_LEDS_TRIGGER_PATTERN is not set +CONFIG_LEDS_TRIGGER_AUDIO=m +# CONFIG_LEDS_TRIGGER_TTY is not set + +# +# Simple LED drivers +# +CONFIG_ACCESSIBILITY=y +CONFIG_A11Y_BRAILLE_CONSOLE=y + +# +# Speakup console speech +# +CONFIG_SPEAKUP=m +CONFIG_SPEAKUP_SYNTH_ACNTSA=m +CONFIG_SPEAKUP_SYNTH_APOLLO=m +CONFIG_SPEAKUP_SYNTH_AUDPTR=m +CONFIG_SPEAKUP_SYNTH_BNS=m +CONFIG_SPEAKUP_SYNTH_DECTLK=m +CONFIG_SPEAKUP_SYNTH_DECEXT=m +CONFIG_SPEAKUP_SYNTH_LTLK=m +CONFIG_SPEAKUP_SYNTH_SOFT=m +CONFIG_SPEAKUP_SYNTH_SPKOUT=m +CONFIG_SPEAKUP_SYNTH_TXPRT=m +CONFIG_SPEAKUP_SYNTH_DUMMY=m +# end of Speakup console speech + +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +CONFIG_INFINIBAND_USER_MEM=y +CONFIG_INFINIBAND_ON_DEMAND_PAGING=y +CONFIG_INFINIBAND_ADDR_TRANS=y +CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y +CONFIG_INFINIBAND_VIRT_DMA=y +# CONFIG_INFINIBAND_BNXT_RE is not set +CONFIG_INFINIBAND_CXGB4=m +# CONFIG_INFINIBAND_EFA is not set +# CONFIG_INFINIBAND_ERDMA is not set +# CONFIG_INFINIBAND_IRDMA is not set +CONFIG_MLX4_INFINIBAND=m +CONFIG_MLX5_INFINIBAND=m +CONFIG_INFINIBAND_MTHCA=m +CONFIG_INFINIBAND_MTHCA_DEBUG=y +CONFIG_INFINIBAND_OCRDMA=m +CONFIG_INFINIBAND_QEDR=m +CONFIG_RDMA_RXE=m +# CONFIG_RDMA_SIW is not set +CONFIG_INFINIBAND_IPOIB=m +CONFIG_INFINIBAND_IPOIB_CM=y +CONFIG_INFINIBAND_IPOIB_DEBUG=y +# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set +CONFIG_INFINIBAND_SRP=m +CONFIG_INFINIBAND_SRPT=m +CONFIG_INFINIBAND_ISER=m +CONFIG_INFINIBAND_ISERT=m +# CONFIG_INFINIBAND_RTRS_CLIENT is not set +# CONFIG_INFINIBAND_RTRS_SERVER is not set +CONFIG_EDAC_SUPPORT=y +CONFIG_EDAC=y +CONFIG_EDAC_LEGACY_SYSFS=y +CONFIG_EDAC_DEBUG=y +CONFIG_EDAC_GHES=y +CONFIG_EDAC_THUNDERX=m +# CONFIG_EDAC_SYNOPSYS is not set +CONFIG_EDAC_XGENE=m +# CONFIG_EDAC_DMC520 is not set +# CONFIG_EDAC_ZYNQMP is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +CONFIG_RTC_SYSTOHC=y +CONFIG_RTC_SYSTOHC_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set +CONFIG_RTC_NVMEM=y + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_ABB5ZES3 is not set +# CONFIG_RTC_DRV_ABEOZ9 is not set +# CONFIG_RTC_DRV_ABX80X is not set +CONFIG_RTC_DRV_DS1307=y +# CONFIG_RTC_DRV_DS1307_CENTURY is not set +# CONFIG_RTC_DRV_DS1374 is not set +# CONFIG_RTC_DRV_DS1672 is not set +# CONFIG_RTC_DRV_HYM8563 is not set +# CONFIG_RTC_DRV_MAX6900 is not set +CONFIG_RTC_DRV_MAX77686=y +# CONFIG_RTC_DRV_NCT3018Y is not set +# CONFIG_RTC_DRV_RS5C372 is not set +# CONFIG_RTC_DRV_ISL1208 is not set +# CONFIG_RTC_DRV_ISL12022 is not set +# CONFIG_RTC_DRV_ISL12026 is not set +# CONFIG_RTC_DRV_X1205 is not set +# CONFIG_RTC_DRV_PCF8523 is not set +# CONFIG_RTC_DRV_PCF85063 is not set +# CONFIG_RTC_DRV_PCF85363 is not set +CONFIG_RTC_DRV_PCF8563=y +# CONFIG_RTC_DRV_PCF8583 is not set +# CONFIG_RTC_DRV_M41T80 is not set +# CONFIG_RTC_DRV_BQ32K is not set +# CONFIG_RTC_DRV_S35390A is not set +# CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8010 is not set +# CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set +# CONFIG_RTC_DRV_EM3027 is not set +# CONFIG_RTC_DRV_RV3028 is not set +# CONFIG_RTC_DRV_RV3032 is not set +# CONFIG_RTC_DRV_RV8803 is not set +# CONFIG_RTC_DRV_SD3078 
is not set + +# +# SPI RTC drivers +# +# CONFIG_RTC_DRV_M41T93 is not set +# CONFIG_RTC_DRV_M41T94 is not set +# CONFIG_RTC_DRV_DS1302 is not set +# CONFIG_RTC_DRV_DS1305 is not set +# CONFIG_RTC_DRV_DS1343 is not set +# CONFIG_RTC_DRV_DS1347 is not set +# CONFIG_RTC_DRV_DS1390 is not set +# CONFIG_RTC_DRV_MAX6916 is not set +# CONFIG_RTC_DRV_R9701 is not set +# CONFIG_RTC_DRV_RX4581 is not set +# CONFIG_RTC_DRV_RS5C348 is not set +# CONFIG_RTC_DRV_MAX6902 is not set +# CONFIG_RTC_DRV_PCF2123 is not set +# CONFIG_RTC_DRV_MCP795 is not set +CONFIG_RTC_I2C_AND_SPI=y + +# +# SPI and I2C RTC drivers +# +# CONFIG_RTC_DRV_DS3232 is not set +# CONFIG_RTC_DRV_PCF2127 is not set +# CONFIG_RTC_DRV_RV3029C2 is not set +# CONFIG_RTC_DRV_RX6110 is not set + +# +# Platform RTC drivers +# +# CONFIG_RTC_DRV_DS1286 is not set +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1685_FAMILY is not set +# CONFIG_RTC_DRV_DS1742 is not set +# CONFIG_RTC_DRV_DS2404 is not set +CONFIG_RTC_DRV_EFI=y +# CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set +# CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_MSM6242 is not set +# CONFIG_RTC_DRV_RP5C01 is not set +# CONFIG_RTC_DRV_OPTEE is not set +# CONFIG_RTC_DRV_ZYNQMP is not set +CONFIG_RTC_DRV_CROS_EC=m + +# +# on-CPU RTC drivers +# +CONFIG_RTC_DRV_MESON_VRTC=m +# CONFIG_RTC_DRV_PL030 is not set +CONFIG_RTC_DRV_PL031=y +CONFIG_RTC_DRV_SUN6I=y +CONFIG_RTC_DRV_MV=m +CONFIG_RTC_DRV_ARMADA38X=m +# CONFIG_RTC_DRV_CADENCE is not set +# CONFIG_RTC_DRV_FTRTC010 is not set +CONFIG_RTC_DRV_PM8XXX=m +CONFIG_RTC_DRV_TEGRA=y +CONFIG_RTC_DRV_XGENE=y +# CONFIG_RTC_DRV_R7301 is not set + +# +# HID Sensor RTC drivers +# +# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set +# CONFIG_RTC_DRV_GOLDFISH is not set +CONFIG_DMADEVICES=y +# CONFIG_DMADEVICES_DEBUG is not set + +# +# DMA Devices +# +CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=y +CONFIG_DMA_ENGINE=y +CONFIG_DMA_VIRTUAL_CHANNELS=y +CONFIG_DMA_ACPI=y +CONFIG_DMA_OF=y +# CONFIG_ALTERA_MSGDMA is not set +# CONFIG_AMBA_PL08X is not set +# CONFIG_AXI_DMAC is not set +# CONFIG_BCM_SBA_RAID is not set +CONFIG_DMA_SUN6I=m +# CONFIG_DW_AXI_DMAC is not set +# CONFIG_FSL_EDMA is not set +# CONFIG_FSL_QDMA is not set +# CONFIG_HISI_DMA is not set +# CONFIG_INTEL_IDMA64 is not set +CONFIG_K3_DMA=m +CONFIG_MV_XOR=y +CONFIG_MV_XOR_V2=y +CONFIG_PL330_DMA=m +# CONFIG_PLX_DMA is not set +# CONFIG_TEGRA186_GPC_DMA is not set +CONFIG_TEGRA20_APB_DMA=y +CONFIG_TEGRA210_ADMA=y +CONFIG_XGENE_DMA=m +# CONFIG_XILINX_DMA is not set +# CONFIG_XILINX_XDMA is not set +# CONFIG_XILINX_ZYNQMP_DMA is not set +# CONFIG_XILINX_ZYNQMP_DPDMA is not set +CONFIG_QCOM_BAM_DMA=m +# CONFIG_QCOM_GPI_DMA is not set +CONFIG_QCOM_HIDMA_MGMT=m +CONFIG_QCOM_HIDMA=m +# CONFIG_DW_DMAC is not set +# CONFIG_DW_DMAC_PCI is not set +# CONFIG_DW_EDMA is not set +# CONFIG_SF_PDMA is not set + +# +# DMA Clients +# +CONFIG_ASYNC_TX_DMA=y +# CONFIG_DMATEST is not set +CONFIG_DMA_ENGINE_RAID=y + +# +# DMABUF options +# +CONFIG_SYNC_FILE=y +# CONFIG_SW_SYNC is not set +# CONFIG_UDMABUF is not set +# CONFIG_DMABUF_MOVE_NOTIFY is not set +# CONFIG_DMABUF_DEBUG is not set +# CONFIG_DMABUF_SELFTESTS is not set +CONFIG_DMABUF_HEAPS=y +# CONFIG_DMABUF_SYSFS_STATS is not set +CONFIG_DMABUF_HEAPS_SYSTEM=y +# end of DMABUF options + +CONFIG_UIO=m +CONFIG_UIO_CIF=m +# CONFIG_UIO_PDRV_GENIRQ is not set +# CONFIG_UIO_DMEM_GENIRQ is not set +CONFIG_UIO_AEC=m +CONFIG_UIO_SERCOS3=m +CONFIG_UIO_PCI_GENERIC=m 
+CONFIG_UIO_NETX=m +# CONFIG_UIO_PRUSS is not set +CONFIG_UIO_MF624=m +# CONFIG_UIO_HV_GENERIC is not set +CONFIG_VFIO=m +CONFIG_VFIO_GROUP=y +CONFIG_VFIO_CONTAINER=y +CONFIG_VFIO_IOMMU_TYPE1=m +CONFIG_VFIO_NOIOMMU=y +CONFIG_VFIO_VIRQFD=y + +# +# VFIO support for PCI devices +# +CONFIG_VFIO_PCI_CORE=m +CONFIG_VFIO_PCI_MMAP=y +CONFIG_VFIO_PCI_INTX=y +CONFIG_VFIO_PCI=m +# CONFIG_MLX5_VFIO_PCI is not set +# end of VFIO support for PCI devices + +# +# VFIO support for platform devices +# +CONFIG_VFIO_PLATFORM_BASE=m +CONFIG_VFIO_PLATFORM=m +# CONFIG_VFIO_AMBA is not set + +# +# VFIO platform reset drivers +# +# CONFIG_VFIO_PLATFORM_CALXEDAXGMAC_RESET is not set +# CONFIG_VFIO_PLATFORM_AMDXGBE_RESET is not set +# end of VFIO platform reset drivers +# end of VFIO support for platform devices + +CONFIG_VIRT_DRIVERS=y +CONFIG_VMGENID=y +# CONFIG_NITRO_ENCLAVES is not set +CONFIG_VIRTIO_ANCHOR=y +CONFIG_VIRTIO=m +CONFIG_VIRTIO_PCI_LIB=m +CONFIG_VIRTIO_PCI_LIB_LEGACY=m +CONFIG_VIRTIO_MENU=y +CONFIG_VIRTIO_PCI=m +CONFIG_VIRTIO_PCI_LEGACY=y +# CONFIG_VIRTIO_PMEM is not set +CONFIG_VIRTIO_BALLOON=m +# CONFIG_VIRTIO_MEM is not set +CONFIG_VIRTIO_INPUT=m +CONFIG_VIRTIO_MMIO=m +# CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES is not set +CONFIG_VIRTIO_DMA_SHARED_BUFFER=m +# CONFIG_VDPA is not set +CONFIG_VHOST_IOTLB=m +CONFIG_VHOST_TASK=y +CONFIG_VHOST=m +CONFIG_VHOST_MENU=y +CONFIG_VHOST_NET=m +CONFIG_VHOST_SCSI=m +CONFIG_VHOST_VSOCK=m +# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set + +# +# Microsoft Hyper-V guest support +# +CONFIG_HYPERV=m +CONFIG_HYPERV_UTILS=m +CONFIG_HYPERV_BALLOON=m +# end of Microsoft Hyper-V guest support + +# +# Xen driver support +# +CONFIG_XEN_BALLOON=y +CONFIG_XEN_BALLOON_MEMORY_HOTPLUG=y +CONFIG_XEN_SCRUB_PAGES_DEFAULT=y +CONFIG_XEN_DEV_EVTCHN=m +CONFIG_XEN_BACKEND=y +CONFIG_XENFS=m +CONFIG_XEN_COMPAT_XENFS=y +CONFIG_XEN_SYS_HYPERVISOR=y +CONFIG_XEN_XENBUS_FRONTEND=y +CONFIG_XEN_GNTDEV=m +CONFIG_XEN_GRANT_DEV_ALLOC=m +# CONFIG_XEN_GRANT_DMA_ALLOC is not set +CONFIG_SWIOTLB_XEN=y +CONFIG_XEN_PCI_STUB=y +CONFIG_XEN_PCIDEV_STUB=m +# CONFIG_XEN_PVCALLS_FRONTEND is not set +# CONFIG_XEN_PVCALLS_BACKEND is not set +CONFIG_XEN_SCSI_BACKEND=m +CONFIG_XEN_PRIVCMD=m +CONFIG_XEN_EFI=y +CONFIG_XEN_AUTO_XLATE=y +CONFIG_XEN_FRONT_PGDIR_SHBUF=m +CONFIG_XEN_UNPOPULATED_ALLOC=y +# CONFIG_XEN_VIRTIO is not set +# end of Xen driver support + +# CONFIG_GREYBUS is not set +# CONFIG_COMEDI is not set +CONFIG_STAGING=y +# CONFIG_PRISM2_USB is not set +# CONFIG_RTL8192U is not set +# CONFIG_RTLLIB is not set +CONFIG_RTL8723BS=m +CONFIG_R8712U=m +# CONFIG_RTS5208 is not set +# CONFIG_VT6655 is not set +# CONFIG_VT6656 is not set + +# +# IIO staging drivers +# + +# +# Accelerometers +# +# CONFIG_ADIS16203 is not set +# CONFIG_ADIS16240 is not set +# end of Accelerometers + +# +# Analog to digital converters +# +# CONFIG_AD7816 is not set +# end of Analog to digital converters + +# +# Analog digital bi-direction converters +# +# CONFIG_ADT7316 is not set +# end of Analog digital bi-direction converters + +# +# Direct Digital Synthesis +# +# CONFIG_AD9832 is not set +# CONFIG_AD9834 is not set +# end of Direct Digital Synthesis + +# +# Network Analyzer, Impedance Converters +# +# CONFIG_AD5933 is not set +# end of Network Analyzer, Impedance Converters + +# +# Resolver to digital converters +# +# CONFIG_AD2S1210 is not set +# end of Resolver to digital converters +# end of IIO staging drivers + +# CONFIG_FB_SM750 is not set +# CONFIG_MFD_NVEC is not set +# CONFIG_STAGING_MEDIA is not set +# CONFIG_STAGING_BOARD is 
not set +# CONFIG_LTE_GDM724X is not set +# CONFIG_FB_TFT is not set +# CONFIG_KS7010 is not set +# CONFIG_PI433 is not set +# CONFIG_XIL_AXIS_FIFO is not set +# CONFIG_FIELDBUS_DEV is not set +CONFIG_QLGE=m +# CONFIG_VME_BUS is not set +# CONFIG_GOLDFISH is not set +CONFIG_CHROME_PLATFORMS=y +# CONFIG_CHROMEOS_ACPI is not set +# CONFIG_CHROMEOS_TBMC is not set +CONFIG_CROS_EC=y +CONFIG_CROS_EC_I2C=m +# CONFIG_CROS_EC_RPMSG is not set +CONFIG_CROS_EC_SPI=m +# CONFIG_CROS_EC_UART is not set +CONFIG_CROS_EC_PROTO=y +CONFIG_CROS_KBD_LED_BACKLIGHT=m +CONFIG_CROS_EC_CHARDEV=y +CONFIG_CROS_EC_LIGHTBAR=m +CONFIG_CROS_EC_VBC=m +CONFIG_CROS_EC_DEBUGFS=m +CONFIG_CROS_EC_SENSORHUB=y +CONFIG_CROS_EC_SYSFS=m +# CONFIG_CROS_HPS_I2C is not set +CONFIG_CROS_USBPD_LOGGER=m +CONFIG_CROS_USBPD_NOTIFY=y +# CONFIG_CHROMEOS_PRIVACY_SCREEN is not set +# CONFIG_MELLANOX_PLATFORM is not set +CONFIG_SURFACE_PLATFORMS=y +# CONFIG_SURFACE_3_POWER_OPREGION is not set +# CONFIG_SURFACE_GPE is not set +# CONFIG_SURFACE_HOTPLUG is not set +# CONFIG_SURFACE_PRO3_BUTTON is not set +# CONFIG_SURFACE_AGGREGATOR is not set +CONFIG_HAVE_CLK=y +CONFIG_HAVE_CLK_PREPARE=y +CONFIG_COMMON_CLK=y + +# +# Clock driver for ARM Reference designs +# +# CONFIG_CLK_ICST is not set +CONFIG_CLK_SP810=y +CONFIG_CLK_VEXPRESS_OSC=y +# end of Clock driver for ARM Reference designs + +# CONFIG_LMK04832 is not set +# CONFIG_COMMON_CLK_MAX77686 is not set +# CONFIG_COMMON_CLK_MAX9485 is not set +CONFIG_COMMON_CLK_HI655X=m +# CONFIG_COMMON_CLK_SI5341 is not set +# CONFIG_COMMON_CLK_SI5351 is not set +# CONFIG_COMMON_CLK_SI514 is not set +# CONFIG_COMMON_CLK_SI544 is not set +# CONFIG_COMMON_CLK_SI570 is not set +# CONFIG_COMMON_CLK_CDCE706 is not set +# CONFIG_COMMON_CLK_CDCE925 is not set +# CONFIG_COMMON_CLK_CS2000_CP is not set +# CONFIG_COMMON_CLK_AXI_CLKGEN is not set +CONFIG_COMMON_CLK_XGENE=y +# CONFIG_COMMON_CLK_PWM is not set +# CONFIG_COMMON_CLK_RS9_PCIE is not set +# CONFIG_COMMON_CLK_SI521XX is not set +# CONFIG_COMMON_CLK_VC3 is not set +# CONFIG_COMMON_CLK_VC5 is not set +# CONFIG_COMMON_CLK_VC7 is not set +# CONFIG_COMMON_CLK_FIXED_MMIO is not set +CONFIG_COMMON_CLK_HI3516CV300=y +CONFIG_COMMON_CLK_HI3519=y +CONFIG_COMMON_CLK_HI3559A=y +CONFIG_COMMON_CLK_HI3660=y +CONFIG_COMMON_CLK_HI3670=y +CONFIG_COMMON_CLK_HI3798CV200=y +CONFIG_COMMON_CLK_HI6220=y +CONFIG_RESET_HISI=y +CONFIG_STUB_CLK_HI6220=y +CONFIG_STUB_CLK_HI3660=y + +# +# Clock support for Amlogic platforms +# +CONFIG_COMMON_CLK_MESON_REGMAP=y +CONFIG_COMMON_CLK_MESON_DUALDIV=y +CONFIG_COMMON_CLK_MESON_MPLL=y +CONFIG_COMMON_CLK_MESON_PLL=y +CONFIG_COMMON_CLK_MESON_VID_PLL_DIV=y +CONFIG_COMMON_CLK_MESON_CLKC_UTILS=y +CONFIG_COMMON_CLK_MESON_AO_CLKC=y +CONFIG_COMMON_CLK_MESON_EE_CLKC=y +CONFIG_COMMON_CLK_MESON_CPU_DYNDIV=y +CONFIG_COMMON_CLK_GXBB=y +CONFIG_COMMON_CLK_AXG=y +# CONFIG_COMMON_CLK_AXG_AUDIO is not set +# CONFIG_COMMON_CLK_A1_PLL is not set +# CONFIG_COMMON_CLK_A1_PERIPHERALS is not set +CONFIG_COMMON_CLK_G12A=y +# end of Clock support for Amlogic platforms + +CONFIG_ARMADA_AP_CP_HELPER=y +CONFIG_ARMADA_37XX_CLK=y +CONFIG_ARMADA_AP806_SYSCON=y +CONFIG_ARMADA_CP110_SYSCON=y +CONFIG_QCOM_GDSC=y +CONFIG_QCOM_RPMCC=y +CONFIG_COMMON_CLK_QCOM=y +CONFIG_QCOM_A53PLL=y +# CONFIG_QCOM_A7PLL is not set +CONFIG_QCOM_CLK_APCS_MSM8916=m +# CONFIG_QCOM_CLK_APCC_MSM8996 is not set +CONFIG_QCOM_CLK_RPM=m +CONFIG_QCOM_CLK_SMD_RPM=m +# CONFIG_IPQ_APSS_PLL is not set +# CONFIG_IPQ_GCC_4019 is not set +# CONFIG_IPQ_GCC_5018 is not set +# CONFIG_IPQ_GCC_5332 is not set +# 
CONFIG_IPQ_GCC_6018 is not set +# CONFIG_IPQ_GCC_8074 is not set +# CONFIG_IPQ_GCC_9574 is not set +CONFIG_MSM_GCC_8916=y +# CONFIG_MSM_GCC_8917 is not set +# CONFIG_MSM_GCC_8939 is not set +# CONFIG_MSM_GCC_8953 is not set +# CONFIG_MSM_GCC_8976 is not set +# CONFIG_MSM_MMCC_8994 is not set +# CONFIG_MSM_GCC_8994 is not set +CONFIG_MSM_GCC_8996=y +CONFIG_MSM_MMCC_8996=y +# CONFIG_MSM_GCC_8998 is not set +# CONFIG_MSM_GPUCC_8998 is not set +# CONFIG_MSM_MMCC_8998 is not set +# CONFIG_QCM_GCC_2290 is not set +# CONFIG_QCM_DISPCC_2290 is not set +# CONFIG_QCS_GCC_404 is not set +# CONFIG_SC_CAMCC_7180 is not set +# CONFIG_SC_CAMCC_7280 is not set +# CONFIG_SC_DISPCC_7180 is not set +# CONFIG_SC_DISPCC_7280 is not set +# CONFIG_SC_DISPCC_8280XP is not set +# CONFIG_SA_GCC_8775P is not set +# CONFIG_SA_GPUCC_8775P is not set +# CONFIG_SC_GCC_7180 is not set +# CONFIG_SC_GCC_7280 is not set +# CONFIG_SC_GCC_8180X is not set +# CONFIG_SC_GCC_8280XP is not set +# CONFIG_SC_GPUCC_7180 is not set +# CONFIG_SC_GPUCC_7280 is not set +# CONFIG_SC_GPUCC_8280XP is not set +# CONFIG_SC_LPASSCC_7280 is not set +# CONFIG_SC_LPASSCC_8280XP is not set +# CONFIG_SC_LPASS_CORECC_7180 is not set +# CONFIG_SC_LPASS_CORECC_7280 is not set +# CONFIG_SC_MSS_7180 is not set +# CONFIG_SC_VIDEOCC_7180 is not set +# CONFIG_SC_VIDEOCC_7280 is not set +# CONFIG_SDM_CAMCC_845 is not set +# CONFIG_SDM_GCC_660 is not set +# CONFIG_SDM_MMCC_660 is not set +# CONFIG_SDM_GPUCC_660 is not set +# CONFIG_QCS_TURING_404 is not set +# CONFIG_QCS_Q6SSTOP_404 is not set +# CONFIG_QDU_GCC_1000 is not set +# CONFIG_SDM_GCC_845 is not set +# CONFIG_SDM_GPUCC_845 is not set +# CONFIG_SDM_VIDEOCC_845 is not set +# CONFIG_SDM_DISPCC_845 is not set +# CONFIG_SDM_LPASSCC_845 is not set +# CONFIG_SDX_GCC_75 is not set +# CONFIG_SM_CAMCC_6350 is not set +# CONFIG_SM_CAMCC_8250 is not set +# CONFIG_SM_CAMCC_8450 is not set +# CONFIG_SM_GCC_6115 is not set +# CONFIG_SM_GCC_6125 is not set +# CONFIG_SM_GCC_6350 is not set +# CONFIG_SM_GCC_6375 is not set +# CONFIG_SM_GCC_7150 is not set +# CONFIG_SM_GCC_8150 is not set +# CONFIG_SM_GCC_8250 is not set +# CONFIG_SM_GCC_8350 is not set +# CONFIG_SM_GCC_8450 is not set +# CONFIG_SM_GCC_8550 is not set +# CONFIG_SM_GPUCC_6115 is not set +# CONFIG_SM_GPUCC_6125 is not set +# CONFIG_SM_GPUCC_6375 is not set +# CONFIG_SM_GPUCC_6350 is not set +# CONFIG_SM_GPUCC_8150 is not set +# CONFIG_SM_GPUCC_8250 is not set +# CONFIG_SM_GPUCC_8350 is not set +# CONFIG_SM_GPUCC_8450 is not set +# CONFIG_SM_GPUCC_8550 is not set +# CONFIG_SM_TCSRCC_8550 is not set +# CONFIG_SM_VIDEOCC_8150 is not set +# CONFIG_SM_VIDEOCC_8250 is not set +# CONFIG_SM_VIDEOCC_8350 is not set +# CONFIG_SM_VIDEOCC_8550 is not set +# CONFIG_SPMI_PMIC_CLKDIV is not set +# CONFIG_QCOM_HFPLL is not set +# CONFIG_KPSS_XCC is not set +# CONFIG_CLK_GFM_LPASS_SM8250 is not set +# CONFIG_SM_VIDEOCC_8450 is not set +CONFIG_COMMON_CLK_ROCKCHIP=y +CONFIG_CLK_PX30=y +CONFIG_CLK_RK3308=y +CONFIG_CLK_RK3328=y +CONFIG_CLK_RK3368=y +CONFIG_CLK_RK3399=y +CONFIG_CLK_RK3568=y +CONFIG_CLK_RK3588=y +CONFIG_SUNXI_CCU=y +CONFIG_SUN50I_A64_CCU=y +CONFIG_SUN50I_A100_CCU=y +CONFIG_SUN50I_A100_R_CCU=y +CONFIG_SUN50I_H6_CCU=y +CONFIG_SUN50I_H616_CCU=y +CONFIG_SUN50I_H6_R_CCU=y +CONFIG_SUN6I_RTC_CCU=y +CONFIG_SUN8I_H3_CCU=y +CONFIG_SUN8I_DE2_CCU=y +CONFIG_SUN8I_R_CCU=y +CONFIG_CLK_TEGRA_BPMP=y +CONFIG_TEGRA_CLK_DFLL=y +# CONFIG_XILINX_VCU is not set +# CONFIG_COMMON_CLK_XLNX_CLKWZRD is not set +# CONFIG_COMMON_CLK_ZYNQMP is not set +# CONFIG_HWSPINLOCK is not set + +# 
+# Clock Source drivers +# +CONFIG_TIMER_OF=y +CONFIG_TIMER_ACPI=y +CONFIG_TIMER_PROBE=y +CONFIG_CLKSRC_MMIO=y +CONFIG_ROCKCHIP_TIMER=y +CONFIG_SUN4I_TIMER=y +CONFIG_TEGRA_TIMER=y +# CONFIG_TEGRA186_TIMER is not set +CONFIG_ARM_ARCH_TIMER=y +CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y +CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND=y +CONFIG_FSL_ERRATUM_A008585=y +CONFIG_HISILICON_ERRATUM_161010101=y +CONFIG_ARM64_ERRATUM_858921=y +CONFIG_SUN50I_ERRATUM_UNKNOWN1=y +CONFIG_ARM_TIMER_SP804=y +# end of Clock Source drivers + +CONFIG_MAILBOX=y +# CONFIG_ARM_MHU is not set +# CONFIG_ARM_MHU_V2 is not set +# CONFIG_PLATFORM_MHU is not set +# CONFIG_PL320_MBOX is not set +CONFIG_ARMADA_37XX_RWTM_MBOX=m +# CONFIG_ROCKCHIP_MBOX is not set +CONFIG_PCC=y +# CONFIG_ALTERA_MBOX is not set +CONFIG_HI3660_MBOX=y +CONFIG_HI6220_MBOX=y +# CONFIG_MAILBOX_TEST is not set +CONFIG_QCOM_APCS_IPC=m +CONFIG_TEGRA_HSP_MBOX=y +CONFIG_XGENE_SLIMPRO_MBOX=m +CONFIG_ZYNQMP_IPI_MBOX=y +CONFIG_SUN6I_MSGBOX=y +# CONFIG_QCOM_IPCC is not set +CONFIG_IOMMU_IOVA=y +CONFIG_IOMMU_API=y +CONFIG_IOMMU_SUPPORT=y + +# +# Generic IOMMU Pagetable Support +# +CONFIG_IOMMU_IO_PGTABLE=y +CONFIG_IOMMU_IO_PGTABLE_LPAE=y +# CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST is not set +# CONFIG_IOMMU_IO_PGTABLE_ARMV7S is not set +# CONFIG_IOMMU_IO_PGTABLE_DART is not set +# end of Generic IOMMU Pagetable Support + +# CONFIG_IOMMU_DEBUGFS is not set +CONFIG_IOMMU_DEFAULT_DMA_STRICT=y +# CONFIG_IOMMU_DEFAULT_DMA_LAZY is not set +# CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set +CONFIG_OF_IOMMU=y +CONFIG_IOMMU_DMA=y +CONFIG_IOMMU_SVA=y +# CONFIG_IOMMUFD is not set +CONFIG_ROCKCHIP_IOMMU=y +# CONFIG_SUN50I_IOMMU is not set +CONFIG_TEGRA_IOMMU_SMMU=y +CONFIG_ARM_SMMU=y +# CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS is not set +CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT=y +CONFIG_ARM_SMMU_QCOM=y +# CONFIG_ARM_SMMU_QCOM_DEBUG is not set +CONFIG_ARM_SMMU_V3=y +CONFIG_ARM_SMMU_V3_SVA=y +CONFIG_QCOM_IOMMU=y +# CONFIG_VIRTIO_IOMMU is not set + +# +# Remoteproc drivers +# +# CONFIG_REMOTEPROC is not set +# end of Remoteproc drivers + +# +# Rpmsg drivers +# +CONFIG_RPMSG=m +# CONFIG_RPMSG_CHAR is not set +# CONFIG_RPMSG_CTRL is not set +# CONFIG_RPMSG_NS is not set +CONFIG_RPMSG_QCOM_GLINK=m +CONFIG_RPMSG_QCOM_GLINK_RPM=m +# CONFIG_RPMSG_VIRTIO is not set +# end of Rpmsg drivers + +# CONFIG_SOUNDWIRE is not set + +# +# SOC (System On Chip) specific Drivers +# + +# +# Amlogic SoC drivers +# +CONFIG_MESON_CANVAS=m +CONFIG_MESON_CLK_MEASURE=y +CONFIG_MESON_GX_SOCINFO=y +CONFIG_MESON_GX_PM_DOMAINS=y +CONFIG_MESON_EE_PM_DOMAINS=y +# end of Amlogic SoC drivers + +# +# Broadcom SoC drivers +# +# CONFIG_SOC_BRCMSTB is not set +# end of Broadcom SoC drivers + +# +# NXP/Freescale QorIQ SoC drivers +# +# CONFIG_QUICC_ENGINE is not set +# CONFIG_FSL_RCPM is not set +# end of NXP/Freescale QorIQ SoC drivers + +# +# fujitsu SoC drivers +# +# CONFIG_A64FX_DIAG is not set +# end of fujitsu SoC drivers + +# +# Hisilicon SoC drivers +# +# CONFIG_KUNPENG_HCCS is not set +# end of Hisilicon SoC drivers + +# +# i.MX SoC drivers +# +# end of i.MX SoC drivers + +# +# Enable LiteX SoC Builder specific drivers +# +# CONFIG_LITEX_SOC_CONTROLLER is not set +# end of Enable LiteX SoC Builder specific drivers + +# CONFIG_WPCM450_SOC is not set + +# +# Qualcomm SoC drivers +# +# CONFIG_QCOM_AOSS_QMP is not set +CONFIG_QCOM_COMMAND_DB=y +# CONFIG_QCOM_CPR is not set +# CONFIG_QCOM_GENI_SE is not set +CONFIG_QCOM_GSBI=m +# CONFIG_QCOM_LLCC is not set +CONFIG_QCOM_KRYO_L2_ACCESSORS=y +CONFIG_QCOM_MDT_LOADER=m +# CONFIG_QCOM_OCMEM 
is not set +# CONFIG_QCOM_RAMP_CTRL is not set +# CONFIG_QCOM_RMTFS_MEM is not set +# CONFIG_QCOM_RPM_MASTER_STATS is not set +# CONFIG_QCOM_RPMH is not set +# CONFIG_QCOM_RPMPD is not set +CONFIG_QCOM_SMD_RPM=m +# CONFIG_QCOM_SPM is not set +CONFIG_QCOM_WCNSS_CTRL=m +# CONFIG_QCOM_APR is not set +# CONFIG_QCOM_ICC_BWMON is not set +# end of Qualcomm SoC drivers + +CONFIG_ROCKCHIP_GRF=y +CONFIG_ROCKCHIP_IODOMAIN=m +CONFIG_ROCKCHIP_PM_DOMAINS=y +CONFIG_SUNXI_MBUS=y +CONFIG_SUNXI_SRAM=y +# CONFIG_SUN20I_PPU is not set +CONFIG_ARCH_TEGRA_132_SOC=y +CONFIG_ARCH_TEGRA_210_SOC=y +CONFIG_ARCH_TEGRA_186_SOC=y +CONFIG_ARCH_TEGRA_194_SOC=y +# CONFIG_ARCH_TEGRA_234_SOC is not set +CONFIG_SOC_TEGRA_FUSE=y +CONFIG_SOC_TEGRA_FLOWCTRL=y +CONFIG_SOC_TEGRA_PMC=y +CONFIG_SOC_TEGRA_POWERGATE_BPMP=y +CONFIG_SOC_TEGRA_CBB=y +# CONFIG_SOC_TI is not set + +# +# Xilinx SoC drivers +# +CONFIG_ZYNQMP_POWER=y +CONFIG_ZYNQMP_PM_DOMAINS=y +CONFIG_XLNX_EVENT_MANAGER=y +# end of Xilinx SoC drivers +# end of SOC (System On Chip) specific Drivers + +CONFIG_PM_DEVFREQ=y + +# +# DEVFREQ Governors +# +CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=m +# CONFIG_DEVFREQ_GOV_PERFORMANCE is not set +# CONFIG_DEVFREQ_GOV_POWERSAVE is not set +# CONFIG_DEVFREQ_GOV_USERSPACE is not set +# CONFIG_DEVFREQ_GOV_PASSIVE is not set + +# +# DEVFREQ Drivers +# +# CONFIG_ARM_TEGRA_DEVFREQ is not set +CONFIG_ARM_RK3399_DMC_DEVFREQ=m +# CONFIG_ARM_SUN8I_A33_MBUS_DEVFREQ is not set +CONFIG_PM_DEVFREQ_EVENT=y +CONFIG_DEVFREQ_EVENT_ROCKCHIP_DFI=m +CONFIG_EXTCON=y + +# +# Extcon Device Drivers +# +# CONFIG_EXTCON_ADC_JACK is not set +# CONFIG_EXTCON_FSA9480 is not set +# CONFIG_EXTCON_GPIO is not set +# CONFIG_EXTCON_MAX3355 is not set +# CONFIG_EXTCON_PTN5150 is not set +CONFIG_EXTCON_QCOM_SPMI_MISC=m +# CONFIG_EXTCON_RT8973A is not set +# CONFIG_EXTCON_SM5502 is not set +CONFIG_EXTCON_USB_GPIO=m +CONFIG_EXTCON_USBC_CROS_EC=m +CONFIG_MEMORY=y +# CONFIG_ARM_PL172_MPMC is not set +CONFIG_TEGRA_MC=y +# CONFIG_TEGRA210_EMC is not set +CONFIG_IIO=m +CONFIG_IIO_BUFFER=y +# CONFIG_IIO_BUFFER_CB is not set +# CONFIG_IIO_BUFFER_DMA is not set +# CONFIG_IIO_BUFFER_DMAENGINE is not set +# CONFIG_IIO_BUFFER_HW_CONSUMER is not set +CONFIG_IIO_KFIFO_BUF=m +CONFIG_IIO_TRIGGERED_BUFFER=m +# CONFIG_IIO_CONFIGFS is not set +CONFIG_IIO_TRIGGER=y +CONFIG_IIO_CONSUMERS_PER_TRIGGER=2 +# CONFIG_IIO_SW_DEVICE is not set +# CONFIG_IIO_SW_TRIGGER is not set +# CONFIG_IIO_TRIGGERED_EVENT is not set + +# +# Accelerometers +# +# CONFIG_ADIS16201 is not set +# CONFIG_ADIS16209 is not set +# CONFIG_ADXL313_I2C is not set +# CONFIG_ADXL313_SPI is not set +# CONFIG_ADXL345_I2C is not set +# CONFIG_ADXL345_SPI is not set +# CONFIG_ADXL355_I2C is not set +# CONFIG_ADXL355_SPI is not set +# CONFIG_ADXL367_SPI is not set +# CONFIG_ADXL367_I2C is not set +# CONFIG_ADXL372_SPI is not set +# CONFIG_ADXL372_I2C is not set +# CONFIG_BMA180 is not set +# CONFIG_BMA220 is not set +# CONFIG_BMA400 is not set +# CONFIG_BMC150_ACCEL is not set +# CONFIG_BMI088_ACCEL is not set +# CONFIG_DA280 is not set +# CONFIG_DA311 is not set +# CONFIG_DMARD06 is not set +# CONFIG_DMARD09 is not set +# CONFIG_DMARD10 is not set +# CONFIG_FXLS8962AF_I2C is not set +# CONFIG_FXLS8962AF_SPI is not set +CONFIG_HID_SENSOR_ACCEL_3D=m +CONFIG_IIO_CROS_EC_ACCEL_LEGACY=m +# CONFIG_IIO_ST_ACCEL_3AXIS is not set +# CONFIG_IIO_KX022A_SPI is not set +# CONFIG_IIO_KX022A_I2C is not set +# CONFIG_KXSD9 is not set +# CONFIG_KXCJK1013 is not set +# CONFIG_MC3230 is not set +# CONFIG_MMA7455_I2C is not set +# CONFIG_MMA7455_SPI is not 
set +# CONFIG_MMA7660 is not set +# CONFIG_MMA8452 is not set +# CONFIG_MMA9551 is not set +# CONFIG_MMA9553 is not set +# CONFIG_MSA311 is not set +# CONFIG_MXC4005 is not set +# CONFIG_MXC6255 is not set +# CONFIG_SCA3000 is not set +# CONFIG_SCA3300 is not set +# CONFIG_STK8312 is not set +# CONFIG_STK8BA50 is not set +# end of Accelerometers + +# +# Analog to digital converters +# +# CONFIG_AD4130 is not set +# CONFIG_AD7091R5 is not set +# CONFIG_AD7124 is not set +# CONFIG_AD7192 is not set +# CONFIG_AD7266 is not set +# CONFIG_AD7280 is not set +# CONFIG_AD7291 is not set +# CONFIG_AD7292 is not set +# CONFIG_AD7298 is not set +# CONFIG_AD7476 is not set +# CONFIG_AD7606_IFACE_PARALLEL is not set +# CONFIG_AD7606_IFACE_SPI is not set +# CONFIG_AD7766 is not set +# CONFIG_AD7768_1 is not set +# CONFIG_AD7780 is not set +# CONFIG_AD7791 is not set +# CONFIG_AD7793 is not set +# CONFIG_AD7887 is not set +# CONFIG_AD7923 is not set +# CONFIG_AD7949 is not set +# CONFIG_AD799X is not set +# CONFIG_AD9467 is not set +# CONFIG_ADI_AXI_ADC is not set +CONFIG_AXP20X_ADC=m +CONFIG_AXP288_ADC=m +# CONFIG_CC10001_ADC is not set +# CONFIG_ENVELOPE_DETECTOR is not set +# CONFIG_HI8435 is not set +# CONFIG_HX711 is not set +# CONFIG_INA2XX_ADC is not set +# CONFIG_LTC2471 is not set +# CONFIG_LTC2485 is not set +# CONFIG_LTC2496 is not set +# CONFIG_LTC2497 is not set +# CONFIG_MAX1027 is not set +# CONFIG_MAX11100 is not set +# CONFIG_MAX1118 is not set +# CONFIG_MAX11205 is not set +# CONFIG_MAX11410 is not set +# CONFIG_MAX1241 is not set +# CONFIG_MAX1363 is not set +# CONFIG_MAX9611 is not set +# CONFIG_MCP320X is not set +# CONFIG_MCP3422 is not set +# CONFIG_MCP3911 is not set +CONFIG_MESON_SARADC=m +# CONFIG_NAU7802 is not set +CONFIG_QCOM_VADC_COMMON=m +# CONFIG_QCOM_SPMI_RRADC is not set +CONFIG_QCOM_SPMI_IADC=m +CONFIG_QCOM_SPMI_VADC=m +# CONFIG_QCOM_SPMI_ADC5 is not set +CONFIG_ROCKCHIP_SARADC=m +# CONFIG_RICHTEK_RTQ6056 is not set +# CONFIG_SD_ADC_MODULATOR is not set +# CONFIG_SUN20I_GPADC is not set +# CONFIG_TI_ADC081C is not set +# CONFIG_TI_ADC0832 is not set +# CONFIG_TI_ADC084S021 is not set +# CONFIG_TI_ADC12138 is not set +# CONFIG_TI_ADC108S102 is not set +# CONFIG_TI_ADC128S052 is not set +# CONFIG_TI_ADC161S626 is not set +# CONFIG_TI_ADS1015 is not set +# CONFIG_TI_ADS7924 is not set +# CONFIG_TI_ADS1100 is not set +# CONFIG_TI_ADS7950 is not set +# CONFIG_TI_ADS8344 is not set +# CONFIG_TI_ADS8688 is not set +# CONFIG_TI_ADS124S08 is not set +# CONFIG_TI_ADS131E08 is not set +# CONFIG_TI_LMP92064 is not set +# CONFIG_TI_TLC4541 is not set +# CONFIG_TI_TSC2046 is not set +# CONFIG_VF610_ADC is not set +CONFIG_VIPERBOARD_ADC=m +# CONFIG_XILINX_XADC is not set +# CONFIG_XILINX_AMS is not set +# end of Analog to digital converters + +# +# Analog to digital and digital to analog converters +# +# CONFIG_AD74115 is not set +# CONFIG_AD74413R is not set +# end of Analog to digital and digital to analog converters + +# +# Analog Front Ends +# +# CONFIG_IIO_RESCALE is not set +# end of Analog Front Ends + +# +# Amplifiers +# +# CONFIG_AD8366 is not set +# CONFIG_ADA4250 is not set +# CONFIG_HMC425 is not set +# end of Amplifiers + +# +# Capacitance to digital converters +# +# CONFIG_AD7150 is not set +# CONFIG_AD7746 is not set +# end of Capacitance to digital converters + +# +# Chemical Sensors +# +# CONFIG_ATLAS_PH_SENSOR is not set +# CONFIG_ATLAS_EZO_SENSOR is not set +# CONFIG_BME680 is not set +# CONFIG_CCS811 is not set +# CONFIG_IAQCORE is not set +# CONFIG_PMS7003 is not 
set +# CONFIG_SCD30_CORE is not set +# CONFIG_SCD4X is not set +# CONFIG_SENSIRION_SGP30 is not set +# CONFIG_SENSIRION_SGP40 is not set +# CONFIG_SPS30_I2C is not set +# CONFIG_SPS30_SERIAL is not set +# CONFIG_SENSEAIR_SUNRISE_CO2 is not set +# CONFIG_VZ89X is not set +# end of Chemical Sensors + +CONFIG_IIO_CROS_EC_SENSORS_CORE=m +CONFIG_IIO_CROS_EC_SENSORS=m +# CONFIG_IIO_CROS_EC_SENSORS_LID_ANGLE is not set + +# +# Hid Sensor IIO Common +# +CONFIG_HID_SENSOR_IIO_COMMON=m +CONFIG_HID_SENSOR_IIO_TRIGGER=m +# end of Hid Sensor IIO Common + +# +# IIO SCMI Sensors +# +# end of IIO SCMI Sensors + +# +# SSP Sensor Common +# +# CONFIG_IIO_SSP_SENSORHUB is not set +# end of SSP Sensor Common + +# +# Digital to analog converters +# +# CONFIG_AD3552R is not set +# CONFIG_AD5064 is not set +# CONFIG_AD5360 is not set +# CONFIG_AD5380 is not set +# CONFIG_AD5421 is not set +CONFIG_AD5446=m +# CONFIG_AD5449 is not set +# CONFIG_AD5592R is not set +# CONFIG_AD5593R is not set +# CONFIG_AD5504 is not set +# CONFIG_AD5624R_SPI is not set +# CONFIG_LTC2688 is not set +# CONFIG_AD5686_SPI is not set +# CONFIG_AD5696_I2C is not set +# CONFIG_AD5755 is not set +# CONFIG_AD5758 is not set +# CONFIG_AD5761 is not set +# CONFIG_AD5764 is not set +# CONFIG_AD5766 is not set +# CONFIG_AD5770R is not set +# CONFIG_AD5791 is not set +# CONFIG_AD7293 is not set +# CONFIG_AD7303 is not set +# CONFIG_AD8801 is not set +# CONFIG_DPOT_DAC is not set +# CONFIG_DS4424 is not set +# CONFIG_LTC1660 is not set +# CONFIG_LTC2632 is not set +# CONFIG_M62332 is not set +# CONFIG_MAX517 is not set +# CONFIG_MAX5522 is not set +# CONFIG_MAX5821 is not set +# CONFIG_MCP4725 is not set +# CONFIG_MCP4728 is not set +# CONFIG_MCP4922 is not set +# CONFIG_TI_DAC082S085 is not set +# CONFIG_TI_DAC5571 is not set +# CONFIG_TI_DAC7311 is not set +# CONFIG_TI_DAC7612 is not set +# CONFIG_VF610_DAC is not set +# end of Digital to analog converters + +# +# IIO dummy driver +# +# end of IIO dummy driver + +# +# Filters +# +# CONFIG_ADMV8818 is not set +# end of Filters + +# +# Frequency Synthesizers DDS/PLL +# + +# +# Clock Generator/Distribution +# +# CONFIG_AD9523 is not set +# end of Clock Generator/Distribution + +# +# Phase-Locked Loop (PLL) frequency synthesizers +# +# CONFIG_ADF4350 is not set +# CONFIG_ADF4371 is not set +# CONFIG_ADF4377 is not set +# CONFIG_ADMV1013 is not set +# CONFIG_ADMV1014 is not set +# CONFIG_ADMV4420 is not set +# CONFIG_ADRF6780 is not set +# end of Phase-Locked Loop (PLL) frequency synthesizers +# end of Frequency Synthesizers DDS/PLL + +# +# Digital gyroscope sensors +# +# CONFIG_ADIS16080 is not set +# CONFIG_ADIS16130 is not set +# CONFIG_ADIS16136 is not set +# CONFIG_ADIS16260 is not set +# CONFIG_ADXRS290 is not set +# CONFIG_ADXRS450 is not set +# CONFIG_BMG160 is not set +# CONFIG_FXAS21002C is not set +CONFIG_HID_SENSOR_GYRO_3D=m +# CONFIG_MPU3050_I2C is not set +# CONFIG_IIO_ST_GYRO_3AXIS is not set +# CONFIG_ITG3200 is not set +# end of Digital gyroscope sensors + +# +# Health Sensors +# + +# +# Heart Rate Monitors +# +# CONFIG_AFE4403 is not set +# CONFIG_AFE4404 is not set +# CONFIG_MAX30100 is not set +# CONFIG_MAX30102 is not set +# end of Heart Rate Monitors +# end of Health Sensors + +# +# Humidity sensors +# +# CONFIG_AM2315 is not set +CONFIG_DHT11=m +# CONFIG_HDC100X is not set +# CONFIG_HDC2010 is not set +# CONFIG_HID_SENSOR_HUMIDITY is not set +# CONFIG_HTS221 is not set +# CONFIG_HTU21 is not set +# CONFIG_SI7005 is not set +# CONFIG_SI7020 is not set +# end of Humidity sensors 
+ +# +# Inertial measurement units +# +# CONFIG_ADIS16400 is not set +# CONFIG_ADIS16460 is not set +# CONFIG_ADIS16475 is not set +# CONFIG_ADIS16480 is not set +# CONFIG_BMI160_I2C is not set +# CONFIG_BMI160_SPI is not set +# CONFIG_BOSCH_BNO055_SERIAL is not set +# CONFIG_BOSCH_BNO055_I2C is not set +# CONFIG_FXOS8700_I2C is not set +# CONFIG_FXOS8700_SPI is not set +# CONFIG_KMX61 is not set +# CONFIG_INV_ICM42600_I2C is not set +# CONFIG_INV_ICM42600_SPI is not set +# CONFIG_INV_MPU6050_I2C is not set +# CONFIG_INV_MPU6050_SPI is not set +# CONFIG_IIO_ST_LSM6DSX is not set +# CONFIG_IIO_ST_LSM9DS0 is not set +# end of Inertial measurement units + +# +# Light sensors +# +CONFIG_ACPI_ALS=m +# CONFIG_ADJD_S311 is not set +# CONFIG_ADUX1020 is not set +# CONFIG_AL3010 is not set +# CONFIG_AL3320A is not set +# CONFIG_APDS9300 is not set +# CONFIG_APDS9960 is not set +# CONFIG_AS73211 is not set +# CONFIG_BH1750 is not set +CONFIG_BH1780=m +# CONFIG_CM32181 is not set +# CONFIG_CM3232 is not set +# CONFIG_CM3323 is not set +# CONFIG_CM3605 is not set +# CONFIG_CM36651 is not set +CONFIG_IIO_CROS_EC_LIGHT_PROX=m +# CONFIG_GP2AP002 is not set +# CONFIG_GP2AP020A00F is not set +# CONFIG_SENSORS_ISL29018 is not set +# CONFIG_SENSORS_ISL29028 is not set +# CONFIG_ISL29125 is not set +CONFIG_HID_SENSOR_ALS=m +CONFIG_HID_SENSOR_PROX=m +# CONFIG_JSA1212 is not set +# CONFIG_ROHM_BU27008 is not set +# CONFIG_ROHM_BU27034 is not set +# CONFIG_RPR0521 is not set +# CONFIG_LTR501 is not set +# CONFIG_LTRF216A is not set +# CONFIG_LV0104CS is not set +# CONFIG_MAX44000 is not set +# CONFIG_MAX44009 is not set +# CONFIG_NOA1305 is not set +# CONFIG_OPT3001 is not set +# CONFIG_OPT4001 is not set +# CONFIG_PA12203001 is not set +# CONFIG_SI1133 is not set +# CONFIG_SI1145 is not set +# CONFIG_STK3310 is not set +# CONFIG_ST_UVIS25 is not set +# CONFIG_TCS3414 is not set +# CONFIG_TCS3472 is not set +# CONFIG_SENSORS_TSL2563 is not set +# CONFIG_TSL2583 is not set +# CONFIG_TSL2591 is not set +# CONFIG_TSL2772 is not set +# CONFIG_TSL4531 is not set +# CONFIG_US5182D is not set +# CONFIG_VCNL4000 is not set +# CONFIG_VCNL4035 is not set +# CONFIG_VEML6030 is not set +# CONFIG_VEML6070 is not set +# CONFIG_VL6180 is not set +# CONFIG_ZOPT2201 is not set +# end of Light sensors + +# +# Magnetometer sensors +# +# CONFIG_AK8974 is not set +# CONFIG_AK8975 is not set +# CONFIG_AK09911 is not set +# CONFIG_BMC150_MAGN_I2C is not set +# CONFIG_BMC150_MAGN_SPI is not set +# CONFIG_MAG3110 is not set +CONFIG_HID_SENSOR_MAGNETOMETER_3D=m +# CONFIG_MMC35240 is not set +# CONFIG_IIO_ST_MAGN_3AXIS is not set +# CONFIG_SENSORS_HMC5843_I2C is not set +# CONFIG_SENSORS_HMC5843_SPI is not set +# CONFIG_SENSORS_RM3100_I2C is not set +# CONFIG_SENSORS_RM3100_SPI is not set +# CONFIG_TI_TMAG5273 is not set +# CONFIG_YAMAHA_YAS530 is not set +# end of Magnetometer sensors + +# +# Multiplexers +# +# CONFIG_IIO_MUX is not set +# end of Multiplexers + +# +# Inclinometer sensors +# +CONFIG_HID_SENSOR_INCLINOMETER_3D=m +CONFIG_HID_SENSOR_DEVICE_ROTATION=m +# end of Inclinometer sensors + +# +# Triggers - standalone +# +# CONFIG_IIO_INTERRUPT_TRIGGER is not set +# CONFIG_IIO_SYSFS_TRIGGER is not set +# end of Triggers - standalone + +# +# Linear and angular position sensors +# +# CONFIG_HID_SENSOR_CUSTOM_INTEL_HINGE is not set +# end of Linear and angular position sensors + +# +# Digital potentiometers +# +# CONFIG_AD5110 is not set +# CONFIG_AD5272 is not set +# CONFIG_DS1803 is not set +# CONFIG_MAX5432 is not set +# 
CONFIG_MAX5481 is not set +# CONFIG_MAX5487 is not set +# CONFIG_MCP4018 is not set +# CONFIG_MCP4131 is not set +# CONFIG_MCP4531 is not set +# CONFIG_MCP41010 is not set +# CONFIG_TPL0102 is not set +# CONFIG_X9250 is not set +# end of Digital potentiometers + +# +# Digital potentiostats +# +# CONFIG_LMP91000 is not set +# end of Digital potentiostats + +# +# Pressure sensors +# +# CONFIG_ABP060MG is not set +# CONFIG_BMP280 is not set +CONFIG_IIO_CROS_EC_BARO=m +# CONFIG_DLHL60D is not set +# CONFIG_DPS310 is not set +CONFIG_HID_SENSOR_PRESS=m +# CONFIG_HP03 is not set +# CONFIG_ICP10100 is not set +# CONFIG_MPL115_I2C is not set +# CONFIG_MPL115_SPI is not set +# CONFIG_MPL3115 is not set +# CONFIG_MPRLS0025PA is not set +# CONFIG_MS5611 is not set +# CONFIG_MS5637 is not set +# CONFIG_IIO_ST_PRESS is not set +# CONFIG_T5403 is not set +# CONFIG_HP206C is not set +# CONFIG_ZPA2326 is not set +# end of Pressure sensors + +# +# Lightning sensors +# +# CONFIG_AS3935 is not set +# end of Lightning sensors + +# +# Proximity and distance sensors +# +# CONFIG_CROS_EC_MKBP_PROXIMITY is not set +# CONFIG_IRSD200 is not set +# CONFIG_ISL29501 is not set +# CONFIG_LIDAR_LITE_V2 is not set +# CONFIG_MB1232 is not set +# CONFIG_PING is not set +# CONFIG_RFD77402 is not set +# CONFIG_SRF04 is not set +# CONFIG_SX9310 is not set +# CONFIG_SX9324 is not set +# CONFIG_SX9360 is not set +# CONFIG_SX9500 is not set +# CONFIG_SRF08 is not set +# CONFIG_VCNL3020 is not set +# CONFIG_VL53L0X_I2C is not set +# end of Proximity and distance sensors + +# +# Resolver to digital converters +# +# CONFIG_AD2S90 is not set +# CONFIG_AD2S1200 is not set +# end of Resolver to digital converters + +# +# Temperature sensors +# +# CONFIG_LTC2983 is not set +# CONFIG_MAXIM_THERMOCOUPLE is not set +# CONFIG_HID_SENSOR_TEMP is not set +# CONFIG_MLX90614 is not set +# CONFIG_MLX90632 is not set +# CONFIG_TMP006 is not set +# CONFIG_TMP007 is not set +# CONFIG_TMP117 is not set +# CONFIG_TSYS01 is not set +# CONFIG_TSYS02D is not set +# CONFIG_MAX30208 is not set +# CONFIG_MAX31856 is not set +# CONFIG_MAX31865 is not set +# end of Temperature sensors + +# CONFIG_NTB is not set +CONFIG_PWM=y +CONFIG_PWM_SYSFS=y +# CONFIG_PWM_DEBUG is not set +# CONFIG_PWM_ATMEL_TCB is not set +# CONFIG_PWM_CLK is not set +CONFIG_PWM_CROS_EC=m +# CONFIG_PWM_DWC is not set +# CONFIG_PWM_FSL_FTM is not set +# CONFIG_PWM_HIBVT is not set +CONFIG_PWM_MESON=m +# CONFIG_PWM_PCA9685 is not set +CONFIG_PWM_ROCKCHIP=m +CONFIG_PWM_SUN4I=m +CONFIG_PWM_TEGRA=m +# CONFIG_PWM_XILINX is not set + +# +# IRQ chip support +# +CONFIG_IRQCHIP=y +CONFIG_ARM_GIC=y +CONFIG_ARM_GIC_PM=y +CONFIG_ARM_GIC_MAX_NR=1 +CONFIG_ARM_GIC_V2M=y +CONFIG_ARM_GIC_V3=y +CONFIG_ARM_GIC_V3_ITS=y +CONFIG_ARM_GIC_V3_ITS_PCI=y +# CONFIG_AL_FIC is not set +CONFIG_HISILICON_IRQ_MBIGEN=y +CONFIG_SUN6I_R_INTC=y +CONFIG_SUNXI_NMI_INTC=y +# CONFIG_XILINX_INTC is not set +CONFIG_MVEBU_GICP=y +CONFIG_MVEBU_ICU=y +CONFIG_MVEBU_ODMI=y +CONFIG_MVEBU_PIC=y +CONFIG_MVEBU_SEI=y +CONFIG_PARTITION_PERCPU=y +CONFIG_QCOM_IRQ_COMBINER=y +CONFIG_MESON_IRQ_GPIO=y +# CONFIG_QCOM_PDC is not set +# CONFIG_QCOM_MPM is not set +# end of IRQ chip support + +# CONFIG_IPACK_BUS is not set +CONFIG_ARCH_HAS_RESET_CONTROLLER=y +CONFIG_RESET_CONTROLLER=y +CONFIG_RESET_MESON=y +# CONFIG_RESET_MESON_AUDIO_ARB is not set +# CONFIG_RESET_QCOM_AOSS is not set +# CONFIG_RESET_QCOM_PDC is not set +CONFIG_RESET_SIMPLE=y +CONFIG_RESET_SUNXI=y +# CONFIG_RESET_TI_SYSCON is not set +# CONFIG_RESET_TI_TPS380X is not set 
+CONFIG_COMMON_RESET_HI3660=y +CONFIG_COMMON_RESET_HI6220=y +CONFIG_RESET_TEGRA_BPMP=y + +# +# PHY Subsystem +# +CONFIG_GENERIC_PHY=y +CONFIG_GENERIC_PHY_MIPI_DPHY=y +CONFIG_PHY_XGENE=m +# CONFIG_PHY_CAN_TRANSCEIVER is not set +CONFIG_PHY_SUN4I_USB=m +# CONFIG_PHY_SUN6I_MIPI_DPHY is not set +# CONFIG_PHY_SUN9I_USB is not set +# CONFIG_PHY_SUN50I_USB3 is not set +CONFIG_PHY_MESON8B_USB2=m +CONFIG_PHY_MESON_GXL_USB2=y +CONFIG_PHY_MESON_G12A_MIPI_DPHY_ANALOG=y +CONFIG_PHY_MESON_G12A_USB2=y +CONFIG_PHY_MESON_G12A_USB3_PCIE=y +CONFIG_PHY_MESON_AXG_PCIE=y +CONFIG_PHY_MESON_AXG_MIPI_PCIE_ANALOG=y +CONFIG_PHY_MESON_AXG_MIPI_DPHY=y + +# +# PHY drivers for Broadcom platforms +# +# CONFIG_BCM_KONA_USB2_PHY is not set +# end of PHY drivers for Broadcom platforms + +# CONFIG_PHY_CADENCE_TORRENT is not set +# CONFIG_PHY_CADENCE_DPHY is not set +# CONFIG_PHY_CADENCE_DPHY_RX is not set +# CONFIG_PHY_CADENCE_SIERRA is not set +# CONFIG_PHY_CADENCE_SALVO is not set +CONFIG_PHY_HI6220_USB=m +# CONFIG_PHY_HI3660_USB is not set +# CONFIG_PHY_HI3670_USB is not set +# CONFIG_PHY_HI3670_PCIE is not set +# CONFIG_PHY_HISTB_COMBPHY is not set +# CONFIG_PHY_HISI_INNO_USB2 is not set +CONFIG_PHY_MVEBU_A3700_COMPHY=y +CONFIG_PHY_MVEBU_A3700_UTMI=y +# CONFIG_PHY_MVEBU_A38X_COMPHY is not set +CONFIG_PHY_MVEBU_CP110_COMPHY=m +# CONFIG_PHY_MVEBU_CP110_UTMI is not set +# CONFIG_PHY_PXA_28NM_HSIC is not set +# CONFIG_PHY_PXA_28NM_USB2 is not set +# CONFIG_PHY_LAN966X_SERDES is not set +# CONFIG_PHY_CPCAP_USB is not set +# CONFIG_PHY_MAPPHONE_MDM6600 is not set +# CONFIG_PHY_OCELOT_SERDES is not set +CONFIG_PHY_QCOM_APQ8064_SATA=m +# CONFIG_PHY_QCOM_EDP is not set +# CONFIG_PHY_QCOM_IPQ4019_USB is not set +CONFIG_PHY_QCOM_IPQ806X_SATA=m +# CONFIG_PHY_QCOM_PCIE2 is not set +CONFIG_PHY_QCOM_QMP=m +CONFIG_PHY_QCOM_QMP_COMBO=m +CONFIG_PHY_QCOM_QMP_PCIE=m +CONFIG_PHY_QCOM_QMP_PCIE_8996=m +CONFIG_PHY_QCOM_QMP_UFS=m +CONFIG_PHY_QCOM_QMP_USB=m +# CONFIG_PHY_QCOM_QMP_USB_LEGACY is not set +CONFIG_PHY_QCOM_QUSB2=m +# CONFIG_PHY_QCOM_SNPS_EUSB2 is not set +# CONFIG_PHY_QCOM_EUSB2_REPEATER is not set +# CONFIG_PHY_QCOM_M31_USB is not set +CONFIG_PHY_QCOM_USB_HS=m +# CONFIG_PHY_QCOM_USB_SNPS_FEMTO_V2 is not set +CONFIG_PHY_QCOM_USB_HSIC=m +# CONFIG_PHY_QCOM_USB_HS_28NM is not set +# CONFIG_PHY_QCOM_USB_SS is not set +# CONFIG_PHY_QCOM_IPQ806X_USB is not set +# CONFIG_PHY_QCOM_SGMII_ETH is not set +CONFIG_PHY_ROCKCHIP_DP=m +# CONFIG_PHY_ROCKCHIP_DPHY_RX0 is not set +CONFIG_PHY_ROCKCHIP_EMMC=m +CONFIG_PHY_ROCKCHIP_INNO_HDMI=m +CONFIG_PHY_ROCKCHIP_INNO_USB2=m +# CONFIG_PHY_ROCKCHIP_INNO_CSIDPHY is not set +# CONFIG_PHY_ROCKCHIP_INNO_DSIDPHY is not set +# CONFIG_PHY_ROCKCHIP_NANENG_COMBO_PHY is not set +CONFIG_PHY_ROCKCHIP_PCIE=m +# CONFIG_PHY_ROCKCHIP_SNPS_PCIE3 is not set +CONFIG_PHY_ROCKCHIP_TYPEC=m +CONFIG_PHY_ROCKCHIP_USB=m +# CONFIG_PHY_SAMSUNG_USB2 is not set +CONFIG_PHY_TEGRA_XUSB=m +# CONFIG_PHY_TEGRA194_P2U is not set +# CONFIG_PHY_TUSB1210 is not set +# CONFIG_PHY_XILINX_ZYNQMP is not set +# end of PHY Subsystem + +# CONFIG_POWERCAP is not set +# CONFIG_MCB is not set + +# +# Performance monitor support +# +CONFIG_ARM_CCI_PMU=y +CONFIG_ARM_CCI400_PMU=y +CONFIG_ARM_CCI5xx_PMU=y +CONFIG_ARM_CCN=y +CONFIG_ARM_CMN=m +CONFIG_ARM_PMU=y +CONFIG_ARM_PMU_ACPI=y +CONFIG_ARM_SMMU_V3_PMU=y +CONFIG_ARM_PMUV3=y +CONFIG_ARM_DSU_PMU=m +CONFIG_QCOM_L2_PMU=y +CONFIG_QCOM_L3_PMU=y +CONFIG_THUNDERX2_PMU=m +CONFIG_XGENE_PMU=y +CONFIG_ARM_SPE_PMU=m +CONFIG_ARM_DMC620_PMU=y +# CONFIG_MARVELL_CN10K_TAD_PMU is not set +# CONFIG_ALIBABA_UNCORE_DRW_PMU is 
not set +CONFIG_HISI_PMU=y +# CONFIG_HISI_PCIE_PMU is not set +# CONFIG_HNS3_PMU is not set +# CONFIG_MARVELL_CN10K_DDR_PMU is not set +CONFIG_ARM_CORESIGHT_PMU_ARCH_SYSTEM_PMU=m +CONFIG_NVIDIA_CORESIGHT_PMU_ARCH_SYSTEM_PMU=m +# CONFIG_MESON_DDR_PMU is not set +# end of Performance monitor support + +CONFIG_RAS=y +# CONFIG_USB4 is not set + +# +# Android +# +# CONFIG_ANDROID_BINDER_IPC is not set +# end of Android + +CONFIG_LIBNVDIMM=m +CONFIG_BLK_DEV_PMEM=m +CONFIG_ND_CLAIM=y +CONFIG_ND_BTT=m +CONFIG_BTT=y +CONFIG_ND_PFN=m +CONFIG_NVDIMM_PFN=y +CONFIG_NVDIMM_DAX=y +CONFIG_OF_PMEM=m +CONFIG_DAX=y +CONFIG_DEV_DAX=m +CONFIG_DEV_DAX_PMEM=m +CONFIG_DEV_DAX_KMEM=m +CONFIG_NVMEM=y +CONFIG_NVMEM_SYSFS=y + +# +# Layout Types +# +# CONFIG_NVMEM_LAYOUT_SL28_VPD is not set +# CONFIG_NVMEM_LAYOUT_ONIE_TLV is not set +# end of Layout Types + +# CONFIG_NVMEM_MESON_MX_EFUSE is not set +# CONFIG_NVMEM_QCOM_QFPROM is not set +# CONFIG_NVMEM_QCOM_SEC_QFPROM is not set +# CONFIG_NVMEM_RMEM is not set +# CONFIG_NVMEM_ROCKCHIP_EFUSE is not set +# CONFIG_NVMEM_ROCKCHIP_OTP is not set +# CONFIG_NVMEM_SPMI_SDAM is not set +CONFIG_NVMEM_SUNXI_SID=m +# CONFIG_NVMEM_U_BOOT_ENV is not set +# CONFIG_NVMEM_ZYNQMP is not set + +# +# HW tracing support +# +# CONFIG_STM is not set +# CONFIG_INTEL_TH is not set +# CONFIG_HISI_PTT is not set +# end of HW tracing support + +# CONFIG_FPGA is not set +# CONFIG_FSI is not set +CONFIG_TEE=m +CONFIG_OPTEE=m +# CONFIG_OPTEE_INSECURE_LOAD_IMAGE is not set +CONFIG_PM_OPP=y +# CONFIG_SIOX is not set +# CONFIG_SLIMBUS is not set +CONFIG_INTERCONNECT=y +# CONFIG_INTERCONNECT_QCOM is not set +# CONFIG_COUNTER is not set +# CONFIG_MOST is not set +# CONFIG_PECI is not set +# CONFIG_HTE is not set +# CONFIG_CDX_BUS is not set +# end of Device Drivers + +# +# File systems +# +CONFIG_DCACHE_WORD_ACCESS=y +# CONFIG_VALIDATE_FS_PARSER is not set +CONFIG_FS_IOMAP=y +CONFIG_BUFFER_HEAD=y +CONFIG_LEGACY_DIRECT_IO=y +# CONFIG_EXT2_FS is not set +# CONFIG_EXT3_FS is not set +CONFIG_EXT4_FS=m +CONFIG_EXT4_USE_FOR_EXT2=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD2=m +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=m +CONFIG_REISERFS_FS=m +# CONFIG_REISERFS_CHECK is not set +# CONFIG_REISERFS_PROC_INFO is not set +CONFIG_REISERFS_FS_XATTR=y +CONFIG_REISERFS_FS_POSIX_ACL=y +CONFIG_REISERFS_FS_SECURITY=y +CONFIG_JFS_FS=m +CONFIG_JFS_POSIX_ACL=y +CONFIG_JFS_SECURITY=y +# CONFIG_JFS_DEBUG is not set +# CONFIG_JFS_STATISTICS is not set +CONFIG_XFS_FS=m +CONFIG_XFS_SUPPORT_V4=y +CONFIG_XFS_SUPPORT_ASCII_CI=y +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +CONFIG_XFS_RT=y +# CONFIG_XFS_ONLINE_SCRUB is not set +# CONFIG_XFS_WARN is not set +# CONFIG_XFS_DEBUG is not set +CONFIG_GFS2_FS=m +CONFIG_GFS2_FS_LOCKING_DLM=y +CONFIG_OCFS2_FS=m +CONFIG_OCFS2_FS_O2CB=m +CONFIG_OCFS2_FS_USERSPACE_CLUSTER=m +CONFIG_OCFS2_FS_STATS=y +CONFIG_OCFS2_DEBUG_MASKLOG=y +# CONFIG_OCFS2_DEBUG_FS is not set +CONFIG_BTRFS_FS=m +CONFIG_BTRFS_FS_POSIX_ACL=y +# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set +# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set +# CONFIG_BTRFS_DEBUG is not set +# CONFIG_BTRFS_ASSERT is not set +# CONFIG_BTRFS_FS_REF_VERIFY is not set +CONFIG_NILFS2_FS=m +CONFIG_F2FS_FS=m +CONFIG_F2FS_STAT_FS=y +CONFIG_F2FS_FS_XATTR=y +CONFIG_F2FS_FS_POSIX_ACL=y +CONFIG_F2FS_FS_SECURITY=y +# CONFIG_F2FS_CHECK_FS is not set +# CONFIG_F2FS_FAULT_INJECTION is not set +# CONFIG_F2FS_FS_COMPRESSION is not set +CONFIG_F2FS_IOSTAT=y +# CONFIG_F2FS_UNFAIR_RWSEM is not set +# 
CONFIG_ZONEFS_FS is not set +# CONFIG_FS_DAX is not set +CONFIG_FS_POSIX_ACL=y +CONFIG_EXPORTFS=y +CONFIG_EXPORTFS_BLOCK_OPS=y +CONFIG_FILE_LOCKING=y +# CONFIG_FS_ENCRYPTION is not set +# CONFIG_FS_VERITY is not set +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +# CONFIG_QUOTA_DEBUG is not set +CONFIG_QUOTA_TREE=m +CONFIG_QFMT_V1=m +CONFIG_QFMT_V2=m +CONFIG_QUOTACTL=y +CONFIG_AUTOFS_FS=m +CONFIG_FUSE_FS=m +CONFIG_CUSE=m +# CONFIG_VIRTIO_FS is not set +CONFIG_OVERLAY_FS=m +# CONFIG_OVERLAY_FS_REDIRECT_DIR is not set +CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y +# CONFIG_OVERLAY_FS_INDEX is not set +# CONFIG_OVERLAY_FS_XINO_AUTO is not set +# CONFIG_OVERLAY_FS_METACOPY is not set +# CONFIG_OVERLAY_FS_DEBUG is not set + +# +# Caches +# +CONFIG_NETFS_SUPPORT=m +CONFIG_NETFS_STATS=y +CONFIG_FSCACHE=m +CONFIG_FSCACHE_STATS=y +# CONFIG_FSCACHE_DEBUG is not set +CONFIG_CACHEFILES=m +# CONFIG_CACHEFILES_DEBUG is not set +# CONFIG_CACHEFILES_ERROR_INJECTION is not set +# CONFIG_CACHEFILES_ONDEMAND is not set +# end of Caches + +# +# CD-ROM/DVD Filesystems +# +CONFIG_ISO9660_FS=m +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=m +# end of CD-ROM/DVD Filesystems + +# +# DOS/FAT/EXFAT/NT Filesystems +# +CONFIG_FAT_FS=y +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="ascii" +CONFIG_FAT_DEFAULT_UTF8=y +# CONFIG_EXFAT_FS is not set +CONFIG_NTFS_FS=m +# CONFIG_NTFS_DEBUG is not set +# CONFIG_NTFS3_FS is not set +# end of DOS/FAT/EXFAT/NT Filesystems + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_VMCORE=y +# CONFIG_PROC_VMCORE_DEVICE_DUMP is not set +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_PROC_CHILDREN=y +CONFIG_KERNFS=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TMPFS_XATTR=y +# CONFIG_TMPFS_INODE64 is not set +# CONFIG_TMPFS_QUOTA is not set +CONFIG_ARCH_SUPPORTS_HUGETLBFS=y +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_ARCH_HAS_GIGANTIC_PAGE=y +CONFIG_CONFIGFS_FS=m +CONFIG_EFIVAR_FS=m +# end of Pseudo filesystems + +CONFIG_MISC_FILESYSTEMS=y +CONFIG_ORANGEFS_FS=m +CONFIG_ADFS_FS=m +# CONFIG_ADFS_FS_RW is not set +CONFIG_AFFS_FS=m +CONFIG_ECRYPT_FS=m +CONFIG_ECRYPT_FS_MESSAGING=y +CONFIG_HFS_FS=m +CONFIG_HFSPLUS_FS=m +CONFIG_BEFS_FS=m +# CONFIG_BEFS_DEBUG is not set +CONFIG_BFS_FS=m +CONFIG_EFS_FS=m +CONFIG_JFFS2_FS=m +CONFIG_JFFS2_FS_DEBUG=0 +CONFIG_JFFS2_FS_WRITEBUFFER=y +# CONFIG_JFFS2_FS_WBUF_VERIFY is not set +CONFIG_JFFS2_SUMMARY=y +CONFIG_JFFS2_FS_XATTR=y +CONFIG_JFFS2_FS_POSIX_ACL=y +CONFIG_JFFS2_FS_SECURITY=y +CONFIG_JFFS2_COMPRESSION_OPTIONS=y +CONFIG_JFFS2_ZLIB=y +CONFIG_JFFS2_LZO=y +CONFIG_JFFS2_RTIME=y +# CONFIG_JFFS2_RUBIN is not set +# CONFIG_JFFS2_CMODE_NONE is not set +CONFIG_JFFS2_CMODE_PRIORITY=y +# CONFIG_JFFS2_CMODE_SIZE is not set +# CONFIG_JFFS2_CMODE_FAVOURLZO is not set +CONFIG_UBIFS_FS=m +CONFIG_UBIFS_FS_ADVANCED_COMPR=y +CONFIG_UBIFS_FS_LZO=y +CONFIG_UBIFS_FS_ZLIB=y +CONFIG_UBIFS_FS_ZSTD=y +# CONFIG_UBIFS_ATIME_SUPPORT is not set +CONFIG_UBIFS_FS_XATTR=y +CONFIG_UBIFS_FS_SECURITY=y +# CONFIG_UBIFS_FS_AUTHENTICATION is not set +# CONFIG_CRAMFS is not set +CONFIG_SQUASHFS=m +CONFIG_SQUASHFS_FILE_CACHE=y +# CONFIG_SQUASHFS_FILE_DIRECT is not set +CONFIG_SQUASHFS_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT is not set +CONFIG_SQUASHFS_COMPILE_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI is not set +# 
CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU is not set +CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_ZLIB=y +CONFIG_SQUASHFS_LZ4=y +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_XZ=y +CONFIG_SQUASHFS_ZSTD=y +# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set +# CONFIG_SQUASHFS_EMBEDDED is not set +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 +CONFIG_VXFS_FS=m +CONFIG_MINIX_FS=m +CONFIG_OMFS_FS=m +CONFIG_HPFS_FS=m +CONFIG_QNX4FS_FS=m +CONFIG_QNX6FS_FS=m +# CONFIG_QNX6FS_DEBUG is not set +CONFIG_ROMFS_FS=m +# CONFIG_ROMFS_BACKED_BY_BLOCK is not set +# CONFIG_ROMFS_BACKED_BY_MTD is not set +CONFIG_ROMFS_BACKED_BY_BOTH=y +CONFIG_ROMFS_ON_BLOCK=y +CONFIG_ROMFS_ON_MTD=y +CONFIG_PSTORE=y +CONFIG_PSTORE_DEFAULT_KMSG_BYTES=10240 +CONFIG_PSTORE_COMPRESS=y +# CONFIG_PSTORE_CONSOLE is not set +# CONFIG_PSTORE_PMSG is not set +# CONFIG_PSTORE_FTRACE is not set +CONFIG_PSTORE_RAM=m +# CONFIG_PSTORE_BLK is not set +CONFIG_SYSV_FS=m +CONFIG_UFS_FS=m +# CONFIG_UFS_FS_WRITE is not set +# CONFIG_UFS_DEBUG is not set +# CONFIG_EROFS_FS is not set +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=m +CONFIG_NFS_V2=m +CONFIG_NFS_V3=m +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=m +CONFIG_NFS_SWAP=y +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_PNFS_FILE_LAYOUT=m +CONFIG_PNFS_BLOCK=m +CONFIG_PNFS_FLEXFILE_LAYOUT=m +CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" +# CONFIG_NFS_V4_1_MIGRATION is not set +CONFIG_NFS_V4_SECURITY_LABEL=y +CONFIG_NFS_FSCACHE=y +# CONFIG_NFS_USE_LEGACY_DNS is not set +CONFIG_NFS_USE_KERNEL_DNS=y +CONFIG_NFS_DEBUG=y +CONFIG_NFS_DISABLE_UDP_SUPPORT=y +# CONFIG_NFS_V4_2_READ_PLUS is not set +CONFIG_NFSD=m +# CONFIG_NFSD_V2 is not set +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_PNFS=y +CONFIG_NFSD_BLOCKLAYOUT=y +# CONFIG_NFSD_SCSILAYOUT is not set +# CONFIG_NFSD_FLEXFILELAYOUT is not set +# CONFIG_NFSD_V4_2_INTER_SSC is not set +CONFIG_NFSD_V4_SECURITY_LABEL=y +CONFIG_GRACE_PERIOD=m +CONFIG_LOCKD=m +CONFIG_LOCKD_V4=y +CONFIG_NFS_ACL_SUPPORT=m +CONFIG_NFS_COMMON=y +CONFIG_NFS_V4_2_SSC_HELPER=y +CONFIG_SUNRPC=m +CONFIG_SUNRPC_GSS=m +CONFIG_SUNRPC_BACKCHANNEL=y +CONFIG_SUNRPC_SWAP=y +CONFIG_RPCSEC_GSS_KRB5=m +CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA1=y +# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_CAMELLIA is not set +# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA2 is not set +CONFIG_SUNRPC_DEBUG=y +CONFIG_SUNRPC_XPRT_RDMA=m +CONFIG_CEPH_FS=m +CONFIG_CEPH_FSCACHE=y +CONFIG_CEPH_FS_POSIX_ACL=y +# CONFIG_CEPH_FS_SECURITY_LABEL is not set +CONFIG_CIFS=m +# CONFIG_CIFS_STATS2 is not set +CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y +CONFIG_CIFS_UPCALL=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +CONFIG_CIFS_DEBUG=y +# CONFIG_CIFS_DEBUG2 is not set +# CONFIG_CIFS_DEBUG_DUMP_KEYS is not set +CONFIG_CIFS_DFS_UPCALL=y +# CONFIG_CIFS_SWN_UPCALL is not set +# CONFIG_CIFS_SMB_DIRECT is not set +CONFIG_CIFS_FSCACHE=y +# CONFIG_SMB_SERVER is not set +CONFIG_SMBFS=m +CONFIG_CODA_FS=m +CONFIG_AFS_FS=m +# CONFIG_AFS_DEBUG is not set +CONFIG_AFS_FSCACHE=y +# CONFIG_AFS_DEBUG_CURSOR is not set +CONFIG_9P_FS=m +CONFIG_9P_FSCACHE=y +CONFIG_9P_FS_POSIX_ACL=y +CONFIG_9P_FS_SECURITY=y +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=m +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=m 
+CONFIG_NLS_CODEPAGE_950=m +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=m +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_NLS_UTF8=m +CONFIG_NLS_UCS2_UTILS=m +CONFIG_DLM=m +CONFIG_DLM_DEBUG=y +# CONFIG_UNICODE is not set +CONFIG_IO_WQ=y +# end of File systems + +# +# Security options +# +CONFIG_KEYS=y +# CONFIG_KEYS_REQUEST_CACHE is not set +# CONFIG_PERSISTENT_KEYRINGS is not set +# CONFIG_TRUSTED_KEYS is not set +# CONFIG_ENCRYPTED_KEYS is not set +CONFIG_KEY_DH_OPERATIONS=y +CONFIG_SECURITY_DMESG_RESTRICT=y +CONFIG_PROC_MEM_ALWAYS_FORCE=y +# CONFIG_PROC_MEM_FORCE_PTRACE is not set +# CONFIG_PROC_MEM_NO_FORCE is not set +CONFIG_SECURITY=y +CONFIG_SECURITYFS=y +CONFIG_SECURITY_NETWORK=y +# CONFIG_SECURITY_INFINIBAND is not set +CONFIG_SECURITY_NETWORK_XFRM=y +CONFIG_SECURITY_PATH=y +CONFIG_LSM_MMAP_MIN_ADDR=32768 +CONFIG_HARDENED_USERCOPY=y +CONFIG_FORTIFY_SOURCE=y +# CONFIG_STATIC_USERMODEHELPER is not set +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_BOOTPARAM=y +CONFIG_SECURITY_SELINUX_DEVELOP=y +CONFIG_SECURITY_SELINUX_AVC_STATS=y +CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS=9 +CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE=256 +# CONFIG_SECURITY_SELINUX_DEBUG is not set +# CONFIG_SECURITY_SMACK is not set +CONFIG_SECURITY_TOMOYO=y +CONFIG_SECURITY_TOMOYO_MAX_ACCEPT_ENTRY=2048 +CONFIG_SECURITY_TOMOYO_MAX_AUDIT_LOG=1024 +# CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER is not set +CONFIG_SECURITY_TOMOYO_POLICY_LOADER="/sbin/tomoyo-init" +CONFIG_SECURITY_TOMOYO_ACTIVATION_TRIGGER="/sbin/init" +# CONFIG_SECURITY_TOMOYO_INSECURE_BUILTIN_SETTING is not set +CONFIG_SECURITY_APPARMOR=y +# CONFIG_SECURITY_APPARMOR_DEBUG is not set +CONFIG_SECURITY_APPARMOR_INTROSPECT_POLICY=y +CONFIG_SECURITY_APPARMOR_HASH=y +CONFIG_SECURITY_APPARMOR_HASH_DEFAULT=y +CONFIG_SECURITY_APPARMOR_EXPORT_BINARY=y +CONFIG_SECURITY_APPARMOR_PARANOID_LOAD=y +# CONFIG_SECURITY_LOADPIN is not set +CONFIG_SECURITY_YAMA=y +# CONFIG_SECURITY_SAFESETID is not set +# CONFIG_SECURITY_LOCKDOWN_LSM is not set +# CONFIG_SECURITY_LANDLOCK is not set +CONFIG_INTEGRITY=y +CONFIG_INTEGRITY_SIGNATURE=y +CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y +# CONFIG_INTEGRITY_TRUSTED_KEYRING is not set +CONFIG_INTEGRITY_PLATFORM_KEYRING=y +# CONFIG_INTEGRITY_MACHINE_KEYRING is not set +CONFIG_LOAD_UEFI_KEYS=y +CONFIG_INTEGRITY_AUDIT=y +# CONFIG_IMA is not set +# CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT is not set +# CONFIG_EVM is not set +# CONFIG_DEFAULT_SECURITY_SELINUX is not set +# CONFIG_DEFAULT_SECURITY_TOMOYO is not set +CONFIG_DEFAULT_SECURITY_APPARMOR=y +# CONFIG_DEFAULT_SECURITY_DAC is not set +CONFIG_LSM="yama,loadpin,safesetid,integrity,apparmor,selinux,smack,tomoyo" + +# +# Kernel hardening options +# + +# +# Memory initialization +# +CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN=y +CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_BARE=y +CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO=y +CONFIG_INIT_STACK_NONE=y 
+# CONFIG_INIT_STACK_ALL_PATTERN is not set +# CONFIG_INIT_STACK_ALL_ZERO is not set +# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set +# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set +CONFIG_CC_HAS_ZERO_CALL_USED_REGS=y +# CONFIG_ZERO_CALL_USED_REGS is not set +# end of Memory initialization + +# +# Hardening of kernel data structures +# +CONFIG_LIST_HARDENED=y +CONFIG_BUG_ON_DATA_CORRUPTION=y +# end of Hardening of kernel data structures + +CONFIG_RANDSTRUCT_NONE=y +# end of Kernel hardening options +# end of Security options + +CONFIG_XOR_BLOCKS=m +CONFIG_ASYNC_CORE=m +CONFIG_ASYNC_MEMCPY=m +CONFIG_ASYNC_XOR=m +CONFIG_ASYNC_PQ=m +CONFIG_ASYNC_RAID6_RECOV=m +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_FIPS=y +CONFIG_CRYPTO_FIPS_NAME="Linux Kernel Cryptographic API" +# CONFIG_CRYPTO_FIPS_CUSTOM_VERSION is not set +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=m +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_SIG=y +CONFIG_CRYPTO_SIG2=y +CONFIG_CRYPTO_SKCIPHER=y +CONFIG_CRYPTO_SKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=m +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG_DEFAULT=m +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_AKCIPHER=y +CONFIG_CRYPTO_KPP2=y +CONFIG_CRYPTO_KPP=y +CONFIG_CRYPTO_ACOMP2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_USER=m +# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +# CONFIG_CRYPTO_MANAGER_EXTRA_TESTS is not set +CONFIG_CRYPTO_NULL=m +CONFIG_CRYPTO_NULL2=m +CONFIG_CRYPTO_PCRYPT=m +CONFIG_CRYPTO_CRYPTD=m +CONFIG_CRYPTO_AUTHENC=m +CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_ENGINE=m +# end of Crypto core or helper + +# +# Public-key cryptography +# +CONFIG_CRYPTO_RSA=y +CONFIG_CRYPTO_DH=y +# CONFIG_CRYPTO_DH_RFC7919_GROUPS is not set +CONFIG_CRYPTO_ECC=m +CONFIG_CRYPTO_ECDH=m +# CONFIG_CRYPTO_ECDSA is not set +# CONFIG_CRYPTO_ECRDSA is not set +# CONFIG_CRYPTO_SM2 is not set +# CONFIG_CRYPTO_CURVE25519 is not set +# end of Public-key cryptography + +# +# Block ciphers +# +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_AES_TI is not set +CONFIG_CRYPTO_ANUBIS=m +# CONFIG_CRYPTO_ARIA is not set +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_BLOWFISH_COMMON=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAST_COMMON=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_DES=m +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SEED=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_SM4=m +# CONFIG_CRYPTO_SM4_GENERIC is not set +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_TWOFISH_COMMON=m +# end of Block ciphers + +# +# Length-preserving ciphers and modes +# +# CONFIG_CRYPTO_ADIANTUM is not set +CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_CHACHA20=m +CONFIG_CRYPTO_CBC=y +# CONFIG_CRYPTO_CFB is not set +CONFIG_CRYPTO_CTR=m +CONFIG_CRYPTO_CTS=y +CONFIG_CRYPTO_ECB=y +# CONFIG_CRYPTO_HCTR2 is not set +# CONFIG_CRYPTO_KEYWRAP is not set +CONFIG_CRYPTO_LRW=m +# CONFIG_CRYPTO_OFB is not set +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_XTS=y +# end of Length-preserving ciphers and modes + +# +# AEAD (authenticated encryption with associated data) ciphers +# +CONFIG_CRYPTO_AEGIS128=m +CONFIG_CRYPTO_AEGIS128_SIMD=y +CONFIG_CRYPTO_CHACHA20POLY1305=m +CONFIG_CRYPTO_CCM=m +CONFIG_CRYPTO_GCM=m +CONFIG_CRYPTO_GENIV=m +CONFIG_CRYPTO_SEQIV=m +CONFIG_CRYPTO_ECHAINIV=m +CONFIG_CRYPTO_ESSIV=m +# end of AEAD (authenticated encryption with associated data) ciphers + +# +# Hashes, digests, and MACs +# +CONFIG_CRYPTO_BLAKE2B=m +CONFIG_CRYPTO_CMAC=m +CONFIG_CRYPTO_GHASH=m +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_MD4=m +CONFIG_CRYPTO_MD5=y 
+CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_POLYVAL=m +CONFIG_CRYPTO_POLY1305=m +CONFIG_CRYPTO_RMD160=m +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_SHA512=y +CONFIG_CRYPTO_SHA3=m +CONFIG_CRYPTO_SM3=m +# CONFIG_CRYPTO_SM3_GENERIC is not set +# CONFIG_CRYPTO_STREEBOG is not set +CONFIG_CRYPTO_VMAC=m +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_XXHASH=m +# end of Hashes, digests, and MACs + +# +# CRCs (cyclic redundancy checks) +# +CONFIG_CRYPTO_CRC32C=m +CONFIG_CRYPTO_CRC32=m +CONFIG_CRYPTO_CRCT10DIF=y +CONFIG_CRYPTO_CRC64_ROCKSOFT=m +# end of CRCs (cyclic redundancy checks) + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=y +# CONFIG_CRYPTO_842 is not set +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m +CONFIG_CRYPTO_ZSTD=m +# end of Compression + +# +# Random number generation +# +CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_DRBG_MENU=m +CONFIG_CRYPTO_DRBG_HMAC=y +# CONFIG_CRYPTO_DRBG_HASH is not set +# CONFIG_CRYPTO_DRBG_CTR is not set +CONFIG_CRYPTO_DRBG=m +CONFIG_CRYPTO_JITTERENTROPY=m +# CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE is not set +CONFIG_CRYPTO_KDF800108_CTR=y +# end of Random number generation + +# +# Userspace interface +# +CONFIG_CRYPTO_USER_API=m +CONFIG_CRYPTO_USER_API_HASH=m +CONFIG_CRYPTO_USER_API_SKCIPHER=m +CONFIG_CRYPTO_USER_API_RNG=m +# CONFIG_CRYPTO_USER_API_RNG_CAVP is not set +CONFIG_CRYPTO_USER_API_AEAD=m +CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE=y +# CONFIG_CRYPTO_STATS is not set +# end of Userspace interface + +CONFIG_CRYPTO_HASH_INFO=y +# CONFIG_CRYPTO_NHPOLY1305_NEON is not set +# CONFIG_CRYPTO_CHACHA20_NEON is not set + +# +# Accelerated Cryptographic Algorithms for CPU (arm64) +# +CONFIG_CRYPTO_GHASH_ARM64_CE=m +# CONFIG_CRYPTO_POLY1305_NEON is not set +CONFIG_CRYPTO_SHA1_ARM64_CE=m +CONFIG_CRYPTO_SHA256_ARM64=m +CONFIG_CRYPTO_SHA2_ARM64_CE=m +CONFIG_CRYPTO_SHA512_ARM64=m +CONFIG_CRYPTO_SHA512_ARM64_CE=m +CONFIG_CRYPTO_SHA3_ARM64=m +CONFIG_CRYPTO_SM3_NEON=m +CONFIG_CRYPTO_SM3_ARM64_CE=m +CONFIG_CRYPTO_POLYVAL_ARM64_CE=m +CONFIG_CRYPTO_AES_ARM64=m +CONFIG_CRYPTO_AES_ARM64_CE=m +CONFIG_CRYPTO_AES_ARM64_CE_BLK=m +CONFIG_CRYPTO_AES_ARM64_NEON_BLK=m +# CONFIG_CRYPTO_AES_ARM64_BS is not set +CONFIG_CRYPTO_SM4_ARM64_CE=m +CONFIG_CRYPTO_SM4_ARM64_CE_BLK=m +CONFIG_CRYPTO_SM4_ARM64_NEON_BLK=m +CONFIG_CRYPTO_AES_ARM64_CE_CCM=m +CONFIG_CRYPTO_SM4_ARM64_CE_CCM=m +CONFIG_CRYPTO_SM4_ARM64_CE_GCM=m +# CONFIG_CRYPTO_CRCT10DIF_ARM64_CE is not set +# end of Accelerated Cryptographic Algorithms for CPU (arm64) + +CONFIG_CRYPTO_HW=y +CONFIG_CRYPTO_DEV_ALLWINNER=y +# CONFIG_CRYPTO_DEV_SUN4I_SS is not set +# CONFIG_CRYPTO_DEV_SUN8I_CE is not set +# CONFIG_CRYPTO_DEV_SUN8I_SS is not set +# CONFIG_CRYPTO_DEV_ATMEL_ECC is not set +# CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set +# CONFIG_CRYPTO_DEV_CCP is not set +CONFIG_CRYPTO_DEV_CPT=m +CONFIG_CAVIUM_CPT=m +CONFIG_CRYPTO_DEV_NITROX=m +CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m +CONFIG_CRYPTO_DEV_MARVELL=m +CONFIG_CRYPTO_DEV_MARVELL_CESA=m +# CONFIG_CRYPTO_DEV_OCTEONTX_CPT is not set +# CONFIG_CRYPTO_DEV_OCTEONTX2_CPT is not set +# CONFIG_CRYPTO_DEV_QAT_DH895xCC is not set +# CONFIG_CRYPTO_DEV_QAT_C3XXX is not set +# CONFIG_CRYPTO_DEV_QAT_C62X is not set +# CONFIG_CRYPTO_DEV_QAT_4XXX is not set +# CONFIG_CRYPTO_DEV_QAT_DH895xCCVF is not set +# CONFIG_CRYPTO_DEV_QAT_C3XXXVF is not set +# CONFIG_CRYPTO_DEV_QAT_C62XVF is not set +# CONFIG_CRYPTO_DEV_CAVIUM_ZIP is not set +CONFIG_CRYPTO_DEV_QCE=m +CONFIG_CRYPTO_DEV_QCE_SKCIPHER=y +CONFIG_CRYPTO_DEV_QCE_SHA=y +CONFIG_CRYPTO_DEV_QCE_AEAD=y 
+CONFIG_CRYPTO_DEV_QCE_ENABLE_ALL=y +# CONFIG_CRYPTO_DEV_QCE_ENABLE_SKCIPHER is not set +# CONFIG_CRYPTO_DEV_QCE_ENABLE_SHA is not set +# CONFIG_CRYPTO_DEV_QCE_ENABLE_AEAD is not set +CONFIG_CRYPTO_DEV_QCE_SW_MAX_LEN=512 +CONFIG_CRYPTO_DEV_QCOM_RNG=m +# CONFIG_CRYPTO_DEV_ROCKCHIP is not set +# CONFIG_CRYPTO_DEV_ZYNQMP_AES is not set +# CONFIG_CRYPTO_DEV_ZYNQMP_SHA3 is not set +CONFIG_CRYPTO_DEV_CHELSIO=m +CONFIG_CRYPTO_DEV_VIRTIO=m +CONFIG_CRYPTO_DEV_SAFEXCEL=m +# CONFIG_CRYPTO_DEV_CCREE is not set +# CONFIG_CRYPTO_DEV_HISI_SEC is not set +# CONFIG_CRYPTO_DEV_HISI_SEC2 is not set +# CONFIG_CRYPTO_DEV_HISI_ZIP is not set +# CONFIG_CRYPTO_DEV_HISI_HPRE is not set +# CONFIG_CRYPTO_DEV_HISI_TRNG is not set +CONFIG_CRYPTO_DEV_AMLOGIC_GXL=m +# CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG is not set +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_X509_CERTIFICATE_PARSER=y +CONFIG_PKCS8_PRIVATE_KEY_PARSER=m +CONFIG_PKCS7_MESSAGE_PARSER=y +# CONFIG_PKCS7_TEST_KEY is not set +# CONFIG_SIGNED_PE_FILE_VERIFICATION is not set +# CONFIG_FIPS_SIGNATURE_SELFTEST is not set + +# +# Certificates for signature checking +# +CONFIG_MODULE_SIG_KEY="" +CONFIG_MODULE_SIG_KEY_TYPE_RSA=y +# CONFIG_MODULE_SIG_KEY_TYPE_ECDSA is not set +CONFIG_SYSTEM_TRUSTED_KEYRING=y +CONFIG_SYSTEM_TRUSTED_KEYS="" +# CONFIG_SYSTEM_EXTRA_CERTIFICATE is not set +CONFIG_SECONDARY_TRUSTED_KEYRING=y +CONFIG_SYSTEM_BLACKLIST_KEYRING=y +CONFIG_SYSTEM_BLACKLIST_HASH_LIST="" +# CONFIG_SYSTEM_REVOCATION_LIST is not set +# CONFIG_SYSTEM_BLACKLIST_AUTH_UPDATE is not set +# end of Certificates for signature checking + +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_RAID6_PQ=m +CONFIG_RAID6_PQ_BENCHMARK=y +CONFIG_LINEAR_RANGES=y +# CONFIG_PACKING is not set +CONFIG_BITREVERSE=y +CONFIG_HAVE_ARCH_BITREVERSE=y +CONFIG_GENERIC_STRNCPY_FROM_USER=y +CONFIG_GENERIC_STRNLEN_USER=y +CONFIG_GENERIC_NET_UTILS=y +CONFIG_CORDIC=m +# CONFIG_PRIME_NUMBERS is not set +CONFIG_RATIONAL=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y +CONFIG_ARCH_HAS_FAST_MULTIPLIER=y +CONFIG_ARCH_USE_SYM_ANNOTATIONS=y +CONFIG_INDIRECT_PIO=y +# CONFIG_TRACE_MMIO_ACCESS is not set + +# +# Crypto library routines +# +CONFIG_CRYPTO_LIB_UTILS=y +CONFIG_CRYPTO_LIB_AES=y +CONFIG_CRYPTO_LIB_ARC4=m +CONFIG_CRYPTO_LIB_GF128MUL=m +CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=y +CONFIG_CRYPTO_LIB_CHACHA_GENERIC=m +# CONFIG_CRYPTO_LIB_CHACHA is not set +# CONFIG_CRYPTO_LIB_CURVE25519 is not set +CONFIG_CRYPTO_LIB_DES=m +CONFIG_CRYPTO_LIB_POLY1305_RSIZE=9 +CONFIG_CRYPTO_LIB_POLY1305_GENERIC=m +# CONFIG_CRYPTO_LIB_POLY1305 is not set +# CONFIG_CRYPTO_LIB_CHACHA20POLY1305 is not set +CONFIG_CRYPTO_LIB_SHA1=y +CONFIG_CRYPTO_LIB_SHA256=y +# end of Crypto library routines + +CONFIG_CRC_CCITT=m +CONFIG_CRC16=m +CONFIG_CRC_T10DIF=y +CONFIG_CRC64_ROCKSOFT=m +CONFIG_CRC_ITU_T=m +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +CONFIG_CRC64=m +# CONFIG_CRC4 is not set +CONFIG_CRC7=m +CONFIG_LIBCRC32C=m +CONFIG_CRC8=y +CONFIG_XXHASH=y +CONFIG_AUDIT_GENERIC=y +CONFIG_AUDIT_ARCH_COMPAT_GENERIC=y +CONFIG_AUDIT_COMPAT_GENERIC=y +# CONFIG_RANDOM32_SELFTEST is not set +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_LZ4_COMPRESS=m +CONFIG_LZ4HC_COMPRESS=m +CONFIG_LZ4_DECOMPRESS=y +CONFIG_ZSTD_COMMON=y +CONFIG_ZSTD_COMPRESS=y +CONFIG_ZSTD_DECOMPRESS=y +CONFIG_XZ_DEC=y +# CONFIG_XZ_DEC_X86 is 
not set +# CONFIG_XZ_DEC_POWERPC is not set +# CONFIG_XZ_DEC_IA64 is not set +# CONFIG_XZ_DEC_ARM is not set +# CONFIG_XZ_DEC_ARMTHUMB is not set +# CONFIG_XZ_DEC_SPARC is not set +# CONFIG_XZ_DEC_MICROLZMA is not set +# CONFIG_XZ_DEC_TEST is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_DECOMPRESS_BZIP2=y +CONFIG_DECOMPRESS_LZMA=y +CONFIG_DECOMPRESS_XZ=y +CONFIG_DECOMPRESS_LZO=y +CONFIG_DECOMPRESS_LZ4=y +CONFIG_DECOMPRESS_ZSTD=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_REED_SOLOMON=m +CONFIG_REED_SOLOMON_ENC8=y +CONFIG_REED_SOLOMON_DEC8=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=m +CONFIG_TEXTSEARCH_BM=m +CONFIG_TEXTSEARCH_FSM=m +CONFIG_BTREE=y +CONFIG_INTERVAL_TREE=y +CONFIG_XARRAY_MULTI=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAS_IOPORT_MAP=y +CONFIG_HAS_DMA=y +CONFIG_DMA_OPS=y +CONFIG_NEED_SG_DMA_FLAGS=y +CONFIG_NEED_SG_DMA_LENGTH=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_DMA_DECLARE_COHERENT=y +CONFIG_ARCH_HAS_SETUP_DMA_OPS=y +CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS=y +CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE=y +CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU=y +CONFIG_ARCH_HAS_DMA_PREP_COHERENT=y +CONFIG_SWIOTLB=y +# CONFIG_SWIOTLB_DYNAMIC is not set +CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC=y +# CONFIG_DMA_RESTRICTED_POOL is not set +CONFIG_DMA_NONCOHERENT_MMAP=y +CONFIG_DMA_COHERENT_POOL=y +CONFIG_DMA_DIRECT_REMAP=y +# CONFIG_DMA_API_DEBUG is not set +# CONFIG_DMA_MAP_BENCHMARK is not set +CONFIG_SGL_ALLOC=y +CONFIG_CHECK_SIGNATURE=y +CONFIG_CPU_RMAP=y +CONFIG_DQL=y +CONFIG_GLOB=y +# CONFIG_GLOB_SELFTEST is not set +CONFIG_NLATTR=y +CONFIG_LRU_CACHE=m +CONFIG_CLZ_TAB=y +CONFIG_IRQ_POLL=y +CONFIG_MPILIB=y +CONFIG_SIGNATURE=y +CONFIG_DIMLIB=y +CONFIG_LIBFDT=y +CONFIG_OID_REGISTRY=y +CONFIG_UCS2_STRING=y +CONFIG_HAVE_GENERIC_VDSO=y +CONFIG_GENERIC_GETTIMEOFDAY=y +CONFIG_GENERIC_VDSO_TIME_NS=y +CONFIG_FONT_SUPPORT=y +# CONFIG_FONTS is not set +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y +CONFIG_SG_POOL=y +CONFIG_ARCH_HAS_PMEM_API=y +CONFIG_MEMREGION=y +CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y +CONFIG_ARCH_STACKWALK=y +CONFIG_STACKDEPOT=y +CONFIG_SBITMAP=y +# end of Library routines + +CONFIG_GENERIC_IOREMAP=y +CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED=y +CONFIG_PLDMFW=y + +# +# Kernel hacking +# + +# +# printk and dmesg options +# +CONFIG_PRINTK_TIME=y +CONFIG_PRINTK_CALLER=y +# CONFIG_STACKTRACE_BUILD_ID is not set +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 +CONFIG_CONSOLE_LOGLEVEL_QUIET=4 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 +CONFIG_BOOT_PRINTK_DELAY=y +CONFIG_DYNAMIC_DEBUG=y +CONFIG_DYNAMIC_DEBUG_CORE=y +CONFIG_SYMBOLIC_ERRNAME=y +CONFIG_DEBUG_BUGVERBOSE=y +# end of printk and dmesg options + +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_MISC=y + +# +# Compile-time checks and compiler options +# +CONFIG_DEBUG_INFO=y +CONFIG_AS_HAS_NON_CONST_LEB128=y +# CONFIG_DEBUG_INFO_NONE is not set +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y +# CONFIG_DEBUG_INFO_DWARF4 is not set +# CONFIG_DEBUG_INFO_DWARF5 is not set +# CONFIG_DEBUG_INFO_REDUCED is not set +CONFIG_DEBUG_INFO_COMPRESSED_NONE=y +# CONFIG_DEBUG_INFO_COMPRESSED_ZLIB is not set +# CONFIG_DEBUG_INFO_SPLIT is not set +CONFIG_DEBUG_INFO_BTF=y +CONFIG_PAHOLE_HAS_SPLIT_BTF=y +CONFIG_PAHOLE_HAS_LANG_EXCLUDE=y +CONFIG_DEBUG_INFO_BTF_MODULES=y +# CONFIG_MODULE_ALLOW_BTF_MISMATCH is not set +# CONFIG_GDB_SCRIPTS is not set +CONFIG_FRAME_WARN=2048 +CONFIG_STRIP_ASM_SYMS=y +# CONFIG_READABLE_ASM is not set +# CONFIG_HEADERS_INSTALL is not set +# CONFIG_DEBUG_SECTION_MISMATCH is not set +CONFIG_SECTION_MISMATCH_WARN_ONLY=y +# 
CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B is not set +CONFIG_ARCH_WANT_FRAME_POINTERS=y +CONFIG_FRAME_POINTER=y +# CONFIG_VMLINUX_MAP is not set +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# end of Compile-time checks and compiler options + +# +# Generic Kernel Debugging Instruments +# +CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x01b6 +CONFIG_MAGIC_SYSRQ_SERIAL=y +CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE="" +CONFIG_DEBUG_FS=y +CONFIG_DEBUG_FS_ALLOW_ALL=y +# CONFIG_DEBUG_FS_DISALLOW_MOUNT is not set +# CONFIG_DEBUG_FS_ALLOW_NONE is not set +CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_KGDB is not set +CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y +# CONFIG_UBSAN is not set +CONFIG_HAVE_ARCH_KCSAN=y +CONFIG_HAVE_KCSAN_COMPILER=y +# CONFIG_KCSAN is not set +# end of Generic Kernel Debugging Instruments + +# +# Networking Debugging +# +# CONFIG_NET_DEV_REFCNT_TRACKER is not set +# CONFIG_NET_NS_REFCNT_TRACKER is not set +# CONFIG_DEBUG_NET is not set +# end of Networking Debugging + +# +# Memory Debugging +# +CONFIG_PAGE_EXTENSION=y +# CONFIG_DEBUG_PAGEALLOC is not set +CONFIG_SLUB_DEBUG=y +# CONFIG_SLUB_DEBUG_ON is not set +CONFIG_PAGE_OWNER=y +# CONFIG_PAGE_TABLE_CHECK is not set +CONFIG_PAGE_POISONING=y +# CONFIG_DEBUG_PAGE_REF is not set +# CONFIG_DEBUG_RODATA_TEST is not set +CONFIG_ARCH_HAS_DEBUG_WX=y +# CONFIG_DEBUG_WX is not set +CONFIG_GENERIC_PTDUMP=y +# CONFIG_PTDUMP_DEBUGFS is not set +CONFIG_HAVE_DEBUG_KMEMLEAK=y +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_PER_VMA_LOCK_STATS is not set +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_SHRINKER_DEBUG is not set +# CONFIG_DEBUG_STACK_USAGE is not set +CONFIG_SCHED_STACK_END_CHECK=y +CONFIG_ARCH_HAS_DEBUG_VM_PGTABLE=y +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_VM_PGTABLE is not set +CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y +# CONFIG_DEBUG_VIRTUAL is not set +CONFIG_DEBUG_MEMORY_INIT=y +CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m +# CONFIG_DEBUG_PER_CPU_MAPS is not set +CONFIG_HAVE_ARCH_KASAN=y +CONFIG_HAVE_ARCH_KASAN_SW_TAGS=y +CONFIG_HAVE_ARCH_KASAN_HW_TAGS=y +CONFIG_HAVE_ARCH_KASAN_VMALLOC=y +CONFIG_CC_HAS_KASAN_GENERIC=y +CONFIG_CC_HAS_KASAN_SW_TAGS=y +CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS=y +# CONFIG_KASAN is not set +CONFIG_HAVE_ARCH_KFENCE=y +CONFIG_KFENCE=y +CONFIG_KFENCE_SAMPLE_INTERVAL=0 +CONFIG_KFENCE_NUM_OBJECTS=511 +# CONFIG_KFENCE_DEFERRABLE is not set +# CONFIG_KFENCE_STATIC_KEYS is not set +CONFIG_KFENCE_STRESS_TEST_FAULTS=0 +# end of Memory Debugging + +# CONFIG_DEBUG_SHIRQ is not set + +# +# Debug Oops, Lockups and Hangs +# +# CONFIG_PANIC_ON_OOPS is not set +CONFIG_PANIC_ON_OOPS_VALUE=0 +CONFIG_PANIC_TIMEOUT=0 +CONFIG_LOCKUP_DETECTOR=y +CONFIG_SOFTLOCKUP_DETECTOR=y +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_HAVE_HARDLOCKUP_DETECTOR_BUDDY=y +# CONFIG_HARDLOCKUP_DETECTOR is not set +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +# CONFIG_WQ_WATCHDOG is not set +# CONFIG_WQ_CPU_INTENSIVE_REPORT is not set +# CONFIG_TEST_LOCKUP is not set +# end of Debug Oops, Lockups and Hangs + +# +# Scheduler Debugging +# +CONFIG_SCHED_DEBUG=y +CONFIG_SCHED_INFO=y +CONFIG_SCHEDSTATS=y +# end of Scheduler Debugging + +# CONFIG_DEBUG_TIMEKEEPING is not set +# CONFIG_DEBUG_PREEMPT is not set + +# +# Lock Debugging (spinlocks, mutexes, etc...) 
+# +CONFIG_LOCK_DEBUGGING_SUPPORT=y +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set +# CONFIG_DEBUG_RWSEMS is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_DEBUG_ATOMIC_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_LOCK_TORTURE_TEST is not set +# CONFIG_WW_MUTEX_SELFTEST is not set +# CONFIG_SCF_TORTURE_TEST is not set +# CONFIG_CSD_LOCK_WAIT_DEBUG is not set +# end of Lock Debugging (spinlocks, mutexes, etc...) + +# CONFIG_DEBUG_IRQFLAGS is not set +CONFIG_STACKTRACE=y +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set +# CONFIG_DEBUG_KOBJECT is not set + +# +# Debug kernel data structures +# +CONFIG_DEBUG_LIST=y +# CONFIG_DEBUG_PLIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_MAPLE_TREE is not set +# end of Debug kernel data structures + +# +# RCU Debugging +# +# CONFIG_RCU_SCALE_TEST is not set +# CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_RCU_REF_SCALE_TEST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=21 +CONFIG_RCU_EXP_CPU_STALL_TIMEOUT=0 +# CONFIG_RCU_CPU_STALL_CPUTIME is not set +# CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# end of RCU Debugging + +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +# CONFIG_LATENCYTOP is not set +# CONFIG_DEBUG_CGROUP_REF is not set +CONFIG_NOP_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_RETVAL=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACER_MAX_TRACE=y +CONFIG_TRACE_CLOCK=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +# CONFIG_BOOTTIME_TRACING is not set +CONFIG_FUNCTION_TRACER=y +CONFIG_FUNCTION_GRAPH_TRACER=y +# CONFIG_FUNCTION_GRAPH_RETVAL is not set +CONFIG_DYNAMIC_FTRACE=y +CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y +CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS=y +CONFIG_DYNAMIC_FTRACE_WITH_ARGS=y +# CONFIG_FUNCTION_PROFILER is not set +CONFIG_STACK_TRACER=y +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_PREEMPT_TRACER is not set +# CONFIG_SCHED_TRACER is not set +# CONFIG_HWLAT_TRACER is not set +# CONFIG_OSNOISE_TRACER is not set +# CONFIG_TIMERLAT_TRACER is not set +CONFIG_FTRACE_SYSCALLS=y +CONFIG_TRACER_SNAPSHOT=y +# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_PROBE_EVENTS_BTF_ARGS=y +CONFIG_KPROBE_EVENTS=y +# CONFIG_KPROBE_EVENTS_ON_NOTRACE is not set +CONFIG_UPROBE_EVENTS=y +CONFIG_BPF_EVENTS=y +CONFIG_DYNAMIC_EVENTS=y +CONFIG_PROBE_EVENTS=y +# CONFIG_BPF_KPROBE_OVERRIDE is not set +CONFIG_FTRACE_MCOUNT_RECORD=y +CONFIG_FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY=y +# CONFIG_SYNTH_EVENTS is not set +# CONFIG_USER_EVENTS is not set +# CONFIG_HIST_TRIGGERS is not set +# CONFIG_TRACE_EVENT_INJECT is not set +# CONFIG_TRACEPOINT_BENCHMARK is not set +# CONFIG_RING_BUFFER_BENCHMARK is not set +# CONFIG_TRACE_EVAL_MAP_FILE is not set +# CONFIG_FTRACE_RECORD_RECURSION is not set +# CONFIG_FTRACE_STARTUP_TEST is not set +# 
CONFIG_RING_BUFFER_STARTUP_TEST is not set +# CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS is not set +# CONFIG_PREEMPTIRQ_DELAY_TEST is not set +# CONFIG_KPROBE_EVENT_GEN_TEST is not set +# CONFIG_RV is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_SAMPLE_FTRACE_DIRECT=y +CONFIG_HAVE_SAMPLE_FTRACE_DIRECT_MULTI=y +CONFIG_STRICT_DEVMEM=y +# CONFIG_IO_STRICT_DEVMEM is not set + +# +# arm64 Debugging +# +# CONFIG_PID_IN_CONTEXTIDR is not set +# CONFIG_DEBUG_EFI is not set +# CONFIG_ARM64_RELOC_TEST is not set +# CONFIG_CORESIGHT is not set +# end of arm64 Debugging + +# +# Kernel Testing and Coverage +# +# CONFIG_KUNIT is not set +CONFIG_NOTIFIER_ERROR_INJECTION=m +CONFIG_PM_NOTIFIER_ERROR_INJECT=m +# CONFIG_NETDEV_NOTIFIER_ERROR_INJECT is not set +CONFIG_FUNCTION_ERROR_INJECTION=y +CONFIG_FAULT_INJECTION=y +# CONFIG_FAILSLAB is not set +# CONFIG_FAIL_PAGE_ALLOC is not set +# CONFIG_FAULT_INJECTION_USERCOPY is not set +# CONFIG_FAIL_MAKE_REQUEST is not set +# CONFIG_FAIL_IO_TIMEOUT is not set +# CONFIG_FAIL_FUTEX is not set +# CONFIG_FAULT_INJECTION_DEBUG_FS is not set +# CONFIG_FAULT_INJECTION_CONFIGFS is not set +CONFIG_ARCH_HAS_KCOV=y +CONFIG_CC_HAS_SANCOV_TRACE_PC=y +# CONFIG_KCOV is not set +CONFIG_RUNTIME_TESTING_MENU=y +# CONFIG_TEST_DHRY is not set +# CONFIG_LKDTM is not set +# CONFIG_TEST_MIN_HEAP is not set +# CONFIG_TEST_DIV64 is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_TEST_REF_TRACKER is not set +# CONFIG_RBTREE_TEST is not set +# CONFIG_REED_SOLOMON_TEST is not set +# CONFIG_INTERVAL_TREE_TEST is not set +# CONFIG_PERCPU_TEST is not set +# CONFIG_ATOMIC64_SELFTEST is not set +# CONFIG_ASYNC_RAID6_TEST is not set +# CONFIG_TEST_HEXDUMP is not set +# CONFIG_STRING_SELFTEST is not set +# CONFIG_TEST_STRING_HELPERS is not set +# CONFIG_TEST_KSTRTOX is not set +# CONFIG_TEST_PRINTF is not set +# CONFIG_TEST_SCANF is not set +# CONFIG_TEST_BITMAP is not set +# CONFIG_TEST_UUID is not set +# CONFIG_TEST_XARRAY is not set +# CONFIG_TEST_MAPLE_TREE is not set +# CONFIG_TEST_RHASHTABLE is not set +# CONFIG_TEST_IDA is not set +# CONFIG_TEST_LKM is not set +# CONFIG_TEST_BITOPS is not set +# CONFIG_TEST_VMALLOC is not set +CONFIG_TEST_USER_COPY=m +CONFIG_TEST_BPF=m +# CONFIG_TEST_BLACKHOLE_DEV is not set +# CONFIG_FIND_BIT_BENCHMARK is not set +CONFIG_TEST_FIRMWARE=m +# CONFIG_TEST_SYSCTL is not set +# CONFIG_TEST_UDELAY is not set +CONFIG_TEST_STATIC_KEYS=m +# CONFIG_TEST_DYNAMIC_DEBUG is not set +# CONFIG_TEST_KMOD is not set +# CONFIG_TEST_MEMCAT_P is not set +CONFIG_TEST_LIVEPATCH=m +# CONFIG_TEST_MEMINIT is not set +# CONFIG_TEST_HMM is not set +# CONFIG_TEST_FREE_PAGES is not set +CONFIG_ARCH_USE_MEMTEST=y +# CONFIG_MEMTEST is not set +# CONFIG_HYPERV_TESTING is not set +# end of Kernel Testing and Coverage + +# +# Rust hacking +# +# end of Rust hacking +# end of Kernel hacking diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c index 92e86ae9db29..fd67a4834bf0 100644 --- a/drivers/dma/idxd/init.c +++ b/drivers/dma/idxd/init.c @@ -659,7 +659,7 @@ static int idxd_enable_system_pasid(struct idxd_device *idxd) * DMA domain is owned by the driver, it should support all valid * types such as DMA-FQ, identity, etc. 
*/ - ret = iommu_attach_device_pasid(domain, dev, pasid); + ret = iommu_attach_device_pasid(domain, dev, pasid, NULL); if (ret) { dev_err(dev, "failed to attach device pasid %d, domain type %d", pasid, domain->type); diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index 373bcd79257e..03d1c76aec2d 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -960,7 +960,8 @@ int host1x_client_iommu_attach(struct host1x_client *client) * not the shared IOMMU domain, don't try to attach it to a different * domain. This allows using the IOMMU-backed DMA API. */ - if (domain && domain != tegra->domain) + if (domain && domain->type != IOMMU_DOMAIN_IDENTITY && + domain != tegra->domain) return 0; if (tegra->domain) { diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c index 7c6699aed7d2..2485167ed361 100644 --- a/drivers/gpu/host1x/dev.c +++ b/drivers/gpu/host1x/dev.c @@ -342,6 +342,10 @@ static bool host1x_wants_iommu(struct host1x *host1x) return true; } +/* + * Returns ERR_PTR on failure, NULL if the translation is IDENTITY, otherwise a + * valid paging domain. + */ static struct iommu_domain *host1x_iommu_attach(struct host1x *host) { struct iommu_domain *domain = iommu_get_domain_for_dev(host->dev); @@ -366,6 +370,8 @@ static struct iommu_domain *host1x_iommu_attach(struct host1x *host) * Similarly, if host1x is already attached to an IOMMU (via the DMA * API), don't try to attach again. */ + if (domain && domain->type == IOMMU_DOMAIN_IDENTITY) + domain = NULL; if (!host1x_wants_iommu(host) || domain) return domain; diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index 20aa5ed80aa3..0a6f73945064 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -7,6 +7,10 @@ config IOMMU_IOVA config IOMMU_API bool +config IOMMUFD_DRIVER + bool + default n + menuconfig IOMMU_SUPPORT bool "IOMMU Hardware Support" depends on MMU @@ -156,6 +160,10 @@ config IOMMU_DMA # Shared Virtual Addressing config IOMMU_SVA + select IOMMU_MM_DATA + bool + +config IOMMU_IOPF bool config FSL_PAMU @@ -235,17 +243,6 @@ config SUN50I_IOMMU help Support for the IOMMU introduced in the Allwinner H6 SoCs. -config TEGRA_IOMMU_GART - bool "Tegra GART IOMMU Support" - depends on ARCH_TEGRA_2x_SOC - depends on TEGRA_MC - select IOMMU_API - help - Enables support for remapping discontiguous physical memory - shared with the operating system into contiguous I/O virtual - space through the GART (Graphics Address Relocation Table) - hardware included on Tegra SoCs. - config TEGRA_IOMMU_SMMU bool "NVIDIA Tegra SMMU Support" depends on ARCH_TEGRA @@ -399,10 +396,11 @@ config ARM_SMMU_V3 Say Y here if your system includes an IOMMU device implementing the ARM SMMUv3 architecture. +if ARM_SMMU_V3 config ARM_SMMU_V3_SVA bool "Shared Virtual Addressing support for the ARM SMMUv3" - depends on ARM_SMMU_V3 select IOMMU_SVA + select IOMMU_IOPF select MMU_NOTIFIER help Support for sharing process address spaces with devices using the @@ -411,6 +409,17 @@ config ARM_SMMU_V3_SVA Say Y here if your system supports SVA extensions such as PCIe PASID and PRI. +config ARM_SMMU_V3_KUNIT_TEST + tristate "KUnit tests for arm-smmu-v3 driver" if !KUNIT_ALL_TESTS + depends on KUNIT + depends on ARM_SMMU_V3_SVA + default KUNIT_ALL_TESTS + help + Enable this option to unit-test arm-smmu-v3 driver functions. + + If unsure, say N. 
+endif + config S390_IOMMU def_bool y if S390 && PCI depends on S390 && PCI diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile index 769e43d780ce..542760d963ec 100644 --- a/drivers/iommu/Makefile +++ b/drivers/iommu/Makefile @@ -20,13 +20,13 @@ obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o obj-$(CONFIG_ROCKCHIP_IOMMU) += rockchip-iommu.o obj-$(CONFIG_SUN50I_IOMMU) += sun50i-iommu.o -obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o obj-$(CONFIG_FSL_PAMU) += fsl_pamu.o fsl_pamu_domain.o obj-$(CONFIG_S390_IOMMU) += s390-iommu.o obj-$(CONFIG_HYPERV_IOMMU) += hyperv-iommu.o obj-$(CONFIG_VIRTIO_IOMMU) += virtio-iommu.o -obj-$(CONFIG_IOMMU_SVA) += iommu-sva.o io-pgfault.o +obj-$(CONFIG_IOMMU_SVA) += iommu-sva.o +obj-$(CONFIG_IOMMU_IOPF) += io-pgfault.o obj-$(CONFIG_SPRD_IOMMU) += sprd-iommu.o obj-$(CONFIG_APPLE_DART) += apple-dart.o diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index 23cfb98fe90a..97aec5c21203 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -2460,6 +2460,10 @@ static int amd_iommu_def_domain_type(struct device *dev) if (!dev_data) return 0; + /* Always use DMA domain for untrusted device */ + if (dev_is_pci(dev) && to_pci_dev(dev)->untrusted) + return IOMMU_DOMAIN_DMA; + /* * Do not identity map IOMMUv2 capable devices when: * - memory encryption is active, because some of those devices diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c index 0b8927508427..821b4a3465df 100644 --- a/drivers/iommu/apple-dart.c +++ b/drivers/iommu/apple-dart.c @@ -196,7 +196,6 @@ struct apple_dart_hw { * @lock: lock for hardware operations involving this dart * @pgsize: pagesize supported by this DART * @supports_bypass: indicates if this DART supports bypass mode - * @force_bypass: force bypass mode due to pagesize mismatch? 
* @sid2group: maps stream ids to iommu_groups * @iommu: iommu core device */ @@ -217,7 +216,6 @@ struct apple_dart { u32 pgsize; u32 num_streams; u32 supports_bypass : 1; - u32 force_bypass : 1; struct iommu_group *sid2group[DART_MAX_STREAMS]; struct iommu_device iommu; @@ -568,15 +566,17 @@ apple_dart_setup_translation(struct apple_dart_domain *domain, stream_map->dart->hw->invalidate_tlb(stream_map); } -static int apple_dart_finalize_domain(struct iommu_domain *domain, +static int apple_dart_finalize_domain(struct apple_dart_domain *dart_domain, struct apple_dart_master_cfg *cfg) { - struct apple_dart_domain *dart_domain = to_dart_domain(domain); struct apple_dart *dart = cfg->stream_maps[0].dart; struct io_pgtable_cfg pgtbl_cfg; int ret = 0; int i, j; + if (dart->pgsize > PAGE_SIZE) + return -EINVAL; + mutex_lock(&dart_domain->init_lock); if (dart_domain->finalized) @@ -597,17 +597,18 @@ static int apple_dart_finalize_domain(struct iommu_domain *domain, .iommu_dev = dart->dev, }; - dart_domain->pgtbl_ops = - alloc_io_pgtable_ops(dart->hw->fmt, &pgtbl_cfg, domain); + dart_domain->pgtbl_ops = alloc_io_pgtable_ops(dart->hw->fmt, &pgtbl_cfg, + &dart_domain->domain); if (!dart_domain->pgtbl_ops) { ret = -ENOMEM; goto done; } - domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap; - domain->geometry.aperture_start = 0; - domain->geometry.aperture_end = (dma_addr_t)DMA_BIT_MASK(dart->ias); - domain->geometry.force_aperture = true; + dart_domain->domain.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap; + dart_domain->domain.geometry.aperture_start = 0; + dart_domain->domain.geometry.aperture_end = + (dma_addr_t)DMA_BIT_MASK(dart->ias); + dart_domain->domain.geometry.force_aperture = true; dart_domain->finalized = true; @@ -651,47 +652,72 @@ static int apple_dart_domain_add_streams(struct apple_dart_domain *domain, true); } -static int apple_dart_attach_dev(struct iommu_domain *domain, - struct device *dev) +static int apple_dart_attach_dev_paging(struct iommu_domain *domain, + struct device *dev) { int ret, i; struct apple_dart_stream_map *stream_map; struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev); struct apple_dart_domain *dart_domain = to_dart_domain(domain); - if (cfg->stream_maps[0].dart->force_bypass && - domain->type != IOMMU_DOMAIN_IDENTITY) - return -EINVAL; - if (!cfg->stream_maps[0].dart->supports_bypass && - domain->type == IOMMU_DOMAIN_IDENTITY) - return -EINVAL; + ret = apple_dart_finalize_domain(dart_domain, cfg); + if (ret) + return ret; - ret = apple_dart_finalize_domain(domain, cfg); + ret = apple_dart_domain_add_streams(dart_domain, cfg); if (ret) return ret; - switch (domain->type) { - default: - ret = apple_dart_domain_add_streams(dart_domain, cfg); - if (ret) - return ret; + for_each_stream_map(i, cfg, stream_map) + apple_dart_setup_translation(dart_domain, stream_map); + return 0; +} - for_each_stream_map(i, cfg, stream_map) - apple_dart_setup_translation(dart_domain, stream_map); - break; - case IOMMU_DOMAIN_BLOCKED: - for_each_stream_map(i, cfg, stream_map) - apple_dart_hw_disable_dma(stream_map); - break; - case IOMMU_DOMAIN_IDENTITY: - for_each_stream_map(i, cfg, stream_map) - apple_dart_hw_enable_bypass(stream_map); - break; - } +static int apple_dart_attach_dev_identity(struct iommu_domain *domain, + struct device *dev) +{ + struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev); + struct apple_dart_stream_map *stream_map; + int i; - return ret; + if (!cfg->stream_maps[0].dart->supports_bypass) + return -EINVAL; + + for_each_stream_map(i, cfg, stream_map) + 
apple_dart_hw_enable_bypass(stream_map); + return 0; } +static const struct iommu_domain_ops apple_dart_identity_ops = { + .attach_dev = apple_dart_attach_dev_identity, +}; + +static struct iommu_domain apple_dart_identity_domain = { + .type = IOMMU_DOMAIN_IDENTITY, + .ops = &apple_dart_identity_ops, +}; + +static int apple_dart_attach_dev_blocked(struct iommu_domain *domain, + struct device *dev) +{ + struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev); + struct apple_dart_stream_map *stream_map; + int i; + + for_each_stream_map(i, cfg, stream_map) + apple_dart_hw_disable_dma(stream_map); + return 0; +} + +static const struct iommu_domain_ops apple_dart_blocked_ops = { + .attach_dev = apple_dart_attach_dev_blocked, +}; + +static struct iommu_domain apple_dart_blocked_domain = { + .type = IOMMU_DOMAIN_BLOCKED, + .ops = &apple_dart_blocked_ops, +}; + static struct iommu_device *apple_dart_probe_device(struct device *dev) { struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev); @@ -717,24 +743,26 @@ static void apple_dart_release_device(struct device *dev) kfree(cfg); } -static struct iommu_domain *apple_dart_domain_alloc(unsigned int type) +static struct iommu_domain *apple_dart_domain_alloc_paging(struct device *dev) { struct apple_dart_domain *dart_domain; - if (type != IOMMU_DOMAIN_DMA && type != IOMMU_DOMAIN_UNMANAGED && - type != IOMMU_DOMAIN_IDENTITY && type != IOMMU_DOMAIN_BLOCKED) - return NULL; - dart_domain = kzalloc(sizeof(*dart_domain), GFP_KERNEL); if (!dart_domain) return NULL; mutex_init(&dart_domain->init_lock); - /* no need to allocate pgtbl_ops or do any other finalization steps */ - if (type == IOMMU_DOMAIN_IDENTITY || type == IOMMU_DOMAIN_BLOCKED) - dart_domain->finalized = true; + if (dev) { + struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev); + int ret; + ret = apple_dart_finalize_domain(dart_domain, cfg); + if (ret) { + kfree(dart_domain); + return ERR_PTR(ret); + } + } return &dart_domain->domain; } @@ -770,8 +798,6 @@ static int apple_dart_of_xlate(struct device *dev, struct of_phandle_args *args) if (cfg_dart) { if (cfg_dart->supports_bypass != dart->supports_bypass) return -EINVAL; - if (cfg_dart->force_bypass != dart->force_bypass) - return -EINVAL; if (cfg_dart->pgsize != dart->pgsize) return -EINVAL; } @@ -913,7 +939,7 @@ static int apple_dart_def_domain_type(struct device *dev) { struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev); - if (cfg->stream_maps[0].dart->force_bypass) + if (cfg->stream_maps[0].dart->pgsize > PAGE_SIZE) return IOMMU_DOMAIN_IDENTITY; if (!cfg->stream_maps[0].dart->supports_bypass) return IOMMU_DOMAIN_DMA; @@ -947,7 +973,9 @@ static void apple_dart_get_resv_regions(struct device *dev, } static const struct iommu_ops apple_dart_iommu_ops = { - .domain_alloc = apple_dart_domain_alloc, + .identity_domain = &apple_dart_identity_domain, + .blocked_domain = &apple_dart_blocked_domain, + .domain_alloc_paging = apple_dart_domain_alloc_paging, .probe_device = apple_dart_probe_device, .release_device = apple_dart_release_device, .device_group = apple_dart_device_group, @@ -957,7 +985,7 @@ static const struct iommu_ops apple_dart_iommu_ops = { .pgsize_bitmap = -1UL, /* Restricted during dart probe */ .owner = THIS_MODULE, .default_domain_ops = &(const struct iommu_domain_ops) { - .attach_dev = apple_dart_attach_dev, + .attach_dev = apple_dart_attach_dev_paging, .map_pages = apple_dart_map_pages, .unmap_pages = apple_dart_unmap_pages, .flush_iotlb_all = apple_dart_flush_iotlb_all, @@ -1111,8 +1139,6 @@ static int 
apple_dart_probe(struct platform_device *pdev) goto err_clk_disable; } - dart->force_bypass = dart->pgsize > PAGE_SIZE; - ret = apple_dart_hw_reset(dart); if (ret) goto err_clk_disable; @@ -1136,7 +1162,8 @@ static int apple_dart_probe(struct platform_device *pdev) dev_info( &pdev->dev, "DART [pagesize %x, %d streams, bypass support: %d, bypass forced: %d] initialized\n", - dart->pgsize, dart->num_streams, dart->supports_bypass, dart->force_bypass); + dart->pgsize, dart->num_streams, dart->supports_bypass, + dart->pgsize > PAGE_SIZE); return 0; err_sysfs_remove: diff --git a/drivers/iommu/arm/arm-smmu-v3/Makefile b/drivers/iommu/arm/arm-smmu-v3/Makefile index 54feb1ecccad..014a997753a8 100644 --- a/drivers/iommu/arm/arm-smmu-v3/Makefile +++ b/drivers/iommu/arm/arm-smmu-v3/Makefile @@ -3,3 +3,5 @@ obj-$(CONFIG_ARM_SMMU_V3) += arm_smmu_v3.o arm_smmu_v3-objs-y += arm-smmu-v3.o arm_smmu_v3-objs-$(CONFIG_ARM_SMMU_V3_SVA) += arm-smmu-v3-sva.o arm_smmu_v3-objs := $(arm_smmu_v3-objs-y) + +obj-$(CONFIG_ARM_SMMU_V3_KUNIT_TEST) += arm-smmu-v3-test.o diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c index 8a16cd3ef487..a7c36654dee5 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c @@ -8,183 +8,112 @@ #include #include #include +#include #include "arm-smmu-v3.h" -#include "../../iommu-sva.h" #include "../../io-pgtable-arm.h" -struct arm_smmu_mmu_notifier { - struct mmu_notifier mn; - struct arm_smmu_ctx_desc *cd; - bool cleared; - refcount_t refs; - struct list_head list; - struct arm_smmu_domain *domain; -}; - -#define mn_to_smmu(mn) container_of(mn, struct arm_smmu_mmu_notifier, mn) - -struct arm_smmu_bond { - struct iommu_sva sva; - struct mm_struct *mm; - struct arm_smmu_mmu_notifier *smmu_mn; - struct list_head list; - refcount_t refs; -}; - -#define sva_to_bond(handle) \ - container_of(handle, struct arm_smmu_bond, sva) - static DEFINE_MUTEX(sva_lock); -/* - * Check if the CPU ASID is available on the SMMU side. If a private context - * descriptor is using it, try to replace it. - */ -static struct arm_smmu_ctx_desc * -arm_smmu_share_asid(struct mm_struct *mm, u16 asid) +static void __maybe_unused +arm_smmu_update_s1_domain_cd_entry(struct arm_smmu_domain *smmu_domain) { - int ret; - u32 new_asid; - struct arm_smmu_ctx_desc *cd; - struct arm_smmu_device *smmu; - struct arm_smmu_domain *smmu_domain; - - cd = xa_load(&arm_smmu_asid_xa, asid); - if (!cd) - return NULL; - - if (cd->mm) { - if (WARN_ON(cd->mm != mm)) - return ERR_PTR(-EINVAL); - /* All devices bound to this mm use the same cd struct. 
*/ - refcount_inc(&cd->refs); - return cd; + struct arm_smmu_master_domain *master_domain; + struct arm_smmu_cd target_cd; + unsigned long flags; + + spin_lock_irqsave(&smmu_domain->devices_lock, flags); + list_for_each_entry(master_domain, &smmu_domain->devices, devices_elm) { + struct arm_smmu_master *master = master_domain->master; + struct arm_smmu_cd *cdptr; + + cdptr = arm_smmu_get_cd_ptr(master, master_domain->ssid); + if (WARN_ON(!cdptr)) + continue; + + arm_smmu_make_s1_cd(&target_cd, master, smmu_domain); + arm_smmu_write_cd_entry(master, master_domain->ssid, cdptr, + &target_cd); } - - smmu_domain = container_of(cd, struct arm_smmu_domain, s1_cfg.cd); - smmu = smmu_domain->smmu; - - ret = xa_alloc(&arm_smmu_asid_xa, &new_asid, cd, - XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL); - if (ret) - return ERR_PTR(-ENOSPC); - /* - * Race with unmap: TLB invalidations will start targeting the new ASID, - * which isn't assigned yet. We'll do an invalidate-all on the old ASID - * later, so it doesn't matter. - */ - cd->asid = new_asid; - /* - * Update ASID and invalidate CD in all associated masters. There will - * be some overlap between use of both ASIDs, until we invalidate the - * TLB. - */ - arm_smmu_write_ctx_desc(smmu_domain, IOMMU_NO_PASID, cd); - - /* Invalidate TLB entries previously associated with that context */ - arm_smmu_tlb_inv_asid(smmu, asid); - - xa_erase(&arm_smmu_asid_xa, asid); - return NULL; + spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); } -static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm) +static u64 page_size_to_cd(void) { - u16 asid; - int err = 0; - u64 tcr, par, reg; - struct arm_smmu_ctx_desc *cd; - struct arm_smmu_ctx_desc *ret = NULL; - - /* Don't free the mm until we release the ASID */ - mmgrab(mm); - - asid = arm64_mm_context_get(mm); - if (!asid) { - err = -ESRCH; - goto out_drop_mm; - } - - cd = kzalloc(sizeof(*cd), GFP_KERNEL); - if (!cd) { - err = -ENOMEM; - goto out_put_context; - } - - refcount_set(&cd->refs, 1); + static_assert(PAGE_SIZE == SZ_4K || PAGE_SIZE == SZ_16K || + PAGE_SIZE == SZ_64K); + if (PAGE_SIZE == SZ_64K) + return ARM_LPAE_TCR_TG0_64K; + if (PAGE_SIZE == SZ_16K) + return ARM_LPAE_TCR_TG0_16K; + return ARM_LPAE_TCR_TG0_4K; +} - mutex_lock(&arm_smmu_asid_lock); - ret = arm_smmu_share_asid(mm, asid); - if (ret) { - mutex_unlock(&arm_smmu_asid_lock); - goto out_free_cd; - } +VISIBLE_IF_KUNIT +void arm_smmu_make_sva_cd(struct arm_smmu_cd *target, + struct arm_smmu_master *master, struct mm_struct *mm, + u16 asid) +{ + u64 par; + + memset(target, 0, sizeof(*target)); + + par = cpuid_feature_extract_unsigned_field( + read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1), + ID_AA64MMFR0_EL1_PARANGE_SHIFT); + + target->data[0] = cpu_to_le64( + CTXDESC_CD_0_TCR_EPD1 | +#ifdef __BIG_ENDIAN + CTXDESC_CD_0_ENDI | +#endif + CTXDESC_CD_0_V | + FIELD_PREP(CTXDESC_CD_0_TCR_IPS, par) | + CTXDESC_CD_0_AA64 | + (master->stall_enabled ? 
CTXDESC_CD_0_S : 0) | + CTXDESC_CD_0_R | + CTXDESC_CD_0_A | + CTXDESC_CD_0_ASET | + FIELD_PREP(CTXDESC_CD_0_ASID, asid)); - err = xa_insert(&arm_smmu_asid_xa, asid, cd, GFP_KERNEL); - mutex_unlock(&arm_smmu_asid_lock); - - if (err) - goto out_free_asid; - - tcr = FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, 64ULL - vabits_actual) | - FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, ARM_LPAE_TCR_RGN_WBWA) | - FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, ARM_LPAE_TCR_RGN_WBWA) | - FIELD_PREP(CTXDESC_CD_0_TCR_SH0, ARM_LPAE_TCR_SH_IS) | - CTXDESC_CD_0_TCR_EPD1 | CTXDESC_CD_0_AA64; - - switch (PAGE_SIZE) { - case SZ_4K: - tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_4K); - break; - case SZ_16K: - tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_16K); - break; - case SZ_64K: - tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_64K); - break; - default: - WARN_ON(1); - err = -EINVAL; - goto out_free_asid; + /* + * If no MM is passed then this creates a SVA entry that faults + * everything. arm_smmu_write_cd_entry() can hitlessly go between these + * two entries types since TTB0 is ignored by HW when EPD0 is set. + */ + if (mm) { + target->data[0] |= cpu_to_le64( + FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, + 64ULL - vabits_actual) | + FIELD_PREP(CTXDESC_CD_0_TCR_TG0, page_size_to_cd()) | + FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, + ARM_LPAE_TCR_RGN_WBWA) | + FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, + ARM_LPAE_TCR_RGN_WBWA) | + FIELD_PREP(CTXDESC_CD_0_TCR_SH0, ARM_LPAE_TCR_SH_IS)); + + target->data[1] = cpu_to_le64(virt_to_phys(mm->pgd) & + CTXDESC_CD_1_TTB0_MASK); + } else { + target->data[0] |= cpu_to_le64(CTXDESC_CD_0_TCR_EPD0); + + /* + * Disable stall and immediately generate an abort if stall + * disable is permitted. This speeds up cleanup for an unclean + * exit if the device is still doing a lot of DMA. + */ + if (!(master->smmu->features & ARM_SMMU_FEAT_STALL_FORCE)) + target->data[0] &= + cpu_to_le64(~(CTXDESC_CD_0_S | CTXDESC_CD_0_R)); } - reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1); - par = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_PARANGE_SHIFT); - tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_IPS, par); - - cd->ttbr = virt_to_phys(mm->pgd); - cd->tcr = tcr; /* * MAIR value is pretty much constant and global, so we can just get it * from the current CPU register */ - cd->mair = read_sysreg(mair_el1); - cd->asid = asid; - cd->mm = mm; - - return cd; - -out_free_asid: - arm_smmu_free_asid(cd); -out_free_cd: - kfree(cd); -out_put_context: - arm64_mm_context_put(mm); -out_drop_mm: - mmdrop(mm); - return err < 0 ? 
ERR_PTR(err) : ret; -} - -static void arm_smmu_free_shared_cd(struct arm_smmu_ctx_desc *cd) -{ - if (arm_smmu_free_asid(cd)) { - /* Unpin ASID */ - arm64_mm_context_put(cd->mm); - mmdrop(cd->mm); - kfree(cd); - } + target->data[3] = cpu_to_le64(read_sysreg(mair_el1)); } +EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_sva_cd); /* * Cloned from the MAX_TLBI_OPS in arch/arm64/include/asm/tlbflush.h, this @@ -200,8 +129,8 @@ static void arm_smmu_mm_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn, unsigned long start, unsigned long end) { - struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn); - struct arm_smmu_domain *smmu_domain = smmu_mn->domain; + struct arm_smmu_domain *smmu_domain = + container_of(mn, struct arm_smmu_domain, mmu_notifier); size_t size; /* @@ -218,47 +147,50 @@ static void arm_smmu_mm_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn, size = 0; } - if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_BTM)) { - if (!size) - arm_smmu_tlb_inv_asid(smmu_domain->smmu, - smmu_mn->cd->asid); - else - arm_smmu_tlb_inv_range_asid(start, size, - smmu_mn->cd->asid, - PAGE_SIZE, false, - smmu_domain); - } + if (!size) + arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_domain->cd.asid); + else + arm_smmu_tlb_inv_range_asid(start, size, smmu_domain->cd.asid, + PAGE_SIZE, false, smmu_domain); - arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, start, size); + arm_smmu_atc_inv_domain(smmu_domain, start, size); } static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm) { - struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn); - struct arm_smmu_domain *smmu_domain = smmu_mn->domain; - - mutex_lock(&sva_lock); - if (smmu_mn->cleared) { - mutex_unlock(&sva_lock); - return; - } + struct arm_smmu_domain *smmu_domain = + container_of(mn, struct arm_smmu_domain, mmu_notifier); + struct arm_smmu_master_domain *master_domain; + unsigned long flags; /* * DMA may still be running. Keep the cd valid to avoid C_BAD_CD events, * but disable translation. 
*/ - arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, &quiet_cd); - - arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_mn->cd->asid); - arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, 0, 0); + spin_lock_irqsave(&smmu_domain->devices_lock, flags); + list_for_each_entry(master_domain, &smmu_domain->devices, + devices_elm) { + struct arm_smmu_master *master = master_domain->master; + struct arm_smmu_cd target; + struct arm_smmu_cd *cdptr; + + cdptr = arm_smmu_get_cd_ptr(master, master_domain->ssid); + if (WARN_ON(!cdptr)) + continue; + arm_smmu_make_sva_cd(&target, master, NULL, + smmu_domain->cd.asid); + arm_smmu_write_cd_entry(master, master_domain->ssid, cdptr, + &target); + } + spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); - smmu_mn->cleared = true; - mutex_unlock(&sva_lock); + arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_domain->cd.asid); + arm_smmu_atc_inv_domain(smmu_domain, 0, 0); } static void arm_smmu_mmu_notifier_free(struct mmu_notifier *mn) { - kfree(mn_to_smmu(mn)); + kfree(container_of(mn, struct arm_smmu_domain, mmu_notifier)); } static const struct mmu_notifier_ops arm_smmu_mmu_notifier_ops = { @@ -267,126 +199,6 @@ static const struct mmu_notifier_ops arm_smmu_mmu_notifier_ops = { .free_notifier = arm_smmu_mmu_notifier_free, }; -/* Allocate or get existing MMU notifier for this {domain, mm} pair */ -static struct arm_smmu_mmu_notifier * -arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain, - struct mm_struct *mm) -{ - int ret; - struct arm_smmu_ctx_desc *cd; - struct arm_smmu_mmu_notifier *smmu_mn; - - list_for_each_entry(smmu_mn, &smmu_domain->mmu_notifiers, list) { - if (smmu_mn->mn.mm == mm) { - refcount_inc(&smmu_mn->refs); - return smmu_mn; - } - } - - cd = arm_smmu_alloc_shared_cd(mm); - if (IS_ERR(cd)) - return ERR_CAST(cd); - - smmu_mn = kzalloc(sizeof(*smmu_mn), GFP_KERNEL); - if (!smmu_mn) { - ret = -ENOMEM; - goto err_free_cd; - } - - refcount_set(&smmu_mn->refs, 1); - smmu_mn->cd = cd; - smmu_mn->domain = smmu_domain; - smmu_mn->mn.ops = &arm_smmu_mmu_notifier_ops; - - ret = mmu_notifier_register(&smmu_mn->mn, mm); - if (ret) { - kfree(smmu_mn); - goto err_free_cd; - } - - ret = arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, cd); - if (ret) - goto err_put_notifier; - - list_add(&smmu_mn->list, &smmu_domain->mmu_notifiers); - return smmu_mn; - -err_put_notifier: - /* Frees smmu_mn */ - mmu_notifier_put(&smmu_mn->mn); -err_free_cd: - arm_smmu_free_shared_cd(cd); - return ERR_PTR(ret); -} - -static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn) -{ - struct mm_struct *mm = smmu_mn->mn.mm; - struct arm_smmu_ctx_desc *cd = smmu_mn->cd; - struct arm_smmu_domain *smmu_domain = smmu_mn->domain; - - if (!refcount_dec_and_test(&smmu_mn->refs)) - return; - - list_del(&smmu_mn->list); - arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, NULL); - - /* - * If we went through clear(), we've already invalidated, and no - * new TLB entry can have been formed. 
- */ - if (!smmu_mn->cleared) { - arm_smmu_tlb_inv_asid(smmu_domain->smmu, cd->asid); - arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, 0, 0); - } - - /* Frees smmu_mn */ - mmu_notifier_put(&smmu_mn->mn); - arm_smmu_free_shared_cd(cd); -} - -static struct iommu_sva * -__arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm) -{ - int ret; - struct arm_smmu_bond *bond; - struct arm_smmu_master *master = dev_iommu_priv_get(dev); - struct iommu_domain *domain = iommu_get_domain_for_dev(dev); - struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - - if (!master || !master->sva_enabled) - return ERR_PTR(-ENODEV); - - /* If bind() was already called for this {dev, mm} pair, reuse it. */ - list_for_each_entry(bond, &master->bonds, list) { - if (bond->mm == mm) { - refcount_inc(&bond->refs); - return &bond->sva; - } - } - - bond = kzalloc(sizeof(*bond), GFP_KERNEL); - if (!bond) - return ERR_PTR(-ENOMEM); - - bond->mm = mm; - bond->sva.dev = dev; - refcount_set(&bond->refs, 1); - - bond->smmu_mn = arm_smmu_mmu_notifier_get(smmu_domain, mm); - if (IS_ERR(bond->smmu_mn)) { - ret = PTR_ERR(bond->smmu_mn); - goto err_free_bond; - } - - list_add(&bond->list, &master->bonds); - return &bond->sva; - -err_free_bond: - kfree(bond); - return ERR_PTR(ret); -} - bool arm_smmu_sva_supported(struct arm_smmu_device *smmu) { unsigned long reg, fld; @@ -462,7 +274,6 @@ bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master) static int arm_smmu_master_sva_enable_iopf(struct arm_smmu_master *master) { - int ret; struct device *dev = master->dev; /* @@ -475,16 +286,7 @@ static int arm_smmu_master_sva_enable_iopf(struct arm_smmu_master *master) if (!master->iopf_enabled) return -EINVAL; - ret = iopf_queue_add_device(master->smmu->evtq.iopf, dev); - if (ret) - return ret; - - ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev); - if (ret) { - iopf_queue_remove_device(master->smmu->evtq.iopf, dev); - return ret; - } - return 0; + return iopf_queue_add_device(master->smmu->evtq.iopf, dev); } static void arm_smmu_master_sva_disable_iopf(struct arm_smmu_master *master) @@ -494,7 +296,6 @@ static void arm_smmu_master_sva_disable_iopf(struct arm_smmu_master *master) if (!master->iopf_enabled) return; - iommu_unregister_device_fault_handler(dev); iopf_queue_remove_device(master->smmu->evtq.iopf, dev); } @@ -514,11 +315,6 @@ int arm_smmu_master_enable_sva(struct arm_smmu_master *master) int arm_smmu_master_disable_sva(struct arm_smmu_master *master) { mutex_lock(&sva_lock); - if (!list_empty(&master->bonds)) { - dev_err(master->dev, "cannot disable SVA, device is bound\n"); - mutex_unlock(&sva_lock); - return -EBUSY; - } arm_smmu_master_sva_disable_iopf(master); master->sva_enabled = false; mutex_unlock(&sva_lock); @@ -535,48 +331,51 @@ void arm_smmu_sva_notifier_synchronize(void) mmu_notifier_synchronize(); } -void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain, - struct device *dev, ioasid_t id) -{ - struct mm_struct *mm = domain->mm; - struct arm_smmu_bond *bond = NULL, *t; - struct arm_smmu_master *master = dev_iommu_priv_get(dev); - - mutex_lock(&sva_lock); - list_for_each_entry(t, &master->bonds, list) { - if (t->mm == mm) { - bond = t; - break; - } - } - - if (!WARN_ON(!bond) && refcount_dec_and_test(&bond->refs)) { - list_del(&bond->list); - arm_smmu_mmu_notifier_put(bond->smmu_mn); - kfree(bond); - } - mutex_unlock(&sva_lock); -} - static int arm_smmu_sva_set_dev_pasid(struct iommu_domain *domain, struct device *dev, ioasid_t id) { - int ret = 0; - struct iommu_sva 
*handle; - struct mm_struct *mm = domain->mm; + struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); + struct arm_smmu_master *master = dev_iommu_priv_get(dev); + struct arm_smmu_cd target; + int ret; - mutex_lock(&sva_lock); - handle = __arm_smmu_sva_bind(dev, mm); - if (IS_ERR(handle)) - ret = PTR_ERR(handle); - mutex_unlock(&sva_lock); + /* Prevent arm_smmu_mm_release from being called while we are attaching */ + if (!mmget_not_zero(domain->mm)) + return -EINVAL; + + /* + * This does not need the arm_smmu_asid_lock because SVA domains never + * get reassigned + */ + arm_smmu_make_sva_cd(&target, master, domain->mm, smmu_domain->cd.asid); + ret = arm_smmu_set_pasid(master, smmu_domain, id, &target); + mmput(domain->mm); return ret; } static void arm_smmu_sva_domain_free(struct iommu_domain *domain) { - kfree(domain); + struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); + + /* + * Ensure the ASID is empty in the iommu cache before allowing reuse. + */ + arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_domain->cd.asid); + + /* + * Notice that the arm_smmu_mm_arch_invalidate_secondary_tlbs op can + * still be called/running at this point. We allow the ASID to be + * reused, and if there is a race then it just suffers harmless + * unnecessary invalidation. + */ + xa_erase(&arm_smmu_asid_xa, smmu_domain->cd.asid); + + /* + * Actual free is defered to the SRCU callback + * arm_smmu_mmu_notifier_free() + */ + mmu_notifier_put(&smmu_domain->mmu_notifier); } static const struct iommu_domain_ops arm_smmu_sva_domain_ops = { @@ -584,14 +383,38 @@ static const struct iommu_domain_ops arm_smmu_sva_domain_ops = { .free = arm_smmu_sva_domain_free }; -struct iommu_domain *arm_smmu_sva_domain_alloc(void) +struct iommu_domain *arm_smmu_sva_domain_alloc(struct device *dev, + struct mm_struct *mm) { - struct iommu_domain *domain; + struct arm_smmu_master *master = dev_iommu_priv_get(dev); + struct arm_smmu_device *smmu = master->smmu; + struct arm_smmu_domain *smmu_domain; + u32 asid; + int ret; + + smmu_domain = arm_smmu_domain_alloc(); + if (IS_ERR(smmu_domain)) + return ERR_CAST(smmu_domain); + smmu_domain->domain.type = IOMMU_DOMAIN_SVA; + smmu_domain->domain.ops = &arm_smmu_sva_domain_ops; + smmu_domain->smmu = smmu; + + ret = xa_alloc(&arm_smmu_asid_xa, &asid, smmu_domain, + XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL); + if (ret) + goto err_free; + + smmu_domain->cd.asid = asid; + smmu_domain->mmu_notifier.ops = &arm_smmu_mmu_notifier_ops; + ret = mmu_notifier_register(&smmu_domain->mmu_notifier, mm); + if (ret) + goto err_asid; - domain = kzalloc(sizeof(*domain), GFP_KERNEL); - if (!domain) - return NULL; - domain->ops = &arm_smmu_sva_domain_ops; + return &smmu_domain->domain; - return domain; +err_asid: + xa_erase(&arm_smmu_asid_xa, smmu_domain->cd.asid); +err_free: + kfree(smmu_domain); + return ERR_PTR(ret); } diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c new file mode 100644 index 000000000000..e0fce31eba54 --- /dev/null +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c @@ -0,0 +1,570 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2024 Google LLC. 
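+ *
+ * KUnit tests for the arm-smmu-v3 STE/CD entry writer.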
+ */ +#include +#include + +#include "arm-smmu-v3.h" + +struct arm_smmu_test_writer { + struct arm_smmu_entry_writer writer; + struct kunit *test; + const __le64 *init_entry; + const __le64 *target_entry; + __le64 *entry; + + bool invalid_entry_written; + unsigned int num_syncs; +}; + +#define NUM_ENTRY_QWORDS 8 +#define NUM_EXPECTED_SYNCS(x) x + +static struct arm_smmu_ste bypass_ste; +static struct arm_smmu_ste abort_ste; +static struct arm_smmu_device smmu = { + .features = ARM_SMMU_FEAT_STALLS | ARM_SMMU_FEAT_ATTR_TYPES_OVR +}; +static struct mm_struct sva_mm = { + .pgd = (void *)0xdaedbeefdeadbeefULL, +}; + +static bool arm_smmu_entry_differs_in_used_bits(const __le64 *entry, + const __le64 *used_bits, + const __le64 *target, + unsigned int length) +{ + bool differs = false; + unsigned int i; + + for (i = 0; i < length; i++) { + if ((entry[i] & used_bits[i]) != target[i]) + differs = true; + } + return differs; +} + +static void +arm_smmu_test_writer_record_syncs(struct arm_smmu_entry_writer *writer) +{ + struct arm_smmu_test_writer *test_writer = + container_of(writer, struct arm_smmu_test_writer, writer); + __le64 *entry_used_bits; + + entry_used_bits = kunit_kzalloc( + test_writer->test, sizeof(*entry_used_bits) * NUM_ENTRY_QWORDS, + GFP_KERNEL); + KUNIT_ASSERT_NOT_NULL(test_writer->test, entry_used_bits); + + pr_debug("STE value is now set to: "); + print_hex_dump_debug(" ", DUMP_PREFIX_NONE, 16, 8, + test_writer->entry, + NUM_ENTRY_QWORDS * sizeof(*test_writer->entry), + false); + + test_writer->num_syncs += 1; + if (!test_writer->entry[0]) { + test_writer->invalid_entry_written = true; + } else { + /* + * At any stage in a hitless transition, the entry must be + * equivalent to either the initial entry or the target entry + * when only considering the bits used by the current + * configuration. 
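+	 * The KUNIT_EXPECT_FALSE() below encodes that invariant: it fails if
+	 * the in-flight value differs, in its used bits, from both the initial
+	 * and the target entry.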
+ */ + writer->ops->get_used(test_writer->entry, entry_used_bits); + KUNIT_EXPECT_FALSE( + test_writer->test, + arm_smmu_entry_differs_in_used_bits( + test_writer->entry, entry_used_bits, + test_writer->init_entry, NUM_ENTRY_QWORDS) && + arm_smmu_entry_differs_in_used_bits( + test_writer->entry, entry_used_bits, + test_writer->target_entry, + NUM_ENTRY_QWORDS)); + } +} + +static void +arm_smmu_v3_test_debug_print_used_bits(struct arm_smmu_entry_writer *writer, + const __le64 *ste) +{ + __le64 used_bits[NUM_ENTRY_QWORDS] = {}; + + arm_smmu_get_ste_used(ste, used_bits); + pr_debug("STE used bits: "); + print_hex_dump_debug(" ", DUMP_PREFIX_NONE, 16, 8, used_bits, + sizeof(used_bits), false); +} + +static const struct arm_smmu_entry_writer_ops test_ste_ops = { + .sync = arm_smmu_test_writer_record_syncs, + .get_used = arm_smmu_get_ste_used, +}; + +static const struct arm_smmu_entry_writer_ops test_cd_ops = { + .sync = arm_smmu_test_writer_record_syncs, + .get_used = arm_smmu_get_cd_used, +}; + +static void arm_smmu_v3_test_ste_expect_transition( + struct kunit *test, const struct arm_smmu_ste *cur, + const struct arm_smmu_ste *target, unsigned int num_syncs_expected, + bool hitless) +{ + struct arm_smmu_ste cur_copy = *cur; + struct arm_smmu_test_writer test_writer = { + .writer = { + .ops = &test_ste_ops, + }, + .test = test, + .init_entry = cur->data, + .target_entry = target->data, + .entry = cur_copy.data, + .num_syncs = 0, + .invalid_entry_written = false, + + }; + + pr_debug("STE initial value: "); + print_hex_dump_debug(" ", DUMP_PREFIX_NONE, 16, 8, cur_copy.data, + sizeof(cur_copy), false); + arm_smmu_v3_test_debug_print_used_bits(&test_writer.writer, cur->data); + pr_debug("STE target value: "); + print_hex_dump_debug(" ", DUMP_PREFIX_NONE, 16, 8, target->data, + sizeof(cur_copy), false); + arm_smmu_v3_test_debug_print_used_bits(&test_writer.writer, + target->data); + + arm_smmu_write_entry(&test_writer.writer, cur_copy.data, target->data); + + KUNIT_EXPECT_EQ(test, test_writer.invalid_entry_written, !hitless); + KUNIT_EXPECT_EQ(test, test_writer.num_syncs, num_syncs_expected); + KUNIT_EXPECT_MEMEQ(test, target->data, cur_copy.data, sizeof(cur_copy)); +} + +static void arm_smmu_v3_test_ste_expect_non_hitless_transition( + struct kunit *test, const struct arm_smmu_ste *cur, + const struct arm_smmu_ste *target, unsigned int num_syncs_expected) +{ + arm_smmu_v3_test_ste_expect_transition(test, cur, target, + num_syncs_expected, false); +} + +static void arm_smmu_v3_test_ste_expect_hitless_transition( + struct kunit *test, const struct arm_smmu_ste *cur, + const struct arm_smmu_ste *target, unsigned int num_syncs_expected) +{ + arm_smmu_v3_test_ste_expect_transition(test, cur, target, + num_syncs_expected, true); +} + +static const dma_addr_t fake_cdtab_dma_addr = 0xF0F0F0F0F0F0; + +static void arm_smmu_test_make_cdtable_ste(struct arm_smmu_ste *ste, + unsigned int s1dss, + const dma_addr_t dma_addr) +{ + struct arm_smmu_master master = { + .cd_table.cdtab_dma = dma_addr, + .cd_table.s1cdmax = 0xFF, + .cd_table.s1fmt = STRTAB_STE_0_S1FMT_64K_L2, + .smmu = &smmu, + }; + + arm_smmu_make_cdtable_ste(ste, &master, true, s1dss); +} + +static void arm_smmu_v3_write_ste_test_bypass_to_abort(struct kunit *test) +{ + /* + * Bypass STEs has used bits in the first two Qwords, while abort STEs + * only have used bits in the first QWord. 
Transitioning from bypass to + * abort requires two syncs: the first to set the first qword and make + * the STE into an abort, the second to clean up the second qword. + */ + arm_smmu_v3_test_ste_expect_hitless_transition( + test, &bypass_ste, &abort_ste, NUM_EXPECTED_SYNCS(2)); +} + +static void arm_smmu_v3_write_ste_test_abort_to_bypass(struct kunit *test) +{ + /* + * Transitioning from abort to bypass also requires two syncs: the first + * to set the second qword data required by the bypass STE, and the + * second to set the first qword and switch to bypass. + */ + arm_smmu_v3_test_ste_expect_hitless_transition( + test, &abort_ste, &bypass_ste, NUM_EXPECTED_SYNCS(2)); +} + +static void arm_smmu_v3_write_ste_test_cdtable_to_abort(struct kunit *test) +{ + struct arm_smmu_ste ste; + + arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0, + fake_cdtab_dma_addr); + arm_smmu_v3_test_ste_expect_hitless_transition(test, &ste, &abort_ste, + NUM_EXPECTED_SYNCS(2)); +} + +static void arm_smmu_v3_write_ste_test_abort_to_cdtable(struct kunit *test) +{ + struct arm_smmu_ste ste; + + arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0, + fake_cdtab_dma_addr); + arm_smmu_v3_test_ste_expect_hitless_transition(test, &abort_ste, &ste, + NUM_EXPECTED_SYNCS(2)); +} + +static void arm_smmu_v3_write_ste_test_cdtable_to_bypass(struct kunit *test) +{ + struct arm_smmu_ste ste; + + arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0, + fake_cdtab_dma_addr); + arm_smmu_v3_test_ste_expect_hitless_transition(test, &ste, &bypass_ste, + NUM_EXPECTED_SYNCS(3)); +} + +static void arm_smmu_v3_write_ste_test_bypass_to_cdtable(struct kunit *test) +{ + struct arm_smmu_ste ste; + + arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0, + fake_cdtab_dma_addr); + arm_smmu_v3_test_ste_expect_hitless_transition(test, &bypass_ste, &ste, + NUM_EXPECTED_SYNCS(3)); +} + +static void arm_smmu_v3_write_ste_test_cdtable_s1dss_change(struct kunit *test) +{ + struct arm_smmu_ste ste; + struct arm_smmu_ste s1dss_bypass; + + arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0, + fake_cdtab_dma_addr); + arm_smmu_test_make_cdtable_ste(&s1dss_bypass, STRTAB_STE_1_S1DSS_BYPASS, + fake_cdtab_dma_addr); + + /* + * Flipping s1dss on a CD table STE only involves changes to the second + * qword of an STE and can be done in a single write. 
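+	 * Both directions of the transition are therefore expected to complete
+	 * in a single sync.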
+ */ + arm_smmu_v3_test_ste_expect_hitless_transition( + test, &ste, &s1dss_bypass, NUM_EXPECTED_SYNCS(1)); + arm_smmu_v3_test_ste_expect_hitless_transition( + test, &s1dss_bypass, &ste, NUM_EXPECTED_SYNCS(1)); +} + +static void +arm_smmu_v3_write_ste_test_s1dssbypass_to_stebypass(struct kunit *test) +{ + struct arm_smmu_ste s1dss_bypass; + + arm_smmu_test_make_cdtable_ste(&s1dss_bypass, STRTAB_STE_1_S1DSS_BYPASS, + fake_cdtab_dma_addr); + arm_smmu_v3_test_ste_expect_hitless_transition( + test, &s1dss_bypass, &bypass_ste, NUM_EXPECTED_SYNCS(2)); +} + +static void +arm_smmu_v3_write_ste_test_stebypass_to_s1dssbypass(struct kunit *test) +{ + struct arm_smmu_ste s1dss_bypass; + + arm_smmu_test_make_cdtable_ste(&s1dss_bypass, STRTAB_STE_1_S1DSS_BYPASS, + fake_cdtab_dma_addr); + arm_smmu_v3_test_ste_expect_hitless_transition( + test, &bypass_ste, &s1dss_bypass, NUM_EXPECTED_SYNCS(2)); +} + +static void arm_smmu_test_make_s2_ste(struct arm_smmu_ste *ste, + bool ats_enabled) +{ + struct arm_smmu_master master = { + .smmu = &smmu, + }; + struct io_pgtable io_pgtable = {}; + struct arm_smmu_domain smmu_domain = { + .pgtbl_ops = &io_pgtable.ops, + }; + + io_pgtable.cfg.arm_lpae_s2_cfg.vttbr = 0xdaedbeefdeadbeefULL; + io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.ps = 1; + io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.tg = 2; + io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.sh = 3; + io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.orgn = 1; + io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.irgn = 2; + io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.sl = 3; + io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.tsz = 4; + + arm_smmu_make_s2_domain_ste(ste, &master, &smmu_domain, ats_enabled); +} + +static void arm_smmu_v3_write_ste_test_s2_to_abort(struct kunit *test) +{ + struct arm_smmu_ste ste; + + arm_smmu_test_make_s2_ste(&ste, true); + arm_smmu_v3_test_ste_expect_hitless_transition(test, &ste, &abort_ste, + NUM_EXPECTED_SYNCS(2)); +} + +static void arm_smmu_v3_write_ste_test_abort_to_s2(struct kunit *test) +{ + struct arm_smmu_ste ste; + + arm_smmu_test_make_s2_ste(&ste, true); + arm_smmu_v3_test_ste_expect_hitless_transition(test, &abort_ste, &ste, + NUM_EXPECTED_SYNCS(2)); +} + +static void arm_smmu_v3_write_ste_test_s2_to_bypass(struct kunit *test) +{ + struct arm_smmu_ste ste; + + arm_smmu_test_make_s2_ste(&ste, true); + arm_smmu_v3_test_ste_expect_hitless_transition(test, &ste, &bypass_ste, + NUM_EXPECTED_SYNCS(2)); +} + +static void arm_smmu_v3_write_ste_test_bypass_to_s2(struct kunit *test) +{ + struct arm_smmu_ste ste; + + arm_smmu_test_make_s2_ste(&ste, true); + arm_smmu_v3_test_ste_expect_hitless_transition(test, &bypass_ste, &ste, + NUM_EXPECTED_SYNCS(2)); +} + +static void arm_smmu_v3_write_ste_test_s1_to_s2(struct kunit *test) +{ + struct arm_smmu_ste s1_ste; + struct arm_smmu_ste s2_ste; + + arm_smmu_test_make_cdtable_ste(&s1_ste, STRTAB_STE_1_S1DSS_SSID0, + fake_cdtab_dma_addr); + arm_smmu_test_make_s2_ste(&s2_ste, true); + arm_smmu_v3_test_ste_expect_hitless_transition(test, &s1_ste, &s2_ste, + NUM_EXPECTED_SYNCS(3)); +} + +static void arm_smmu_v3_write_ste_test_s2_to_s1(struct kunit *test) +{ + struct arm_smmu_ste s1_ste; + struct arm_smmu_ste s2_ste; + + arm_smmu_test_make_cdtable_ste(&s1_ste, STRTAB_STE_1_S1DSS_SSID0, + fake_cdtab_dma_addr); + arm_smmu_test_make_s2_ste(&s2_ste, true); + arm_smmu_v3_test_ste_expect_hitless_transition(test, &s2_ste, &s1_ste, + NUM_EXPECTED_SYNCS(3)); +} + +static void arm_smmu_v3_write_ste_test_non_hitless(struct kunit *test) +{ + struct arm_smmu_ste ste; + struct arm_smmu_ste ste_2; + + /* + * Although no flow resembles this 
in practice, one way to force an STE + * update to be non-hitless is to change its CD table pointer as well as + * s1 dss field in the same update. + */ + arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0, + fake_cdtab_dma_addr); + arm_smmu_test_make_cdtable_ste(&ste_2, STRTAB_STE_1_S1DSS_BYPASS, + 0x4B4B4b4B4B); + arm_smmu_v3_test_ste_expect_non_hitless_transition( + test, &ste, &ste_2, NUM_EXPECTED_SYNCS(3)); +} + +static void arm_smmu_v3_test_cd_expect_transition( + struct kunit *test, const struct arm_smmu_cd *cur, + const struct arm_smmu_cd *target, unsigned int num_syncs_expected, + bool hitless) +{ + struct arm_smmu_cd cur_copy = *cur; + struct arm_smmu_test_writer test_writer = { + .writer = { + .ops = &test_cd_ops, + }, + .test = test, + .init_entry = cur->data, + .target_entry = target->data, + .entry = cur_copy.data, + .num_syncs = 0, + .invalid_entry_written = false, + + }; + + pr_debug("CD initial value: "); + print_hex_dump_debug(" ", DUMP_PREFIX_NONE, 16, 8, cur_copy.data, + sizeof(cur_copy), false); + arm_smmu_v3_test_debug_print_used_bits(&test_writer.writer, cur->data); + pr_debug("CD target value: "); + print_hex_dump_debug(" ", DUMP_PREFIX_NONE, 16, 8, target->data, + sizeof(cur_copy), false); + arm_smmu_v3_test_debug_print_used_bits(&test_writer.writer, + target->data); + + arm_smmu_write_entry(&test_writer.writer, cur_copy.data, target->data); + + KUNIT_EXPECT_EQ(test, test_writer.invalid_entry_written, !hitless); + KUNIT_EXPECT_EQ(test, test_writer.num_syncs, num_syncs_expected); + KUNIT_EXPECT_MEMEQ(test, target->data, cur_copy.data, sizeof(cur_copy)); +} + +static void arm_smmu_v3_test_cd_expect_non_hitless_transition( + struct kunit *test, const struct arm_smmu_cd *cur, + const struct arm_smmu_cd *target, unsigned int num_syncs_expected) +{ + arm_smmu_v3_test_cd_expect_transition(test, cur, target, + num_syncs_expected, false); +} + +static void arm_smmu_v3_test_cd_expect_hitless_transition( + struct kunit *test, const struct arm_smmu_cd *cur, + const struct arm_smmu_cd *target, unsigned int num_syncs_expected) +{ + arm_smmu_v3_test_cd_expect_transition(test, cur, target, + num_syncs_expected, true); +} + +static void arm_smmu_test_make_s1_cd(struct arm_smmu_cd *cd, unsigned int asid) +{ + struct arm_smmu_master master = { + .smmu = &smmu, + }; + struct io_pgtable io_pgtable = {}; + struct arm_smmu_domain smmu_domain = { + .pgtbl_ops = &io_pgtable.ops, + .cd = { + .asid = asid, + }, + }; + + io_pgtable.cfg.arm_lpae_s1_cfg.ttbr = 0xdaedbeefdeadbeefULL; + io_pgtable.cfg.arm_lpae_s1_cfg.tcr.ips = 1; + io_pgtable.cfg.arm_lpae_s1_cfg.tcr.tg = 2; + io_pgtable.cfg.arm_lpae_s1_cfg.tcr.sh = 3; + io_pgtable.cfg.arm_lpae_s1_cfg.tcr.orgn = 1; + io_pgtable.cfg.arm_lpae_s1_cfg.tcr.irgn = 2; + io_pgtable.cfg.arm_lpae_s1_cfg.tcr.tsz = 4; + io_pgtable.cfg.arm_lpae_s1_cfg.mair = 0xabcdef012345678ULL; + + arm_smmu_make_s1_cd(cd, &master, &smmu_domain); +} + +static void arm_smmu_v3_write_cd_test_s1_clear(struct kunit *test) +{ + struct arm_smmu_cd cd = {}; + struct arm_smmu_cd cd_2; + + arm_smmu_test_make_s1_cd(&cd_2, 1997); + arm_smmu_v3_test_cd_expect_non_hitless_transition( + test, &cd, &cd_2, NUM_EXPECTED_SYNCS(2)); + arm_smmu_v3_test_cd_expect_non_hitless_transition( + test, &cd_2, &cd, NUM_EXPECTED_SYNCS(2)); +} + +static void arm_smmu_v3_write_cd_test_s1_change_asid(struct kunit *test) +{ + struct arm_smmu_cd cd = {}; + struct arm_smmu_cd cd_2; + + arm_smmu_test_make_s1_cd(&cd, 778); + arm_smmu_test_make_s1_cd(&cd_2, 1997); + 
arm_smmu_v3_test_cd_expect_hitless_transition(test, &cd, &cd_2, + NUM_EXPECTED_SYNCS(1)); + arm_smmu_v3_test_cd_expect_hitless_transition(test, &cd_2, &cd, + NUM_EXPECTED_SYNCS(1)); +} + +static void arm_smmu_test_make_sva_cd(struct arm_smmu_cd *cd, unsigned int asid) +{ + struct arm_smmu_master master = { + .smmu = &smmu, + }; + + arm_smmu_make_sva_cd(cd, &master, &sva_mm, asid); +} + +static void arm_smmu_test_make_sva_release_cd(struct arm_smmu_cd *cd, + unsigned int asid) +{ + struct arm_smmu_master master = { + .smmu = &smmu, + }; + + arm_smmu_make_sva_cd(cd, &master, NULL, asid); +} + +static void arm_smmu_v3_write_cd_test_sva_clear(struct kunit *test) +{ + struct arm_smmu_cd cd = {}; + struct arm_smmu_cd cd_2; + + arm_smmu_test_make_sva_cd(&cd_2, 1997); + arm_smmu_v3_test_cd_expect_non_hitless_transition( + test, &cd, &cd_2, NUM_EXPECTED_SYNCS(2)); + arm_smmu_v3_test_cd_expect_non_hitless_transition( + test, &cd_2, &cd, NUM_EXPECTED_SYNCS(2)); +} + +static void arm_smmu_v3_write_cd_test_sva_release(struct kunit *test) +{ + struct arm_smmu_cd cd; + struct arm_smmu_cd cd_2; + + arm_smmu_test_make_sva_cd(&cd, 1997); + arm_smmu_test_make_sva_release_cd(&cd_2, 1997); + arm_smmu_v3_test_cd_expect_hitless_transition(test, &cd, &cd_2, + NUM_EXPECTED_SYNCS(2)); + arm_smmu_v3_test_cd_expect_hitless_transition(test, &cd_2, &cd, + NUM_EXPECTED_SYNCS(2)); +} + +static struct kunit_case arm_smmu_v3_test_cases[] = { + KUNIT_CASE(arm_smmu_v3_write_ste_test_bypass_to_abort), + KUNIT_CASE(arm_smmu_v3_write_ste_test_abort_to_bypass), + KUNIT_CASE(arm_smmu_v3_write_ste_test_cdtable_to_abort), + KUNIT_CASE(arm_smmu_v3_write_ste_test_abort_to_cdtable), + KUNIT_CASE(arm_smmu_v3_write_ste_test_cdtable_to_bypass), + KUNIT_CASE(arm_smmu_v3_write_ste_test_bypass_to_cdtable), + KUNIT_CASE(arm_smmu_v3_write_ste_test_cdtable_s1dss_change), + KUNIT_CASE(arm_smmu_v3_write_ste_test_s1dssbypass_to_stebypass), + KUNIT_CASE(arm_smmu_v3_write_ste_test_stebypass_to_s1dssbypass), + KUNIT_CASE(arm_smmu_v3_write_ste_test_s2_to_abort), + KUNIT_CASE(arm_smmu_v3_write_ste_test_abort_to_s2), + KUNIT_CASE(arm_smmu_v3_write_ste_test_s2_to_bypass), + KUNIT_CASE(arm_smmu_v3_write_ste_test_bypass_to_s2), + KUNIT_CASE(arm_smmu_v3_write_ste_test_s1_to_s2), + KUNIT_CASE(arm_smmu_v3_write_ste_test_s2_to_s1), + KUNIT_CASE(arm_smmu_v3_write_ste_test_non_hitless), + KUNIT_CASE(arm_smmu_v3_write_cd_test_s1_clear), + KUNIT_CASE(arm_smmu_v3_write_cd_test_s1_change_asid), + KUNIT_CASE(arm_smmu_v3_write_cd_test_sva_clear), + KUNIT_CASE(arm_smmu_v3_write_cd_test_sva_release), + {}, +}; + +static int arm_smmu_v3_test_suite_init(struct kunit_suite *test) +{ + arm_smmu_make_bypass_ste(&smmu, &bypass_ste); + arm_smmu_make_abort_ste(&abort_ste); + return 0; +} + +static struct kunit_suite arm_smmu_v3_test_module = { + .name = "arm-smmu-v3-kunit-test", + .suite_init = arm_smmu_v3_test_suite_init, + .test_cases = arm_smmu_v3_test_cases, +}; +kunit_test_suites(&arm_smmu_v3_test_module); + +MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index f2260f45728e..88b9fa9542a4 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -26,10 +26,10 @@ #include #include #include +#include #include "arm-smmu-v3.h" #include "../../dma-iommu.h" -#include "../../iommu-sva.h" static bool disable_bypass = true; module_param(disable_bypass, bool, 0444); @@ -48,6 +48,10 @@ enum 
arm_smmu_msi_index { ARM_SMMU_MAX_MSIS, }; +#define NUM_ENTRY_QWORDS 8 +static_assert(sizeof(struct arm_smmu_ste) == NUM_ENTRY_QWORDS * sizeof(u64)); +static_assert(sizeof(struct arm_smmu_cd) == NUM_ENTRY_QWORDS * sizeof(u64)); + static phys_addr_t arm_smmu_msi_cfg[ARM_SMMU_MAX_MSIS][3] = { [EVTQ_MSI_INDEX] = { ARM_SMMU_EVTQ_IRQ_CFG0, @@ -74,18 +78,16 @@ struct arm_smmu_option_prop { DEFINE_XARRAY_ALLOC1(arm_smmu_asid_xa); DEFINE_MUTEX(arm_smmu_asid_lock); -/* - * Special value used by SVA when a process dies, to quiesce a CD without - * disabling it. - */ -struct arm_smmu_ctx_desc quiet_cd = { 0 }; - static struct arm_smmu_option_prop arm_smmu_options[] = { { ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" }, { ARM_SMMU_OPT_PAGE0_REGS_ONLY, "cavium,cn9900-broken-page1-regspace"}, { 0, NULL}, }; +static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain, + struct arm_smmu_device *smmu); +static int arm_smmu_alloc_cd_tables(struct arm_smmu_master *master); + static void parse_driver_options(struct arm_smmu_device *smmu) { int i = 0; @@ -921,31 +923,29 @@ static int arm_smmu_cmdq_batch_submit(struct arm_smmu_device *smmu, return arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, true); } -static int arm_smmu_page_response(struct device *dev, - struct iommu_fault_event *unused, - struct iommu_page_response *resp) +static void arm_smmu_page_response(struct device *dev, struct iopf_fault *unused, + struct iommu_page_response *resp) { struct arm_smmu_cmdq_ent cmd = {0}; struct arm_smmu_master *master = dev_iommu_priv_get(dev); int sid = master->streams[0].id; - if (master->stall_enabled) { - cmd.opcode = CMDQ_OP_RESUME; - cmd.resume.sid = sid; - cmd.resume.stag = resp->grpid; - switch (resp->code) { - case IOMMU_PAGE_RESP_INVALID: - case IOMMU_PAGE_RESP_FAILURE: - cmd.resume.resp = CMDQ_RESUME_0_RESP_ABORT; - break; - case IOMMU_PAGE_RESP_SUCCESS: - cmd.resume.resp = CMDQ_RESUME_0_RESP_RETRY; - break; - default: - return -EINVAL; - } - } else { - return -ENODEV; + if (WARN_ON(!master->stall_enabled)) + return; + + cmd.opcode = CMDQ_OP_RESUME; + cmd.resume.sid = sid; + cmd.resume.stag = resp->grpid; + switch (resp->code) { + case IOMMU_PAGE_RESP_INVALID: + case IOMMU_PAGE_RESP_FAILURE: + cmd.resume.resp = CMDQ_RESUME_0_RESP_ABORT; + break; + case IOMMU_PAGE_RESP_SUCCESS: + cmd.resume.resp = CMDQ_RESUME_0_RESP_RETRY; + break; + default: + break; } arm_smmu_cmdq_issue_cmd(master->smmu, &cmd); @@ -955,8 +955,6 @@ static int arm_smmu_page_response(struct device *dev, * terminated... at some point in the future. PRI_RESP is fire and * forget. */ - - return 0; } /* Context descriptor manipulation functions */ @@ -971,14 +969,200 @@ void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid) arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd); } -static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain, +/* + * Based on the value of ent report which bits of the STE the HW will access. It + * would be nice if this was complete according to the spec, but minimally it + * has to capture the bits this driver uses. 
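+ *
+ * The used_bits mask built here is consumed by arm_smmu_entry_qword_diff() to
+ * work out which qwords of an update change bits the HW is currently
+ * honouring, and hence whether the update can be applied hitlessly.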
+ */ +VISIBLE_IF_KUNIT +void arm_smmu_get_ste_used(const __le64 *ent, __le64 *used_bits) +{ + unsigned int cfg = FIELD_GET(STRTAB_STE_0_CFG, le64_to_cpu(ent[0])); + + used_bits[0] = cpu_to_le64(STRTAB_STE_0_V); + if (!(ent[0] & cpu_to_le64(STRTAB_STE_0_V))) + return; + + used_bits[0] |= cpu_to_le64(STRTAB_STE_0_CFG); + + /* S1 translates */ + if (cfg & BIT(0)) { + used_bits[0] |= cpu_to_le64(STRTAB_STE_0_S1FMT | + STRTAB_STE_0_S1CTXPTR_MASK | + STRTAB_STE_0_S1CDMAX); + used_bits[1] |= + cpu_to_le64(STRTAB_STE_1_S1DSS | STRTAB_STE_1_S1CIR | + STRTAB_STE_1_S1COR | STRTAB_STE_1_S1CSH | + STRTAB_STE_1_S1STALLD | STRTAB_STE_1_STRW | + STRTAB_STE_1_EATS); + used_bits[2] |= cpu_to_le64(STRTAB_STE_2_S2VMID); + + /* + * See 13.5 Summary of attribute/permission configuration fields + * for the SHCFG behavior. + */ + if (FIELD_GET(STRTAB_STE_1_S1DSS, le64_to_cpu(ent[1])) == + STRTAB_STE_1_S1DSS_BYPASS) + used_bits[1] |= cpu_to_le64(STRTAB_STE_1_SHCFG); + } + + /* S2 translates */ + if (cfg & BIT(1)) { + used_bits[1] |= + cpu_to_le64(STRTAB_STE_1_EATS | STRTAB_STE_1_SHCFG); + used_bits[2] |= + cpu_to_le64(STRTAB_STE_2_S2VMID | STRTAB_STE_2_VTCR | + STRTAB_STE_2_S2AA64 | STRTAB_STE_2_S2ENDI | + STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2R); + used_bits[3] |= cpu_to_le64(STRTAB_STE_3_S2TTB_MASK); + } + + if (cfg == STRTAB_STE_0_CFG_BYPASS) + used_bits[1] |= cpu_to_le64(STRTAB_STE_1_SHCFG); +} +EXPORT_SYMBOL_IF_KUNIT(arm_smmu_get_ste_used); + +/* + * Figure out if we can do a hitless update of entry to become target. Returns a + * bit mask where 1 indicates that qword needs to be set disruptively. + * unused_update is an intermediate value of entry that has unused bits set to + * their new values. + */ +static u8 arm_smmu_entry_qword_diff(struct arm_smmu_entry_writer *writer, + const __le64 *entry, const __le64 *target, + __le64 *unused_update) +{ + __le64 target_used[NUM_ENTRY_QWORDS] = {}; + __le64 cur_used[NUM_ENTRY_QWORDS] = {}; + u8 used_qword_diff = 0; + unsigned int i; + + writer->ops->get_used(entry, cur_used); + writer->ops->get_used(target, target_used); + + for (i = 0; i != NUM_ENTRY_QWORDS; i++) { + /* + * Check that masks are up to date, the make functions are not + * allowed to set a bit to 1 if the used function doesn't say it + * is used. + */ + WARN_ON_ONCE(target[i] & ~target_used[i]); + + /* Bits can change because they are not currently being used */ + unused_update[i] = (entry[i] & cur_used[i]) | + (target[i] & ~cur_used[i]); + /* + * Each bit indicates that a used bit in a qword needs to be + * changed after unused_update is applied. + */ + if ((unused_update[i] & target_used[i]) != target[i]) + used_qword_diff |= 1 << i; + } + return used_qword_diff; +} + +static bool entry_set(struct arm_smmu_entry_writer *writer, __le64 *entry, + const __le64 *target, unsigned int start, + unsigned int len) +{ + bool changed = false; + unsigned int i; + + for (i = start; len != 0; len--, i++) { + if (entry[i] != target[i]) { + WRITE_ONCE(entry[i], target[i]); + changed = true; + } + } + + if (changed) + writer->ops->sync(writer); + return changed; +} + +/* + * Update the STE/CD to the target configuration. The transition from the + * current entry to the target entry takes place over multiple steps that + * attempts to make the transition hitless if possible. This function takes care + * not to create a situation where the HW can perceive a corrupted entry. HW is + * only required to have a 64 bit atomicity with stores from the CPU, while + * entries are many 64 bit values big. 
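+ * Both the STE and the CD are 8 qwords (64 bytes), so a whole entry can never
+ * be updated with a single atomic store.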
+ *
+ * The difference between the current value and the target value is analyzed to
+ * determine which of three updates are required - disruptive, hitless or no
+ * change.
+ *
+ * In the most general disruptive case we can make any update in three steps:
+ *  - Disrupting the entry (V=0)
+ *  - Fill now unused qwords, except qword 0 which contains V
+ *  - Make qword 0 have the final value and valid (V=1) with a single 64
+ *    bit store
+ *
+ * However this disrupts the HW while it is happening. There are several
+ * interesting cases where a STE/CD can be updated without disturbing the HW
+ * because only a small number of bits are changing (S1DSS, CONFIG, etc) or
+ * because the used bits don't intersect. We can detect this by calculating how
+ * many 64 bit values need update after adjusting the unused bits and skip the
+ * V=0 process. This relies on the IGNORED behavior described in the
+ * specification.
+ */
+VISIBLE_IF_KUNIT
+void arm_smmu_write_entry(struct arm_smmu_entry_writer *writer, __le64 *entry,
+			  const __le64 *target)
+{
+	__le64 unused_update[NUM_ENTRY_QWORDS];
+	u8 used_qword_diff;
+
+	used_qword_diff =
+		arm_smmu_entry_qword_diff(writer, entry, target, unused_update);
+	if (hweight8(used_qword_diff) == 1) {
+		/*
+		 * Only one qword needs its used bits to be changed. This is a
+		 * hitless update, update all bits the current STE/CD is
+		 * ignoring to their new values, then update a single "critical
+		 * qword" to change the STE/CD and finally 0 out any bits that
+		 * are now unused in the target configuration.
+		 */
+		unsigned int critical_qword_index = ffs(used_qword_diff) - 1;
+
+		/*
+		 * Skip writing unused bits in the critical qword since we'll be
+		 * writing it in the next step anyway. This can save a sync
+		 * when the only change is in that qword.
+		 */
+		unused_update[critical_qword_index] =
+			entry[critical_qword_index];
+		entry_set(writer, entry, unused_update, 0, NUM_ENTRY_QWORDS);
+		entry_set(writer, entry, target, critical_qword_index, 1);
+		entry_set(writer, entry, target, 0, NUM_ENTRY_QWORDS);
+	} else if (used_qword_diff) {
+		/*
+		 * At least two qwords need their in-use bits to be changed. This
+		 * requires a breaking update: zero the V bit, write all qwords
+		 * but 0, then set qword 0.
+		 */
+		unused_update[0] = 0;
+		entry_set(writer, entry, unused_update, 0, 1);
+		entry_set(writer, entry, target, 1, NUM_ENTRY_QWORDS - 1);
+		entry_set(writer, entry, target, 0, 1);
+	} else {
+		/*
+		 * No in-use bit changed. Sanity check that all unused bits are 0
+		 * in the entry. The target was already sanity checked by
+		 * arm_smmu_entry_qword_diff().
+ */ + WARN_ON_ONCE( + entry_set(writer, entry, target, 0, NUM_ENTRY_QWORDS)); + } +} +EXPORT_SYMBOL_IF_KUNIT(arm_smmu_write_entry); + +static void arm_smmu_sync_cd(struct arm_smmu_master *master, int ssid, bool leaf) { size_t i; - unsigned long flags; - struct arm_smmu_master *master; struct arm_smmu_cmdq_batch cmds; - struct arm_smmu_device *smmu = smmu_domain->smmu; + struct arm_smmu_device *smmu = master->smmu; struct arm_smmu_cmdq_ent cmd = { .opcode = CMDQ_OP_CFGI_CD, .cfgi = { @@ -988,15 +1172,10 @@ static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain, }; cmds.num = 0; - - spin_lock_irqsave(&smmu_domain->devices_lock, flags); - list_for_each_entry(master, &smmu_domain->devices, domain_head) { - for (i = 0; i < master->num_streams; i++) { - cmd.cfgi.sid = master->streams[i].id; - arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd); - } + for (i = 0; i < master->num_streams; i++) { + cmd.cfgi.sid = master->streams[i].id; + arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd); } - spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); arm_smmu_cmdq_batch_submit(smmu, &cmds); } @@ -1022,150 +1201,211 @@ static void arm_smmu_write_cd_l1_desc(__le64 *dst, u64 val = (l1_desc->l2ptr_dma & CTXDESC_L1_DESC_L2PTR_MASK) | CTXDESC_L1_DESC_V; - /* See comment in arm_smmu_write_ctx_desc() */ + /* The HW has 64 bit atomicity with stores to the L2 CD table */ WRITE_ONCE(*dst, cpu_to_le64(val)); } -static __le64 *arm_smmu_get_cd_ptr(struct arm_smmu_domain *smmu_domain, - u32 ssid) +struct arm_smmu_cd *arm_smmu_get_cd_ptr(struct arm_smmu_master *master, + u32 ssid) { - __le64 *l1ptr; - unsigned int idx; struct arm_smmu_l1_ctx_desc *l1_desc; - struct arm_smmu_device *smmu = smmu_domain->smmu; - struct arm_smmu_ctx_desc_cfg *cdcfg = &smmu_domain->s1_cfg.cdcfg; + struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table; - if (smmu_domain->s1_cfg.s1fmt == STRTAB_STE_0_S1FMT_LINEAR) - return cdcfg->cdtab + ssid * CTXDESC_CD_DWORDS; + if (!cd_table->cdtab) + return NULL; - idx = ssid >> CTXDESC_SPLIT; - l1_desc = &cdcfg->l1_desc[idx]; - if (!l1_desc->l2ptr) { - if (arm_smmu_alloc_cd_leaf_table(smmu, l1_desc)) + if (cd_table->s1fmt == STRTAB_STE_0_S1FMT_LINEAR) + return (struct arm_smmu_cd *)(cd_table->cdtab + + ssid * CTXDESC_CD_DWORDS); + + l1_desc = &cd_table->l1_desc[ssid / CTXDESC_L2_ENTRIES]; + if (!l1_desc->l2ptr) + return NULL; + return &l1_desc->l2ptr[ssid % CTXDESC_L2_ENTRIES]; +} + +static struct arm_smmu_cd *arm_smmu_alloc_cd_ptr(struct arm_smmu_master *master, + u32 ssid) +{ + struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table; + struct arm_smmu_device *smmu = master->smmu; + + might_sleep(); + iommu_group_mutex_assert(master->dev); + + if (!cd_table->cdtab) { + if (arm_smmu_alloc_cd_tables(master)) return NULL; + } + + if (cd_table->s1fmt == STRTAB_STE_0_S1FMT_64K_L2) { + unsigned int idx = ssid / CTXDESC_L2_ENTRIES; + struct arm_smmu_l1_ctx_desc *l1_desc; + + l1_desc = &cd_table->l1_desc[idx]; + if (!l1_desc->l2ptr) { + __le64 *l1ptr; + + if (arm_smmu_alloc_cd_leaf_table(smmu, l1_desc)) + return NULL; - l1ptr = cdcfg->cdtab + idx * CTXDESC_L1_DESC_DWORDS; - arm_smmu_write_cd_l1_desc(l1ptr, l1_desc); - /* An invalid L1CD can be cached */ - arm_smmu_sync_cd(smmu_domain, ssid, false); + l1ptr = cd_table->cdtab + idx * CTXDESC_L1_DESC_DWORDS; + arm_smmu_write_cd_l1_desc(l1ptr, l1_desc); + /* An invalid L1CD can be cached */ + arm_smmu_sync_cd(master, ssid, false); + } } - idx = ssid & (CTXDESC_L2_ENTRIES - 1); - return l1_desc->l2ptr + idx * CTXDESC_CD_DWORDS; + return 
arm_smmu_get_cd_ptr(master, ssid); } -int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid, - struct arm_smmu_ctx_desc *cd) +struct arm_smmu_cd_writer { + struct arm_smmu_entry_writer writer; + unsigned int ssid; +}; + +VISIBLE_IF_KUNIT +void arm_smmu_get_cd_used(const __le64 *ent, __le64 *used_bits) { + used_bits[0] = cpu_to_le64(CTXDESC_CD_0_V); + if (!(ent[0] & cpu_to_le64(CTXDESC_CD_0_V))) + return; + memset(used_bits, 0xFF, sizeof(struct arm_smmu_cd)); + /* - * This function handles the following cases: - * - * (1) Install primary CD, for normal DMA traffic (SSID = IOMMU_NO_PASID = 0). - * (2) Install a secondary CD, for SID+SSID traffic. - * (3) Update ASID of a CD. Atomically write the first 64 bits of the - * CD, then invalidate the old entry and mappings. - * (4) Quiesce the context without clearing the valid bit. Disable - * translation, and ignore any translation fault. - * (5) Remove a secondary CD. + * If EPD0 is set by the make function it means + * T0SZ/TG0/IR0/OR0/SH0/TTB0 are IGNORED */ - u64 val; - bool cd_live; - __le64 *cdptr; + if (ent[0] & cpu_to_le64(CTXDESC_CD_0_TCR_EPD0)) { + used_bits[0] &= ~cpu_to_le64( + CTXDESC_CD_0_TCR_T0SZ | CTXDESC_CD_0_TCR_TG0 | + CTXDESC_CD_0_TCR_IRGN0 | CTXDESC_CD_0_TCR_ORGN0 | + CTXDESC_CD_0_TCR_SH0); + used_bits[1] &= ~cpu_to_le64(CTXDESC_CD_1_TTB0_MASK); + } +} +EXPORT_SYMBOL_IF_KUNIT(arm_smmu_get_cd_used); - if (WARN_ON(ssid >= (1 << smmu_domain->s1_cfg.s1cdmax))) - return -E2BIG; +static void arm_smmu_cd_writer_sync_entry(struct arm_smmu_entry_writer *writer) +{ + struct arm_smmu_cd_writer *cd_writer = + container_of(writer, struct arm_smmu_cd_writer, writer); - cdptr = arm_smmu_get_cd_ptr(smmu_domain, ssid); - if (!cdptr) - return -ENOMEM; + arm_smmu_sync_cd(writer->master, cd_writer->ssid, true); +} - val = le64_to_cpu(cdptr[0]); - cd_live = !!(val & CTXDESC_CD_0_V); +static const struct arm_smmu_entry_writer_ops arm_smmu_cd_writer_ops = { + .sync = arm_smmu_cd_writer_sync_entry, + .get_used = arm_smmu_get_cd_used, +}; - if (!cd) { /* (5) */ - val = 0; - } else if (cd == &quiet_cd) { /* (4) */ - val |= CTXDESC_CD_0_TCR_EPD0; - } else if (cd_live) { /* (3) */ - val &= ~CTXDESC_CD_0_ASID; - val |= FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid); - /* - * Until CD+TLB invalidation, both ASIDs may be used for tagging - * this substream's traffic - */ - } else { /* (1) and (2) */ - cdptr[1] = cpu_to_le64(cd->ttbr & CTXDESC_CD_1_TTB0_MASK); - cdptr[2] = 0; - cdptr[3] = cpu_to_le64(cd->mair); +void arm_smmu_write_cd_entry(struct arm_smmu_master *master, int ssid, + struct arm_smmu_cd *cdptr, + const struct arm_smmu_cd *target) +{ + bool target_valid = target->data[0] & cpu_to_le64(CTXDESC_CD_0_V); + bool cur_valid = cdptr->data[0] & cpu_to_le64(CTXDESC_CD_0_V); + struct arm_smmu_cd_writer cd_writer = { + .writer = { + .ops = &arm_smmu_cd_writer_ops, + .master = master, + }, + .ssid = ssid, + }; - /* - * STE is live, and the SMMU might read dwords of this CD in any - * order. Ensure that it observes valid values before reading - * V=1. - */ - arm_smmu_sync_cd(smmu_domain, ssid, true); + if (ssid != IOMMU_NO_PASID && cur_valid != target_valid) { + if (cur_valid) + master->cd_table.used_ssids--; + else + master->cd_table.used_ssids++; + } - val = cd->tcr | -#ifdef __BIG_ENDIAN - CTXDESC_CD_0_ENDI | -#endif - CTXDESC_CD_0_R | CTXDESC_CD_0_A | - (cd->mm ? 
0 : CTXDESC_CD_0_ASET) | - CTXDESC_CD_0_AA64 | - FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid) | - CTXDESC_CD_0_V; + arm_smmu_write_entry(&cd_writer.writer, cdptr->data, target->data); +} - if (smmu_domain->stall_enabled) - val |= CTXDESC_CD_0_S; - } +void arm_smmu_make_s1_cd(struct arm_smmu_cd *target, + struct arm_smmu_master *master, + struct arm_smmu_domain *smmu_domain) +{ + struct arm_smmu_ctx_desc *cd = &smmu_domain->cd; + const struct io_pgtable_cfg *pgtbl_cfg = + &io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops)->cfg; + typeof(&pgtbl_cfg->arm_lpae_s1_cfg.tcr) tcr = + &pgtbl_cfg->arm_lpae_s1_cfg.tcr; - /* - * The SMMU accesses 64-bit values atomically. See IHI0070Ca 3.21.3 - * "Configuration structures and configuration invalidation completion" - * - * The size of single-copy atomic reads made by the SMMU is - * IMPLEMENTATION DEFINED but must be at least 64 bits. Any single - * field within an aligned 64-bit span of a structure can be altered - * without first making the structure invalid. - */ - WRITE_ONCE(cdptr[0], cpu_to_le64(val)); - arm_smmu_sync_cd(smmu_domain, ssid, true); - return 0; + memset(target, 0, sizeof(*target)); + + target->data[0] = cpu_to_le64( + FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, tcr->tsz) | + FIELD_PREP(CTXDESC_CD_0_TCR_TG0, tcr->tg) | + FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, tcr->irgn) | + FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, tcr->orgn) | + FIELD_PREP(CTXDESC_CD_0_TCR_SH0, tcr->sh) | +#ifdef __BIG_ENDIAN + CTXDESC_CD_0_ENDI | +#endif + CTXDESC_CD_0_TCR_EPD1 | + CTXDESC_CD_0_V | + FIELD_PREP(CTXDESC_CD_0_TCR_IPS, tcr->ips) | + CTXDESC_CD_0_AA64 | + (master->stall_enabled ? CTXDESC_CD_0_S : 0) | + CTXDESC_CD_0_R | + CTXDESC_CD_0_A | + CTXDESC_CD_0_ASET | + FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid) + ); + target->data[1] = cpu_to_le64(pgtbl_cfg->arm_lpae_s1_cfg.ttbr & + CTXDESC_CD_1_TTB0_MASK); + target->data[3] = cpu_to_le64(pgtbl_cfg->arm_lpae_s1_cfg.mair); +} +EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_s1_cd); + +void arm_smmu_clear_cd(struct arm_smmu_master *master, ioasid_t ssid) +{ + struct arm_smmu_cd target = {}; + struct arm_smmu_cd *cdptr; + + if (!master->cd_table.cdtab) + return; + cdptr = arm_smmu_get_cd_ptr(master, ssid); + if (WARN_ON(!cdptr)) + return; + arm_smmu_write_cd_entry(master, ssid, cdptr, &target); } -static int arm_smmu_alloc_cd_tables(struct arm_smmu_domain *smmu_domain) +static int arm_smmu_alloc_cd_tables(struct arm_smmu_master *master) { int ret; size_t l1size; size_t max_contexts; - struct arm_smmu_device *smmu = smmu_domain->smmu; - struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg; - struct arm_smmu_ctx_desc_cfg *cdcfg = &cfg->cdcfg; + struct arm_smmu_device *smmu = master->smmu; + struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table; - max_contexts = 1 << cfg->s1cdmax; + cd_table->s1cdmax = master->ssid_bits; + max_contexts = 1 << cd_table->s1cdmax; if (!(smmu->features & ARM_SMMU_FEAT_2_LVL_CDTAB) || max_contexts <= CTXDESC_L2_ENTRIES) { - cfg->s1fmt = STRTAB_STE_0_S1FMT_LINEAR; - cdcfg->num_l1_ents = max_contexts; + cd_table->s1fmt = STRTAB_STE_0_S1FMT_LINEAR; + cd_table->num_l1_ents = max_contexts; l1size = max_contexts * (CTXDESC_CD_DWORDS << 3); } else { - cfg->s1fmt = STRTAB_STE_0_S1FMT_64K_L2; - cdcfg->num_l1_ents = DIV_ROUND_UP(max_contexts, + cd_table->s1fmt = STRTAB_STE_0_S1FMT_64K_L2; + cd_table->num_l1_ents = DIV_ROUND_UP(max_contexts, CTXDESC_L2_ENTRIES); - cdcfg->l1_desc = devm_kcalloc(smmu->dev, cdcfg->num_l1_ents, - sizeof(*cdcfg->l1_desc), + cd_table->l1_desc = devm_kcalloc(smmu->dev, cd_table->num_l1_ents, + 
sizeof(*cd_table->l1_desc), GFP_KERNEL); - if (!cdcfg->l1_desc) + if (!cd_table->l1_desc) return -ENOMEM; - l1size = cdcfg->num_l1_ents * (CTXDESC_L1_DESC_DWORDS << 3); + l1size = cd_table->num_l1_ents * (CTXDESC_L1_DESC_DWORDS << 3); } - cdcfg->cdtab = dmam_alloc_coherent(smmu->dev, l1size, &cdcfg->cdtab_dma, + cd_table->cdtab = dmam_alloc_coherent(smmu->dev, l1size, &cd_table->cdtab_dma, GFP_KERNEL); - if (!cdcfg->cdtab) { + if (!cd_table->cdtab) { dev_warn(smmu->dev, "failed to allocate context descriptor\n"); ret = -ENOMEM; goto err_free_l1; @@ -1174,58 +1414,42 @@ static int arm_smmu_alloc_cd_tables(struct arm_smmu_domain *smmu_domain) return 0; err_free_l1: - if (cdcfg->l1_desc) { - devm_kfree(smmu->dev, cdcfg->l1_desc); - cdcfg->l1_desc = NULL; + if (cd_table->l1_desc) { + devm_kfree(smmu->dev, cd_table->l1_desc); + cd_table->l1_desc = NULL; } return ret; } -static void arm_smmu_free_cd_tables(struct arm_smmu_domain *smmu_domain) +static void arm_smmu_free_cd_tables(struct arm_smmu_master *master) { int i; size_t size, l1size; - struct arm_smmu_device *smmu = smmu_domain->smmu; - struct arm_smmu_ctx_desc_cfg *cdcfg = &smmu_domain->s1_cfg.cdcfg; + struct arm_smmu_device *smmu = master->smmu; + struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table; - if (cdcfg->l1_desc) { + if (cd_table->l1_desc) { size = CTXDESC_L2_ENTRIES * (CTXDESC_CD_DWORDS << 3); - for (i = 0; i < cdcfg->num_l1_ents; i++) { - if (!cdcfg->l1_desc[i].l2ptr) + for (i = 0; i < cd_table->num_l1_ents; i++) { + if (!cd_table->l1_desc[i].l2ptr) continue; dmam_free_coherent(smmu->dev, size, - cdcfg->l1_desc[i].l2ptr, - cdcfg->l1_desc[i].l2ptr_dma); + cd_table->l1_desc[i].l2ptr, + cd_table->l1_desc[i].l2ptr_dma); } - devm_kfree(smmu->dev, cdcfg->l1_desc); - cdcfg->l1_desc = NULL; + devm_kfree(smmu->dev, cd_table->l1_desc); + cd_table->l1_desc = NULL; - l1size = cdcfg->num_l1_ents * (CTXDESC_L1_DESC_DWORDS << 3); + l1size = cd_table->num_l1_ents * (CTXDESC_L1_DESC_DWORDS << 3); } else { - l1size = cdcfg->num_l1_ents * (CTXDESC_CD_DWORDS << 3); + l1size = cd_table->num_l1_ents * (CTXDESC_CD_DWORDS << 3); } - dmam_free_coherent(smmu->dev, l1size, cdcfg->cdtab, cdcfg->cdtab_dma); - cdcfg->cdtab_dma = 0; - cdcfg->cdtab = NULL; -} - -bool arm_smmu_free_asid(struct arm_smmu_ctx_desc *cd) -{ - bool free; - struct arm_smmu_ctx_desc *old_cd; - - if (!cd->asid) - return false; - - free = refcount_dec_and_test(&cd->refs); - if (free) { - old_cd = xa_erase(&arm_smmu_asid_xa, cd->asid); - WARN_ON(old_cd != cd); - } - return free; + dmam_free_coherent(smmu->dev, l1size, cd_table->cdtab, cd_table->cdtab_dma); + cd_table->cdtab_dma = 0; + cd_table->cdtab = NULL; } /* Stream table manipulation functions */ @@ -1237,181 +1461,210 @@ arm_smmu_write_strtab_l1_desc(__le64 *dst, struct arm_smmu_strtab_l1_desc *desc) val |= FIELD_PREP(STRTAB_L1_DESC_SPAN, desc->span); val |= desc->l2ptr_dma & STRTAB_L1_DESC_L2PTR_MASK; - /* See comment in arm_smmu_write_ctx_desc() */ + /* The HW has 64 bit atomicity with stores to the L2 STE table */ WRITE_ONCE(*dst, cpu_to_le64(val)); } -static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid) +struct arm_smmu_ste_writer { + struct arm_smmu_entry_writer writer; + u32 sid; +}; + +static void arm_smmu_ste_writer_sync_entry(struct arm_smmu_entry_writer *writer) { + struct arm_smmu_ste_writer *ste_writer = + container_of(writer, struct arm_smmu_ste_writer, writer); struct arm_smmu_cmdq_ent cmd = { .opcode = CMDQ_OP_CFGI_STE, .cfgi = { - .sid = sid, + .sid = ste_writer->sid, .leaf = true, }, 
}; - arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd); + arm_smmu_cmdq_issue_cmd_with_sync(writer->master->smmu, &cmd); } -static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid, - __le64 *dst) +static const struct arm_smmu_entry_writer_ops arm_smmu_ste_writer_ops = { + .sync = arm_smmu_ste_writer_sync_entry, + .get_used = arm_smmu_get_ste_used, +}; + +static void arm_smmu_write_ste(struct arm_smmu_master *master, u32 sid, + struct arm_smmu_ste *ste, + const struct arm_smmu_ste *target) { - /* - * This is hideously complicated, but we only really care about - * three cases at the moment: - * - * 1. Invalid (all zero) -> bypass/fault (init) - * 2. Bypass/fault -> translation/bypass (attach) - * 3. Translation/bypass -> bypass/fault (detach) - * - * Given that we can't update the STE atomically and the SMMU - * doesn't read the thing in a defined order, that leaves us - * with the following maintenance requirements: - * - * 1. Update Config, return (init time STEs aren't live) - * 2. Write everything apart from dword 0, sync, write dword 0, sync - * 3. Update Config, sync - */ - u64 val = le64_to_cpu(dst[0]); - bool ste_live = false; - struct arm_smmu_device *smmu = NULL; - struct arm_smmu_s1_cfg *s1_cfg = NULL; - struct arm_smmu_s2_cfg *s2_cfg = NULL; - struct arm_smmu_domain *smmu_domain = NULL; - struct arm_smmu_cmdq_ent prefetch_cmd = { - .opcode = CMDQ_OP_PREFETCH_CFG, - .prefetch = { - .sid = sid, + struct arm_smmu_device *smmu = master->smmu; + struct arm_smmu_ste_writer ste_writer = { + .writer = { + .ops = &arm_smmu_ste_writer_ops, + .master = master, }, + .sid = sid, }; - if (master) { - smmu_domain = master->domain; - smmu = master->smmu; - } + arm_smmu_write_entry(&ste_writer.writer, ste->data, target->data); - if (smmu_domain) { - switch (smmu_domain->stage) { - case ARM_SMMU_DOMAIN_S1: - s1_cfg = &smmu_domain->s1_cfg; - break; - case ARM_SMMU_DOMAIN_S2: - case ARM_SMMU_DOMAIN_NESTED: - s2_cfg = &smmu_domain->s2_cfg; - break; - default: - break; - } - } + /* It's likely that we'll want to use the new STE soon */ + if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH)) { + struct arm_smmu_cmdq_ent + prefetch_cmd = { .opcode = CMDQ_OP_PREFETCH_CFG, + .prefetch = { + .sid = sid, + } }; - if (val & STRTAB_STE_0_V) { - switch (FIELD_GET(STRTAB_STE_0_CFG, val)) { - case STRTAB_STE_0_CFG_BYPASS: - break; - case STRTAB_STE_0_CFG_S1_TRANS: - case STRTAB_STE_0_CFG_S2_TRANS: - ste_live = true; - break; - case STRTAB_STE_0_CFG_ABORT: - BUG_ON(!disable_bypass); - break; - default: - BUG(); /* STE corruption */ - } + arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd); } +} - /* Nuke the existing STE_0 value, as we're going to rewrite it */ - val = STRTAB_STE_0_V; - - /* Bypass/fault */ - if (!smmu_domain || !(s1_cfg || s2_cfg)) { - if (!smmu_domain && disable_bypass) - val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT); - else - val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS); +VISIBLE_IF_KUNIT +void arm_smmu_make_abort_ste(struct arm_smmu_ste *target) +{ + memset(target, 0, sizeof(*target)); + target->data[0] = cpu_to_le64( + STRTAB_STE_0_V | + FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT)); +} +EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_abort_ste); - dst[0] = cpu_to_le64(val); - dst[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG, - STRTAB_STE_1_SHCFG_INCOMING)); - dst[2] = 0; /* Nuke the VMID */ - /* - * The SMMU can perform negative caching, so we must sync - * the STE regardless of whether the old value was live. 
- */ - if (smmu) - arm_smmu_sync_ste_for_sid(smmu, sid); - return; - } +VISIBLE_IF_KUNIT +void arm_smmu_make_bypass_ste(struct arm_smmu_device *smmu, + struct arm_smmu_ste *target) +{ + memset(target, 0, sizeof(*target)); + target->data[0] = cpu_to_le64( + STRTAB_STE_0_V | + FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS)); - if (s1_cfg) { - u64 strw = smmu->features & ARM_SMMU_FEAT_E2H ? - STRTAB_STE_1_STRW_EL2 : STRTAB_STE_1_STRW_NSEL1; + if (smmu->features & ARM_SMMU_FEAT_ATTR_TYPES_OVR) + target->data[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG, + STRTAB_STE_1_SHCFG_INCOMING)); +} +EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_bypass_ste); - BUG_ON(ste_live); - dst[1] = cpu_to_le64( - FIELD_PREP(STRTAB_STE_1_S1DSS, STRTAB_STE_1_S1DSS_SSID0) | - FIELD_PREP(STRTAB_STE_1_S1CIR, STRTAB_STE_1_S1C_CACHE_WBRA) | - FIELD_PREP(STRTAB_STE_1_S1COR, STRTAB_STE_1_S1C_CACHE_WBRA) | - FIELD_PREP(STRTAB_STE_1_S1CSH, ARM_SMMU_SH_ISH) | - FIELD_PREP(STRTAB_STE_1_STRW, strw)); +VISIBLE_IF_KUNIT +void arm_smmu_make_cdtable_ste(struct arm_smmu_ste *target, + struct arm_smmu_master *master, bool ats_enabled, + unsigned int s1dss) +{ + struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table; + struct arm_smmu_device *smmu = master->smmu; - if (smmu->features & ARM_SMMU_FEAT_STALLS && - !master->stall_enabled) - dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD); + memset(target, 0, sizeof(*target)); + target->data[0] = cpu_to_le64( + STRTAB_STE_0_V | + FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS) | + FIELD_PREP(STRTAB_STE_0_S1FMT, cd_table->s1fmt) | + (cd_table->cdtab_dma & STRTAB_STE_0_S1CTXPTR_MASK) | + FIELD_PREP(STRTAB_STE_0_S1CDMAX, cd_table->s1cdmax)); + + target->data[1] = cpu_to_le64( + FIELD_PREP(STRTAB_STE_1_S1DSS, s1dss) | + FIELD_PREP(STRTAB_STE_1_S1CIR, STRTAB_STE_1_S1C_CACHE_WBRA) | + FIELD_PREP(STRTAB_STE_1_S1COR, STRTAB_STE_1_S1C_CACHE_WBRA) | + FIELD_PREP(STRTAB_STE_1_S1CSH, ARM_SMMU_SH_ISH) | + ((smmu->features & ARM_SMMU_FEAT_STALLS && + !master->stall_enabled) ? + STRTAB_STE_1_S1STALLD : + 0) | + FIELD_PREP(STRTAB_STE_1_EATS, + ats_enabled ? STRTAB_STE_1_EATS_TRANS : 0)); + + if ((smmu->features & ARM_SMMU_FEAT_ATTR_TYPES_OVR) && + s1dss == STRTAB_STE_1_S1DSS_BYPASS) + target->data[1] |= cpu_to_le64(FIELD_PREP( + STRTAB_STE_1_SHCFG, STRTAB_STE_1_SHCFG_INCOMING)); + + if (smmu->features & ARM_SMMU_FEAT_E2H) { + /* + * To support BTM the streamworld needs to match the + * configuration of the CPU so that the ASID broadcasts are + * properly matched. This means either S/NS-EL2-E2H (hypervisor) + * or NS-EL1 (guest). Since an SVA domain can be installed in a + * PASID this should always use a BTM compatible configuration + * if the HW supports it. 
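+	 * (Roughly: with E2H/VHE the host kernel runs at EL2 and its broadcast
+	 * TLB invalidations are EL2 ops, so STRW must select the EL2
+	 * streamworld for the SMMU to match them; without E2H the NS-EL1
+	 * streamworld matches the EL1 invalidations instead.)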
+ */ + target->data[1] |= cpu_to_le64( + FIELD_PREP(STRTAB_STE_1_STRW, STRTAB_STE_1_STRW_EL2)); + } else { + target->data[1] |= cpu_to_le64( + FIELD_PREP(STRTAB_STE_1_STRW, STRTAB_STE_1_STRW_NSEL1)); - val |= (s1_cfg->cdcfg.cdtab_dma & STRTAB_STE_0_S1CTXPTR_MASK) | - FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS) | - FIELD_PREP(STRTAB_STE_0_S1CDMAX, s1_cfg->s1cdmax) | - FIELD_PREP(STRTAB_STE_0_S1FMT, s1_cfg->s1fmt); + /* + * VMID 0 is reserved for stage-2 bypass EL1 STEs, see + * arm_smmu_domain_alloc_id() + */ + target->data[2] = + cpu_to_le64(FIELD_PREP(STRTAB_STE_2_S2VMID, 0)); } +} +EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_cdtable_ste); + +VISIBLE_IF_KUNIT +void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target, + struct arm_smmu_master *master, + struct arm_smmu_domain *smmu_domain, + bool ats_enabled) +{ + struct arm_smmu_s2_cfg *s2_cfg = &smmu_domain->s2_cfg; + const struct io_pgtable_cfg *pgtbl_cfg = + &io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops)->cfg; + typeof(&pgtbl_cfg->arm_lpae_s2_cfg.vtcr) vtcr = + &pgtbl_cfg->arm_lpae_s2_cfg.vtcr; + u64 vtcr_val; + struct arm_smmu_device *smmu = master->smmu; - if (s2_cfg) { - BUG_ON(ste_live); - dst[2] = cpu_to_le64( - FIELD_PREP(STRTAB_STE_2_S2VMID, s2_cfg->vmid) | - FIELD_PREP(STRTAB_STE_2_VTCR, s2_cfg->vtcr) | + memset(target, 0, sizeof(*target)); + target->data[0] = cpu_to_le64( + STRTAB_STE_0_V | + FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S2_TRANS)); + + target->data[1] = cpu_to_le64( + FIELD_PREP(STRTAB_STE_1_EATS, + ats_enabled ? STRTAB_STE_1_EATS_TRANS : 0)); + + if (smmu->features & ARM_SMMU_FEAT_ATTR_TYPES_OVR) + target->data[1] |= cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG, + STRTAB_STE_1_SHCFG_INCOMING)); + + vtcr_val = FIELD_PREP(STRTAB_STE_2_VTCR_S2T0SZ, vtcr->tsz) | + FIELD_PREP(STRTAB_STE_2_VTCR_S2SL0, vtcr->sl) | + FIELD_PREP(STRTAB_STE_2_VTCR_S2IR0, vtcr->irgn) | + FIELD_PREP(STRTAB_STE_2_VTCR_S2OR0, vtcr->orgn) | + FIELD_PREP(STRTAB_STE_2_VTCR_S2SH0, vtcr->sh) | + FIELD_PREP(STRTAB_STE_2_VTCR_S2TG, vtcr->tg) | + FIELD_PREP(STRTAB_STE_2_VTCR_S2PS, vtcr->ps); + target->data[2] = cpu_to_le64( + FIELD_PREP(STRTAB_STE_2_S2VMID, s2_cfg->vmid) | + FIELD_PREP(STRTAB_STE_2_VTCR, vtcr_val) | + STRTAB_STE_2_S2AA64 | #ifdef __BIG_ENDIAN - STRTAB_STE_2_S2ENDI | + STRTAB_STE_2_S2ENDI | #endif - STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2AA64 | - STRTAB_STE_2_S2R); - - dst[3] = cpu_to_le64(s2_cfg->vttbr & STRTAB_STE_3_S2TTB_MASK); - - val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S2_TRANS); - } + STRTAB_STE_2_S2PTW | + STRTAB_STE_2_S2R); - if (master->ats_enabled) - dst[1] |= cpu_to_le64(FIELD_PREP(STRTAB_STE_1_EATS, - STRTAB_STE_1_EATS_TRANS)); - - arm_smmu_sync_ste_for_sid(smmu, sid); - /* See comment in arm_smmu_write_ctx_desc() */ - WRITE_ONCE(dst[0], cpu_to_le64(val)); - arm_smmu_sync_ste_for_sid(smmu, sid); - - /* It's likely that we'll want to use the new STE soon */ - if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH)) - arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd); + target->data[3] = cpu_to_le64(pgtbl_cfg->arm_lpae_s2_cfg.vttbr & + STRTAB_STE_3_S2TTB_MASK); } +EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_s2_domain_ste); -static void arm_smmu_init_bypass_stes(__le64 *strtab, unsigned int nent, bool force) +/* + * This can safely directly manipulate the STE memory without a sync sequence + * because the STE table has not been installed in the SMMU yet. 
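+ *
+ * Either the stream table base has not been programmed yet, or, for a newly
+ * allocated L2 table, the L1 descriptor that makes it visible is only
+ * written after these initial STEs are filled in.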
+ */ +static void arm_smmu_init_initial_stes(struct arm_smmu_device *smmu, + struct arm_smmu_ste *strtab, + unsigned int nent) { unsigned int i; - u64 val = STRTAB_STE_0_V; - - if (disable_bypass && !force) - val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT); - else - val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS); for (i = 0; i < nent; ++i) { - strtab[0] = cpu_to_le64(val); - strtab[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG, - STRTAB_STE_1_SHCFG_INCOMING)); - strtab[2] = 0; - strtab += STRTAB_STE_DWORDS; + if (disable_bypass) + arm_smmu_make_abort_ste(strtab); + else + arm_smmu_make_bypass_ste(smmu, strtab); + strtab++; } } @@ -1438,7 +1691,7 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid) return -ENOMEM; } - arm_smmu_init_bypass_stes(desc->l2ptr, 1 << STRTAB_SPLIT, false); + arm_smmu_init_initial_stes(smmu, desc->l2ptr, 1 << STRTAB_SPLIT); arm_smmu_write_strtab_l1_desc(strtab, desc); return 0; } @@ -1479,27 +1732,19 @@ arm_smmu_find_master(struct arm_smmu_device *smmu, u32 sid) /* IRQ and event handlers */ static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt) { - int ret; - u32 reason; + int ret = 0; u32 perm = 0; struct arm_smmu_master *master; bool ssid_valid = evt[0] & EVTQ_0_SSV; u32 sid = FIELD_GET(EVTQ_0_SID, evt[0]); - struct iommu_fault_event fault_evt = { }; + struct iopf_fault fault_evt = { }; struct iommu_fault *flt = &fault_evt.fault; switch (FIELD_GET(EVTQ_0_ID, evt[0])) { case EVT_ID_TRANSLATION_FAULT: - reason = IOMMU_FAULT_REASON_PTE_FETCH; - break; case EVT_ID_ADDR_SIZE_FAULT: - reason = IOMMU_FAULT_REASON_OOR_ADDRESS; - break; case EVT_ID_ACCESS_FAULT: - reason = IOMMU_FAULT_REASON_ACCESS; - break; case EVT_ID_PERMISSION_FAULT: - reason = IOMMU_FAULT_REASON_PERMISSION; break; default: return -EOPNOTSUPP; @@ -1509,6 +1754,9 @@ static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt) if (evt[1] & EVTQ_1_S2) return -EFAULT; + if (!(evt[1] & EVTQ_1_STALL)) + return -EOPNOTSUPP; + if (evt[1] & EVTQ_1_RnW) perm |= IOMMU_FAULT_PERM_READ; else @@ -1520,32 +1768,17 @@ static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt) if (evt[1] & EVTQ_1_PnU) perm |= IOMMU_FAULT_PERM_PRIV; - if (evt[1] & EVTQ_1_STALL) { - flt->type = IOMMU_FAULT_PAGE_REQ; - flt->prm = (struct iommu_fault_page_request) { - .flags = IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE, - .grpid = FIELD_GET(EVTQ_1_STAG, evt[1]), - .perm = perm, - .addr = FIELD_GET(EVTQ_2_ADDR, evt[2]), - }; - - if (ssid_valid) { - flt->prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID; - flt->prm.pasid = FIELD_GET(EVTQ_0_SSID, evt[0]); - } - } else { - flt->type = IOMMU_FAULT_DMA_UNRECOV; - flt->event = (struct iommu_fault_unrecoverable) { - .reason = reason, - .flags = IOMMU_FAULT_UNRECOV_ADDR_VALID, - .perm = perm, - .addr = FIELD_GET(EVTQ_2_ADDR, evt[2]), - }; + flt->type = IOMMU_FAULT_PAGE_REQ; + flt->prm = (struct iommu_fault_page_request) { + .flags = IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE, + .grpid = FIELD_GET(EVTQ_1_STAG, evt[1]), + .perm = perm, + .addr = FIELD_GET(EVTQ_2_ADDR, evt[2]), + }; - if (ssid_valid) { - flt->event.flags |= IOMMU_FAULT_UNRECOV_PASID_VALID; - flt->event.pasid = FIELD_GET(EVTQ_0_SSID, evt[0]); - } + if (ssid_valid) { + flt->prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID; + flt->prm.pasid = FIELD_GET(EVTQ_0_SSID, evt[0]); } mutex_lock(&smmu->streams_mutex); @@ -1556,16 +1789,6 @@ static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt) } ret = iommu_report_device_fault(master->dev, &fault_evt); - 
if (ret && flt->type == IOMMU_FAULT_PAGE_REQ) { - /* Nobody cared, abort the access */ - struct iommu_page_response resp = { - .pasid = flt->prm.pasid, - .grpid = flt->prm.grpid, - .code = IOMMU_PAGE_RESP_FAILURE, - }; - arm_smmu_page_response(master->dev, &fault_evt, &resp); - } - out_unlock: mutex_unlock(&smmu->streams_mutex); return ret; @@ -1800,13 +2023,14 @@ arm_smmu_atc_inv_to_cmd(int ssid, unsigned long iova, size_t size, cmd->atc.size = log2_span; } -static int arm_smmu_atc_inv_master(struct arm_smmu_master *master) +static int arm_smmu_atc_inv_master(struct arm_smmu_master *master, + ioasid_t ssid) { int i; struct arm_smmu_cmdq_ent cmd; struct arm_smmu_cmdq_batch cmds; - arm_smmu_atc_inv_to_cmd(IOMMU_NO_PASID, 0, 0, &cmd); + arm_smmu_atc_inv_to_cmd(ssid, 0, 0, &cmd); cmds.num = 0; for (i = 0; i < master->num_streams; i++) { @@ -1817,13 +2041,13 @@ static int arm_smmu_atc_inv_master(struct arm_smmu_master *master) return arm_smmu_cmdq_batch_submit(master->smmu, &cmds); } -int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid, +int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, unsigned long iova, size_t size) { + struct arm_smmu_master_domain *master_domain; int i; unsigned long flags; struct arm_smmu_cmdq_ent cmd; - struct arm_smmu_master *master; struct arm_smmu_cmdq_batch cmds; if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_ATS)) @@ -1846,15 +2070,18 @@ int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid, if (!atomic_read(&smmu_domain->nr_ats_masters)) return 0; - arm_smmu_atc_inv_to_cmd(ssid, iova, size, &cmd); - cmds.num = 0; spin_lock_irqsave(&smmu_domain->devices_lock, flags); - list_for_each_entry(master, &smmu_domain->devices, domain_head) { + list_for_each_entry(master_domain, &smmu_domain->devices, + devices_elm) { + struct arm_smmu_master *master = master_domain->master; + if (!master->ats_enabled) continue; + arm_smmu_atc_inv_to_cmd(master_domain->ssid, iova, size, &cmd); + for (i = 0; i < master->num_streams; i++) { cmd.atc.sid = master->streams[i].id; arm_smmu_cmdq_batch_add(smmu_domain->smmu, &cmds, &cmd); @@ -1880,13 +2107,13 @@ static void arm_smmu_tlb_inv_context(void *cookie) * careful, 007. */ if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { - arm_smmu_tlb_inv_asid(smmu, smmu_domain->s1_cfg.cd.asid); + arm_smmu_tlb_inv_asid(smmu, smmu_domain->cd.asid); } else { cmd.opcode = CMDQ_OP_TLBI_S12_VMALL; cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid; arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd); } - arm_smmu_atc_inv_domain(smmu_domain, IOMMU_NO_PASID, 0, 0); + arm_smmu_atc_inv_domain(smmu_domain, 0, 0); } static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd, @@ -1973,7 +2200,7 @@ static void arm_smmu_tlb_inv_range_domain(unsigned long iova, size_t size, if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { cmd.opcode = smmu_domain->smmu->features & ARM_SMMU_FEAT_E2H ? CMDQ_OP_TLBI_EL2_VA : CMDQ_OP_TLBI_NH_VA; - cmd.tlbi.asid = smmu_domain->s1_cfg.cd.asid; + cmd.tlbi.asid = smmu_domain->cd.asid; } else { cmd.opcode = CMDQ_OP_TLBI_S2_IPA; cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid; @@ -1984,7 +2211,7 @@ static void arm_smmu_tlb_inv_range_domain(unsigned long iova, size_t size, * Unfortunately, this can't be leaf-only since we may have * zapped an entire table. 
*/ - arm_smmu_atc_inv_domain(smmu_domain, IOMMU_NO_PASID, iova, size); + arm_smmu_atc_inv_domain(smmu_domain, iova, size); } void arm_smmu_tlb_inv_range_asid(unsigned long iova, size_t size, int asid, @@ -2042,51 +2269,59 @@ static bool arm_smmu_capable(struct device *dev, enum iommu_cap cap) } } -static struct iommu_domain *arm_smmu_domain_alloc(unsigned type) +struct arm_smmu_domain *arm_smmu_domain_alloc(void) { struct arm_smmu_domain *smmu_domain; - if (type == IOMMU_DOMAIN_SVA) - return arm_smmu_sva_domain_alloc(); + smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL); + if (!smmu_domain) + return ERR_PTR(-ENOMEM); - if (type != IOMMU_DOMAIN_UNMANAGED && - type != IOMMU_DOMAIN_DMA && - type != IOMMU_DOMAIN_IDENTITY) - return NULL; + mutex_init(&smmu_domain->init_mutex); + INIT_LIST_HEAD(&smmu_domain->devices); + spin_lock_init(&smmu_domain->devices_lock); + + return smmu_domain; +} + +static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev) +{ + struct arm_smmu_domain *smmu_domain; /* * Allocate the domain and initialise some of its data structures. * We can't really do anything meaningful until we've added a * master. */ - smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL); - if (!smmu_domain) - return NULL; + smmu_domain = arm_smmu_domain_alloc(); + if (IS_ERR(smmu_domain)) + return ERR_CAST(smmu_domain); - mutex_init(&smmu_domain->init_mutex); - INIT_LIST_HEAD(&smmu_domain->devices); - spin_lock_init(&smmu_domain->devices_lock); - INIT_LIST_HEAD(&smmu_domain->mmu_notifiers); + if (dev) { + struct arm_smmu_master *master = dev_iommu_priv_get(dev); + int ret; + ret = arm_smmu_domain_finalise(smmu_domain, master->smmu); + if (ret) { + kfree(smmu_domain); + return ERR_PTR(ret); + } + } return &smmu_domain->domain; } -static void arm_smmu_domain_free(struct iommu_domain *domain) +static void arm_smmu_domain_free_paging(struct iommu_domain *domain) { struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); struct arm_smmu_device *smmu = smmu_domain->smmu; free_io_pgtable_ops(smmu_domain->pgtbl_ops); - /* Free the CD and ASID, if we allocated them */ + /* Free the ASID or VMID */ if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { - struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg; - /* Prevent SVA from touching the CD while we're freeing it */ mutex_lock(&arm_smmu_asid_lock); - if (cfg->cdcfg.cdtab) - arm_smmu_free_cd_tables(smmu_domain); - arm_smmu_free_asid(&cfg->cd); + xa_erase(&arm_smmu_asid_xa, smmu_domain->cd.asid); mutex_unlock(&arm_smmu_asid_lock); } else { struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg; @@ -2097,73 +2332,27 @@ static void arm_smmu_domain_free(struct iommu_domain *domain) kfree(smmu_domain); } -static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain, - struct arm_smmu_master *master, - struct io_pgtable_cfg *pgtbl_cfg) +static int arm_smmu_domain_finalise_s1(struct arm_smmu_device *smmu, + struct arm_smmu_domain *smmu_domain) { int ret; - u32 asid; - struct arm_smmu_device *smmu = smmu_domain->smmu; - struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg; - typeof(&pgtbl_cfg->arm_lpae_s1_cfg.tcr) tcr = &pgtbl_cfg->arm_lpae_s1_cfg.tcr; - - refcount_set(&cfg->cd.refs, 1); + u32 asid = 0; + struct arm_smmu_ctx_desc *cd = &smmu_domain->cd; /* Prevent SVA from modifying the ASID until it is written to the CD */ mutex_lock(&arm_smmu_asid_lock); - ret = xa_alloc(&arm_smmu_asid_xa, &asid, &cfg->cd, + ret = xa_alloc(&arm_smmu_asid_xa, &asid, smmu_domain, XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL); - if (ret) - 
goto out_unlock; - - cfg->s1cdmax = master->ssid_bits; - - smmu_domain->stall_enabled = master->stall_enabled; - - ret = arm_smmu_alloc_cd_tables(smmu_domain); - if (ret) - goto out_free_asid; - - cfg->cd.asid = (u16)asid; - cfg->cd.ttbr = pgtbl_cfg->arm_lpae_s1_cfg.ttbr; - cfg->cd.tcr = FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, tcr->tsz) | - FIELD_PREP(CTXDESC_CD_0_TCR_TG0, tcr->tg) | - FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, tcr->irgn) | - FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, tcr->orgn) | - FIELD_PREP(CTXDESC_CD_0_TCR_SH0, tcr->sh) | - FIELD_PREP(CTXDESC_CD_0_TCR_IPS, tcr->ips) | - CTXDESC_CD_0_TCR_EPD1 | CTXDESC_CD_0_AA64; - cfg->cd.mair = pgtbl_cfg->arm_lpae_s1_cfg.mair; - - /* - * Note that this will end up calling arm_smmu_sync_cd() before - * the master has been added to the devices list for this domain. - * This isn't an issue because the STE hasn't been installed yet. - */ - ret = arm_smmu_write_ctx_desc(smmu_domain, IOMMU_NO_PASID, &cfg->cd); - if (ret) - goto out_free_cd_tables; - - mutex_unlock(&arm_smmu_asid_lock); - return 0; - -out_free_cd_tables: - arm_smmu_free_cd_tables(smmu_domain); -out_free_asid: - arm_smmu_free_asid(&cfg->cd); -out_unlock: + cd->asid = (u16)asid; mutex_unlock(&arm_smmu_asid_lock); return ret; } -static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain, - struct arm_smmu_master *master, - struct io_pgtable_cfg *pgtbl_cfg) +static int arm_smmu_domain_finalise_s2(struct arm_smmu_device *smmu, + struct arm_smmu_domain *smmu_domain) { int vmid; - struct arm_smmu_device *smmu = smmu_domain->smmu; struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg; - typeof(&pgtbl_cfg->arm_lpae_s2_cfg.vtcr) vtcr; /* Reserve VMID 0 for stage-2 bypass STEs */ vmid = ida_alloc_range(&smmu->vmid_map, 1, (1 << smmu->vmid_bits) - 1, @@ -2171,37 +2360,20 @@ static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain, if (vmid < 0) return vmid; - vtcr = &pgtbl_cfg->arm_lpae_s2_cfg.vtcr; cfg->vmid = (u16)vmid; - cfg->vttbr = pgtbl_cfg->arm_lpae_s2_cfg.vttbr; - cfg->vtcr = FIELD_PREP(STRTAB_STE_2_VTCR_S2T0SZ, vtcr->tsz) | - FIELD_PREP(STRTAB_STE_2_VTCR_S2SL0, vtcr->sl) | - FIELD_PREP(STRTAB_STE_2_VTCR_S2IR0, vtcr->irgn) | - FIELD_PREP(STRTAB_STE_2_VTCR_S2OR0, vtcr->orgn) | - FIELD_PREP(STRTAB_STE_2_VTCR_S2SH0, vtcr->sh) | - FIELD_PREP(STRTAB_STE_2_VTCR_S2TG, vtcr->tg) | - FIELD_PREP(STRTAB_STE_2_VTCR_S2PS, vtcr->ps); return 0; } -static int arm_smmu_domain_finalise(struct iommu_domain *domain, - struct arm_smmu_master *master) +static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain, + struct arm_smmu_device *smmu) { int ret; unsigned long ias, oas; enum io_pgtable_fmt fmt; struct io_pgtable_cfg pgtbl_cfg; struct io_pgtable_ops *pgtbl_ops; - int (*finalise_stage_fn)(struct arm_smmu_domain *, - struct arm_smmu_master *, - struct io_pgtable_cfg *); - struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - struct arm_smmu_device *smmu = smmu_domain->smmu; - - if (domain->type == IOMMU_DOMAIN_IDENTITY) { - smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS; - return 0; - } + int (*finalise_stage_fn)(struct arm_smmu_device *smmu, + struct arm_smmu_domain *smmu_domain); /* Restrict the stage to what we can actually support */ if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1)) @@ -2241,50 +2413,57 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain, if (!pgtbl_ops) return -ENOMEM; - domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap; - domain->geometry.aperture_end = (1UL << pgtbl_cfg.ias) - 1; - domain->geometry.force_aperture = true; + 
smmu_domain->domain.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap; + smmu_domain->domain.geometry.aperture_end = (1UL << pgtbl_cfg.ias) - 1; + smmu_domain->domain.geometry.force_aperture = true; - ret = finalise_stage_fn(smmu_domain, master, &pgtbl_cfg); + ret = finalise_stage_fn(smmu, smmu_domain); if (ret < 0) { free_io_pgtable_ops(pgtbl_ops); return ret; } smmu_domain->pgtbl_ops = pgtbl_ops; + smmu_domain->smmu = smmu; return 0; } -static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid) +static struct arm_smmu_ste * +arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid) { - __le64 *step; struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) { - struct arm_smmu_strtab_l1_desc *l1_desc; - int idx; + unsigned int idx1, idx2; /* Two-level walk */ - idx = (sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS; - l1_desc = &cfg->l1_desc[idx]; - idx = (sid & ((1 << STRTAB_SPLIT) - 1)) * STRTAB_STE_DWORDS; - step = &l1_desc->l2ptr[idx]; + idx1 = (sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS; + idx2 = sid & ((1 << STRTAB_SPLIT) - 1); + return &cfg->l1_desc[idx1].l2ptr[idx2]; } else { /* Simple linear lookup */ - step = &cfg->strtab[sid * STRTAB_STE_DWORDS]; + return (struct arm_smmu_ste *)&cfg + ->strtab[sid * STRTAB_STE_DWORDS]; } - - return step; } -static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master) +static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master, + const struct arm_smmu_ste *target) { int i, j; struct arm_smmu_device *smmu = master->smmu; + master->cd_table.in_ste = + FIELD_GET(STRTAB_STE_0_CFG, le64_to_cpu(target->data[0])) == + STRTAB_STE_0_CFG_S1_TRANS; + master->ste_ats_enabled = + FIELD_GET(STRTAB_STE_1_EATS, le64_to_cpu(target->data[1])) == + STRTAB_STE_1_EATS_TRANS; + for (i = 0; i < master->num_streams; ++i) { u32 sid = master->streams[i].id; - __le64 *step = arm_smmu_get_step_for_sid(smmu, sid); + struct arm_smmu_ste *step = + arm_smmu_get_step_for_sid(smmu, sid); /* Bridged PCI devices may end up with duplicated IDs */ for (j = 0; j < i; j++) @@ -2293,7 +2472,7 @@ static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master) if (j < i) continue; - arm_smmu_write_strtab_ent(master, sid, step); + arm_smmu_write_ste(master, sid, step, target); } } @@ -2317,37 +2496,17 @@ static void arm_smmu_enable_ats(struct arm_smmu_master *master) size_t stu; struct pci_dev *pdev; struct arm_smmu_device *smmu = master->smmu; - struct arm_smmu_domain *smmu_domain = master->domain; - - /* Don't enable ATS at the endpoint if it's not enabled in the STE */ - if (!master->ats_enabled) - return; /* Smallest Translation Unit: log2 of the smallest supported granule */ stu = __ffs(smmu->pgsize_bitmap); pdev = to_pci_dev(master->dev); - atomic_inc(&smmu_domain->nr_ats_masters); - arm_smmu_atc_inv_domain(smmu_domain, IOMMU_NO_PASID, 0, 0); - if (pci_enable_ats(pdev, stu)) - dev_err(master->dev, "Failed to enable ATS (STU %zu)\n", stu); -} - -static void arm_smmu_disable_ats(struct arm_smmu_master *master) -{ - struct arm_smmu_domain *smmu_domain = master->domain; - - if (!master->ats_enabled) - return; - - pci_disable_ats(to_pci_dev(master->dev)); /* - * Ensure ATS is disabled at the endpoint before we issue the - * ATC invalidation via the SMMU. + * ATC invalidation of PASID 0 causes the entire ATC to be flushed. 
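+	 * Doing this before pci_enable_ats() means the endpoint starts with a
+	 * clean ATC once it is permitted to send Translation Requests.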
*/ - wmb(); - arm_smmu_atc_inv_master(master); - atomic_dec(&smmu_domain->nr_ats_masters); + arm_smmu_atc_inv_master(master, IOMMU_NO_PASID); + if (pci_enable_ats(pdev, stu)) + dev_err(master->dev, "Failed to enable ATS (STU %zu)\n", stu); } static int arm_smmu_enable_pasid(struct arm_smmu_master *master) @@ -2397,99 +2556,492 @@ static void arm_smmu_disable_pasid(struct arm_smmu_master *master) pci_disable_pasid(pdev); } -static void arm_smmu_detach_dev(struct arm_smmu_master *master) +static struct arm_smmu_master_domain * +arm_smmu_find_master_domain(struct arm_smmu_domain *smmu_domain, + struct arm_smmu_master *master, + ioasid_t ssid) +{ + struct arm_smmu_master_domain *master_domain; + + lockdep_assert_held(&smmu_domain->devices_lock); + + list_for_each_entry(master_domain, &smmu_domain->devices, + devices_elm) { + if (master_domain->master == master && + master_domain->ssid == ssid) + return master_domain; + } + return NULL; +} + +/* + * If the domain uses the smmu_domain->devices list return the arm_smmu_domain + * structure, otherwise NULL. These domains track attached devices so they can + * issue invalidations. + */ +static struct arm_smmu_domain * +to_smmu_domain_devices(struct iommu_domain *domain) { + /* The domain can be NULL only when processing the first attach */ + if (!domain) + return NULL; + if ((domain->type & __IOMMU_DOMAIN_PAGING) || + domain->type == IOMMU_DOMAIN_SVA) + return to_smmu_domain(domain); + return NULL; +} + +static void arm_smmu_remove_master_domain(struct arm_smmu_master *master, + struct iommu_domain *domain, + ioasid_t ssid) +{ + struct arm_smmu_domain *smmu_domain = to_smmu_domain_devices(domain); + struct arm_smmu_master_domain *master_domain; unsigned long flags; - struct arm_smmu_domain *smmu_domain = master->domain; if (!smmu_domain) return; - arm_smmu_disable_ats(master); - spin_lock_irqsave(&smmu_domain->devices_lock, flags); - list_del(&master->domain_head); + master_domain = arm_smmu_find_master_domain(smmu_domain, master, ssid); + if (master_domain) { + list_del(&master_domain->devices_elm); + kfree(master_domain); + if (master->ats_enabled) + atomic_dec(&smmu_domain->nr_ats_masters); + } spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); +} + +struct arm_smmu_attach_state { + /* Inputs */ + struct iommu_domain *old_domain; + struct arm_smmu_master *master; + bool cd_needs_ats; + ioasid_t ssid; + /* Resulting state */ + bool ats_enabled; +}; + +/* + * Start the sequence to attach a domain to a master. The sequence contains three + * steps: + * arm_smmu_attach_prepare() + * arm_smmu_install_ste_for_dev() + * arm_smmu_attach_commit() + * + * If prepare succeeds then the sequence must be completed. The STE installed + * must set the STE.EATS field according to state.ats_enabled. + * + * If the device supports ATS then this determines if EATS should be enabled + * in the STE, and starts sequencing EATS disable if required. + * + * The change of the EATS in the STE and the PCI ATS config space is managed by + * this sequence to be in the right order so that if PCI ATS is enabled then + * STE.ETAS is enabled. + * + * new_domain can be a non-paging domain. In this case ATS will not be enabled, + * and invalidations won't be tracked. 
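+ *
+ * A typical caller looks roughly like this (simplified sketch, error
+ * handling omitted, local names are placeholders):
+ *
+ *	mutex_lock(&arm_smmu_asid_lock);
+ *	arm_smmu_attach_prepare(&state, new_domain);
+ *	arm_smmu_make_cdtable_ste(&ste, master, state.ats_enabled, s1dss);
+ *	arm_smmu_install_ste_for_dev(master, &ste);
+ *	arm_smmu_attach_commit(&state);
+ *	mutex_unlock(&arm_smmu_asid_lock);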
+ */ +static int arm_smmu_attach_prepare(struct arm_smmu_attach_state *state, + struct iommu_domain *new_domain) +{ + struct arm_smmu_master *master = state->master; + struct arm_smmu_master_domain *master_domain; + struct arm_smmu_domain *smmu_domain = + to_smmu_domain_devices(new_domain); + unsigned long flags; + + /* + * arm_smmu_share_asid() must not see two domains pointing to the same + * arm_smmu_master_domain contents otherwise it could randomly write one + * or the other to the CD. + */ + lockdep_assert_held(&arm_smmu_asid_lock); + + if (smmu_domain || state->cd_needs_ats) { + /* + * The SMMU does not support enabling ATS with bypass/abort. + * When the STE is in bypass (STE.Config[2:0] == 0b100), ATS + * Translation Requests and Translated transactions are denied + * as though ATS is disabled for the stream (STE.EATS == 0b00), + * causing F_BAD_ATS_TREQ and F_TRANSL_FORBIDDEN events + * (IHI0070Ea 5.2 Stream Table Entry). Thus ATS can only be + * enabled if we have arm_smmu_domain, those always have page + * tables. + */ + state->ats_enabled = arm_smmu_ats_supported(master); + } + + if (smmu_domain) { + master_domain = kzalloc(sizeof(*master_domain), GFP_KERNEL); + if (!master_domain) + return -ENOMEM; + master_domain->master = master; + master_domain->ssid = state->ssid; + + /* + * During prepare we want the current smmu_domain and new + * smmu_domain to be in the devices list before we change any + * HW. This ensures that both domains will send ATS + * invalidations to the master until we are done. + * + * It is tempting to make this list only track masters that are + * using ATS, but arm_smmu_share_asid() also uses this to change + * the ASID of a domain, unrelated to ATS. + * + * Notice if we are re-attaching the same domain then the list + * will have two identical entries and commit will remove only + * one of them. + */ + spin_lock_irqsave(&smmu_domain->devices_lock, flags); + if (state->ats_enabled) + atomic_inc(&smmu_domain->nr_ats_masters); + list_add(&master_domain->devices_elm, &smmu_domain->devices); + spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); + } - master->domain = NULL; - master->ats_enabled = false; - arm_smmu_install_ste_for_dev(master); + if (!state->ats_enabled && master->ats_enabled) { + pci_disable_ats(to_pci_dev(master->dev)); + /* + * This is probably overkill, but the config write for disabling + * ATS should complete before the STE is configured to generate + * UR to avoid AER noise. + */ + wmb(); + } + return 0; +} + +/* + * Commit is done after the STE/CD are configured with the EATS setting. It + * completes synchronizing the PCI device's ATC and finishes manipulating the + * smmu_domain->devices list. + */ +static void arm_smmu_attach_commit(struct arm_smmu_attach_state *state) +{ + struct arm_smmu_master *master = state->master; + + lockdep_assert_held(&arm_smmu_asid_lock); + + if (state->ats_enabled && !master->ats_enabled) { + arm_smmu_enable_ats(master); + } else if (state->ats_enabled && master->ats_enabled) { + /* + * The translation has changed, flush the ATC. At this point the + * SMMU is translating for the new domain and both the old&new + * domain will issue invalidations. 
+ */ + arm_smmu_atc_inv_master(master, state->ssid); + } else if (!state->ats_enabled && master->ats_enabled) { + /* ATS is being switched off, invalidate the entire ATC */ + arm_smmu_atc_inv_master(master, IOMMU_NO_PASID); + } + master->ats_enabled = state->ats_enabled; + + arm_smmu_remove_master_domain(master, state->old_domain, state->ssid); } static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) { int ret = 0; - unsigned long flags; + struct arm_smmu_ste target; struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); struct arm_smmu_device *smmu; struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); + struct arm_smmu_attach_state state = { + .old_domain = iommu_get_domain_for_dev(dev), + .ssid = IOMMU_NO_PASID, + }; struct arm_smmu_master *master; + struct arm_smmu_cd *cdptr; if (!fwspec) return -ENOENT; - master = dev_iommu_priv_get(dev); + state.master = master = dev_iommu_priv_get(dev); smmu = master->smmu; + mutex_lock(&smmu_domain->init_mutex); + + if (!smmu_domain->smmu) { + ret = arm_smmu_domain_finalise(smmu_domain, smmu); + } else if (smmu_domain->smmu != smmu) + ret = -EINVAL; + + mutex_unlock(&smmu_domain->init_mutex); + if (ret) + return ret; + + if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { + cdptr = arm_smmu_alloc_cd_ptr(master, IOMMU_NO_PASID); + if (!cdptr) + return -ENOMEM; + } else if (arm_smmu_ssids_in_use(&master->cd_table)) + return -EBUSY; + /* - * Checking that SVA is disabled ensures that this device isn't bound to - * any mm, and can be safely detached from its old domain. Bonds cannot - * be removed concurrently since we're holding the group mutex. + * Prevent arm_smmu_share_asid() from trying to change the ASID + * of either the old or new domain while we are working on it. + * This allows the STE and the smmu_domain->devices list to + * be inconsistent during this routine. 
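+	 * (arm_smmu_share_asid() is the SVA path that can rewrite a live
+	 * domain's ASID when it collides with the ASID picked for an mm.)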
*/ - if (arm_smmu_master_sva_enabled(master)) { - dev_err(dev, "cannot attach - SVA enabled\n"); - return -EBUSY; + mutex_lock(&arm_smmu_asid_lock); + + ret = arm_smmu_attach_prepare(&state, domain); + if (ret) { + mutex_unlock(&arm_smmu_asid_lock); + return ret; } - arm_smmu_detach_dev(master); + switch (smmu_domain->stage) { + case ARM_SMMU_DOMAIN_S1: { + struct arm_smmu_cd target_cd; + + arm_smmu_make_s1_cd(&target_cd, master, smmu_domain); + arm_smmu_write_cd_entry(master, IOMMU_NO_PASID, cdptr, + &target_cd); + arm_smmu_make_cdtable_ste(&target, master, state.ats_enabled, + STRTAB_STE_1_S1DSS_SSID0); + arm_smmu_install_ste_for_dev(master, &target); + break; + } + case ARM_SMMU_DOMAIN_S2: + arm_smmu_make_s2_domain_ste(&target, master, smmu_domain, + state.ats_enabled); + arm_smmu_install_ste_for_dev(master, &target); + arm_smmu_clear_cd(master, IOMMU_NO_PASID); + break; + } - mutex_lock(&smmu_domain->init_mutex); + arm_smmu_attach_commit(&state); + mutex_unlock(&arm_smmu_asid_lock); + return 0; +} - if (!smmu_domain->smmu) { - smmu_domain->smmu = smmu; - ret = arm_smmu_domain_finalise(domain, master); - if (ret) { - smmu_domain->smmu = NULL; - goto out_unlock; - } - } else if (smmu_domain->smmu != smmu) { - ret = -EINVAL; - goto out_unlock; - } else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1 && - master->ssid_bits != smmu_domain->s1_cfg.s1cdmax) { - ret = -EINVAL; - goto out_unlock; - } else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1 && - smmu_domain->stall_enabled != master->stall_enabled) { +static int arm_smmu_s1_set_dev_pasid(struct iommu_domain *domain, + struct device *dev, ioasid_t id) +{ + struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); + struct arm_smmu_master *master = dev_iommu_priv_get(dev); + struct arm_smmu_device *smmu = master->smmu; + struct arm_smmu_cd target_cd; + int ret = 0; + + mutex_lock(&smmu_domain->init_mutex); + if (!smmu_domain->smmu) + ret = arm_smmu_domain_finalise(smmu_domain, smmu); + else if (smmu_domain->smmu != smmu) ret = -EINVAL; - goto out_unlock; - } + mutex_unlock(&smmu_domain->init_mutex); + if (ret) + return ret; - master->domain = smmu_domain; + if (smmu_domain->stage != ARM_SMMU_DOMAIN_S1) + return -EINVAL; /* - * The SMMU does not support enabling ATS with bypass. When the STE is - * in bypass (STE.Config[2:0] == 0b100), ATS Translation Requests and - * Translated transactions are denied as though ATS is disabled for the - * stream (STE.EATS == 0b00), causing F_BAD_ATS_TREQ and - * F_TRANSL_FORBIDDEN events (IHI0070Ea 5.2 Stream Table Entry). 
+ * We can read cd.asid outside the lock because arm_smmu_set_pasid() + * will fix it */ - if (smmu_domain->stage != ARM_SMMU_DOMAIN_BYPASS) - master->ats_enabled = arm_smmu_ats_supported(master); + arm_smmu_make_s1_cd(&target_cd, master, smmu_domain); + return arm_smmu_set_pasid(master, to_smmu_domain(domain), id, + &target_cd); +} - arm_smmu_install_ste_for_dev(master); +static void arm_smmu_update_ste(struct arm_smmu_master *master, + struct iommu_domain *sid_domain, + bool ats_enabled) +{ + unsigned int s1dss = STRTAB_STE_1_S1DSS_TERMINATE; + struct arm_smmu_ste ste; - spin_lock_irqsave(&smmu_domain->devices_lock, flags); - list_add(&master->domain_head, &smmu_domain->devices); - spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); + if (master->cd_table.in_ste && master->ste_ats_enabled == ats_enabled) + return; - arm_smmu_enable_ats(master); + if (sid_domain->type == IOMMU_DOMAIN_IDENTITY) + s1dss = STRTAB_STE_1_S1DSS_BYPASS; + else + WARN_ON(sid_domain->type != IOMMU_DOMAIN_BLOCKED); + + /* + * Change the STE into a cdtable one with SID IDENTITY/BLOCKED behavior + * using s1dss if necessary. If the cd_table is already installed then + * the S1DSS is correct and this will just update the EATS. Otherwise it + * installs the entire thing. This will be hitless. + */ + arm_smmu_make_cdtable_ste(&ste, master, ats_enabled, s1dss); + arm_smmu_install_ste_for_dev(master, &ste); +} + +int arm_smmu_set_pasid(struct arm_smmu_master *master, + struct arm_smmu_domain *smmu_domain, ioasid_t pasid, + struct arm_smmu_cd *cd) +{ + struct iommu_domain *sid_domain = iommu_get_domain_for_dev(master->dev); + struct arm_smmu_attach_state state = { + .master = master, + /* + * For now the core code prevents calling this when a domain is + * already attached, no need to set old_domain. + */ + .ssid = pasid, + }; + struct arm_smmu_cd *cdptr; + int ret; + + /* The core code validates pasid */ + + if (smmu_domain->smmu != master->smmu) + return -EINVAL; + + if (!master->cd_table.in_ste && + sid_domain->type != IOMMU_DOMAIN_IDENTITY && + sid_domain->type != IOMMU_DOMAIN_BLOCKED) + return -EINVAL; + + cdptr = arm_smmu_alloc_cd_ptr(master, pasid); + if (!cdptr) + return -ENOMEM; + + mutex_lock(&arm_smmu_asid_lock); + ret = arm_smmu_attach_prepare(&state, &smmu_domain->domain); + if (ret) + goto out_unlock; + + /* + * We don't want to obtain to the asid_lock too early, so fix up the + * caller set ASID under the lock in case it changed. + */ + cd->data[0] &= ~cpu_to_le64(CTXDESC_CD_0_ASID); + cd->data[0] |= cpu_to_le64( + FIELD_PREP(CTXDESC_CD_0_ASID, smmu_domain->cd.asid)); + + arm_smmu_write_cd_entry(master, pasid, cdptr, cd); + arm_smmu_update_ste(master, sid_domain, state.ats_enabled); + + arm_smmu_attach_commit(&state); out_unlock: - mutex_unlock(&smmu_domain->init_mutex); + mutex_unlock(&arm_smmu_asid_lock); return ret; } +static void arm_smmu_remove_dev_pasid(struct device *dev, ioasid_t pasid, + struct iommu_domain *domain) +{ + struct arm_smmu_master *master = dev_iommu_priv_get(dev); + struct arm_smmu_domain *smmu_domain; + + smmu_domain = to_smmu_domain(domain); + + mutex_lock(&arm_smmu_asid_lock); + arm_smmu_clear_cd(master, pasid); + if (master->ats_enabled) + arm_smmu_atc_inv_master(master, pasid); + arm_smmu_remove_master_domain(master, &smmu_domain->domain, pasid); + mutex_unlock(&arm_smmu_asid_lock); + + /* + * When the last user of the CD table goes away downgrade the STE back + * to a non-cd_table one. 
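+	 * Re-attaching the RID's identity or blocked domain below goes through
+	 * arm_smmu_attach_dev_ste(), which writes a plain bypass or abort STE
+	 * again.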
+ */ + if (!arm_smmu_ssids_in_use(&master->cd_table)) { + struct iommu_domain *sid_domain = + iommu_get_domain_for_dev(master->dev); + + if (sid_domain->type == IOMMU_DOMAIN_IDENTITY || + sid_domain->type == IOMMU_DOMAIN_BLOCKED) + sid_domain->ops->attach_dev(sid_domain, dev); + } +} + +static void arm_smmu_attach_dev_ste(struct iommu_domain *domain, + struct device *dev, + struct arm_smmu_ste *ste, + unsigned int s1dss) +{ + struct arm_smmu_master *master = dev_iommu_priv_get(dev); + struct arm_smmu_attach_state state = { + .master = master, + .old_domain = iommu_get_domain_for_dev(dev), + .ssid = IOMMU_NO_PASID, + }; + + /* + * Do not allow any ASID to be changed while we are working on the STE, + * otherwise we could miss invalidations. + */ + mutex_lock(&arm_smmu_asid_lock); + + /* + * If the CD table is not in use we can use the provided STE, otherwise + * we use a cdtable STE with the provided S1DSS. + */ + if (arm_smmu_ssids_in_use(&master->cd_table)) { + /* + * If a CD table has to be present then we need to run with ATS + * on even though the RID will fail ATS queries with UR. This is + * because we have no idea what the PASIDs need. + */ + state.cd_needs_ats = true; + arm_smmu_attach_prepare(&state, domain); + arm_smmu_make_cdtable_ste(ste, master, state.ats_enabled, s1dss); + } else { + arm_smmu_attach_prepare(&state, domain); + } + arm_smmu_install_ste_for_dev(master, ste); + arm_smmu_attach_commit(&state); + mutex_unlock(&arm_smmu_asid_lock); + + /* + * This has to be done after removing the master from the + * arm_smmu_domain->devices to avoid races updating the same context + * descriptor from arm_smmu_share_asid(). + */ + arm_smmu_clear_cd(master, IOMMU_NO_PASID); +} + +static int arm_smmu_attach_dev_identity(struct iommu_domain *domain, + struct device *dev) +{ + struct arm_smmu_ste ste; + struct arm_smmu_master *master = dev_iommu_priv_get(dev); + + arm_smmu_make_bypass_ste(master->smmu, &ste); + arm_smmu_attach_dev_ste(domain, dev, &ste, STRTAB_STE_1_S1DSS_BYPASS); + return 0; +} + +static const struct iommu_domain_ops arm_smmu_identity_ops = { + .attach_dev = arm_smmu_attach_dev_identity, +}; + +static struct iommu_domain arm_smmu_identity_domain = { + .type = IOMMU_DOMAIN_IDENTITY, + .ops = &arm_smmu_identity_ops, +}; + +static int arm_smmu_attach_dev_blocked(struct iommu_domain *domain, + struct device *dev) +{ + struct arm_smmu_ste ste; + + arm_smmu_make_abort_ste(&ste); + arm_smmu_attach_dev_ste(domain, dev, &ste, + STRTAB_STE_1_S1DSS_TERMINATE); + return 0; +} + +static const struct iommu_domain_ops arm_smmu_blocked_ops = { + .attach_dev = arm_smmu_attach_dev_blocked, +}; + +static struct iommu_domain arm_smmu_blocked_domain = { + .type = IOMMU_DOMAIN_BLOCKED, + .ops = &arm_smmu_blocked_ops, +}; + static int arm_smmu_map_pages(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t pgsize, size_t pgcount, int prot, gfp_t gfp, size_t *mapped) @@ -2663,9 +3215,6 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev) struct arm_smmu_master *master; struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); - if (!fwspec || fwspec->ops != &arm_smmu_ops) - return ERR_PTR(-ENODEV); - if (WARN_ON_ONCE(dev_iommu_priv_get(dev))) return ERR_PTR(-EBUSY); @@ -2679,7 +3228,6 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev) master->dev = dev; master->smmu = smmu; - INIT_LIST_HEAD(&master->bonds); dev_iommu_priv_set(dev, master); ret = arm_smmu_insert_master(smmu, master); @@ -2722,9 +3270,17 @@ static void
arm_smmu_release_device(struct device *dev) if (WARN_ON(arm_smmu_master_sva_enabled(master))) iopf_queue_remove_device(master->smmu->evtq.iopf, dev); - arm_smmu_detach_dev(master); + + /* Put the STE back to what arm_smmu_init_strtab() sets */ + if (disable_bypass && !dev->iommu->require_direct) + arm_smmu_attach_dev_blocked(&arm_smmu_blocked_domain, dev); + else + arm_smmu_attach_dev_identity(&arm_smmu_identity_domain, dev); + arm_smmu_disable_pasid(master); arm_smmu_remove_master(master); + if (master->cd_table.cdtab) + arm_smmu_free_cd_tables(master); kfree(master); } @@ -2853,20 +3409,12 @@ static int arm_smmu_def_domain_type(struct device *dev) return 0; } -static void arm_smmu_remove_dev_pasid(struct device *dev, ioasid_t pasid) -{ - struct iommu_domain *domain; - - domain = iommu_get_domain_for_dev_pasid(dev, pasid, IOMMU_DOMAIN_SVA); - if (WARN_ON(IS_ERR(domain)) || !domain) - return; - - arm_smmu_sva_remove_dev_pasid(domain, dev, pasid); -} - static struct iommu_ops arm_smmu_ops = { + .identity_domain = &arm_smmu_identity_domain, + .blocked_domain = &arm_smmu_blocked_domain, .capable = arm_smmu_capable, - .domain_alloc = arm_smmu_domain_alloc, + .domain_alloc_paging = arm_smmu_domain_alloc_paging, + .domain_alloc_sva = arm_smmu_sva_domain_alloc, .probe_device = arm_smmu_probe_device, .release_device = arm_smmu_release_device, .device_group = arm_smmu_device_group, @@ -2881,13 +3429,14 @@ static struct iommu_ops arm_smmu_ops = { .owner = THIS_MODULE, .default_domain_ops = &(const struct iommu_domain_ops) { .attach_dev = arm_smmu_attach_dev, + .set_dev_pasid = arm_smmu_s1_set_dev_pasid, .map_pages = arm_smmu_map_pages, .unmap_pages = arm_smmu_unmap_pages, .flush_iotlb_all = arm_smmu_flush_iotlb_all, .iotlb_sync = arm_smmu_iotlb_sync, .iova_to_phys = arm_smmu_iova_to_phys, .enable_nesting = arm_smmu_enable_nesting, - .free = arm_smmu_domain_free, + .free = arm_smmu_domain_free_paging, } }; @@ -3070,7 +3619,7 @@ static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu) reg |= FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE, smmu->sid_bits); cfg->strtab_base_cfg = reg; - arm_smmu_init_bypass_stes(strtab, cfg->num_l1_ents, false); + arm_smmu_init_initial_stes(smmu, strtab, cfg->num_l1_ents); return 0; } @@ -3582,6 +4131,9 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) return -ENXIO; } + if (reg & IDR1_ATTR_TYPES_OVR) + smmu->features |= ARM_SMMU_FEAT_ATTR_TYPES_OVR; + /* Queue sizes, capped to ensure natural alignment */ smmu->cmdq.q.llq.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT, FIELD_GET(IDR1_CMDQS, reg)); @@ -3781,7 +4333,6 @@ static void arm_smmu_rmr_install_bypass_ste(struct arm_smmu_device *smmu) iort_get_rmr_sids(dev_fwnode(smmu->dev), &rmr_list); list_for_each_entry(e, &rmr_list, list) { - __le64 *step; struct iommu_iort_rmr_data *rmr; int ret, i; @@ -3794,8 +4345,12 @@ static void arm_smmu_rmr_install_bypass_ste(struct arm_smmu_device *smmu) continue; } - step = arm_smmu_get_step_for_sid(smmu, rmr->sids[i]); - arm_smmu_init_bypass_stes(step, 1, true); + /* + * STE table is not programmed to HW, see + * arm_smmu_initial_bypass_stes() + */ + arm_smmu_make_bypass_ste(smmu, + arm_smmu_get_step_for_sid(smmu, rmr->sids[i])); } } diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h index 9915850dd4db..739afc7d8fc7 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h @@ -44,6 +44,7 @@ #define IDR1_TABLES_PRESET (1 << 30) #define IDR1_QUEUES_PRESET (1 << 29) 
#define IDR1_REL (1 << 28) +#define IDR1_ATTR_TYPES_OVR (1 << 27) #define IDR1_CMDQS GENMASK(25, 21) #define IDR1_EVTQS GENMASK(20, 16) #define IDR1_PRIQS GENMASK(15, 11) @@ -206,6 +207,11 @@ #define STRTAB_L1_DESC_L2PTR_MASK GENMASK_ULL(51, 6) #define STRTAB_STE_DWORDS 8 + +struct arm_smmu_ste { + __le64 data[STRTAB_STE_DWORDS]; +}; + #define STRTAB_STE_0_V (1UL << 0) #define STRTAB_STE_0_CFG GENMASK_ULL(3, 1) #define STRTAB_STE_0_CFG_ABORT 0 @@ -269,14 +275,18 @@ * 2lvl: at most 1024 L1 entries, * 1024 lazy entries per table. */ -#define CTXDESC_SPLIT 10 -#define CTXDESC_L2_ENTRIES (1 << CTXDESC_SPLIT) +#define CTXDESC_L2_ENTRIES 1024 #define CTXDESC_L1_DESC_DWORDS 1 #define CTXDESC_L1_DESC_V (1UL << 0) #define CTXDESC_L1_DESC_L2PTR_MASK GENMASK_ULL(51, 12) #define CTXDESC_CD_DWORDS 8 + +struct arm_smmu_cd { + __le64 data[CTXDESC_CD_DWORDS]; +}; + #define CTXDESC_CD_0_TCR_T0SZ GENMASK_ULL(5, 0) #define CTXDESC_CD_0_TCR_TG0 GENMASK_ULL(7, 6) #define CTXDESC_CD_0_TCR_IRGN0 GENMASK_ULL(9, 8) @@ -571,22 +581,16 @@ struct arm_smmu_priq { struct arm_smmu_strtab_l1_desc { u8 span; - __le64 *l2ptr; + struct arm_smmu_ste *l2ptr; dma_addr_t l2ptr_dma; }; struct arm_smmu_ctx_desc { u16 asid; - u64 ttbr; - u64 tcr; - u64 mair; - - refcount_t refs; - struct mm_struct *mm; }; struct arm_smmu_l1_ctx_desc { - __le64 *l2ptr; + struct arm_smmu_cd *l2ptr; dma_addr_t l2ptr_dma; }; @@ -595,19 +599,21 @@ struct arm_smmu_ctx_desc_cfg { dma_addr_t cdtab_dma; struct arm_smmu_l1_ctx_desc *l1_desc; unsigned int num_l1_ents; -}; - -struct arm_smmu_s1_cfg { - struct arm_smmu_ctx_desc_cfg cdcfg; - struct arm_smmu_ctx_desc cd; + unsigned int used_ssids; + u8 in_ste; u8 s1fmt; + /* log2 of the maximum number of CDs supported by this table */ u8 s1cdmax; }; +/* True if the cd table has SSIDS > 0 in use. 
*/ +static inline bool arm_smmu_ssids_in_use(struct arm_smmu_ctx_desc_cfg *cd_table) +{ + return cd_table->used_ssids; +} + struct arm_smmu_s2_cfg { u16 vmid; - u64 vttbr; - u64 vtcr; }; struct arm_smmu_strtab_cfg { @@ -646,6 +652,7 @@ struct arm_smmu_device { #define ARM_SMMU_FEAT_SVA (1 << 17) #define ARM_SMMU_FEAT_E2H (1 << 18) #define ARM_SMMU_FEAT_NESTING (1 << 19) +#define ARM_SMMU_FEAT_ATTR_TYPES_OVR (1 << 20) u32 features; #define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0) @@ -694,15 +701,15 @@ struct arm_smmu_stream { struct arm_smmu_master { struct arm_smmu_device *smmu; struct device *dev; - struct arm_smmu_domain *domain; - struct list_head domain_head; struct arm_smmu_stream *streams; + /* Locked by the iommu core using the group mutex */ + struct arm_smmu_ctx_desc_cfg cd_table; unsigned int num_streams; - bool ats_enabled; + bool ats_enabled : 1; + bool ste_ats_enabled : 1; bool stall_enabled; bool sva_enabled; bool iopf_enabled; - struct list_head bonds; unsigned int ssid_bits; }; @@ -711,7 +718,6 @@ enum arm_smmu_domain_stage { ARM_SMMU_DOMAIN_S1 = 0, ARM_SMMU_DOMAIN_S2, ARM_SMMU_DOMAIN_NESTED, - ARM_SMMU_DOMAIN_BYPASS, }; struct arm_smmu_domain { @@ -719,21 +725,59 @@ struct arm_smmu_domain { struct mutex init_mutex; /* Protects smmu pointer */ struct io_pgtable_ops *pgtbl_ops; - bool stall_enabled; atomic_t nr_ats_masters; enum arm_smmu_domain_stage stage; union { - struct arm_smmu_s1_cfg s1_cfg; - struct arm_smmu_s2_cfg s2_cfg; + struct arm_smmu_ctx_desc cd; + struct arm_smmu_s2_cfg s2_cfg; }; struct iommu_domain domain; + /* List of struct arm_smmu_master_domain */ struct list_head devices; spinlock_t devices_lock; - struct list_head mmu_notifiers; + struct mmu_notifier mmu_notifier; +}; + +/* The following are exposed for testing purposes. 
*/ +struct arm_smmu_entry_writer_ops; +struct arm_smmu_entry_writer { + const struct arm_smmu_entry_writer_ops *ops; + struct arm_smmu_master *master; +}; + +struct arm_smmu_entry_writer_ops { + void (*get_used)(const __le64 *entry, __le64 *used); + void (*sync)(struct arm_smmu_entry_writer *writer); +}; + +#if IS_ENABLED(CONFIG_KUNIT) +void arm_smmu_get_ste_used(const __le64 *ent, __le64 *used_bits); +void arm_smmu_write_entry(struct arm_smmu_entry_writer *writer, __le64 *cur, + const __le64 *target); +void arm_smmu_get_cd_used(const __le64 *ent, __le64 *used_bits); +void arm_smmu_make_abort_ste(struct arm_smmu_ste *target); +void arm_smmu_make_bypass_ste(struct arm_smmu_device *smmu, + struct arm_smmu_ste *target); +void arm_smmu_make_cdtable_ste(struct arm_smmu_ste *target, + struct arm_smmu_master *master, bool ats_enabled, + unsigned int s1dss); +void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target, + struct arm_smmu_master *master, + struct arm_smmu_domain *smmu_domain, + bool ats_enabled); +void arm_smmu_make_sva_cd(struct arm_smmu_cd *target, + struct arm_smmu_master *master, struct mm_struct *mm, + u16 asid); +#endif + +struct arm_smmu_master_domain { + struct list_head devices_elm; + struct arm_smmu_master *master; + ioasid_t ssid; }; static inline struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom) @@ -743,16 +787,28 @@ static inline struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom) extern struct xarray arm_smmu_asid_xa; extern struct mutex arm_smmu_asid_lock; -extern struct arm_smmu_ctx_desc quiet_cd; -int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid, - struct arm_smmu_ctx_desc *cd); +struct arm_smmu_domain *arm_smmu_domain_alloc(void); + +void arm_smmu_clear_cd(struct arm_smmu_master *master, ioasid_t ssid); +struct arm_smmu_cd *arm_smmu_get_cd_ptr(struct arm_smmu_master *master, + u32 ssid); +void arm_smmu_make_s1_cd(struct arm_smmu_cd *target, + struct arm_smmu_master *master, + struct arm_smmu_domain *smmu_domain); +void arm_smmu_write_cd_entry(struct arm_smmu_master *master, int ssid, + struct arm_smmu_cd *cdptr, + const struct arm_smmu_cd *target); + +int arm_smmu_set_pasid(struct arm_smmu_master *master, + struct arm_smmu_domain *smmu_domain, ioasid_t pasid, + struct arm_smmu_cd *cd); + void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid); void arm_smmu_tlb_inv_range_asid(unsigned long iova, size_t size, int asid, size_t granule, bool leaf, struct arm_smmu_domain *smmu_domain); -bool arm_smmu_free_asid(struct arm_smmu_ctx_desc *cd); -int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid, +int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, unsigned long iova, size_t size); #ifdef CONFIG_ARM_SMMU_V3_SVA @@ -763,9 +819,8 @@ int arm_smmu_master_enable_sva(struct arm_smmu_master *master); int arm_smmu_master_disable_sva(struct arm_smmu_master *master); bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master); void arm_smmu_sva_notifier_synchronize(void); -struct iommu_domain *arm_smmu_sva_domain_alloc(void); -void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain, - struct device *dev, ioasid_t id); +struct iommu_domain *arm_smmu_sva_domain_alloc(struct device *dev, + struct mm_struct *mm); #else /* CONFIG_ARM_SMMU_V3_SVA */ static inline bool arm_smmu_sva_supported(struct arm_smmu_device *smmu) { @@ -799,10 +854,7 @@ static inline bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master static inline void 
arm_smmu_sva_notifier_synchronize(void) {} -static inline struct iommu_domain *arm_smmu_sva_domain_alloc(void) -{ - return NULL; -} +#define arm_smmu_sva_domain_alloc NULL static inline void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain, struct device *dev, diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c index 42c5012ba8aa..4d09c0047892 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c @@ -1116,11 +1116,6 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) struct arm_smmu_device *smmu; int ret; - if (!fwspec || fwspec->ops != &arm_smmu_ops) { - dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n"); - return -ENXIO; - } - /* * FIXME: The arch/arm DMA API code tries to attach devices to its own * domains between of_xlate() and probe_device() - we have no way to cope @@ -1357,21 +1352,8 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev) fwspec = dev_iommu_fwspec_get(dev); if (ret) goto out_free; - } else if (fwspec && fwspec->ops == &arm_smmu_ops) { - smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode); - - /* - * Defer probe if the relevant SMMU instance hasn't finished - * probing yet. This is a fragile hack and we'd ideally - * avoid this race in the core code. Until that's ironed - * out, however, this is the most pragmatic option on the - * table. - */ - if (!smmu) - return ERR_PTR(dev_err_probe(dev, -EPROBE_DEFER, - "smmu dev has not bound yet\n")); } else { - return ERR_PTR(-ENODEV); + smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode); } ret = -EINVAL; @@ -2172,7 +2154,8 @@ static int arm_smmu_device_probe(struct platform_device *pdev) return err; } - err = iommu_device_register(&smmu->iommu, &arm_smmu_ops, dev); + err = iommu_device_register(&smmu->iommu, &arm_smmu_ops, + using_legacy_binding ? NULL : dev); if (err) { dev_err(dev, "Failed to register iommu\n"); iommu_device_sysfs_remove(&smmu->iommu); diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c index 775a3cbaff4e..33f3c870086c 100644 --- a/drivers/iommu/arm/arm-smmu/qcom_iommu.c +++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c @@ -79,16 +79,6 @@ static struct qcom_iommu_domain *to_qcom_iommu_domain(struct iommu_domain *dom) static const struct iommu_ops qcom_iommu_ops; -static struct qcom_iommu_dev * to_iommu(struct device *dev) -{ - struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); - - if (!fwspec || fwspec->ops != &qcom_iommu_ops) - return NULL; - - return dev_iommu_priv_get(dev); -} - static struct qcom_iommu_ctx * to_ctx(struct qcom_iommu_domain *d, unsigned asid) { struct qcom_iommu_dev *qcom_iommu = d->iommu; @@ -332,12 +322,10 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain, return ret; } -static struct iommu_domain *qcom_iommu_domain_alloc(unsigned type) +static struct iommu_domain *qcom_iommu_domain_alloc_paging(struct device *dev) { struct qcom_iommu_domain *qcom_domain; - if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA) - return NULL; /* * Allocate the domain and initialise some of its data structures. 
* We can't really do anything meaningful until we've added a @@ -374,7 +362,7 @@ static void qcom_iommu_domain_free(struct iommu_domain *domain) static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev) { - struct qcom_iommu_dev *qcom_iommu = to_iommu(dev); + struct qcom_iommu_dev *qcom_iommu = dev_iommu_priv_get(dev); struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain); int ret; @@ -400,6 +388,44 @@ static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev return 0; } +static int qcom_iommu_identity_attach(struct iommu_domain *identity_domain, + struct device *dev) +{ + struct iommu_domain *domain = iommu_get_domain_for_dev(dev); + struct qcom_iommu_domain *qcom_domain; + struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); + struct qcom_iommu_dev *qcom_iommu = dev_iommu_priv_get(dev); + unsigned int i; + + if (domain == identity_domain || !domain) + return 0; + + qcom_domain = to_qcom_iommu_domain(domain); + if (WARN_ON(!qcom_domain->iommu)) + return -EINVAL; + + pm_runtime_get_sync(qcom_iommu->dev); + for (i = 0; i < fwspec->num_ids; i++) { + struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]); + + /* Disable the context bank: */ + iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0); + + ctx->domain = NULL; + } + pm_runtime_put_sync(qcom_iommu->dev); + return 0; +} + +static struct iommu_domain_ops qcom_iommu_identity_ops = { + .attach_dev = qcom_iommu_identity_attach, +}; + +static struct iommu_domain qcom_iommu_identity_domain = { + .type = IOMMU_DOMAIN_IDENTITY, + .ops = &qcom_iommu_identity_ops, +}; + static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t pgsize, size_t pgcount, int prot, gfp_t gfp, size_t *mapped) @@ -499,7 +525,7 @@ static bool qcom_iommu_capable(struct device *dev, enum iommu_cap cap) static struct iommu_device *qcom_iommu_probe_device(struct device *dev) { - struct qcom_iommu_dev *qcom_iommu = to_iommu(dev); + struct qcom_iommu_dev *qcom_iommu = dev_iommu_priv_get(dev); struct device_link *link; if (!qcom_iommu) @@ -565,8 +591,9 @@ static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args) } static const struct iommu_ops qcom_iommu_ops = { + .identity_domain = &qcom_iommu_identity_domain, .capable = qcom_iommu_capable, - .domain_alloc = qcom_iommu_domain_alloc, + .domain_alloc_paging = qcom_iommu_domain_alloc_paging, .probe_device = qcom_iommu_probe_device, .device_group = generic_device_group, .of_xlate = qcom_iommu_of_xlate, diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index c275fe71c4db..10c13ac413e3 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c @@ -24,6 +24,7 @@ typedef u32 sysmmu_iova_t; typedef u32 sysmmu_pte_t; +static struct iommu_domain exynos_identity_domain; /* We do not consider super section mapping (16MB) */ #define SECT_ORDER 20 @@ -886,7 +887,7 @@ static inline void exynos_iommu_set_pte(sysmmu_pte_t *ent, sysmmu_pte_t val) DMA_TO_DEVICE); } -static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type) +static struct iommu_domain *exynos_iommu_domain_alloc_paging(struct device *dev) { struct exynos_iommu_domain *domain; dma_addr_t handle; @@ -895,9 +896,6 @@ static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type) /* Check if correct PTE offsets are initialized */ BUG_ON(PG_ENT_SHIFT < 0 || !dma_dev); - if (type != IOMMU_DOMAIN_DMA && type != IOMMU_DOMAIN_UNMANAGED) - return NULL; - domain = kzalloc(sizeof(*domain), GFP_KERNEL); if 
(!domain) return NULL; @@ -980,17 +978,20 @@ static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain) kfree(domain); } -static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain, - struct device *dev) +static int exynos_iommu_identity_attach(struct iommu_domain *identity_domain, + struct device *dev) { - struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev); - phys_addr_t pagetable = virt_to_phys(domain->pgtable); + struct exynos_iommu_domain *domain; + phys_addr_t pagetable; struct sysmmu_drvdata *data, *next; unsigned long flags; - if (!has_sysmmu(dev) || owner->domain != iommu_domain) - return; + if (owner->domain == identity_domain) + return 0; + + domain = to_exynos_domain(owner->domain); + pagetable = virt_to_phys(domain->pgtable); mutex_lock(&owner->rpm_lock); @@ -1009,15 +1010,25 @@ static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain, list_del_init(&data->domain_node); spin_unlock(&data->lock); } - owner->domain = NULL; + owner->domain = identity_domain; spin_unlock_irqrestore(&domain->lock, flags); mutex_unlock(&owner->rpm_lock); - dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n", __func__, - &pagetable); + dev_dbg(dev, "%s: Restored IOMMU to IDENTITY from pgtable %pa\n", + __func__, &pagetable); + return 0; } +static struct iommu_domain_ops exynos_identity_ops = { + .attach_dev = exynos_iommu_identity_attach, +}; + +static struct iommu_domain exynos_identity_domain = { + .type = IOMMU_DOMAIN_IDENTITY, + .ops = &exynos_identity_ops, +}; + static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain, struct device *dev) { @@ -1026,12 +1037,11 @@ static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain, struct sysmmu_drvdata *data; phys_addr_t pagetable = virt_to_phys(domain->pgtable); unsigned long flags; + int err; - if (!has_sysmmu(dev)) - return -ENODEV; - - if (owner->domain) - exynos_iommu_detach_device(owner->domain, dev); + err = exynos_iommu_identity_attach(&exynos_identity_domain, dev); + if (err) + return err; mutex_lock(&owner->rpm_lock); @@ -1407,26 +1417,12 @@ static struct iommu_device *exynos_iommu_probe_device(struct device *dev) return &data->iommu; } -static void exynos_iommu_set_platform_dma(struct device *dev) -{ - struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev); - - if (owner->domain) { - struct iommu_group *group = iommu_group_get(dev); - - if (group) { - exynos_iommu_detach_device(owner->domain, dev); - iommu_group_put(group); - } - } -} - static void exynos_iommu_release_device(struct device *dev) { struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev); struct sysmmu_drvdata *data; - exynos_iommu_set_platform_dma(dev); + WARN_ON(exynos_iommu_identity_attach(&exynos_identity_domain, dev)); list_for_each_entry(data, &owner->controllers, owner_node) device_link_del(data->link); @@ -1457,6 +1453,7 @@ static int exynos_iommu_of_xlate(struct device *dev, INIT_LIST_HEAD(&owner->controllers); mutex_init(&owner->rpm_lock); + owner->domain = &exynos_identity_domain; dev_iommu_priv_set(dev, owner); } @@ -1471,11 +1468,9 @@ static int exynos_iommu_of_xlate(struct device *dev, } static const struct iommu_ops exynos_iommu_ops = { - .domain_alloc = exynos_iommu_domain_alloc, + .identity_domain = &exynos_identity_domain, + .domain_alloc_paging = exynos_iommu_domain_alloc_paging, .device_group = generic_device_group, -#ifdef CONFIG_ARM - .set_platform_dma_ops = exynos_iommu_set_platform_dma, -#endif 
.probe_device = exynos_iommu_probe_device, .release_device = exynos_iommu_release_device, .pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE, diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c index 4ac0e247ec2b..e9d2bff4659b 100644 --- a/drivers/iommu/fsl_pamu_domain.c +++ b/drivers/iommu/fsl_pamu_domain.c @@ -196,6 +196,13 @@ static struct iommu_domain *fsl_pamu_domain_alloc(unsigned type) { struct fsl_dma_domain *dma_domain; + /* + * FIXME: This isn't creating an unmanaged domain since the + * default_domain_ops do not have any map/unmap functions, so it doesn't meet + * the requirements for __IOMMU_DOMAIN_PAGING. The only purpose seems to be to + * allow drivers/soc/fsl/qbman/qman_portal.c to call + * fsl_pamu_configure_l1_stash() + */ if (type != IOMMU_DOMAIN_UNMANAGED) return NULL; @@ -283,15 +290,33 @@ static int fsl_pamu_attach_device(struct iommu_domain *domain, return ret; } -static void fsl_pamu_set_platform_dma(struct device *dev) +/* + * FIXME: fsl/pamu is completely broken in terms of how it works with the iommu + * API. Immediately after probe the HW is left in an IDENTITY translation and + * the driver provides a non-working UNMANAGED domain that it can switch over + * to. However, it cannot switch back to an IDENTITY translation; instead it + * switches to what looks like BLOCKING. + */ +static int fsl_pamu_platform_attach(struct iommu_domain *platform_domain, + struct device *dev) { struct iommu_domain *domain = iommu_get_domain_for_dev(dev); - struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); + struct fsl_dma_domain *dma_domain; const u32 *prop; int len; struct pci_dev *pdev = NULL; struct pci_controller *pci_ctl; + /* + * Hack to keep things working as they always have: only leaving an + * UNMANAGED domain makes it BLOCKING. + */ + if (domain == platform_domain || !domain || + domain->type != IOMMU_DOMAIN_UNMANAGED) + return 0; + + dma_domain = to_fsl_dma_domain(domain); + /* * Use LIODN of the PCI controller while detaching a * PCI device.
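/*
 * [Editorial aside -- illustrative sketch, not part of the patch.] The
 * conversions in this series (spapr_tce, fsl_pamu, qcom_iommu, exynos,
 * arm-smmu-v3) all follow the same pattern: instead of a
 * set_platform_dma_ops() callback or a runtime-allocated special domain,
 * the driver declares a statically allocated domain whose only op is
 * attach_dev and points the matching iommu_ops field (default_domain,
 * identity_domain or blocked_domain) at it. A minimal sketch of that
 * pattern follows; the foo_* names are hypothetical.
 */
static int foo_platform_attach(struct iommu_domain *platform_domain,
			       struct device *dev)
{
	/* Put the device back into the platform's default translation */
	return 0;
}

static const struct iommu_domain_ops foo_platform_domain_ops = {
	.attach_dev = foo_platform_attach,
};

static struct iommu_domain foo_platform_domain = {
	.type = IOMMU_DOMAIN_PLATFORM,
	.ops = &foo_platform_domain_ops,
};

static const struct iommu_ops foo_iommu_ops = {
	.default_domain = &foo_platform_domain,
	/* ... remaining ops as usual ... */
};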
@@ -312,8 +337,18 @@ static void fsl_pamu_set_platform_dma(struct device *dev) detach_device(dev, dma_domain); else pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node); + return 0; } +static struct iommu_domain_ops fsl_pamu_platform_ops = { + .attach_dev = fsl_pamu_platform_attach, +}; + +static struct iommu_domain fsl_pamu_platform_domain = { + .type = IOMMU_DOMAIN_PLATFORM, + .ops = &fsl_pamu_platform_ops, +}; + /* Set the domain stash attribute */ int fsl_pamu_configure_l1_stash(struct iommu_domain *domain, u32 cpu) { @@ -395,11 +430,11 @@ static struct iommu_device *fsl_pamu_probe_device(struct device *dev) } static const struct iommu_ops fsl_pamu_ops = { + .default_domain = &fsl_pamu_platform_domain, .capable = fsl_pamu_capable, .domain_alloc = fsl_pamu_domain_alloc, .probe_device = fsl_pamu_probe_device, .device_group = fsl_pamu_device_group, - .set_platform_dma_ops = fsl_pamu_set_platform_dma, .default_domain_ops = &(const struct iommu_domain_ops) { .attach_dev = fsl_pamu_attach_device, .iova_to_phys = fsl_pamu_iova_to_phys, diff --git a/drivers/iommu/intel/Kconfig b/drivers/iommu/intel/Kconfig index 2e56bd79f589..f6f46b1b3dad 100644 --- a/drivers/iommu/intel/Kconfig +++ b/drivers/iommu/intel/Kconfig @@ -15,6 +15,7 @@ config INTEL_IOMMU select DMA_OPS select IOMMU_API select IOMMU_IOVA + select IOMMUFD_DRIVER if IOMMUFD select NEED_DMA_MAP_STATE select DMAR_TABLE select SWIOTLB @@ -50,6 +51,7 @@ config INTEL_IOMMU_SVM depends on X86_64 select MMU_NOTIFIER select IOMMU_SVA + select IOMMU_IOPF help Shared Virtual Memory (SVM) provides a facility for devices to access DMA resources through process address space by diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 6a745616d85a..96dc2b2afa36 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -27,7 +27,6 @@ #include "iommu.h" #include "../dma-iommu.h" #include "../irq_remapping.h" -#include "../iommu-sva.h" #include "pasid.h" #include "cap_audit.h" #include "perfmon.h" @@ -300,6 +299,7 @@ static int iommu_skip_te_disable; #define IDENTMAP_AZALIA 4 const struct iommu_ops intel_iommu_ops; +static const struct iommu_dirty_ops intel_dirty_ops; static bool translation_pre_enabled(struct intel_iommu *iommu) { @@ -4030,9 +4030,9 @@ static int blocking_domain_attach_dev(struct iommu_domain *domain, } static struct iommu_domain blocking_domain = { + .type = IOMMU_DOMAIN_BLOCKED, .ops = &(const struct iommu_domain_ops) { .attach_dev = blocking_domain_attach_dev, - .free = intel_iommu_domain_free } }; @@ -4042,8 +4042,6 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type) struct iommu_domain *domain; switch (type) { - case IOMMU_DOMAIN_BLOCKED: - return &blocking_domain; case IOMMU_DOMAIN_DMA: case IOMMU_DOMAIN_UNMANAGED: dmar_domain = alloc_domain(type); @@ -4075,9 +4073,56 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type) return NULL; } +static struct iommu_domain * +intel_iommu_domain_alloc_user(struct device *dev, u32 flags, + struct iommu_domain *parent, + const struct iommu_user_data *user_data) +{ + struct iommu_domain *domain; + struct intel_iommu *iommu; + bool dirty_tracking; + + if (flags & + (~(IOMMU_HWPT_ALLOC_NEST_PARENT | IOMMU_HWPT_ALLOC_DIRTY_TRACKING))) + return ERR_PTR(-EOPNOTSUPP); + + if (parent || user_data) + return ERR_PTR(-EOPNOTSUPP); + + iommu = device_to_iommu(dev, NULL, NULL); + if (!iommu) + return ERR_PTR(-ENODEV); + + if ((flags & IOMMU_HWPT_ALLOC_NEST_PARENT) && !nested_supported(iommu)) + return 
ERR_PTR(-EOPNOTSUPP); + + dirty_tracking = (flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING); + if (dirty_tracking && !ssads_supported(iommu)) + return ERR_PTR(-EOPNOTSUPP); + + /* + * domain_alloc_user op needs to fully initialize a domain + * before returning, so use iommu_domain_alloc() here for + * simplicity. + */ + domain = iommu_domain_alloc(dev->bus); + if (!domain) + domain = ERR_PTR(-ENOMEM); + + if (!IS_ERR(domain) && dirty_tracking) { + if (to_dmar_domain(domain)->use_first_level) { + iommu_domain_free(domain); + return ERR_PTR(-EOPNOTSUPP); + } + domain->dirty_ops = &intel_dirty_ops; + } + + return domain; +} + static void intel_iommu_domain_free(struct iommu_domain *domain) { - if (domain != &si_domain->domain && domain != &blocking_domain) + if (domain != &si_domain->domain) domain_exit(to_dmar_domain(domain)); } @@ -4095,6 +4140,9 @@ static int prepare_domain_attach_device(struct iommu_domain *domain, if (dmar_domain->force_snooping && !ecap_sc_support(iommu->ecap)) return -EINVAL; + if (domain->dirty_ops && !ssads_supported(iommu)) + return -EINVAL; + /* check if this iommu agaw is sufficient for max mapped address */ addr_width = agaw_to_width(iommu->agaw); if (addr_width > cap_mgaw(iommu->cap)) @@ -4350,6 +4398,8 @@ static bool intel_iommu_capable(struct device *dev, enum iommu_cap cap) return dmar_platform_optin(); case IOMMU_CAP_ENFORCE_CACHE_COHERENCY: return ecap_sc_support(info->iommu->ecap); + case IOMMU_CAP_DIRTY_TRACKING: + return ssads_supported(info->iommu); default: return false; } @@ -4577,23 +4627,15 @@ static int intel_iommu_enable_iopf(struct device *dev) if (ret) return ret; - ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev); - if (ret) - goto iopf_remove_device; - ret = pci_enable_pri(pdev, PRQ_DEPTH); - if (ret) - goto iopf_unregister_handler; + if (ret) { + iopf_queue_remove_device(iommu->iopf_queue, dev); + return ret; + } + info->pri_enabled = 1; return 0; - -iopf_unregister_handler: - iommu_unregister_device_fault_handler(dev); -iopf_remove_device: - iopf_queue_remove_device(iommu->iopf_queue, dev); - - return ret; } static int intel_iommu_disable_iopf(struct device *dev) @@ -4614,14 +4656,7 @@ static int intel_iommu_disable_iopf(struct device *dev) */ pci_disable_pri(to_pci_dev(dev)); info->pri_enabled = 0; - - /* - * With PRI disabled and outstanding PRQs drained, unregistering - * fault handler and removing device from iopf queue should never - * fail. - */ - WARN_ON(iommu_unregister_device_fault_handler(dev)); - WARN_ON(iopf_queue_remove_device(iommu->iopf_queue, dev)); + iopf_queue_remove_device(iommu->iopf_queue, dev); return 0; } @@ -4693,18 +4728,14 @@ static void intel_iommu_iotlb_sync_map(struct iommu_domain *domain, __mapping_notify_one(info->iommu, dmar_domain, pfn, pages); } -static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid) +static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid, + struct iommu_domain *domain) { struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL); + struct dmar_domain *dmar_domain = to_dmar_domain(domain); struct dev_pasid_info *curr, *dev_pasid = NULL; - struct dmar_domain *dmar_domain; - struct iommu_domain *domain; unsigned long flags; - domain = iommu_get_domain_for_dev_pasid(dev, pasid, 0); - if (WARN_ON_ONCE(!domain)) - goto out_tear_down; - /* * The SVA implementation needs to handle its own stuff like the mm * notification.
Before consolidating that code into iommu core, let @@ -4715,7 +4746,6 @@ static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid) goto out_tear_down; } - dmar_domain = to_dmar_domain(domain); spin_lock_irqsave(&dmar_domain->lock, flags); list_for_each_entry(curr, &dmar_domain->dev_pasids, link_domain) { if (curr->dev == dev && curr->pasid == pasid) { @@ -4747,6 +4777,9 @@ static int intel_iommu_set_dev_pasid(struct iommu_domain *domain, if (!pasid_supported(iommu) || dev_is_real_dma_subdevice(dev)) return -EOPNOTSUPP; + if (domain->dirty_ops) + return -EINVAL; + if (context_copied(iommu, info->bus, info->devfn)) return -EBUSY; @@ -4805,10 +4838,89 @@ static void *intel_iommu_hw_info(struct device *dev, u32 *length, u32 *type) return vtd; } +static int intel_iommu_set_dirty_tracking(struct iommu_domain *domain, + bool enable) +{ + struct dmar_domain *dmar_domain = to_dmar_domain(domain); + struct device_domain_info *info; + int ret; + + spin_lock(&dmar_domain->lock); + if (dmar_domain->dirty_tracking == enable) + goto out_unlock; + + list_for_each_entry(info, &dmar_domain->devices, link) { + ret = intel_pasid_setup_dirty_tracking(info->iommu, + info->domain, info->dev, + IOMMU_NO_PASID, enable); + if (ret) + goto err_unwind; + } + + dmar_domain->dirty_tracking = enable; +out_unlock: + spin_unlock(&dmar_domain->lock); + + return 0; + +err_unwind: + list_for_each_entry(info, &dmar_domain->devices, link) + intel_pasid_setup_dirty_tracking(info->iommu, dmar_domain, + info->dev, IOMMU_NO_PASID, + dmar_domain->dirty_tracking); + spin_unlock(&dmar_domain->lock); + return ret; +} + +static int intel_iommu_read_and_clear_dirty(struct iommu_domain *domain, + unsigned long iova, size_t size, + unsigned long flags, + struct iommu_dirty_bitmap *dirty) +{ + struct dmar_domain *dmar_domain = to_dmar_domain(domain); + unsigned long end = iova + size - 1; + unsigned long pgsize; + + /* + * IOMMUFD core calls into a dirty tracking disabled domain without an + * IOVA bitmap set in order to clean dirty bits in all PTEs that might + * have occurred when we stopped dirty tracking. This ensures that we + * never inherit dirtied bits from a previous cycle. 
+ */ + if (!dmar_domain->dirty_tracking && dirty->bitmap) + return -EINVAL; + + do { + struct dma_pte *pte; + int lvl = 0; + + pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &lvl, + GFP_ATOMIC); + pgsize = level_size(lvl) << VTD_PAGE_SHIFT; + if (!pte || !dma_pte_present(pte)) { + iova += pgsize; + continue; + } + + if (dma_sl_pte_test_and_clear_dirty(pte, flags)) + iommu_dirty_bitmap_record(dirty, iova, pgsize); + iova += pgsize; + } while (iova < end); + + return 0; +} + +static const struct iommu_dirty_ops intel_dirty_ops = { + .set_dirty_tracking = intel_iommu_set_dirty_tracking, + .read_and_clear_dirty = intel_iommu_read_and_clear_dirty, +}; + const struct iommu_ops intel_iommu_ops = { + .blocked_domain = &blocking_domain, .capable = intel_iommu_capable, .hw_info = intel_iommu_hw_info, .domain_alloc = intel_iommu_domain_alloc, + .domain_alloc_user = intel_iommu_domain_alloc_user, .probe_device = intel_iommu_probe_device, .probe_finalize = intel_iommu_probe_finalize, .release_device = intel_iommu_release_device, diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h index e6a3e7065616..32fde11bf92f 100644 --- a/drivers/iommu/intel/iommu.h +++ b/drivers/iommu/intel/iommu.h @@ -48,6 +48,9 @@ #define DMA_FL_PTE_DIRTY BIT_ULL(6) #define DMA_FL_PTE_XD BIT_ULL(63) +#define DMA_SL_PTE_DIRTY_BIT 9 +#define DMA_SL_PTE_DIRTY BIT_ULL(DMA_SL_PTE_DIRTY_BIT) + #define ADDR_WIDTH_5LEVEL (57) #define ADDR_WIDTH_4LEVEL (48) @@ -539,6 +542,10 @@ enum { #define sm_supported(iommu) (intel_iommu_sm && ecap_smts((iommu)->ecap)) #define pasid_supported(iommu) (sm_supported(iommu) && \ ecap_pasid((iommu)->ecap)) +#define ssads_supported(iommu) (sm_supported(iommu) && \ + ecap_slads((iommu)->ecap)) +#define nested_supported(iommu) (sm_supported(iommu) && \ + ecap_nest((iommu)->ecap)) struct pasid_entry; struct pasid_state_entry; @@ -595,6 +602,7 @@ struct dmar_domain { u8 has_mappings:1; /* Has mappings configured through * iommu_map() interface. 
*/ + u8 dirty_tracking:1; /* Dirty tracking is enabled */ spinlock_t lock; /* Protect device tracking lists */ struct list_head devices; /* all devices' list */ @@ -784,6 +792,16 @@ static inline bool dma_pte_present(struct dma_pte *pte) return (pte->val & 3) != 0; } +static inline bool dma_sl_pte_test_and_clear_dirty(struct dma_pte *pte, + unsigned long flags) +{ + if (flags & IOMMU_DIRTY_NO_CLEAR) + return (pte->val & DMA_SL_PTE_DIRTY) != 0; + + return test_and_clear_bit(DMA_SL_PTE_DIRTY_BIT, + (unsigned long *)&pte->val); +} + static inline bool dma_pte_superpage(struct dma_pte *pte) { return (pte->val & DMA_PTE_LARGE_PAGE); @@ -850,8 +868,8 @@ struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn); void intel_svm_check(struct intel_iommu *iommu); int intel_svm_enable_prq(struct intel_iommu *iommu); int intel_svm_finish_prq(struct intel_iommu *iommu); -int intel_svm_page_response(struct device *dev, struct iommu_fault_event *evt, - struct iommu_page_response *msg); +void intel_svm_page_response(struct device *dev, struct iopf_fault *evt, + struct iommu_page_response *msg); struct iommu_domain *intel_svm_domain_alloc(void); void intel_svm_remove_dev_pasid(struct device *dev, ioasid_t pasid); void intel_drain_pasid_prq(struct device *dev, u32 pasid); diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c index 8faa93cffac4..06ea2dd53542 100644 --- a/drivers/iommu/intel/pasid.c +++ b/drivers/iommu/intel/pasid.c @@ -277,6 +277,11 @@ static inline void pasid_set_bits(u64 *ptr, u64 mask, u64 bits) WRITE_ONCE(*ptr, (old & ~mask) | bits); } +static inline u64 pasid_get_bits(u64 *ptr) +{ + return READ_ONCE(*ptr); +} + /* * Setup the DID(Domain Identifier) field (Bit 64~79) of scalable mode * PASID entry. @@ -335,6 +340,36 @@ static inline void pasid_set_fault_enable(struct pasid_entry *pe) pasid_set_bits(&pe->val[0], 1 << 1, 0); } +/* + * Enable second level A/D bits by setting the SLADE (Second Level + * Access Dirty Enable) field (Bit 9) of a scalable mode PASID + * entry. + */ +static inline void pasid_set_ssade(struct pasid_entry *pe) +{ + pasid_set_bits(&pe->val[0], 1 << 9, 1 << 9); +} + +/* + * Disable second level A/D bits by clearing the SLADE (Second Level + * Access Dirty Enable) field (Bit 9) of a scalable mode PASID + * entry. + */ +static inline void pasid_clear_ssade(struct pasid_entry *pe) +{ + pasid_set_bits(&pe->val[0], 1 << 9, 0); +} + +/* + * Checks if second level A/D bits specifically the SLADE (Second Level + * Access Dirty Enable) field (Bit 9) of a scalable mode PASID + * entry is set. + */ +static inline bool pasid_get_ssade(struct pasid_entry *pe) +{ + return pasid_get_bits(&pe->val[0]) & (1 << 9); +} + /* * Setup the WPE(Write Protect Enable) field (Bit 132) of a * scalable mode PASID entry. @@ -630,6 +665,8 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu, pasid_set_translation_type(pte, PASID_ENTRY_PGTT_SL_ONLY); pasid_set_fault_enable(pte); pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap)); + if (domain->dirty_tracking) + pasid_set_ssade(pte); pasid_set_present(pte); spin_unlock(&iommu->lock); @@ -639,6 +676,78 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu, return 0; } +/* + * Set up dirty tracking on a second only or nested translation type. 
+ */ +int intel_pasid_setup_dirty_tracking(struct intel_iommu *iommu, + struct dmar_domain *domain, + struct device *dev, u32 pasid, + bool enabled) +{ + struct pasid_entry *pte; + u16 did, pgtt; + + spin_lock(&iommu->lock); + + pte = intel_pasid_get_entry(dev, pasid); + if (!pte) { + spin_unlock(&iommu->lock); + dev_err_ratelimited( + dev, "Failed to get pasid entry of PASID %d\n", pasid); + return -ENODEV; + } + + did = domain_id_iommu(domain, iommu); + pgtt = pasid_pte_get_pgtt(pte); + if (pgtt != PASID_ENTRY_PGTT_SL_ONLY && + pgtt != PASID_ENTRY_PGTT_NESTED) { + spin_unlock(&iommu->lock); + dev_err_ratelimited( + dev, + "Dirty tracking not supported on translation type %d\n", + pgtt); + return -EOPNOTSUPP; + } + + if (pasid_get_ssade(pte) == enabled) { + spin_unlock(&iommu->lock); + return 0; + } + + if (enabled) + pasid_set_ssade(pte); + else + pasid_clear_ssade(pte); + spin_unlock(&iommu->lock); + + if (!ecap_coherent(iommu->ecap)) + clflush_cache_range(pte, sizeof(*pte)); + + /* + * From VT-d spec table 25 "Guidance to Software for Invalidations": + * + * - PASID-selective-within-Domain PASID-cache invalidation + * If (PGTT=SS or Nested) + * - Domain-selective IOTLB invalidation + * Else + * - PASID-selective PASID-based IOTLB invalidation + * - If (pasid is RID_PASID) + * - Global Device-TLB invalidation to affected functions + * Else + * - PASID-based Device-TLB invalidation (with S=1 and + * Addr[63:12]=0x7FFFFFFF_FFFFF) to affected functions + */ + pasid_cache_invalidation_with_pasid(iommu, did, pasid); + + iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); + + /* Device IOTLB doesn't need to be flushed in caching mode. */ + if (!cap_caching_mode(iommu->cap)) + devtlb_invalidation_with_pasid(iommu, dev, pasid); + + return 0; +} + /* * Set up the scalable mode pasid entry for passthrough translation type. 
*/ diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h index 4e9e68c3c388..958050b093aa 100644 --- a/drivers/iommu/intel/pasid.h +++ b/drivers/iommu/intel/pasid.h @@ -106,6 +106,10 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu, int intel_pasid_setup_second_level(struct intel_iommu *iommu, struct dmar_domain *domain, struct device *dev, u32 pasid); +int intel_pasid_setup_dirty_tracking(struct intel_iommu *iommu, + struct dmar_domain *domain, + struct device *dev, u32 pasid, + bool enabled); int intel_pasid_setup_pass_through(struct intel_iommu *iommu, struct dmar_domain *domain, struct device *dev, u32 pasid); diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c index 6010b93c514c..a7d19ccf8b8a 100644 --- a/drivers/iommu/intel/svm.c +++ b/drivers/iommu/intel/svm.c @@ -22,7 +22,6 @@ #include "iommu.h" #include "pasid.h" #include "perf.h" -#include "../iommu-sva.h" #include "trace.h" static irqreturn_t prq_event_thread(int irq, void *d); @@ -316,21 +315,22 @@ static int pasid_to_svm_sdev(struct device *dev, unsigned int pasid, } static int intel_svm_bind_mm(struct intel_iommu *iommu, struct device *dev, - struct mm_struct *mm) + struct iommu_domain *domain, ioasid_t pasid) { struct device_domain_info *info = dev_iommu_priv_get(dev); + struct mm_struct *mm = domain->mm; struct intel_svm_dev *sdev; struct intel_svm *svm; unsigned long sflags; int ret = 0; - svm = pasid_private_find(mm->pasid); + svm = pasid_private_find(pasid); if (!svm) { svm = kzalloc(sizeof(*svm), GFP_KERNEL); if (!svm) return -ENOMEM; - svm->pasid = mm->pasid; + svm->pasid = pasid; svm->mm = mm; INIT_LIST_HEAD_RCU(&svm->devs); @@ -368,7 +368,7 @@ static int intel_svm_bind_mm(struct intel_iommu *iommu, struct device *dev, /* Setup the pasid table: */ sflags = cpu_feature_enabled(X86_FEATURE_LA57) ? 
PASID_FLAG_FL5LP : 0; - ret = intel_pasid_setup_first_level(iommu, dev, mm->pgd, mm->pasid, + ret = intel_pasid_setup_first_level(iommu, dev, mm->pgd, pasid, FLPT_DEFAULT_DID, sflags); if (ret) goto free_sdev; @@ -382,7 +382,7 @@ static int intel_svm_bind_mm(struct intel_iommu *iommu, struct device *dev, free_svm: if (list_empty(&svm->devs)) { mmu_notifier_unregister(&svm->notifier, mm); - pasid_private_remove(mm->pasid); + pasid_private_remove(pasid); kfree(svm); } @@ -566,16 +566,12 @@ static int prq_to_iommu_prot(struct page_req_dsc *req) return prot; } -static int intel_svm_prq_report(struct intel_iommu *iommu, struct device *dev, - struct page_req_dsc *desc) +static void intel_svm_prq_report(struct intel_iommu *iommu, struct device *dev, + struct page_req_dsc *desc) { - struct iommu_fault_event event; - - if (!dev || !dev_is_pci(dev)) - return -ENODEV; + struct iopf_fault event = { }; /* Fill in event data for device specific processing */ - memset(&event, 0, sizeof(struct iommu_fault_event)); event.fault.type = IOMMU_FAULT_PAGE_REQ; event.fault.prm.addr = (u64)desc->addr << VTD_PAGE_SHIFT; event.fault.prm.pasid = desc->pasid; @@ -607,7 +603,7 @@ static int intel_svm_prq_report(struct intel_iommu *iommu, struct device *dev, event.fault.prm.private_data[0] = ktime_to_ns(ktime_get()); } - return iommu_report_device_fault(dev, &event); + iommu_report_device_fault(dev, &event); } static void handle_bad_prq_event(struct intel_iommu *iommu, @@ -710,12 +706,10 @@ static irqreturn_t prq_event_thread(int irq, void *d) if (!pdev) goto bad_req; - if (intel_svm_prq_report(iommu, &pdev->dev, req)) - handle_bad_prq_event(iommu, req, QI_RESP_INVALID); - else - trace_prq_report(iommu, &pdev->dev, req->qw_0, req->qw_1, - req->priv_data[0], req->priv_data[1], - iommu->prq_seq_number++); + intel_svm_prq_report(iommu, &pdev->dev, req); + trace_prq_report(iommu, &pdev->dev, req->qw_0, req->qw_1, + req->priv_data[0], req->priv_data[1], + iommu->prq_seq_number++); pci_dev_put(pdev); prq_advance: head = (head + sizeof(*req)) & PRQ_RING_MASK; @@ -746,9 +740,8 @@ static irqreturn_t prq_event_thread(int irq, void *d) return IRQ_RETVAL(handled); } -int intel_svm_page_response(struct device *dev, - struct iommu_fault_event *evt, - struct iommu_page_response *msg) +void intel_svm_page_response(struct device *dev, struct iopf_fault *evt, + struct iommu_page_response *msg) { struct iommu_fault_page_request *prm; struct intel_iommu *iommu; @@ -775,16 +768,6 @@ int intel_svm_page_response(struct device *dev, private_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA; last_page = prm->flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE; - if (!pasid_present) { - ret = -EINVAL; - goto out; - } - - if (prm->pasid == 0 || prm->pasid >= PASID_MAX) { - ret = -EINVAL; - goto out; - } - /* * Per VT-d spec. 
v3.0 ch7.7, system software must respond * with page group response if private data is present (PDP) @@ -813,8 +796,6 @@ int intel_svm_page_response(struct device *dev, qi_submit_sync(iommu, &desc, 1, 0); } -out: - return ret; } static int intel_svm_set_dev_pasid(struct iommu_domain *domain, @@ -822,9 +803,8 @@ static int intel_svm_set_dev_pasid(struct iommu_domain *domain, { struct device_domain_info *info = dev_iommu_priv_get(dev); struct intel_iommu *iommu = info->iommu; - struct mm_struct *mm = domain->mm; - return intel_svm_bind_mm(iommu, dev, mm); + return intel_svm_bind_mm(iommu, dev, domain, pasid); } static void intel_svm_domain_free(struct iommu_domain *domain) diff --git a/drivers/iommu/io-pgfault.c b/drivers/iommu/io-pgfault.c index e5b8b9110c13..8b5926c1452e 100644 --- a/drivers/iommu/io-pgfault.c +++ b/drivers/iommu/io-pgfault.c @@ -11,101 +11,171 @@ #include #include -#include "iommu-sva.h" +#include "iommu-priv.h" -/** - * struct iopf_queue - IO Page Fault queue - * @wq: the fault workqueue - * @devices: devices attached to this queue - * @lock: protects the device list - */ -struct iopf_queue { - struct workqueue_struct *wq; - struct list_head devices; - struct mutex lock; -}; - -/** - * struct iopf_device_param - IO Page Fault data attached to a device - * @dev: the device that owns this param - * @queue: IOPF queue - * @queue_list: index into queue->devices - * @partial: faults that are part of a Page Request Group for which the last - * request hasn't been submitted yet. +/* + * Return the fault parameter of a device if it exists. Otherwise, return NULL. + * On a successful return, the caller takes a reference of this parameter and + * should put it after use by calling iopf_put_dev_fault_param(). */ -struct iopf_device_param { - struct device *dev; - struct iopf_queue *queue; - struct list_head queue_list; - struct list_head partial; -}; - -struct iopf_fault { - struct iommu_fault fault; - struct list_head list; -}; - -struct iopf_group { - struct iopf_fault last_fault; - struct list_head faults; - struct work_struct work; - struct device *dev; -}; - -static int iopf_complete_group(struct device *dev, struct iopf_fault *iopf, - enum iommu_page_response_code status) +static struct iommu_fault_param *iopf_get_dev_fault_param(struct device *dev) { - struct iommu_page_response resp = { - .version = IOMMU_PAGE_RESP_VERSION_1, - .pasid = iopf->fault.prm.pasid, - .grpid = iopf->fault.prm.grpid, - .code = status, - }; + struct dev_iommu *param = dev->iommu; + struct iommu_fault_param *fault_param; - if ((iopf->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) && - (iopf->fault.prm.flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID)) - resp.flags = IOMMU_PAGE_RESP_PASID_VALID; + rcu_read_lock(); + fault_param = rcu_dereference(param->fault_param); + if (fault_param && !refcount_inc_not_zero(&fault_param->users)) + fault_param = NULL; + rcu_read_unlock(); - return iommu_page_response(dev, &resp); + return fault_param; } -static void iopf_handler(struct work_struct *work) +/* Caller must hold a reference of the fault parameter. 
*/ +static void iopf_put_dev_fault_param(struct iommu_fault_param *fault_param) { - struct iopf_group *group; - struct iommu_domain *domain; - struct iopf_fault *iopf, *next; - enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS; + if (refcount_dec_and_test(&fault_param->users)) + kfree_rcu(fault_param, rcu); +} - group = container_of(work, struct iopf_group, work); - domain = iommu_get_domain_for_dev_pasid(group->dev, - group->last_fault.fault.prm.pasid, 0); - if (!domain || !domain->iopf_handler) - status = IOMMU_PAGE_RESP_INVALID; +static void __iopf_free_group(struct iopf_group *group) +{ + struct iopf_fault *iopf, *next; list_for_each_entry_safe(iopf, next, &group->faults, list) { + if (!(iopf->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) + kfree(iopf); + } + + /* Pair with iommu_report_device_fault(). */ + iopf_put_dev_fault_param(group->fault_param); +} + +void iopf_free_group(struct iopf_group *group) +{ + __iopf_free_group(group); + kfree(group); +} +EXPORT_SYMBOL_GPL(iopf_free_group); + +/* Non-last request of a group. Postpone until the last one. */ +static int report_partial_fault(struct iommu_fault_param *fault_param, + struct iommu_fault *fault) +{ + struct iopf_fault *iopf; + + iopf = kzalloc(sizeof(*iopf), GFP_KERNEL); + if (!iopf) + return -ENOMEM; + + iopf->fault = *fault; + + mutex_lock(&fault_param->lock); + list_add(&iopf->list, &fault_param->partial); + mutex_unlock(&fault_param->lock); + + return 0; +} + +static struct iopf_group *iopf_group_alloc(struct iommu_fault_param *iopf_param, + struct iopf_fault *evt, + struct iopf_group *abort_group) +{ + struct iopf_fault *iopf, *next; + struct iopf_group *group; + + group = kzalloc(sizeof(*group), GFP_KERNEL); + if (!group) { /* - * For the moment, errors are sticky: don't handle subsequent - * faults in the group if there is an error. + * We always need to construct the group as we need it to abort + * the request at the driver if it can't be handled. */ - if (status == IOMMU_PAGE_RESP_SUCCESS) - status = domain->iopf_handler(&iopf->fault, - domain->fault_data); + group = abort_group; + } - if (!(iopf->fault.prm.flags & - IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) - kfree(iopf); + group->fault_param = iopf_param; + group->last_fault.fault = evt->fault; + INIT_LIST_HEAD(&group->faults); + INIT_LIST_HEAD(&group->pending_node); + list_add(&group->last_fault.list, &group->faults); + + /* See if we have partial faults for this group */ + mutex_lock(&iopf_param->lock); + list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) { + if (iopf->fault.prm.grpid == evt->fault.prm.grpid) + /* Insert *before* the last fault */ + list_move(&iopf->list, &group->faults); } + list_add(&group->pending_node, &iopf_param->faults); + mutex_unlock(&iopf_param->lock); - iopf_complete_group(group->dev, &group->last_fault, status); - kfree(group); + group->fault_count = list_count_nodes(&group->faults); + + return group; +} + +static struct iommu_attach_handle *find_fault_handler(struct device *dev, + struct iopf_fault *evt) +{ + struct iommu_fault *fault = &evt->fault; + struct iommu_attach_handle *attach_handle; + + if (fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) { + attach_handle = iommu_attach_handle_get(dev->iommu_group, + fault->prm.pasid, 0); + if (IS_ERR(attach_handle)) { + const struct iommu_ops *ops = dev_iommu_ops(dev); + + if (!ops->user_pasid_table) + return NULL; + /* + * The iommu driver for this device supports user- + * managed PASID table. 
Therefore page faults for + * any PASID should go through the NESTING domain + * attached to the device RID. + */ + attach_handle = iommu_attach_handle_get( + dev->iommu_group, IOMMU_NO_PASID, + IOMMU_DOMAIN_NESTED); + if (IS_ERR(attach_handle)) + return NULL; + } + } else { + attach_handle = iommu_attach_handle_get(dev->iommu_group, + IOMMU_NO_PASID, 0); + + if (IS_ERR(attach_handle)) + return NULL; + } + + if (!attach_handle->domain->iopf_handler) + return NULL; + + return attach_handle; +} + +static void iopf_error_response(struct device *dev, struct iopf_fault *evt) +{ + const struct iommu_ops *ops = dev_iommu_ops(dev); + struct iommu_fault *fault = &evt->fault; + struct iommu_page_response resp = { + .pasid = fault->prm.pasid, + .grpid = fault->prm.grpid, + .code = IOMMU_PAGE_RESP_INVALID + }; + + ops->page_response(dev, evt, &resp); } /** - * iommu_queue_iopf - IO Page Fault handler - * @fault: fault event - * @cookie: struct device, passed to iommu_register_device_fault_handler. + * iommu_report_device_fault() - Report fault event to device driver + * @dev: the device + * @evt: fault event data * - * Add a fault to the device workqueue, to be handled by mm. + * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ + * handler. If this function fails then ops->page_response() was called to + * complete evt if required. * * This module doesn't handle PCI PASID Stop Marker; IOMMU drivers must discard * them before reporting faults. A PASID Stop Marker (LRW = 0b100) doesn't @@ -136,84 +206,82 @@ static void iopf_handler(struct work_struct *work) * handling framework should guarantee that the iommu domain could only be * freed after the device has stopped generating page faults (or the iommu * hardware has been set to block the page faults) and the pending page faults - * have been flushed. + * have been flushed. In case no page fault handler is attached or no iopf params + * are setup, then the ops->page_response() is called to complete the evt. * - * Return: 0 on success and <0 on error. + * Returns 0 on success, or an error in case of a bad/failed iopf setup. */ -int iommu_queue_iopf(struct iommu_fault *fault, void *cookie) +int iommu_report_device_fault(struct device *dev, struct iopf_fault *evt) { - int ret; + struct iommu_attach_handle *attach_handle; + struct iommu_fault *fault = &evt->fault; + struct iommu_fault_param *iopf_param; + struct iopf_group abort_group = {}; struct iopf_group *group; - struct iopf_fault *iopf, *next; - struct iopf_device_param *iopf_param; - - struct device *dev = cookie; - struct dev_iommu *param = dev->iommu; - - lockdep_assert_held(¶m->lock); - if (fault->type != IOMMU_FAULT_PAGE_REQ) - /* Not a recoverable page fault */ - return -EOPNOTSUPP; + attach_handle = find_fault_handler(dev, evt); + if (!attach_handle) + goto err_bad_iopf; /* - * As long as we're holding param->lock, the queue can't be unlinked - * from the device and therefore cannot disappear. + * Something has gone wrong if a fault capable domain is attached but no + * iopf_param is setup */ - iopf_param = param->iopf_param; - if (!iopf_param) - return -ENODEV; + iopf_param = iopf_get_dev_fault_param(dev); + if (WARN_ON(!iopf_param)) + goto err_bad_iopf; if (!(fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) { - iopf = kzalloc(sizeof(*iopf), GFP_KERNEL); - if (!iopf) - return -ENOMEM; - - iopf->fault = *fault; + int ret; - /* Non-last request of a group. 
Postpone until the last one */ - list_add(&iopf->list, &iopf_param->partial); + ret = report_partial_fault(iopf_param, fault); + iopf_put_dev_fault_param(iopf_param); + /* A request that is not the last does not need to be ack'd */ - return 0; + return ret; } - group = kzalloc(sizeof(*group), GFP_KERNEL); - if (!group) { - /* - * The caller will send a response to the hardware. But we do - * need to clean up before leaving, otherwise partial faults - * will be stuck. - */ - ret = -ENOMEM; - goto cleanup_partial; - } + /* + * This is the last page fault of a group. Allocate an iopf group and + * pass it to domain's page fault handler. The group holds a reference + * count of the fault parameter. It will be released after response or + * error path of this function. If an error is returned, the caller + * will send a response to the hardware. We need to clean up before + * leaving, otherwise partial faults will be stuck. + */ + group = iopf_group_alloc(iopf_param, evt, &abort_group); + if (group == &abort_group) + goto err_abort; - group->dev = dev; - group->last_fault.fault = *fault; - INIT_LIST_HEAD(&group->faults); - list_add(&group->last_fault.list, &group->faults); - INIT_WORK(&group->work, iopf_handler); + group->attach_handle = attach_handle; - /* See if we have partial faults for this group */ - list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) { - if (iopf->fault.prm.grpid == fault->prm.grpid) - /* Insert *before* the last fault */ - list_move(&iopf->list, &group->faults); - } + /* + * On success iopf_handler must call iopf_group_response() and + * iopf_free_group() + */ + if (group->attach_handle->domain->iopf_handler(group)) + goto err_abort; - queue_work(iopf_param->queue->wq, &group->work); return 0; -cleanup_partial: - list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) { - if (iopf->fault.prm.grpid == fault->prm.grpid) { - list_del(&iopf->list); - kfree(iopf); - } - } - return ret; +err_abort: + dev_warn_ratelimited(dev, "iopf with pasid %d aborted\n", + fault->prm.pasid); + iopf_group_response(group, IOMMU_PAGE_RESP_FAILURE); + if (group == &abort_group) + __iopf_free_group(group); + else + iopf_free_group(group); + + return 0; + +err_bad_iopf: + if (fault->type == IOMMU_FAULT_PAGE_REQ) + iopf_error_response(dev, evt); + + return -EINVAL; } -EXPORT_SYMBOL_GPL(iommu_queue_iopf); +EXPORT_SYMBOL_GPL(iommu_report_device_fault); /** * iopf_queue_flush_dev - Ensure that all queued faults have been processed @@ -229,25 +297,51 @@ EXPORT_SYMBOL_GPL(iommu_queue_iopf); */ int iopf_queue_flush_dev(struct device *dev) { - int ret = 0; - struct iopf_device_param *iopf_param; - struct dev_iommu *param = dev->iommu; + struct iommu_fault_param *iopf_param; - if (!param) + /* + * It's a driver bug to be here after iopf_queue_remove_device(). + * Therefore, it's safe to dereference the fault parameter without + * holding the lock. 
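Editor's note: before continuing, a hedged sketch of the driver side of the new reporting contract introduced above. The fault is assembled from the hardware page-request entry and handed to iommu_report_device_fault(); on failure the core now completes the fault itself via ops->page_response(), so the driver has nothing further to do. All example_* types and helpers are hypothetical stand-ins for hardware-specific PRQ parsing; only struct iopf_fault, struct iommu_fault and iommu_report_device_fault() come from this series.

#include <linux/iommu.h>
#include <linux/interrupt.h>
#include <linux/types.h>

/* Hypothetical hardware descriptor and driver helpers, defined elsewhere. */
struct example_prq_entry { u32 pasid; u32 grpid; u64 addr; };
struct example_iommu;
extern bool example_pop_prq(struct example_iommu *iommu, struct example_prq_entry *ent);
extern struct device *example_get_device(struct example_iommu *iommu, struct example_prq_entry *ent);

static irqreturn_t example_prq_thread(int irq, void *cookie)
{
	struct example_iommu *iommu = cookie;
	struct example_prq_entry ent;

	while (example_pop_prq(iommu, &ent)) {
		struct device *dev = example_get_device(iommu, &ent);
		struct iopf_fault evt = {
			.fault = {
				.type = IOMMU_FAULT_PAGE_REQ,
				.prm = {
					.flags = IOMMU_FAULT_PAGE_REQUEST_PASID_VALID |
						 IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE,
					.pasid = ent.pasid,
					.grpid = ent.grpid,
					.perm  = IOMMU_FAULT_PERM_READ,
					.addr  = ent.addr,
				},
			},
		};

		/*
		 * If this fails the core has already responded with
		 * ops->page_response(), so only log it here.
		 */
		if (iommu_report_device_fault(dev, &evt))
			dev_dbg(dev, "dropped unhandleable page request\n");
	}

	return IRQ_HANDLED;
}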
+ */ + iopf_param = rcu_dereference_check(dev->iommu->fault_param, true); + if (WARN_ON(!iopf_param)) return -ENODEV; - mutex_lock(¶m->lock); - iopf_param = param->iopf_param; - if (iopf_param) - flush_workqueue(iopf_param->queue->wq); - else - ret = -ENODEV; - mutex_unlock(¶m->lock); + flush_workqueue(iopf_param->queue->wq); - return ret; + return 0; } EXPORT_SYMBOL_GPL(iopf_queue_flush_dev); +/** + * iopf_group_response - Respond a group of page faults + * @group: the group of faults with the same group id + * @status: the response code + */ +void iopf_group_response(struct iopf_group *group, + enum iommu_page_response_code status) +{ + struct iommu_fault_param *fault_param = group->fault_param; + struct iopf_fault *iopf = &group->last_fault; + struct device *dev = group->fault_param->dev; + const struct iommu_ops *ops = dev_iommu_ops(dev); + struct iommu_page_response resp = { + .pasid = iopf->fault.prm.pasid, + .grpid = iopf->fault.prm.grpid, + .code = status, + }; + + /* Only send response if there is a fault report pending */ + mutex_lock(&fault_param->lock); + if (!list_empty(&group->pending_node)) { + ops->page_response(dev, &group->last_fault, &resp); + list_del_init(&group->pending_node); + } + mutex_unlock(&fault_param->lock); +} +EXPORT_SYMBOL_GPL(iopf_group_response); + /** * iopf_queue_discard_partial - Remove all pending partial fault * @queue: the queue whose partial faults need to be discarded @@ -261,18 +355,20 @@ EXPORT_SYMBOL_GPL(iopf_queue_flush_dev); int iopf_queue_discard_partial(struct iopf_queue *queue) { struct iopf_fault *iopf, *next; - struct iopf_device_param *iopf_param; + struct iommu_fault_param *iopf_param; if (!queue) return -EINVAL; mutex_lock(&queue->lock); list_for_each_entry(iopf_param, &queue->devices, queue_list) { + mutex_lock(&iopf_param->lock); list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) { list_del(&iopf->list); kfree(iopf); } + mutex_unlock(&iopf_param->lock); } mutex_unlock(&queue->lock); return 0; @@ -288,34 +384,42 @@ EXPORT_SYMBOL_GPL(iopf_queue_discard_partial); */ int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev) { - int ret = -EBUSY; - struct iopf_device_param *iopf_param; + int ret = 0; struct dev_iommu *param = dev->iommu; + struct iommu_fault_param *fault_param; + const struct iommu_ops *ops = dev_iommu_ops(dev); - if (!param) + if (!ops->page_response) return -ENODEV; - iopf_param = kzalloc(sizeof(*iopf_param), GFP_KERNEL); - if (!iopf_param) - return -ENOMEM; - - INIT_LIST_HEAD(&iopf_param->partial); - iopf_param->queue = queue; - iopf_param->dev = dev; - mutex_lock(&queue->lock); mutex_lock(¶m->lock); - if (!param->iopf_param) { - list_add(&iopf_param->queue_list, &queue->devices); - param->iopf_param = iopf_param; - ret = 0; + if (rcu_dereference_check(param->fault_param, + lockdep_is_held(¶m->lock))) { + ret = -EBUSY; + goto done_unlock; } + + fault_param = kzalloc(sizeof(*fault_param), GFP_KERNEL); + if (!fault_param) { + ret = -ENOMEM; + goto done_unlock; + } + + mutex_init(&fault_param->lock); + INIT_LIST_HEAD(&fault_param->faults); + INIT_LIST_HEAD(&fault_param->partial); + fault_param->dev = dev; + refcount_set(&fault_param->users, 1); + list_add(&fault_param->queue_list, &queue->devices); + fault_param->queue = queue; + + rcu_assign_pointer(param->fault_param, fault_param); + +done_unlock: mutex_unlock(¶m->lock); mutex_unlock(&queue->lock); - if (ret) - kfree(iopf_param); - return ret; } EXPORT_SYMBOL_GPL(iopf_queue_add_device); @@ -325,40 +429,67 @@ 
EXPORT_SYMBOL_GPL(iopf_queue_add_device); * @queue: IOPF queue * @dev: device to remove * - * Caller makes sure that no more faults are reported for this device. + * Removing a device from an iopf_queue. It's recommended to follow these + * steps when removing a device: * - * Return: 0 on success and <0 on error. + * - Disable new PRI reception: Turn off PRI generation in the IOMMU hardware + * and flush any hardware page request queues. This should be done before + * calling into this helper. + * - Acknowledge all outstanding PRQs to the device: Respond to all outstanding + * page requests with IOMMU_PAGE_RESP_INVALID, indicating the device should + * not retry. This helper function handles this. + * - Disable PRI on the device: After calling this helper, the caller could + * then disable PRI on the device. + * + * Calling iopf_queue_remove_device() essentially disassociates the device. + * The fault_param might still exist, but iommu_page_response() will do + * nothing. The device fault parameter reference count has been properly + * passed from iommu_report_device_fault() to the fault handling work, and + * will eventually be released after iommu_page_response(). */ -int iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev) +void iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev) { - int ret = -EINVAL; - struct iopf_fault *iopf, *next; - struct iopf_device_param *iopf_param; + struct iopf_fault *partial_iopf; + struct iopf_fault *next; + struct iopf_group *group, *temp; struct dev_iommu *param = dev->iommu; - - if (!param || !queue) - return -EINVAL; + struct iommu_fault_param *fault_param; + const struct iommu_ops *ops = dev_iommu_ops(dev); mutex_lock(&queue->lock); mutex_lock(¶m->lock); - iopf_param = param->iopf_param; - if (iopf_param && iopf_param->queue == queue) { - list_del(&iopf_param->queue_list); - param->iopf_param = NULL; - ret = 0; + fault_param = rcu_dereference_check(param->fault_param, + lockdep_is_held(¶m->lock)); + + if (WARN_ON(!fault_param || fault_param->queue != queue)) + goto unlock; + + mutex_lock(&fault_param->lock); + list_for_each_entry_safe(partial_iopf, next, &fault_param->partial, list) + kfree(partial_iopf); + + list_for_each_entry_safe(group, temp, &fault_param->faults, pending_node) { + struct iopf_fault *iopf = &group->last_fault; + struct iommu_page_response resp = { + .pasid = iopf->fault.prm.pasid, + .grpid = iopf->fault.prm.grpid, + .code = IOMMU_PAGE_RESP_INVALID + }; + + ops->page_response(dev, iopf, &resp); + list_del_init(&group->pending_node); + iopf_free_group(group); } - mutex_unlock(¶m->lock); - mutex_unlock(&queue->lock); - if (ret) - return ret; + mutex_unlock(&fault_param->lock); - /* Just in case some faults are still stuck */ - list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) - kfree(iopf); + list_del(&fault_param->queue_list); - kfree(iopf_param); - - return 0; + /* dec the ref owned by iopf_queue_add_device() */ + rcu_assign_pointer(param->fault_param, NULL); + iopf_put_dev_fault_param(fault_param); +unlock: + mutex_unlock(¶m->lock); + mutex_unlock(&queue->lock); } EXPORT_SYMBOL_GPL(iopf_queue_remove_device); @@ -404,7 +535,7 @@ EXPORT_SYMBOL_GPL(iopf_queue_alloc); */ void iopf_queue_free(struct iopf_queue *queue) { - struct iopf_device_param *iopf_param, *next; + struct iommu_fault_param *iopf_param, *next; if (!queue) return; diff --git a/drivers/iommu/iommu-priv.h b/drivers/iommu/iommu-priv.h index 2024a2313348..f396c7063a32 100644 --- a/drivers/iommu/iommu-priv.h +++ 
b/drivers/iommu/iommu-priv.h @@ -27,4 +27,17 @@ void iommu_device_unregister_bus(struct iommu_device *iommu, struct bus_type *bus, struct notifier_block *nb); +int iommu_mock_device_add(struct device *dev, struct iommu_device *iommu); + +struct iommu_attach_handle *iommu_attach_handle_get(struct iommu_group *group, + ioasid_t pasid, + unsigned int type); +int iommu_attach_group_handle(struct iommu_domain *domain, + struct iommu_group *group, + struct iommu_attach_handle *handle); +void iommu_detach_group_handle(struct iommu_domain *domain, + struct iommu_group *group); +int iommu_replace_group_handle(struct iommu_group *group, + struct iommu_domain *new_domain, + struct iommu_attach_handle *handle); #endif /* __LINUX_IOMMU_PRIV_H */ diff --git a/drivers/iommu/iommu-sva.c b/drivers/iommu/iommu-sva.c index b78671a8a914..e314cd99dc0a 100644 --- a/drivers/iommu/iommu-sva.c +++ b/drivers/iommu/iommu-sva.c @@ -7,37 +7,49 @@ #include #include -#include "iommu-sva.h" +#include "iommu-priv.h" static DEFINE_MUTEX(iommu_sva_lock); +static struct iommu_domain *iommu_sva_domain_alloc(struct device *dev, + struct mm_struct *mm); /* Allocate a PASID for the mm within range (inclusive) */ -static int iommu_sva_alloc_pasid(struct mm_struct *mm, struct device *dev) +static struct iommu_mm_data *iommu_alloc_mm_data(struct mm_struct *mm, struct device *dev) { + struct iommu_mm_data *iommu_mm; ioasid_t pasid; - int ret = 0; + + lockdep_assert_held(&iommu_sva_lock); if (!arch_pgtable_dma_compat(mm)) - return -EBUSY; + return ERR_PTR(-EBUSY); - mutex_lock(&iommu_sva_lock); + iommu_mm = mm->iommu_mm; /* Is a PASID already associated with this mm? */ - if (mm_valid_pasid(mm)) { - if (mm->pasid >= dev->iommu->max_pasids) - ret = -EOVERFLOW; - goto out; + if (iommu_mm) { + if (iommu_mm->pasid >= dev->iommu->max_pasids) + return ERR_PTR(-EOVERFLOW); + return iommu_mm; } + iommu_mm = kzalloc(sizeof(struct iommu_mm_data), GFP_KERNEL); + if (!iommu_mm) + return ERR_PTR(-ENOMEM); + pasid = iommu_alloc_global_pasid(dev); if (pasid == IOMMU_PASID_INVALID) { - ret = -ENOSPC; - goto out; + kfree(iommu_mm); + return ERR_PTR(-ENOSPC); } - mm->pasid = pasid; - ret = 0; -out: - mutex_unlock(&iommu_sva_lock); - return ret; + iommu_mm->pasid = pasid; + INIT_LIST_HEAD(&iommu_mm->sva_domains); + /* + * Make sure the write to mm->iommu_mm is not reordered in front of + * initialization to iommu_mm fields. If it does, readers may see a + * valid iommu_mm with uninitialized values. + */ + smp_store_release(&mm->iommu_mm, iommu_mm); + return iommu_mm; } /** @@ -58,57 +70,84 @@ static int iommu_sva_alloc_pasid(struct mm_struct *mm, struct device *dev) */ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm) { + struct iommu_group *group = dev->iommu_group; + struct iommu_attach_handle *attach_handle; + struct iommu_mm_data *iommu_mm; struct iommu_domain *domain; struct iommu_sva *handle; int ret; + if (!group) + return ERR_PTR(-ENODEV); + + mutex_lock(&iommu_sva_lock); + /* Allocate mm->pasid if necessary. */ - ret = iommu_sva_alloc_pasid(mm, dev); - if (ret) - return ERR_PTR(ret); + iommu_mm = iommu_alloc_mm_data(mm, dev); + if (IS_ERR(iommu_mm)) { + ret = PTR_ERR(iommu_mm); + goto out_unlock; + } - handle = kzalloc(sizeof(*handle), GFP_KERNEL); - if (!handle) - return ERR_PTR(-ENOMEM); + /* A bond already exists, just take a reference`. 
*/ + attach_handle = iommu_attach_handle_get(group, iommu_mm->pasid, IOMMU_DOMAIN_SVA); + if (!IS_ERR(attach_handle)) { + handle = container_of(attach_handle, struct iommu_sva, handle); + if (attach_handle->domain->mm != mm) { + ret = -EBUSY; + goto out_unlock; + } + refcount_inc(&handle->users); + mutex_unlock(&iommu_sva_lock); + return handle; + } - mutex_lock(&iommu_sva_lock); - /* Search for an existing domain. */ - domain = iommu_get_domain_for_dev_pasid(dev, mm->pasid, - IOMMU_DOMAIN_SVA); - if (IS_ERR(domain)) { - ret = PTR_ERR(domain); + if (PTR_ERR(attach_handle) != -ENOENT) { + ret = PTR_ERR(attach_handle); goto out_unlock; } - if (domain) { - domain->users++; - goto out; + handle = kzalloc(sizeof(*handle), GFP_KERNEL); + if (!handle) { + ret = -ENOMEM; + goto out_unlock; + } + + /* Search for an existing domain. */ + list_for_each_entry(domain, &mm->iommu_mm->sva_domains, next) { + ret = iommu_attach_device_pasid(domain, dev, iommu_mm->pasid, + &handle->handle); + if (!ret) { + domain->users++; + goto out; + } } /* Allocate a new domain and set it on device pasid. */ domain = iommu_sva_domain_alloc(dev, mm); - if (!domain) { - ret = -ENOMEM; + if (IS_ERR(domain)) { + ret = PTR_ERR(domain); goto out_unlock; } - ret = iommu_attach_device_pasid(domain, dev, mm->pasid); + ret = iommu_attach_device_pasid(domain, dev, iommu_mm->pasid, + &handle->handle); if (ret) goto out_free_domain; domain->users = 1; + list_add(&domain->next, &mm->iommu_mm->sva_domains); + out: + refcount_set(&handle->users, 1); mutex_unlock(&iommu_sva_lock); handle->dev = dev; - handle->domain = domain; - return handle; out_free_domain: iommu_domain_free(domain); + kfree(handle); out_unlock: mutex_unlock(&iommu_sva_lock); - kfree(handle); - return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(iommu_sva_bind_device); @@ -123,13 +162,19 @@ EXPORT_SYMBOL_GPL(iommu_sva_bind_device); */ void iommu_sva_unbind_device(struct iommu_sva *handle) { - struct iommu_domain *domain = handle->domain; - ioasid_t pasid = domain->mm->pasid; + struct iommu_domain *domain = handle->handle.domain; + struct iommu_mm_data *iommu_mm = domain->mm->iommu_mm; struct device *dev = handle->dev; mutex_lock(&iommu_sva_lock); + if (!refcount_dec_and_test(&handle->users)) { + mutex_unlock(&iommu_sva_lock); + return; + } + + iommu_detach_device_pasid(domain, dev, iommu_mm->pasid); if (--domain->users == 0) { - iommu_detach_device_pasid(domain, dev, pasid); + list_del(&domain->next); iommu_domain_free(domain); } mutex_unlock(&iommu_sva_lock); @@ -139,21 +184,31 @@ EXPORT_SYMBOL_GPL(iommu_sva_unbind_device); u32 iommu_sva_get_pasid(struct iommu_sva *handle) { - struct iommu_domain *domain = handle->domain; + struct iommu_domain *domain = handle->handle.domain; - return domain->mm->pasid; + return mm_get_enqcmd_pasid(domain->mm); } EXPORT_SYMBOL_GPL(iommu_sva_get_pasid); +void mm_pasid_drop(struct mm_struct *mm) +{ + struct iommu_mm_data *iommu_mm = mm->iommu_mm; + + if (!iommu_mm) + return; + + iommu_free_global_pasid(iommu_mm->pasid); + kfree(iommu_mm); +} + /* * I/O page fault handler for SVA */ -enum iommu_page_response_code -iommu_sva_handle_iopf(struct iommu_fault *fault, void *data) +static enum iommu_page_response_code +iommu_sva_handle_mm(struct iommu_fault *fault, struct mm_struct *mm) { vm_fault_t ret; struct vm_area_struct *vma; - struct mm_struct *mm = data; unsigned int access_flags = 0; unsigned int fault_flags = FAULT_FLAG_REMOTE; struct iommu_fault_page_request *prm = &fault->prm; @@ -203,10 +258,61 @@ iommu_sva_handle_iopf(struct iommu_fault 
*fault, void *data) return status; } -void mm_pasid_drop(struct mm_struct *mm) +static void iommu_sva_handle_iopf(struct work_struct *work) { - if (likely(!mm_valid_pasid(mm))) - return; + struct iopf_fault *iopf; + struct iopf_group *group; + enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS; + + group = container_of(work, struct iopf_group, work); + list_for_each_entry(iopf, &group->faults, list) { + /* + * For the moment, errors are sticky: don't handle subsequent + * faults in the group if there is an error. + */ + if (status != IOMMU_PAGE_RESP_SUCCESS) + break; + + status = iommu_sva_handle_mm(&iopf->fault, + group->attach_handle->domain->mm); + } + + iopf_group_response(group, status); + iopf_free_group(group); +} + +static int iommu_sva_iopf_handler(struct iopf_group *group) +{ + struct iommu_fault_param *fault_param = group->fault_param; + + INIT_WORK(&group->work, iommu_sva_handle_iopf); + if (!queue_work(fault_param->queue->wq, &group->work)) + return -EBUSY; + + return 0; +} + +static struct iommu_domain *iommu_sva_domain_alloc(struct device *dev, + struct mm_struct *mm) +{ + const struct iommu_ops *ops = dev_iommu_ops(dev); + struct iommu_domain *domain; + + if (ops->domain_alloc_sva) { + domain = ops->domain_alloc_sva(dev, mm); + if (IS_ERR(domain)) + return domain; + } else { + domain = ops->domain_alloc(IOMMU_DOMAIN_SVA); + if (!domain) + return ERR_PTR(-ENOMEM); + } + + domain->type = IOMMU_DOMAIN_SVA; + mmgrab(mm); + domain->mm = mm; + domain->owner = ops; + domain->iopf_handler = iommu_sva_iopf_handler; - iommu_free_global_pasid(mm->pasid); + return domain; } diff --git a/drivers/iommu/iommu-sva.h b/drivers/iommu/iommu-sva.h deleted file mode 100644 index 54946b5a7caf..000000000000 --- a/drivers/iommu/iommu-sva.h +++ /dev/null @@ -1,71 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * SVA library for IOMMU drivers - */ -#ifndef _IOMMU_SVA_H -#define _IOMMU_SVA_H - -#include - -/* I/O Page fault */ -struct device; -struct iommu_fault; -struct iopf_queue; - -#ifdef CONFIG_IOMMU_SVA -int iommu_queue_iopf(struct iommu_fault *fault, void *cookie); - -int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev); -int iopf_queue_remove_device(struct iopf_queue *queue, - struct device *dev); -int iopf_queue_flush_dev(struct device *dev); -struct iopf_queue *iopf_queue_alloc(const char *name); -void iopf_queue_free(struct iopf_queue *queue); -int iopf_queue_discard_partial(struct iopf_queue *queue); -enum iommu_page_response_code -iommu_sva_handle_iopf(struct iommu_fault *fault, void *data); - -#else /* CONFIG_IOMMU_SVA */ -static inline int iommu_queue_iopf(struct iommu_fault *fault, void *cookie) -{ - return -ENODEV; -} - -static inline int iopf_queue_add_device(struct iopf_queue *queue, - struct device *dev) -{ - return -ENODEV; -} - -static inline int iopf_queue_remove_device(struct iopf_queue *queue, - struct device *dev) -{ - return -ENODEV; -} - -static inline int iopf_queue_flush_dev(struct device *dev) -{ - return -ENODEV; -} - -static inline struct iopf_queue *iopf_queue_alloc(const char *name) -{ - return NULL; -} - -static inline void iopf_queue_free(struct iopf_queue *queue) -{ -} - -static inline int iopf_queue_discard_partial(struct iopf_queue *queue) -{ - return -ENODEV; -} - -static inline enum iommu_page_response_code -iommu_sva_handle_iopf(struct iommu_fault *fault, void *data) -{ - return IOMMU_PAGE_RESP_INVALID; -} -#endif /* CONFIG_IOMMU_SVA */ -#endif /* _IOMMU_SVA_H */ diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c 
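Editor's note: the consumer-facing SVA entry points are unchanged by the sharing and refcounting rework above; only their internals differ. For orientation, a hedged sketch of a client driver using them, where example_program_pasid() is a hypothetical device-specific step:

#include <linux/iommu.h>
#include <linux/sched.h>
#include <linux/err.h>

/* Hypothetical device-specific PASID programming, defined elsewhere. */
extern void example_program_pasid(struct device *dev, u32 pasid);

static int example_enable_sva(struct device *dev)
{
	struct iommu_sva *bond;
	u32 pasid;

	/* Bind the calling process's address space; handles are now shared. */
	bond = iommu_sva_bind_device(dev, current->mm);
	if (IS_ERR(bond))
		return PTR_ERR(bond);

	pasid = iommu_sva_get_pasid(bond);
	example_program_pasid(dev, pasid);

	/* ... submit work that references user virtual addresses ... */

	iommu_sva_unbind_device(bond);
	return 0;
}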
index 3fa5699b9ff1..a20136be1267 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -36,9 +36,6 @@ #include "dma-iommu.h" #include "iommu-priv.h" -#include "iommu-sva.h" -#include "iommu-priv.h" - static struct kset *iommu_group_kset; static DEFINE_IDA(iommu_group_ida); static DEFINE_IDA(iommu_global_pasid_ida); @@ -96,8 +93,8 @@ static const char * const iommu_group_resv_type_string[] = { static int iommu_bus_notifier(struct notifier_block *nb, unsigned long action, void *data); static void iommu_release_device(struct device *dev); -static struct iommu_domain *__iommu_domain_alloc(const struct bus_type *bus, - unsigned type); +static struct iommu_domain * +__iommu_group_domain_alloc(struct iommu_group *group, unsigned int type); static int __iommu_attach_device(struct iommu_domain *domain, struct device *dev); static int __iommu_attach_group(struct iommu_domain *domain, @@ -149,7 +146,7 @@ struct iommu_group_attribute iommu_group_attr_##_name = \ static LIST_HEAD(iommu_device_list); static DEFINE_SPINLOCK(iommu_device_lock); -static struct bus_type * const iommu_buses[] = { +static const struct bus_type * const iommu_buses[] = { &platform_bus_type, #ifdef CONFIG_PCI &pci_bus_type, @@ -184,6 +181,8 @@ static const char *iommu_domain_type_str(unsigned int t) case IOMMU_DOMAIN_DMA: case IOMMU_DOMAIN_DMA_FQ: return "Translated"; + case IOMMU_DOMAIN_PLATFORM: + return "Platform"; default: return "Unknown"; } @@ -256,13 +255,6 @@ int iommu_device_register(struct iommu_device *iommu, /* We need to be able to take module references appropriately */ if (WARN_ON(is_module_address((unsigned long)ops) && !ops->owner)) return -EINVAL; - /* - * Temporarily enforce global restriction to a single driver. This was - * already the de-facto behaviour, since any possible combination of - * existing drivers would compete for at least the PCI or platform bus. 
- */ - if (iommu_buses[0]->iommu_ops && iommu_buses[0]->iommu_ops != ops) - return -EBUSY; iommu->ops = ops; if (hwdev) @@ -272,10 +264,8 @@ int iommu_device_register(struct iommu_device *iommu, list_add_tail(&iommu->list, &iommu_device_list); spin_unlock(&iommu_device_lock); - for (int i = 0; i < ARRAY_SIZE(iommu_buses) && !err; i++) { - iommu_buses[i]->iommu_ops = ops; + for (int i = 0; i < ARRAY_SIZE(iommu_buses) && !err; i++) err = bus_iommu_probe(iommu_buses[i]); - } if (err) iommu_device_unregister(iommu); return err; @@ -299,6 +289,7 @@ void iommu_device_unregister_bus(struct iommu_device *iommu, struct notifier_block *nb) { bus_unregister_notifier(bus, nb); + fwnode_remove_software_node(iommu->fwnode); iommu_device_unregister(iommu); } EXPORT_SYMBOL_GPL(iommu_device_unregister_bus); @@ -320,19 +311,43 @@ int iommu_device_register_bus(struct iommu_device *iommu, if (err) return err; + iommu->fwnode = fwnode_create_software_node(NULL, NULL); + if (IS_ERR(iommu->fwnode)) { + bus_unregister_notifier(bus, nb); + return PTR_ERR(iommu->fwnode); + } + spin_lock(&iommu_device_lock); list_add_tail(&iommu->list, &iommu_device_list); spin_unlock(&iommu_device_lock); - bus->iommu_ops = ops; err = bus_iommu_probe(bus); if (err) { iommu_device_unregister_bus(iommu, bus, nb); return err; } + WRITE_ONCE(iommu->ready, true); return 0; } EXPORT_SYMBOL_GPL(iommu_device_register_bus); + +int iommu_mock_device_add(struct device *dev, struct iommu_device *iommu) +{ + int rc; + + mutex_lock(&iommu_probe_device_lock); + rc = iommu_fwspec_init(dev, iommu->fwnode); + mutex_unlock(&iommu_probe_device_lock); + + if (rc) + return rc; + + rc = device_add(dev); + if (rc) + iommu_fwspec_free(dev); + return rc; +} +EXPORT_SYMBOL_GPL(iommu_mock_device_add); #endif static struct dev_iommu *dev_iommu_get(struct device *dev) @@ -363,6 +378,15 @@ static void dev_iommu_free(struct device *dev) kfree(param); } +/* + * Internal equivalent of device_iommu_mapped() for when we care that a device + * actually has API ops, and don't want false positives from VFIO-only groups. + */ +static bool dev_has_iommu(struct device *dev) +{ + return dev->iommu && dev->iommu->iommu_dev; +} + static u32 dev_iommu_get_max_pasids(struct device *dev) { u32 max_pasids = 0, bits = 0; @@ -483,11 +507,26 @@ DEFINE_MUTEX(iommu_probe_device_lock); static int __iommu_probe_device(struct device *dev, struct list_head *group_list) { - const struct iommu_ops *ops = dev->bus->iommu_ops; + const struct iommu_ops *ops; + struct iommu_fwspec *fwspec; struct iommu_group *group; struct group_device *gdev; int ret; + /* + * For FDT-based systems and ACPI IORT/VIOT, drivers register IOMMU + * instances with non-NULL fwnodes, and client devices should have been + * identified with a fwspec by this point. Otherwise, we can currently + * assume that only one of Intel, AMD, s390, PAMU or legacy SMMUv2 can + * be present, and that any of their registered instances has suitable + * ops for probing, and thus cheekily co-opt the same mechanism. 
+ */ + fwspec = dev_iommu_fwspec_get(dev); + if (fwspec && fwspec->ops) + ops = fwspec->ops; + else + ops = iommu_ops_from_fwnode(NULL); + if (!ops) return -ENODEV; /* @@ -624,7 +663,7 @@ static void __iommu_group_remove_device(struct device *dev) list_del(&device->list); __iommu_group_free_device(group, device); - if (dev->iommu && dev->iommu->iommu_dev) + if (dev_has_iommu(dev)) iommu_deinit_device(dev); else dev->iommu_group = NULL; @@ -823,7 +862,7 @@ int iommu_get_group_resv_regions(struct iommu_group *group, * Non-API groups still expose reserved_regions in sysfs, * so filter out calls that get here that way. */ - if (!device->dev->iommu) + if (!dev_has_iommu(device->dev)) break; INIT_LIST_HEAD(&dev_resv_regions); @@ -1229,6 +1268,31 @@ void iommu_group_remove_device(struct device *dev) } EXPORT_SYMBOL_GPL(iommu_group_remove_device); +#if IS_ENABLED(CONFIG_LOCKDEP) && IS_ENABLED(CONFIG_IOMMU_API) +/** + * iommu_group_mutex_assert - Check device group mutex lock + * @dev: the device that has group param set + * + * This function is called by an iommu driver to check whether it holds + * group mutex lock for the given device or not. + * + * Note that this function must be called after device group param is set. + */ +void iommu_group_mutex_assert(struct device *dev) +{ + struct iommu_group *group = dev->iommu_group; + + lockdep_assert_held(&group->mutex); +} +EXPORT_SYMBOL_GPL(iommu_group_mutex_assert); +#endif + +static struct device *iommu_group_first_dev(struct iommu_group *group) +{ + lockdep_assert_held(&group->mutex); + return list_first_entry(&group->devices, struct group_device, list)->dev; +} + /** * iommu_group_for_each_dev - iterate over each device in the group * @group: the group @@ -1305,217 +1369,6 @@ void iommu_group_put(struct iommu_group *group) } EXPORT_SYMBOL_GPL(iommu_group_put); -/** - * iommu_register_device_fault_handler() - Register a device fault handler - * @dev: the device - * @handler: the fault handler - * @data: private data passed as argument to the handler - * - * When an IOMMU fault event is received, this handler gets called with the - * fault event and data as argument. The handler should return 0 on success. If - * the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should also - * complete the fault by calling iommu_page_response() with one of the following - * response code: - * - IOMMU_PAGE_RESP_SUCCESS: retry the translation - * - IOMMU_PAGE_RESP_INVALID: terminate the fault - * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting - * page faults if possible. - * - * Return 0 if the fault handler was installed successfully, or an error. 
- */ -int iommu_register_device_fault_handler(struct device *dev, - iommu_dev_fault_handler_t handler, - void *data) -{ - struct dev_iommu *param = dev->iommu; - int ret = 0; - - if (!param) - return -EINVAL; - - mutex_lock(¶m->lock); - /* Only allow one fault handler registered for each device */ - if (param->fault_param) { - ret = -EBUSY; - goto done_unlock; - } - - get_device(dev); - param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL); - if (!param->fault_param) { - put_device(dev); - ret = -ENOMEM; - goto done_unlock; - } - param->fault_param->handler = handler; - param->fault_param->data = data; - mutex_init(¶m->fault_param->lock); - INIT_LIST_HEAD(¶m->fault_param->faults); - -done_unlock: - mutex_unlock(¶m->lock); - - return ret; -} -EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler); - -/** - * iommu_unregister_device_fault_handler() - Unregister the device fault handler - * @dev: the device - * - * Remove the device fault handler installed with - * iommu_register_device_fault_handler(). - * - * Return 0 on success, or an error. - */ -int iommu_unregister_device_fault_handler(struct device *dev) -{ - struct dev_iommu *param = dev->iommu; - int ret = 0; - - if (!param) - return -EINVAL; - - mutex_lock(¶m->lock); - - if (!param->fault_param) - goto unlock; - - /* we cannot unregister handler if there are pending faults */ - if (!list_empty(¶m->fault_param->faults)) { - ret = -EBUSY; - goto unlock; - } - - kfree(param->fault_param); - param->fault_param = NULL; - put_device(dev); -unlock: - mutex_unlock(¶m->lock); - - return ret; -} -EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler); - -/** - * iommu_report_device_fault() - Report fault event to device driver - * @dev: the device - * @evt: fault event data - * - * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ - * handler. When this function fails and the fault is recoverable, it is the - * caller's responsibility to complete the fault. - * - * Return 0 on success, or an error. 
- */ -int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt) -{ - struct dev_iommu *param = dev->iommu; - struct iommu_fault_event *evt_pending = NULL; - struct iommu_fault_param *fparam; - int ret = 0; - - if (!param || !evt) - return -EINVAL; - - /* we only report device fault if there is a handler registered */ - mutex_lock(¶m->lock); - fparam = param->fault_param; - if (!fparam || !fparam->handler) { - ret = -EINVAL; - goto done_unlock; - } - - if (evt->fault.type == IOMMU_FAULT_PAGE_REQ && - (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) { - evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event), - GFP_KERNEL); - if (!evt_pending) { - ret = -ENOMEM; - goto done_unlock; - } - mutex_lock(&fparam->lock); - list_add_tail(&evt_pending->list, &fparam->faults); - mutex_unlock(&fparam->lock); - } - - ret = fparam->handler(&evt->fault, fparam->data); - if (ret && evt_pending) { - mutex_lock(&fparam->lock); - list_del(&evt_pending->list); - mutex_unlock(&fparam->lock); - kfree(evt_pending); - } -done_unlock: - mutex_unlock(¶m->lock); - return ret; -} -EXPORT_SYMBOL_GPL(iommu_report_device_fault); - -int iommu_page_response(struct device *dev, - struct iommu_page_response *msg) -{ - bool needs_pasid; - int ret = -EINVAL; - struct iommu_fault_event *evt; - struct iommu_fault_page_request *prm; - struct dev_iommu *param = dev->iommu; - const struct iommu_ops *ops = dev_iommu_ops(dev); - bool has_pasid = msg->flags & IOMMU_PAGE_RESP_PASID_VALID; - - if (!ops->page_response) - return -ENODEV; - - if (!param || !param->fault_param) - return -EINVAL; - - if (msg->version != IOMMU_PAGE_RESP_VERSION_1 || - msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID) - return -EINVAL; - - /* Only send response if there is a fault report pending */ - mutex_lock(¶m->fault_param->lock); - if (list_empty(¶m->fault_param->faults)) { - dev_warn_ratelimited(dev, "no pending PRQ, drop response\n"); - goto done_unlock; - } - /* - * Check if we have a matching page request pending to respond, - * otherwise return -EINVAL - */ - list_for_each_entry(evt, ¶m->fault_param->faults, list) { - prm = &evt->fault.prm; - if (prm->grpid != msg->grpid) - continue; - - /* - * If the PASID is required, the corresponding request is - * matched using the group ID, the PASID valid bit and the PASID - * value. Otherwise only the group ID matches request and - * response. - */ - needs_pasid = prm->flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID; - if (needs_pasid && (!has_pasid || msg->pasid != prm->pasid)) - continue; - - if (!needs_pasid && has_pasid) { - /* No big deal, just clear it. 
*/ - msg->flags &= ~IOMMU_PAGE_RESP_PASID_VALID; - msg->pasid = 0; - } - - ret = ops->page_response(dev, evt, msg); - list_del(&evt->list); - kfree(evt); - break; - } - -done_unlock: - mutex_unlock(¶m->fault_param->lock); - return ret; -} -EXPORT_SYMBOL_GPL(iommu_page_response); - /** * iommu_group_id - Return ID for a group * @group: the group to ID @@ -1727,26 +1580,12 @@ struct iommu_group *fsl_mc_device_group(struct device *dev) } EXPORT_SYMBOL_GPL(fsl_mc_device_group); -static int iommu_get_def_domain_type(struct device *dev) -{ - const struct iommu_ops *ops = dev_iommu_ops(dev); - - if (dev_is_pci(dev) && to_pci_dev(dev)->untrusted) - return IOMMU_DOMAIN_DMA; - - if (ops->def_domain_type) - return ops->def_domain_type(dev); - - return 0; -} - static struct iommu_domain * -__iommu_group_alloc_default_domain(const struct bus_type *bus, - struct iommu_group *group, int req_type) +__iommu_group_alloc_default_domain(struct iommu_group *group, int req_type) { if (group->default_domain && group->default_domain->type == req_type) return group->default_domain; - return __iommu_domain_alloc(bus, req_type); + return __iommu_group_domain_alloc(group, req_type); } /* @@ -1756,27 +1595,36 @@ __iommu_group_alloc_default_domain(const struct bus_type *bus, static struct iommu_domain * iommu_group_alloc_default_domain(struct iommu_group *group, int req_type) { - const struct bus_type *bus = - list_first_entry(&group->devices, struct group_device, list) - ->dev->bus; + const struct iommu_ops *ops = dev_iommu_ops(iommu_group_first_dev(group)); struct iommu_domain *dom; lockdep_assert_held(&group->mutex); + /* + * Allow legacy drivers to specify the domain that will be the default + * domain. This should always be either an IDENTITY/BLOCKED/PLATFORM + * domain. Do not use in new drivers. + */ + if (ops->default_domain) { + if (req_type != ops->default_domain->type) + return NULL; + return ops->default_domain; + } + if (req_type) - return __iommu_group_alloc_default_domain(bus, group, req_type); + return __iommu_group_alloc_default_domain(group, req_type); /* The driver gave no guidance on what type to use, try the default */ - dom = __iommu_group_alloc_default_domain(bus, group, iommu_def_domain_type); - if (dom) + dom = __iommu_group_alloc_default_domain(group, iommu_def_domain_type); + if (!IS_ERR(dom)) return dom; /* Otherwise IDENTITY and DMA_FQ defaults will try DMA */ if (iommu_def_domain_type == IOMMU_DOMAIN_DMA) - return NULL; - dom = __iommu_group_alloc_default_domain(bus, group, IOMMU_DOMAIN_DMA); - if (!dom) - return NULL; + return ERR_PTR(-EINVAL); + dom = __iommu_group_alloc_default_domain(group, IOMMU_DOMAIN_DMA); + if (IS_ERR(dom)) + return dom; pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_DOMAIN_DMA", iommu_def_domain_type, group->name); @@ -1820,40 +1668,117 @@ static int iommu_bus_notifier(struct notifier_block *nb, return 0; } -/* A target_type of 0 will select the best domain type and cannot fail */ +/* + * Combine the driver's chosen def_domain_type across all the devices in a + * group. Drivers must give a consistent result. + */ +static int iommu_get_def_domain_type(struct iommu_group *group, + struct device *dev, int cur_type) +{ + const struct iommu_ops *ops = dev_iommu_ops(dev); + int type; + + if (ops->default_domain) { + /* + * Drivers that declare a global static default_domain will + * always choose that. 
+ */ + type = ops->default_domain->type; + } else { + if (ops->def_domain_type) + type = ops->def_domain_type(dev); + else + return cur_type; + } + if (!type || cur_type == type) + return cur_type; + if (!cur_type) + return type; + + dev_err_ratelimited( + dev, + "IOMMU driver error, requesting conflicting def_domain_type, %s and %s, for devices in group %u.\n", + iommu_domain_type_str(cur_type), iommu_domain_type_str(type), + group->id); + + /* + * Try to recover, drivers are allowed to force IDENITY or DMA, IDENTITY + * takes precedence. + */ + if (type == IOMMU_DOMAIN_IDENTITY) + return type; + return cur_type; +} + +/* + * A target_type of 0 will select the best domain type. 0 can be returned in + * this case meaning the global default should be used. + */ static int iommu_get_default_domain_type(struct iommu_group *group, int target_type) { - int best_type = target_type; + struct device *untrusted = NULL; struct group_device *gdev; - struct device *last_dev; + int driver_type = 0; lockdep_assert_held(&group->mutex); + /* + * ARM32 drivers supporting CONFIG_ARM_DMA_USE_IOMMU can declare an + * identity_domain and it will automatically become their default + * domain. Later on ARM_DMA_USE_IOMMU will install its UNMANAGED domain. + * Override the selection to IDENTITY. + */ + if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) { + static_assert(!(IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU) && + IS_ENABLED(CONFIG_IOMMU_DMA))); + driver_type = IOMMU_DOMAIN_IDENTITY; + } + for_each_group_device(group, gdev) { - unsigned int type = iommu_get_def_domain_type(gdev->dev); - - if (best_type && type && best_type != type) { - if (target_type) { - dev_err_ratelimited( - gdev->dev, - "Device cannot be in %s domain\n", - iommu_domain_type_str(target_type)); + driver_type = iommu_get_def_domain_type(group, gdev->dev, + driver_type); + + if (dev_is_pci(gdev->dev) && to_pci_dev(gdev->dev)->untrusted) { + /* + * No ARM32 using systems will set untrusted, it cannot + * work. + */ + if (WARN_ON(IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))) return -1; - } + untrusted = gdev->dev; + } + } - dev_warn( - gdev->dev, - "Device needs domain type %s, but device %s in the same iommu group requires type %s - using default\n", - iommu_domain_type_str(type), dev_name(last_dev), - iommu_domain_type_str(best_type)); - return 0; + /* + * If the common dma ops are not selected in kconfig then we cannot use + * IOMMU_DOMAIN_DMA at all. Force IDENTITY if nothing else has been + * selected. + */ + if (!IS_ENABLED(CONFIG_IOMMU_DMA)) { + if (WARN_ON(driver_type == IOMMU_DOMAIN_DMA)) + return -1; + if (!driver_type) + driver_type = IOMMU_DOMAIN_IDENTITY; + } + + if (untrusted) { + if (driver_type && driver_type != IOMMU_DOMAIN_DMA) { + dev_err_ratelimited( + untrusted, + "Device is not trusted, but driver is overriding group %u to %s, refusing to probe.\n", + group->id, iommu_domain_type_str(driver_type)); + return -1; } - if (!best_type) - best_type = type; - last_dev = gdev->dev; + driver_type = IOMMU_DOMAIN_DMA; + } + + if (target_type) { + if (driver_type && target_type != driver_type) + return -1; + return target_type; } - return best_type; + return driver_type; } static void iommu_group_do_probe_finalize(struct device *dev) @@ -1907,9 +1832,28 @@ int bus_iommu_probe(const struct bus_type *bus) return 0; } +/** + * iommu_present() - make platform-specific assumptions about an IOMMU + * @bus: bus to check + * + * Do not use this function. You want device_iommu_mapped() instead. 
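Editor's note: the sentence above points callers at device_iommu_mapped(); a minimal sketch of that preferred per-device check follows (example_configure_dma() is a hypothetical caller, the helper itself is the existing one from <linux/device.h>):

#include <linux/device.h>

static void example_configure_dma(struct device *dev)
{
	/* Ask about this specific device, not the whole bus. */
	if (device_iommu_mapped(dev))
		dev_dbg(dev, "DMA is translated by an IOMMU\n");
	else
		dev_dbg(dev, "DMA is not IOMMU-translated\n");
}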
+ * + * Return: true if some IOMMU is present and aware of devices on the given bus; + * in general it may not be the only IOMMU, and it may not have anything to do + * with whatever device you are ultimately interested in. + */ bool iommu_present(const struct bus_type *bus) { - return bus->iommu_ops != NULL; + bool ret = false; + + for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++) { + if (iommu_buses[i] == bus) { + spin_lock(&iommu_device_lock); + ret = !list_empty(&iommu_device_list); + spin_unlock(&iommu_device_lock); + } + } + return ret; } EXPORT_SYMBOL_GPL(iommu_present); @@ -1925,7 +1869,7 @@ bool device_iommu_capable(struct device *dev, enum iommu_cap cap) { const struct iommu_ops *ops; - if (!dev->iommu || !dev->iommu->iommu_dev) + if (!dev_has_iommu(dev)) return false; ops = dev_iommu_ops(dev); @@ -1982,40 +1926,91 @@ void iommu_set_fault_handler(struct iommu_domain *domain, } EXPORT_SYMBOL_GPL(iommu_set_fault_handler); -static struct iommu_domain *__iommu_domain_alloc(const struct bus_type *bus, - unsigned type) +static struct iommu_domain *__iommu_domain_alloc(const struct iommu_ops *ops, + struct device *dev, + unsigned int type) { struct iommu_domain *domain; unsigned int alloc_type = type & IOMMU_DOMAIN_ALLOC_FLAGS; - if (bus == NULL || bus->iommu_ops == NULL) - return NULL; + if (alloc_type == IOMMU_DOMAIN_IDENTITY && ops->identity_domain) + return ops->identity_domain; + else if (alloc_type == IOMMU_DOMAIN_BLOCKED && ops->blocked_domain) + return ops->blocked_domain; + else if (type & __IOMMU_DOMAIN_PAGING && ops->domain_alloc_paging) + domain = ops->domain_alloc_paging(dev); + else if (ops->domain_alloc) + domain = ops->domain_alloc(alloc_type); + else + return ERR_PTR(-EOPNOTSUPP); - domain = bus->iommu_ops->domain_alloc(alloc_type); + /* + * Many domain_alloc ops now return ERR_PTR, make things easier for the + * driver by accepting ERR_PTR from all domain_alloc ops instead of + * having two rules. + */ + if (IS_ERR(domain)) + return domain; if (!domain) - return NULL; + return ERR_PTR(-ENOMEM); domain->type = type; + domain->owner = ops; /* * If not already set, assume all sizes by default; the driver * may override this later */ if (!domain->pgsize_bitmap) - domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap; + domain->pgsize_bitmap = ops->pgsize_bitmap; if (!domain->ops) - domain->ops = bus->iommu_ops->default_domain_ops; + domain->ops = ops->default_domain_ops; + + if (iommu_is_dma_domain(domain)) { + int rc; - if (iommu_is_dma_domain(domain) && iommu_get_dma_cookie(domain)) { - iommu_domain_free(domain); - domain = NULL; + rc = iommu_get_dma_cookie(domain); + if (rc) { + iommu_domain_free(domain); + return ERR_PTR(rc); + } } return domain; } +static struct iommu_domain * +__iommu_group_domain_alloc(struct iommu_group *group, unsigned int type) +{ + struct device *dev = iommu_group_first_dev(group); + + return __iommu_domain_alloc(dev_iommu_ops(dev), dev, type); +} + +static int __iommu_domain_alloc_dev(struct device *dev, void *data) +{ + const struct iommu_ops **ops = data; + + if (!dev_has_iommu(dev)) + return 0; + + if (WARN_ONCE(*ops && *ops != dev_iommu_ops(dev), + "Multiple IOMMU drivers present for bus %s, which the public IOMMU API can't fully support yet. 
You will still need to disable one or more for this to work, sorry!\n", + dev_bus_name(dev))) + return -EBUSY; + + *ops = dev_iommu_ops(dev); + return 0; +} + struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus) { - return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED); + const struct iommu_ops *ops = NULL; + int err = bus_for_each_dev(bus, NULL, &ops, __iommu_domain_alloc_dev); + + if (err || !ops) + return NULL; + + return __iommu_domain_alloc(ops, NULL, IOMMU_DOMAIN_UNMANAGED); } EXPORT_SYMBOL_GPL(iommu_domain_alloc); @@ -2024,7 +2019,8 @@ void iommu_domain_free(struct iommu_domain *domain) if (domain->type == IOMMU_DOMAIN_SVA) mmdrop(domain->mm); iommu_put_dma_cookie(domain); - domain->ops->free(domain); + if (domain->ops->free) + domain->ops->free(domain); } EXPORT_SYMBOL_GPL(iommu_domain_free); @@ -2157,10 +2153,16 @@ struct iommu_domain *iommu_get_dma_domain(struct device *dev) static int __iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group) { + struct device *dev; + if (group->domain && group->domain != group->default_domain && group->domain != group->blocking_domain) return -EBUSY; + dev = iommu_group_first_dev(group); + if (!dev_has_iommu(dev) || dev_iommu_ops(dev) != domain->owner) + return -EINVAL; + return __iommu_group_set_domain(group, domain); } @@ -2918,8 +2920,8 @@ EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids); */ int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat) { - if (dev->iommu && dev->iommu->iommu_dev) { - const struct iommu_ops *ops = dev->iommu->iommu_dev->ops; + if (dev_has_iommu(dev)) { + const struct iommu_ops *ops = dev_iommu_ops(dev); if (ops->dev_enable_feat) return ops->dev_enable_feat(dev, feat); @@ -2934,8 +2936,8 @@ EXPORT_SYMBOL_GPL(iommu_dev_enable_feature); */ int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat) { - if (dev->iommu && dev->iommu->iommu_dev) { - const struct iommu_ops *ops = dev->iommu->iommu_dev->ops; + if (dev_has_iommu(dev)) { + const struct iommu_ops *ops = dev_iommu_ops(dev); if (ops->dev_disable_feat) return ops->dev_disable_feat(dev, feat); @@ -2971,21 +2973,9 @@ static int iommu_setup_default_domain(struct iommu_group *group, if (req_type < 0) return -EINVAL; - /* - * There are still some drivers which don't support default domains, so - * we ignore the failure and leave group->default_domain NULL. - * - * We assume that the iommu driver starts up the device in - * 'set_platform_dma_ops' mode if it does not support default domains. - */ dom = iommu_group_alloc_default_domain(group, req_type); - if (!dom) { - /* Once in default_domain mode we never leave */ - if (group->default_domain) - return -ENODEV; - group->default_domain = NULL; - return 0; - } + if (IS_ERR(dom)) + return PTR_ERR(dom); if (group->default_domain == dom) return 0; @@ -3211,24 +3201,23 @@ void iommu_device_unuse_default_domain(struct device *dev) static int __iommu_group_alloc_blocking_domain(struct iommu_group *group) { - struct group_device *dev = - list_first_entry(&group->devices, struct group_device, list); + struct iommu_domain *domain; if (group->blocking_domain) return 0; - group->blocking_domain = - __iommu_domain_alloc(dev->dev->bus, IOMMU_DOMAIN_BLOCKED); - if (!group->blocking_domain) { + domain = __iommu_group_domain_alloc(group, IOMMU_DOMAIN_BLOCKED); + if (IS_ERR(domain)) { /* * For drivers that do not yet understand IOMMU_DOMAIN_BLOCKED * create an empty domain instead. 
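Editor's note: the allocation fast paths above hand back static special domains when a driver provides them instead of calling ->domain_alloc(). A hedged sketch of the driver-side declarations they consume; all example_* symbols are hypothetical, and the iommu_ops fields are the ones read by __iommu_domain_alloc() in this patch:

#include <linux/iommu.h>
#include <linux/sizes.h>

/* Hypothetical driver entry points, assumed to be defined elsewhere. */
extern int example_identity_attach_dev(struct iommu_domain *domain, struct device *dev);
extern int example_blocked_attach_dev(struct iommu_domain *domain, struct device *dev);
extern struct iommu_domain *example_domain_alloc_paging(struct device *dev);
extern struct iommu_device *example_probe_device(struct device *dev);

static const struct iommu_domain_ops example_identity_ops = {
	.attach_dev = example_identity_attach_dev,
};

static struct iommu_domain example_identity_domain = {
	.type = IOMMU_DOMAIN_IDENTITY,
	.ops  = &example_identity_ops,
};

static const struct iommu_domain_ops example_blocked_ops = {
	.attach_dev = example_blocked_attach_dev,
};

static struct iommu_domain example_blocked_domain = {
	.type = IOMMU_DOMAIN_BLOCKED,
	.ops  = &example_blocked_ops,
};

static const struct iommu_ops example_iommu_ops = {
	/* Returned directly for IDENTITY/BLOCKED requests by the core. */
	.identity_domain     = &example_identity_domain,
	.blocked_domain      = &example_blocked_domain,
	.domain_alloc_paging = example_domain_alloc_paging,
	.probe_device        = example_probe_device,
	.device_group        = generic_device_group,
	.pgsize_bitmap       = SZ_4K,
};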
*/ - group->blocking_domain = __iommu_domain_alloc( - dev->dev->bus, IOMMU_DOMAIN_UNMANAGED); - if (!group->blocking_domain) - return -EINVAL; + domain = __iommu_group_domain_alloc(group, + IOMMU_DOMAIN_UNMANAGED); + if (IS_ERR(domain)) + return PTR_ERR(domain); } + group->blocking_domain = domain; return 0; } @@ -3407,20 +3396,21 @@ static int __iommu_set_group_pasid(struct iommu_domain *domain, if (device == last_gdev) break; - ops->remove_dev_pasid(device->dev, pasid); + ops->remove_dev_pasid(device->dev, pasid, domain); } return ret; } static void __iommu_remove_group_pasid(struct iommu_group *group, - ioasid_t pasid) + ioasid_t pasid, + struct iommu_domain *domain) { struct group_device *device; const struct iommu_ops *ops; for_each_group_device(group, device) { ops = dev_iommu_ops(device->dev); - ops->remove_dev_pasid(device->dev, pasid); + ops->remove_dev_pasid(device->dev, pasid, domain); } } @@ -3429,14 +3419,15 @@ static void __iommu_remove_group_pasid(struct iommu_group *group, * @domain: the iommu domain. * @dev: the attached device. * @pasid: the pasid of the device. + * @handle: the attach handle. * * Return: 0 on success, or an error. */ int iommu_attach_device_pasid(struct iommu_domain *domain, - struct device *dev, ioasid_t pasid) + struct device *dev, ioasid_t pasid, + struct iommu_attach_handle *handle) { struct iommu_group *group; - void *curr; int ret; if (!domain->ops->set_dev_pasid) @@ -3446,12 +3437,17 @@ int iommu_attach_device_pasid(struct iommu_domain *domain, if (!group) return -ENODEV; + if (!dev_has_iommu(dev) || dev_iommu_ops(dev) != domain->owner) + return -EINVAL; + mutex_lock(&group->mutex); - curr = xa_cmpxchg(&group->pasid_array, pasid, NULL, domain, GFP_KERNEL); - if (curr) { - ret = xa_err(curr) ? : -EBUSY; + + if (handle) + handle->domain = domain; + + ret = xa_insert(&group->pasid_array, pasid, handle, GFP_KERNEL); + if (ret) goto out_unlock; - } ret = __iommu_set_group_pasid(domain, group, pasid); if (ret) @@ -3479,69 +3475,14 @@ void iommu_detach_device_pasid(struct iommu_domain *domain, struct device *dev, struct iommu_group *group = iommu_group_get(dev); mutex_lock(&group->mutex); - __iommu_remove_group_pasid(group, pasid); - WARN_ON(xa_erase(&group->pasid_array, pasid) != domain); + __iommu_remove_group_pasid(group, pasid, domain); + xa_erase(&group->pasid_array, pasid); mutex_unlock(&group->mutex); iommu_group_put(group); } EXPORT_SYMBOL_GPL(iommu_detach_device_pasid); -/* - * iommu_get_domain_for_dev_pasid() - Retrieve domain for @pasid of @dev - * @dev: the queried device - * @pasid: the pasid of the device - * @type: matched domain type, 0 for any match - * - * This is a variant of iommu_get_domain_for_dev(). It returns the existing - * domain attached to pasid of a device. Callers must hold a lock around this - * function, and both iommu_attach/detach_dev_pasid() whenever a domain of - * type is being manipulated. This API does not internally resolve races with - * attach/detach. - * - * Return: attached domain on success, NULL otherwise. 
- */ -struct iommu_domain *iommu_get_domain_for_dev_pasid(struct device *dev, - ioasid_t pasid, - unsigned int type) -{ - struct iommu_domain *domain; - struct iommu_group *group; - - group = iommu_group_get(dev); - if (!group) - return NULL; - - xa_lock(&group->pasid_array); - domain = xa_load(&group->pasid_array, pasid); - if (type && domain && domain->type != type) - domain = ERR_PTR(-EBUSY); - xa_unlock(&group->pasid_array); - iommu_group_put(group); - - return domain; -} -EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev_pasid); - -struct iommu_domain *iommu_sva_domain_alloc(struct device *dev, - struct mm_struct *mm) -{ - const struct iommu_ops *ops = dev_iommu_ops(dev); - struct iommu_domain *domain; - - domain = ops->domain_alloc(IOMMU_DOMAIN_SVA); - if (!domain) - return NULL; - - domain->type = IOMMU_DOMAIN_SVA; - mmgrab(mm); - domain->mm = mm; - domain->iopf_handler = iommu_sva_handle_iopf; - domain->fault_data = mm; - - return domain; -} - ioasid_t iommu_alloc_global_pasid(struct device *dev) { int ret; @@ -3568,3 +3509,137 @@ void iommu_free_global_pasid(ioasid_t pasid) ida_free(&iommu_global_pasid_ida, pasid); } EXPORT_SYMBOL_GPL(iommu_free_global_pasid); + +/** + * iommu_attach_handle_get - Return the attach handle + * @group: the iommu group that domain was attached to + * @pasid: the pasid within the group + * @type: matched domain type, 0 for any match + * + * Return handle or ERR_PTR(-ENOENT) on none, ERR_PTR(-EBUSY) on mismatch. + * + * Return the attach handle to the caller. The life cycle of an iommu attach + * handle is from the time when the domain is attached to the time when the + * domain is detached. Callers are required to synchronize the call of + * iommu_attach_handle_get() with domain attachment and detachment. The attach + * handle can only be used during its life cycle. + */ +struct iommu_attach_handle * +iommu_attach_handle_get(struct iommu_group *group, ioasid_t pasid, unsigned int type) +{ + struct iommu_attach_handle *handle; + + xa_lock(&group->pasid_array); + handle = xa_load(&group->pasid_array, pasid); + if (!handle) + handle = ERR_PTR(-ENOENT); + else if (type && handle->domain->type != type) + handle = ERR_PTR(-EBUSY); + xa_unlock(&group->pasid_array); + + return handle; +} +EXPORT_SYMBOL_NS_GPL(iommu_attach_handle_get, IOMMUFD_INTERNAL); + +/** + * iommu_attach_group_handle - Attach an IOMMU domain to an IOMMU group + * @domain: IOMMU domain to attach + * @group: IOMMU group that will be attached + * @handle: attach handle + * + * Returns 0 on success and error code on failure. + * + * This is a variant of iommu_attach_group(). It allows the caller to provide + * an attach handle and use it when the domain is attached. This is currently + * used by IOMMUFD to deliver the I/O page faults. 
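Editor's note: a hedged sketch of how an in-kernel fault consumer is expected to use the attach-handle helpers declared in iommu-priv.h (exported in the IOMMUFD_INTERNAL namespace). The core handle is embedded in a private structure at attach time and recovered with container_of() when a fault is looked up later, the same pattern iommu-sva.c uses for struct iommu_sva. struct example_ctx and the example_* functions are hypothetical.

#include <linux/iommu.h>
#include <linux/err.h>
#include "iommu-priv.h"	/* drivers/iommu internal header */

struct example_ctx {
	struct iommu_attach_handle handle;	/* must remain embedded */
	/* consumer-private fault bookkeeping ... */
};

static int example_attach(struct iommu_domain *domain,
			  struct iommu_group *group,
			  struct example_ctx *ctx)
{
	/* Stores &ctx->handle in the group's pasid_array at IOMMU_NO_PASID. */
	return iommu_attach_group_handle(domain, group, &ctx->handle);
}

static struct example_ctx *example_ctx_from_group(struct iommu_group *group)
{
	struct iommu_attach_handle *handle;

	handle = iommu_attach_handle_get(group, IOMMU_NO_PASID, 0);
	if (IS_ERR(handle))
		return NULL;

	return container_of(handle, struct example_ctx, handle);
}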
+ */ +int iommu_attach_group_handle(struct iommu_domain *domain, + struct iommu_group *group, + struct iommu_attach_handle *handle) +{ + int ret; + + if (handle) + handle->domain = domain; + + mutex_lock(&group->mutex); + ret = xa_insert(&group->pasid_array, IOMMU_NO_PASID, handle, GFP_KERNEL); + if (ret) + goto err_unlock; + + ret = __iommu_attach_group(domain, group); + if (ret) + goto err_erase; + mutex_unlock(&group->mutex); + + return 0; +err_erase: + xa_erase(&group->pasid_array, IOMMU_NO_PASID); +err_unlock: + mutex_unlock(&group->mutex); + return ret; +} +EXPORT_SYMBOL_NS_GPL(iommu_attach_group_handle, IOMMUFD_INTERNAL); + +/** + * iommu_detach_group_handle - Detach an IOMMU domain from an IOMMU group + * @domain: IOMMU domain to attach + * @group: IOMMU group that will be attached + * + * Detach the specified IOMMU domain from the specified IOMMU group. + * It must be used in conjunction with iommu_attach_group_handle(). + */ +void iommu_detach_group_handle(struct iommu_domain *domain, + struct iommu_group *group) +{ + mutex_lock(&group->mutex); + __iommu_group_set_core_domain(group); + xa_erase(&group->pasid_array, IOMMU_NO_PASID); + mutex_unlock(&group->mutex); +} +EXPORT_SYMBOL_NS_GPL(iommu_detach_group_handle, IOMMUFD_INTERNAL); + +/** + * iommu_replace_group_handle - replace the domain that a group is attached to + * @group: IOMMU group that will be attached to the new domain + * @new_domain: new IOMMU domain to replace with + * @handle: attach handle + * + * This is a variant of iommu_group_replace_domain(). It allows the caller to + * provide an attach handle for the new domain and use it when the domain is + * attached. + */ +int iommu_replace_group_handle(struct iommu_group *group, + struct iommu_domain *new_domain, + struct iommu_attach_handle *handle) +{ + void *curr; + int ret; + + if (!new_domain) + return -EINVAL; + + mutex_lock(&group->mutex); + if (handle) { + ret = xa_reserve(&group->pasid_array, IOMMU_NO_PASID, GFP_KERNEL); + if (ret) + goto err_unlock; + } + + ret = __iommu_group_set_domain(group, new_domain); + if (ret) + goto err_release; + + curr = xa_store(&group->pasid_array, IOMMU_NO_PASID, handle, GFP_KERNEL); + WARN_ON(xa_is_err(curr)); + + mutex_unlock(&group->mutex); + + return 0; +err_release: + xa_release(&group->pasid_array, IOMMU_NO_PASID); +err_unlock: + mutex_unlock(&group->mutex); + return ret; +} +EXPORT_SYMBOL_NS_GPL(iommu_replace_group_handle, IOMMUFD_INTERNAL); diff --git a/drivers/iommu/iommufd/Makefile b/drivers/iommu/iommufd/Makefile index 8aeba81800c5..cf4605962bea 100644 --- a/drivers/iommu/iommufd/Makefile +++ b/drivers/iommu/iommufd/Makefile @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only iommufd-y := \ device.o \ + fault.o \ hw_pagetable.o \ io_pagetable.o \ ioas.o \ @@ -11,3 +12,4 @@ iommufd-y := \ iommufd-$(CONFIG_IOMMUFD_TEST) += selftest.o obj-$(CONFIG_IOMMUFD) += iommufd.o +obj-$(CONFIG_IOMMUFD_DRIVER) += iova_bitmap.o diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c index ff6386fd1e8f..21021a2cecba 100644 --- a/drivers/iommu/iommufd/device.c +++ b/drivers/iommu/iommufd/device.c @@ -215,6 +215,7 @@ struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx, refcount_inc(&idev->obj.users); /* igroup refcount moves into iommufd_device */ idev->igroup = igroup; + mutex_init(&idev->iopf_lock); /* * If the caller fails after this success it must call @@ -293,7 +294,7 @@ u32 iommufd_device_to_id(struct iommufd_device *idev) EXPORT_SYMBOL_NS_GPL(iommufd_device_to_id, IOMMUFD); static int 
iommufd_group_setup_msi(struct iommufd_group *igroup, - struct iommufd_hw_pagetable *hwpt) + struct iommufd_hwpt_paging *hwpt_paging) { phys_addr_t sw_msi_start = igroup->sw_msi_start; int rc; @@ -311,8 +312,9 @@ static int iommufd_group_setup_msi(struct iommufd_group *igroup, * matches what the IRQ layer actually expects in a newly created * domain. */ - if (sw_msi_start != PHYS_ADDR_MAX && !hwpt->msi_cookie) { - rc = iommu_get_msi_cookie(hwpt->domain, sw_msi_start); + if (sw_msi_start != PHYS_ADDR_MAX && !hwpt_paging->msi_cookie) { + rc = iommu_get_msi_cookie(hwpt_paging->common.domain, + sw_msi_start); if (rc) return rc; @@ -320,7 +322,31 @@ static int iommufd_group_setup_msi(struct iommufd_group *igroup, * iommu_get_msi_cookie() can only be called once per domain, * it returns -EBUSY on later calls. */ - hwpt->msi_cookie = true; + hwpt_paging->msi_cookie = true; + } + return 0; +} + +static int iommufd_hwpt_paging_attach(struct iommufd_hwpt_paging *hwpt_paging, + struct iommufd_device *idev) +{ + int rc; + + lockdep_assert_held(&idev->igroup->lock); + + rc = iopt_table_enforce_dev_resv_regions(&hwpt_paging->ioas->iopt, + idev->dev, + &idev->igroup->sw_msi_start); + if (rc) + return rc; + + if (list_empty(&idev->igroup->device_list)) { + rc = iommufd_group_setup_msi(idev->igroup, hwpt_paging); + if (rc) { + iopt_remove_reserved_iova(&hwpt_paging->ioas->iopt, + idev->dev); + return rc; + } } return 0; } @@ -337,18 +363,12 @@ int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt, goto err_unlock; } - /* Try to upgrade the domain we have */ - if (idev->enforce_cache_coherency) { - rc = iommufd_hw_pagetable_enforce_cc(hwpt); + if (hwpt_is_paging(hwpt)) { + rc = iommufd_hwpt_paging_attach(to_hwpt_paging(hwpt), idev); if (rc) goto err_unlock; } - rc = iopt_table_enforce_dev_resv_regions(&hwpt->ioas->iopt, idev->dev, - &idev->igroup->sw_msi_start); - if (rc) - goto err_unlock; - /* * Only attach to the group once for the first device that is in the * group. All the other devices will follow this attachment. The user @@ -357,11 +377,7 @@ int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt, * attachment. 
*/ if (list_empty(&idev->igroup->device_list)) { - rc = iommufd_group_setup_msi(idev->igroup, hwpt); - if (rc) - goto err_unresv; - - rc = iommu_attach_group(hwpt->domain, idev->igroup->group); + rc = iommufd_hwpt_attach_device(hwpt, idev); if (rc) goto err_unresv; idev->igroup->hwpt = hwpt; @@ -371,7 +387,9 @@ int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt, mutex_unlock(&idev->igroup->lock); return 0; err_unresv: - iopt_remove_reserved_iova(&hwpt->ioas->iopt, idev->dev); + if (hwpt_is_paging(hwpt)) + iopt_remove_reserved_iova(&to_hwpt_paging(hwpt)->ioas->iopt, + idev->dev); err_unlock: mutex_unlock(&idev->igroup->lock); return rc; @@ -385,10 +403,12 @@ iommufd_hw_pagetable_detach(struct iommufd_device *idev) mutex_lock(&idev->igroup->lock); list_del(&idev->group_item); if (list_empty(&idev->igroup->device_list)) { - iommu_detach_group(hwpt->domain, idev->igroup->group); + iommufd_hwpt_detach_device(hwpt, idev); idev->igroup->hwpt = NULL; } - iopt_remove_reserved_iova(&hwpt->ioas->iopt, idev->dev); + if (hwpt_is_paging(hwpt)) + iopt_remove_reserved_iova(&to_hwpt_paging(hwpt)->ioas->iopt, + idev->dev); mutex_unlock(&idev->igroup->lock); /* Caller must destroy hwpt */ @@ -418,14 +438,55 @@ static bool iommufd_device_is_attached(struct iommufd_device *idev) return false; } +static void +iommufd_group_remove_reserved_iova(struct iommufd_group *igroup, + struct iommufd_hwpt_paging *hwpt_paging) +{ + struct iommufd_device *cur; + + lockdep_assert_held(&igroup->lock); + + list_for_each_entry(cur, &igroup->device_list, group_item) + iopt_remove_reserved_iova(&hwpt_paging->ioas->iopt, cur->dev); +} + +static int +iommufd_group_do_replace_paging(struct iommufd_group *igroup, + struct iommufd_hwpt_paging *hwpt_paging) +{ + struct iommufd_hw_pagetable *old_hwpt = igroup->hwpt; + struct iommufd_device *cur; + int rc; + + lockdep_assert_held(&igroup->lock); + + if (!hwpt_is_paging(old_hwpt) || + hwpt_paging->ioas != to_hwpt_paging(old_hwpt)->ioas) { + list_for_each_entry(cur, &igroup->device_list, group_item) { + rc = iopt_table_enforce_dev_resv_regions( + &hwpt_paging->ioas->iopt, cur->dev, NULL); + if (rc) + goto err_unresv; + } + } + + rc = iommufd_group_setup_msi(igroup, hwpt_paging); + if (rc) + goto err_unresv; + return 0; + +err_unresv: + iommufd_group_remove_reserved_iova(igroup, hwpt_paging); + return rc; +} + static struct iommufd_hw_pagetable * iommufd_device_do_replace(struct iommufd_device *idev, struct iommufd_hw_pagetable *hwpt) { struct iommufd_group *igroup = idev->igroup; struct iommufd_hw_pagetable *old_hwpt; - unsigned int num_devices = 0; - struct iommufd_device *cur; + unsigned int num_devices; int rc; mutex_lock(&idev->igroup->lock); @@ -445,42 +506,27 @@ iommufd_device_do_replace(struct iommufd_device *idev, return NULL; } - /* Try to upgrade the domain we have */ - list_for_each_entry(cur, &igroup->device_list, group_item) { - num_devices++; - if (cur->enforce_cache_coherency) { - rc = iommufd_hw_pagetable_enforce_cc(hwpt); - if (rc) - goto err_unlock; - } - } - old_hwpt = igroup->hwpt; - if (hwpt->ioas != old_hwpt->ioas) { - list_for_each_entry(cur, &igroup->device_list, group_item) { - rc = iopt_table_enforce_dev_resv_regions( - &hwpt->ioas->iopt, cur->dev, NULL); - if (rc) - goto err_unresv; - } + if (hwpt_is_paging(hwpt)) { + rc = iommufd_group_do_replace_paging(igroup, + to_hwpt_paging(hwpt)); + if (rc) + goto err_unlock; } - rc = iommufd_group_setup_msi(idev->igroup, hwpt); - if (rc) - goto err_unresv; - - rc = iommu_group_replace_domain(igroup->group, 
hwpt->domain); + rc = iommufd_hwpt_replace_device(idev, hwpt, old_hwpt); if (rc) goto err_unresv; - if (hwpt->ioas != old_hwpt->ioas) { - list_for_each_entry(cur, &igroup->device_list, group_item) - iopt_remove_reserved_iova(&old_hwpt->ioas->iopt, - cur->dev); - } + if (hwpt_is_paging(old_hwpt) && + (!hwpt_is_paging(hwpt) || + to_hwpt_paging(hwpt)->ioas != to_hwpt_paging(old_hwpt)->ioas)) + iommufd_group_remove_reserved_iova(igroup, + to_hwpt_paging(old_hwpt)); igroup->hwpt = hwpt; + num_devices = list_count_nodes(&igroup->device_list); /* * Move the refcounts held by the device_list to the new hwpt. Retain a * refcount for this thread as the caller will free it. @@ -494,8 +540,9 @@ iommufd_device_do_replace(struct iommufd_device *idev, /* Caller must destroy old_hwpt */ return old_hwpt; err_unresv: - list_for_each_entry(cur, &igroup->device_list, group_item) - iopt_remove_reserved_iova(&hwpt->ioas->iopt, cur->dev); + if (hwpt_is_paging(hwpt)) + iommufd_group_remove_reserved_iova(igroup, + to_hwpt_paging(hwpt)); err_unlock: mutex_unlock(&idev->igroup->lock); return ERR_PTR(rc); @@ -523,6 +570,7 @@ iommufd_device_auto_get_domain(struct iommufd_device *idev, */ bool immediate_attach = do_attach == iommufd_device_do_attach; struct iommufd_hw_pagetable *destroy_hwpt; + struct iommufd_hwpt_paging *hwpt_paging; struct iommufd_hw_pagetable *hwpt; /* @@ -531,15 +579,16 @@ iommufd_device_auto_get_domain(struct iommufd_device *idev, * other. */ mutex_lock(&ioas->mutex); - list_for_each_entry(hwpt, &ioas->hwpt_list, hwpt_item) { - if (!hwpt->auto_domain) + list_for_each_entry(hwpt_paging, &ioas->hwpt_list, hwpt_item) { + if (!hwpt_paging->auto_domain) continue; + hwpt = &hwpt_paging->common; if (!iommufd_lock_obj(&hwpt->obj)) continue; destroy_hwpt = (*do_attach)(idev, hwpt); if (IS_ERR(destroy_hwpt)) { - iommufd_put_object(&hwpt->obj); + iommufd_put_object(idev->ictx, &hwpt->obj); /* * -EINVAL means the domain is incompatible with the * device. 
Other error codes should propagate to @@ -551,16 +600,17 @@ iommufd_device_auto_get_domain(struct iommufd_device *idev, goto out_unlock; } *pt_id = hwpt->obj.id; - iommufd_put_object(&hwpt->obj); + iommufd_put_object(idev->ictx, &hwpt->obj); goto out_unlock; } - hwpt = iommufd_hw_pagetable_alloc(idev->ictx, ioas, idev, - immediate_attach); - if (IS_ERR(hwpt)) { - destroy_hwpt = ERR_CAST(hwpt); + hwpt_paging = iommufd_hwpt_paging_alloc(idev->ictx, ioas, idev, 0, + immediate_attach, NULL); + if (IS_ERR(hwpt_paging)) { + destroy_hwpt = ERR_CAST(hwpt_paging); goto out_unlock; } + hwpt = &hwpt_paging->common; if (!immediate_attach) { destroy_hwpt = (*do_attach)(idev, hwpt); @@ -570,7 +620,7 @@ iommufd_device_auto_get_domain(struct iommufd_device *idev, destroy_hwpt = NULL; } - hwpt->auto_domain = true; + hwpt_paging->auto_domain = true; *pt_id = hwpt->obj.id; iommufd_object_finalize(idev->ictx, &hwpt->obj); @@ -595,7 +645,8 @@ static int iommufd_device_change_pt(struct iommufd_device *idev, u32 *pt_id, return PTR_ERR(pt_obj); switch (pt_obj->type) { - case IOMMUFD_OBJ_HW_PAGETABLE: { + case IOMMUFD_OBJ_HWPT_NESTED: + case IOMMUFD_OBJ_HWPT_PAGING: { struct iommufd_hw_pagetable *hwpt = container_of(pt_obj, struct iommufd_hw_pagetable, obj); @@ -618,7 +669,7 @@ static int iommufd_device_change_pt(struct iommufd_device *idev, u32 *pt_id, destroy_hwpt = ERR_PTR(-EINVAL); goto out_put_pt_obj; } - iommufd_put_object(pt_obj); + iommufd_put_object(idev->ictx, pt_obj); /* This destruction has to be after we unlock everything */ if (destroy_hwpt) @@ -626,15 +677,15 @@ static int iommufd_device_change_pt(struct iommufd_device *idev, u32 *pt_id, return 0; out_put_pt_obj: - iommufd_put_object(pt_obj); + iommufd_put_object(idev->ictx, pt_obj); return PTR_ERR(destroy_hwpt); } /** * iommufd_device_attach - Connect a device to an iommu_domain * @idev: device to attach - * @pt_id: Input a IOMMUFD_OBJ_IOAS, or IOMMUFD_OBJ_HW_PAGETABLE - * Output the IOMMUFD_OBJ_HW_PAGETABLE ID + * @pt_id: Input a IOMMUFD_OBJ_IOAS, or IOMMUFD_OBJ_HWPT_PAGING + * Output the IOMMUFD_OBJ_HWPT_PAGING ID * * This connects the device to an iommu_domain, either automatically or manually * selected. Once this completes the device could do DMA. 
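For reference, a minimal sketch (not part of this patch; ictx, dev and ioas_id are placeholder variables, error handling trimmed) of how an in-kernel consumer such as VFIO drives this interface: bind the device, attach with an IOAS id, and read back the id of the automatically created HWPT_PAGING object.

	struct iommufd_device *idev;
	u32 dev_id;
	u32 pt_id = ioas_id;	/* in: IOAS id, out: HWPT_PAGING id */
	int rc;

	idev = iommufd_device_bind(ictx, dev, &dev_id);
	if (IS_ERR(idev))
		return PTR_ERR(idev);

	rc = iommufd_device_attach(idev, &pt_id);
	if (rc) {
		iommufd_device_unbind(idev);
		return rc;
	}
	/* pt_id now names the IOMMUFD_OBJ_HWPT_PAGING object; DMA may start */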
@@ -662,8 +713,8 @@ EXPORT_SYMBOL_NS_GPL(iommufd_device_attach, IOMMUFD); /** * iommufd_device_replace - Change the device's iommu_domain * @idev: device to change - * @pt_id: Input a IOMMUFD_OBJ_IOAS, or IOMMUFD_OBJ_HW_PAGETABLE - * Output the IOMMUFD_OBJ_HW_PAGETABLE ID + * @pt_id: Input a IOMMUFD_OBJ_IOAS, or IOMMUFD_OBJ_HWPT_PAGING + * Output the IOMMUFD_OBJ_HWPT_PAGING ID * * This is the same as:: * @@ -758,7 +809,7 @@ static int iommufd_access_change_ioas_id(struct iommufd_access *access, u32 id) if (IS_ERR(ioas)) return PTR_ERR(ioas); rc = iommufd_access_change_ioas(access, ioas); - iommufd_put_object(&ioas->obj); + iommufd_put_object(access->ictx, &ioas->obj); return rc; } @@ -907,7 +958,7 @@ void iommufd_access_notify_unmap(struct io_pagetable *iopt, unsigned long iova, access->ops->unmap(access->data, iova, length); - iommufd_put_object(&access->obj); + iommufd_put_object(access->ictx, &access->obj); xa_lock(&ioas->iopt.access_list); } xa_unlock(&ioas->iopt.access_list); @@ -1201,10 +1252,14 @@ int iommufd_get_hw_info(struct iommufd_ucmd *ucmd) */ cmd->data_len = data_len; + cmd->out_capabilities = 0; + if (device_iommu_capable(idev->dev, IOMMU_CAP_DIRTY_TRACKING)) + cmd->out_capabilities |= IOMMU_HW_CAP_DIRTY_TRACKING; + rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd)); out_free: kfree(data); out_put: - iommufd_put_object(&idev->obj); + iommufd_put_object(ucmd->ictx, &idev->obj); return rc; } diff --git a/drivers/iommu/iommufd/fault.c b/drivers/iommu/iommufd/fault.c new file mode 100644 index 000000000000..54d6cd20a673 --- /dev/null +++ b/drivers/iommu/iommufd/fault.c @@ -0,0 +1,433 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2024 Intel Corporation + */ +#define pr_fmt(fmt) "iommufd: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../iommu-priv.h" +#include "iommufd_private.h" + +static int iommufd_fault_iopf_enable(struct iommufd_device *idev) +{ + struct device *dev = idev->dev; + int ret; + + /* + * Once we turn on PCI/PRI support for VF, the response failure code + * should not be forwarded to the hardware due to PRI being a shared + * resource between PF and VFs. There is no coordination for this + * shared capability. This waits for a vPRI reset to recover. + */ + if (dev_is_pci(dev) && to_pci_dev(dev)->is_virtfn) + return -EINVAL; + + mutex_lock(&idev->iopf_lock); + /* Device iopf has already been on. 
*/ + if (++idev->iopf_enabled > 1) { + mutex_unlock(&idev->iopf_lock); + return 0; + } + + ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_IOPF); + if (ret) + --idev->iopf_enabled; + mutex_unlock(&idev->iopf_lock); + + return ret; +} + +static void iommufd_fault_iopf_disable(struct iommufd_device *idev) +{ + mutex_lock(&idev->iopf_lock); + if (!WARN_ON(idev->iopf_enabled == 0)) { + if (--idev->iopf_enabled == 0) + iommu_dev_disable_feature(idev->dev, IOMMU_DEV_FEAT_IOPF); + } + mutex_unlock(&idev->iopf_lock); +} + +static int __fault_domain_attach_dev(struct iommufd_hw_pagetable *hwpt, + struct iommufd_device *idev) +{ + struct iommufd_attach_handle *handle; + int ret; + + handle = kzalloc(sizeof(*handle), GFP_KERNEL); + if (!handle) + return -ENOMEM; + + handle->idev = idev; + ret = iommu_attach_group_handle(hwpt->domain, idev->igroup->group, + &handle->handle); + if (ret) + kfree(handle); + + return ret; +} + +int iommufd_fault_domain_attach_dev(struct iommufd_hw_pagetable *hwpt, + struct iommufd_device *idev) +{ + int ret; + + if (!hwpt->fault) + return -EINVAL; + + ret = iommufd_fault_iopf_enable(idev); + if (ret) + return ret; + + ret = __fault_domain_attach_dev(hwpt, idev); + if (ret) + iommufd_fault_iopf_disable(idev); + + return ret; +} + +static void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt, + struct iommufd_attach_handle *handle) +{ + struct iommufd_fault *fault = hwpt->fault; + struct iopf_group *group, *next; + unsigned long index; + + if (!fault) + return; + + mutex_lock(&fault->mutex); + list_for_each_entry_safe(group, next, &fault->deliver, node) { + if (group->attach_handle != &handle->handle) + continue; + list_del(&group->node); + iopf_group_response(group, IOMMU_PAGE_RESP_INVALID); + iopf_free_group(group); + } + + xa_for_each(&fault->response, index, group) { + if (group->attach_handle != &handle->handle) + continue; + xa_erase(&fault->response, index); + iopf_group_response(group, IOMMU_PAGE_RESP_INVALID); + iopf_free_group(group); + } + mutex_unlock(&fault->mutex); +} + +static struct iommufd_attach_handle * +iommufd_device_get_attach_handle(struct iommufd_device *idev) +{ + struct iommu_attach_handle *handle; + + handle = iommu_attach_handle_get(idev->igroup->group, IOMMU_NO_PASID, 0); + if (!handle) + return NULL; + + return to_iommufd_handle(handle); +} + +void iommufd_fault_domain_detach_dev(struct iommufd_hw_pagetable *hwpt, + struct iommufd_device *idev) +{ + struct iommufd_attach_handle *handle; + + handle = iommufd_device_get_attach_handle(idev); + iommu_detach_group_handle(hwpt->domain, idev->igroup->group); + iommufd_auto_response_faults(hwpt, handle); + iommufd_fault_iopf_disable(idev); + kfree(handle); +} + +static int __fault_domain_replace_dev(struct iommufd_device *idev, + struct iommufd_hw_pagetable *hwpt, + struct iommufd_hw_pagetable *old) +{ + struct iommufd_attach_handle *handle, *curr = NULL; + int ret; + + if (old->fault) + curr = iommufd_device_get_attach_handle(idev); + + if (hwpt->fault) { + handle = kzalloc(sizeof(*handle), GFP_KERNEL); + if (!handle) + return -ENOMEM; + + handle->handle.domain = hwpt->domain; + handle->idev = idev; + ret = iommu_replace_group_handle(idev->igroup->group, + hwpt->domain, &handle->handle); + } else { + ret = iommu_replace_group_handle(idev->igroup->group, + hwpt->domain, NULL); + } + + if (!ret && curr) { + iommufd_auto_response_faults(old, curr); + kfree(curr); + } + + return ret; +} + +int iommufd_fault_domain_replace_dev(struct iommufd_device *idev, + struct iommufd_hw_pagetable 
*hwpt, + struct iommufd_hw_pagetable *old) +{ + bool iopf_off = !hwpt->fault && old->fault; + bool iopf_on = hwpt->fault && !old->fault; + int ret; + + if (iopf_on) { + ret = iommufd_fault_iopf_enable(idev); + if (ret) + return ret; + } + + ret = __fault_domain_replace_dev(idev, hwpt, old); + if (ret) { + if (iopf_on) + iommufd_fault_iopf_disable(idev); + return ret; + } + + if (iopf_off) + iommufd_fault_iopf_disable(idev); + + return 0; +} + +void iommufd_fault_destroy(struct iommufd_object *obj) +{ + struct iommufd_fault *fault = container_of(obj, struct iommufd_fault, obj); + struct iopf_group *group, *next; + + /* + * The iommufd object's reference count is zero at this point. + * We can be confident that no other threads are currently + * accessing this pointer. Therefore, acquiring the mutex here + * is unnecessary. + */ + list_for_each_entry_safe(group, next, &fault->deliver, node) { + list_del(&group->node); + iopf_group_response(group, IOMMU_PAGE_RESP_INVALID); + iopf_free_group(group); + } +} + +static void iommufd_compose_fault_message(struct iommu_fault *fault, + struct iommu_hwpt_pgfault *hwpt_fault, + struct iommufd_device *idev, + u32 cookie) +{ + hwpt_fault->flags = fault->prm.flags; + hwpt_fault->dev_id = idev->obj.id; + hwpt_fault->pasid = fault->prm.pasid; + hwpt_fault->grpid = fault->prm.grpid; + hwpt_fault->perm = fault->prm.perm; + hwpt_fault->addr = fault->prm.addr; + hwpt_fault->length = 0; + hwpt_fault->cookie = cookie; +} + +static ssize_t iommufd_fault_fops_read(struct file *filep, char __user *buf, + size_t count, loff_t *ppos) +{ + size_t fault_size = sizeof(struct iommu_hwpt_pgfault); + struct iommufd_fault *fault = filep->private_data; + struct iommu_hwpt_pgfault data; + struct iommufd_device *idev; + struct iopf_group *group; + struct iopf_fault *iopf; + size_t done = 0; + int rc = 0; + + if (*ppos || count % fault_size) + return -ESPIPE; + + mutex_lock(&fault->mutex); + while (!list_empty(&fault->deliver) && count > done) { + group = list_first_entry(&fault->deliver, + struct iopf_group, node); + + if (group->fault_count * fault_size > count - done) + break; + + rc = xa_alloc(&fault->response, &group->cookie, group, + xa_limit_32b, GFP_KERNEL); + if (rc) + break; + + idev = to_iommufd_handle(group->attach_handle)->idev; + list_for_each_entry(iopf, &group->faults, list) { + iommufd_compose_fault_message(&iopf->fault, + &data, idev, + group->cookie); + if (copy_to_user(buf + done, &data, fault_size)) { + xa_erase(&fault->response, group->cookie); + rc = -EFAULT; + break; + } + done += fault_size; + } + + list_del(&group->node); + } + mutex_unlock(&fault->mutex); + + return done == 0 ? rc : done; +} + +static ssize_t iommufd_fault_fops_write(struct file *filep, const char __user *buf, + size_t count, loff_t *ppos) +{ + size_t response_size = sizeof(struct iommu_hwpt_page_response); + struct iommufd_fault *fault = filep->private_data; + struct iommu_hwpt_page_response response; + struct iopf_group *group; + size_t done = 0; + int rc = 0; + + if (*ppos || count % response_size) + return -ESPIPE; + + mutex_lock(&fault->mutex); + while (count > done) { + rc = copy_from_user(&response, buf + done, response_size); + if (rc) + break; + + group = xa_erase(&fault->response, response.cookie); + if (!group) { + rc = -EINVAL; + break; + } + + iopf_group_response(group, response.code); + iopf_free_group(group); + done += response_size; + } + mutex_unlock(&fault->mutex); + + return done == 0 ? 
rc : done; +} + +static __poll_t iommufd_fault_fops_poll(struct file *filep, + struct poll_table_struct *wait) +{ + struct iommufd_fault *fault = filep->private_data; + __poll_t pollflags = EPOLLOUT; + + poll_wait(filep, &fault->wait_queue, wait); + mutex_lock(&fault->mutex); + if (!list_empty(&fault->deliver)) + pollflags |= EPOLLIN | EPOLLRDNORM; + mutex_unlock(&fault->mutex); + + return pollflags; +} + +static int iommufd_fault_fops_release(struct inode *inode, struct file *filep) +{ + struct iommufd_fault *fault = filep->private_data; + + refcount_dec(&fault->obj.users); + iommufd_ctx_put(fault->ictx); + return 0; +} + +static const struct file_operations iommufd_fault_fops = { + .owner = THIS_MODULE, + .open = nonseekable_open, + .read = iommufd_fault_fops_read, + .write = iommufd_fault_fops_write, + .poll = iommufd_fault_fops_poll, + .release = iommufd_fault_fops_release, + .llseek = no_llseek, +}; + +int iommufd_fault_alloc(struct iommufd_ucmd *ucmd) +{ + struct iommu_fault_alloc *cmd = ucmd->cmd; + struct iommufd_fault *fault; + struct file *filep; + int fdno; + int rc; + + if (cmd->flags) + return -EOPNOTSUPP; + + fault = iommufd_object_alloc(ucmd->ictx, fault, IOMMUFD_OBJ_FAULT); + if (IS_ERR(fault)) + return PTR_ERR(fault); + + fault->ictx = ucmd->ictx; + INIT_LIST_HEAD(&fault->deliver); + xa_init_flags(&fault->response, XA_FLAGS_ALLOC1); + mutex_init(&fault->mutex); + init_waitqueue_head(&fault->wait_queue); + + filep = anon_inode_getfile("[iommufd-pgfault]", &iommufd_fault_fops, + fault, O_RDWR); + if (IS_ERR(filep)) { + rc = PTR_ERR(filep); + goto out_abort; + } + + refcount_inc(&fault->obj.users); + iommufd_ctx_get(fault->ictx); + fault->filep = filep; + + fdno = get_unused_fd_flags(O_CLOEXEC); + if (fdno < 0) { + rc = fdno; + goto out_fput; + } + + cmd->out_fault_id = fault->obj.id; + cmd->out_fault_fd = fdno; + + rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd)); + if (rc) + goto out_put_fdno; + iommufd_object_finalize(ucmd->ictx, &fault->obj); + + fd_install(fdno, fault->filep); + + return 0; +out_put_fdno: + put_unused_fd(fdno); +out_fput: + fput(filep); + refcount_dec(&fault->obj.users); + iommufd_ctx_put(fault->ictx); +out_abort: + iommufd_object_abort_and_destroy(ucmd->ictx, &fault->obj); + + return rc; +} + +int iommufd_fault_iopf_handler(struct iopf_group *group) +{ + struct iommufd_hw_pagetable *hwpt; + struct iommufd_fault *fault; + + hwpt = group->attach_handle->domain->fault_data; + fault = hwpt->fault; + + mutex_lock(&fault->mutex); + list_add_tail(&group->node, &fault->deliver); + mutex_unlock(&fault->mutex); + + wake_up_interruptible(&fault->wait_queue); + + return 0; +} diff --git a/drivers/iommu/iommufd/hw_pagetable.c b/drivers/iommu/iommufd/hw_pagetable.c index cf2c1504e20d..0350f9004741 100644 --- a/drivers/iommu/iommufd/hw_pagetable.c +++ b/drivers/iommu/iommufd/hw_pagetable.c @@ -5,62 +5,92 @@ #include #include +#include "../iommu-priv.h" #include "iommufd_private.h" -void iommufd_hw_pagetable_destroy(struct iommufd_object *obj) +static void __iommufd_hwpt_destroy(struct iommufd_hw_pagetable *hwpt) { - struct iommufd_hw_pagetable *hwpt = - container_of(obj, struct iommufd_hw_pagetable, obj); + if (hwpt->domain) + iommu_domain_free(hwpt->domain); - if (!list_empty(&hwpt->hwpt_item)) { - mutex_lock(&hwpt->ioas->mutex); - list_del(&hwpt->hwpt_item); - mutex_unlock(&hwpt->ioas->mutex); + if (hwpt->fault) + refcount_dec(&hwpt->fault->obj.users); +} - iopt_table_remove_domain(&hwpt->ioas->iopt, hwpt->domain); - } +void iommufd_hwpt_paging_destroy(struct 
iommufd_object *obj) +{ + struct iommufd_hwpt_paging *hwpt_paging = + container_of(obj, struct iommufd_hwpt_paging, common.obj); - if (hwpt->domain) - iommu_domain_free(hwpt->domain); + if (!list_empty(&hwpt_paging->hwpt_item)) { + mutex_lock(&hwpt_paging->ioas->mutex); + list_del(&hwpt_paging->hwpt_item); + mutex_unlock(&hwpt_paging->ioas->mutex); + + iopt_table_remove_domain(&hwpt_paging->ioas->iopt, + hwpt_paging->common.domain); + } - refcount_dec(&hwpt->ioas->obj.users); + __iommufd_hwpt_destroy(&hwpt_paging->common); + refcount_dec(&hwpt_paging->ioas->obj.users); } -void iommufd_hw_pagetable_abort(struct iommufd_object *obj) +void iommufd_hwpt_paging_abort(struct iommufd_object *obj) { - struct iommufd_hw_pagetable *hwpt = - container_of(obj, struct iommufd_hw_pagetable, obj); + struct iommufd_hwpt_paging *hwpt_paging = + container_of(obj, struct iommufd_hwpt_paging, common.obj); /* The ioas->mutex must be held until finalize is called. */ - lockdep_assert_held(&hwpt->ioas->mutex); + lockdep_assert_held(&hwpt_paging->ioas->mutex); - if (!list_empty(&hwpt->hwpt_item)) { - list_del_init(&hwpt->hwpt_item); - iopt_table_remove_domain(&hwpt->ioas->iopt, hwpt->domain); + if (!list_empty(&hwpt_paging->hwpt_item)) { + list_del_init(&hwpt_paging->hwpt_item); + iopt_table_remove_domain(&hwpt_paging->ioas->iopt, + hwpt_paging->common.domain); } - iommufd_hw_pagetable_destroy(obj); + iommufd_hwpt_paging_destroy(obj); } -int iommufd_hw_pagetable_enforce_cc(struct iommufd_hw_pagetable *hwpt) +void iommufd_hwpt_nested_destroy(struct iommufd_object *obj) { - if (hwpt->enforce_cache_coherency) + struct iommufd_hwpt_nested *hwpt_nested = + container_of(obj, struct iommufd_hwpt_nested, common.obj); + + __iommufd_hwpt_destroy(&hwpt_nested->common); + refcount_dec(&hwpt_nested->parent->common.obj.users); +} + +void iommufd_hwpt_nested_abort(struct iommufd_object *obj) +{ + iommufd_hwpt_nested_destroy(obj); +} + +static int +iommufd_hwpt_paging_enforce_cc(struct iommufd_hwpt_paging *hwpt_paging) +{ + struct iommu_domain *paging_domain = hwpt_paging->common.domain; + + if (hwpt_paging->enforce_cache_coherency) return 0; - if (hwpt->domain->ops->enforce_cache_coherency) - hwpt->enforce_cache_coherency = - hwpt->domain->ops->enforce_cache_coherency( - hwpt->domain); - if (!hwpt->enforce_cache_coherency) + if (paging_domain->ops->enforce_cache_coherency) + hwpt_paging->enforce_cache_coherency = + paging_domain->ops->enforce_cache_coherency( + paging_domain); + if (!hwpt_paging->enforce_cache_coherency) return -EINVAL; return 0; } /** - * iommufd_hw_pagetable_alloc() - Get an iommu_domain for a device + * iommufd_hwpt_paging_alloc() - Get a PAGING iommu_domain for a device * @ictx: iommufd context * @ioas: IOAS to associate the domain with * @idev: Device to get an iommu_domain for + * @flags: Flags from userspace * @immediate_attach: True if idev should be attached to the hwpt + * @user_data: The user provided driver specific data describing the domain to + * create * * Allocate a new iommu_domain and return it as a hw_pagetable. The HWPT * will be linked to the given ioas and upon return the underlying iommu_domain @@ -70,28 +100,53 @@ int iommufd_hw_pagetable_enforce_cc(struct iommufd_hw_pagetable *hwpt) * iommufd_object_abort_and_destroy() or iommufd_object_finalize() is called on * the returned hwpt. 
*/ -struct iommufd_hw_pagetable * -iommufd_hw_pagetable_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas, - struct iommufd_device *idev, bool immediate_attach) +struct iommufd_hwpt_paging * +iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas, + struct iommufd_device *idev, u32 flags, + bool immediate_attach, + const struct iommu_user_data *user_data) { + const u32 valid_flags = IOMMU_HWPT_ALLOC_NEST_PARENT | + IOMMU_HWPT_ALLOC_DIRTY_TRACKING; + const struct iommu_ops *ops = dev_iommu_ops(idev->dev); + struct iommufd_hwpt_paging *hwpt_paging; struct iommufd_hw_pagetable *hwpt; int rc; lockdep_assert_held(&ioas->mutex); - hwpt = iommufd_object_alloc(ictx, hwpt, IOMMUFD_OBJ_HW_PAGETABLE); - if (IS_ERR(hwpt)) - return hwpt; + if ((flags || user_data) && !ops->domain_alloc_user) + return ERR_PTR(-EOPNOTSUPP); + if (flags & ~valid_flags) + return ERR_PTR(-EOPNOTSUPP); + + hwpt_paging = __iommufd_object_alloc( + ictx, hwpt_paging, IOMMUFD_OBJ_HWPT_PAGING, common.obj); + if (IS_ERR(hwpt_paging)) + return ERR_CAST(hwpt_paging); + hwpt = &hwpt_paging->common; - INIT_LIST_HEAD(&hwpt->hwpt_item); + INIT_LIST_HEAD(&hwpt_paging->hwpt_item); /* Pairs with iommufd_hw_pagetable_destroy() */ refcount_inc(&ioas->obj.users); - hwpt->ioas = ioas; + hwpt_paging->ioas = ioas; + hwpt_paging->nest_parent = flags & IOMMU_HWPT_ALLOC_NEST_PARENT; - hwpt->domain = iommu_domain_alloc(idev->dev->bus); - if (!hwpt->domain) { - rc = -ENOMEM; - goto out_abort; + if (ops->domain_alloc_user) { + hwpt->domain = ops->domain_alloc_user(idev->dev, flags, NULL, + user_data); + if (IS_ERR(hwpt->domain)) { + rc = PTR_ERR(hwpt->domain); + hwpt->domain = NULL; + goto out_abort; + } + hwpt->domain->owner = ops; + } else { + hwpt->domain = iommu_domain_alloc(idev->dev->bus); + if (!hwpt->domain) { + rc = -ENOMEM; + goto out_abort; + } } /* @@ -100,9 +155,16 @@ iommufd_hw_pagetable_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas, * doing any maps. It is an iommu driver bug to report * IOMMU_CAP_ENFORCE_CACHE_COHERENCY but fail enforce_cache_coherency on * a new domain. + * + * The cache coherency mode must be configured here and unchanged later. + * Note that a HWPT (non-CC) created for a device (non-CC) can be later + * reused by another device (either non-CC or CC). However, A HWPT (CC) + * created for a device (CC) cannot be reused by another device (non-CC) + * but only devices (CC). Instead user space in this case would need to + * allocate a separate HWPT (non-CC). */ if (idev->enforce_cache_coherency) { - rc = iommufd_hw_pagetable_enforce_cc(hwpt); + rc = iommufd_hwpt_paging_enforce_cc(hwpt_paging); if (WARN_ON(rc)) goto out_abort; } @@ -119,11 +181,11 @@ iommufd_hw_pagetable_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas, goto out_abort; } - rc = iopt_table_add_domain(&hwpt->ioas->iopt, hwpt->domain); + rc = iopt_table_add_domain(&ioas->iopt, hwpt->domain); if (rc) goto out_detach; - list_add_tail(&hwpt->hwpt_item, &hwpt->ioas->hwpt_list); - return hwpt; + list_add_tail(&hwpt_paging->hwpt_item, &ioas->hwpt_list); + return hwpt_paging; out_detach: if (immediate_attach) @@ -133,32 +195,139 @@ iommufd_hw_pagetable_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas, return ERR_PTR(rc); } +/** + * iommufd_hwpt_nested_alloc() - Get a NESTED iommu_domain for a device + * @ictx: iommufd context + * @parent: Parent PAGING-type hwpt to associate the domain with + * @idev: Device to get an iommu_domain for + * @flags: Flags from userspace + * @user_data: user_data pointer. 
Must be valid + * + * Allocate a new iommu_domain (must be IOMMU_DOMAIN_NESTED) and return it as + * a NESTED hw_pagetable. The given parent PAGING-type hwpt must be capable of + * being a parent. + */ +static struct iommufd_hwpt_nested * +iommufd_hwpt_nested_alloc(struct iommufd_ctx *ictx, + struct iommufd_hwpt_paging *parent, + struct iommufd_device *idev, u32 flags, + const struct iommu_user_data *user_data) +{ + const struct iommu_ops *ops = dev_iommu_ops(idev->dev); + struct iommufd_hwpt_nested *hwpt_nested; + struct iommufd_hw_pagetable *hwpt; + int rc; + + if ((flags & ~IOMMU_HWPT_FAULT_ID_VALID) || + !user_data->len || !ops->domain_alloc_user) + return ERR_PTR(-EOPNOTSUPP); + if (parent->auto_domain || !parent->nest_parent || + parent->common.domain->owner != ops) + return ERR_PTR(-EINVAL); + + hwpt_nested = __iommufd_object_alloc( + ictx, hwpt_nested, IOMMUFD_OBJ_HWPT_NESTED, common.obj); + if (IS_ERR(hwpt_nested)) + return ERR_CAST(hwpt_nested); + hwpt = &hwpt_nested->common; + + refcount_inc(&parent->common.obj.users); + hwpt_nested->parent = parent; + + hwpt->domain = ops->domain_alloc_user(idev->dev, + flags & ~IOMMU_HWPT_FAULT_ID_VALID, + parent->common.domain, user_data); + if (IS_ERR(hwpt->domain)) { + rc = PTR_ERR(hwpt->domain); + hwpt->domain = NULL; + goto out_abort; + } + hwpt->domain->owner = ops; + + if (WARN_ON_ONCE(hwpt->domain->type != IOMMU_DOMAIN_NESTED)) { + rc = -EINVAL; + goto out_abort; + } + return hwpt_nested; + +out_abort: + iommufd_object_abort_and_destroy(ictx, &hwpt->obj); + return ERR_PTR(rc); +} + int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd) { struct iommu_hwpt_alloc *cmd = ucmd->cmd; + const struct iommu_user_data user_data = { + .type = cmd->data_type, + .uptr = u64_to_user_ptr(cmd->data_uptr), + .len = cmd->data_len, + }; struct iommufd_hw_pagetable *hwpt; + struct iommufd_ioas *ioas = NULL; + struct iommufd_object *pt_obj; struct iommufd_device *idev; - struct iommufd_ioas *ioas; int rc; - if (cmd->flags || cmd->__reserved) + if (cmd->__reserved) return -EOPNOTSUPP; + if (cmd->data_type == IOMMU_HWPT_DATA_NONE && cmd->data_len) + return -EINVAL; idev = iommufd_get_device(ucmd, cmd->dev_id); if (IS_ERR(idev)) return PTR_ERR(idev); - ioas = iommufd_get_ioas(ucmd->ictx, cmd->pt_id); - if (IS_ERR(ioas)) { - rc = PTR_ERR(ioas); + pt_obj = iommufd_get_object(ucmd->ictx, cmd->pt_id, IOMMUFD_OBJ_ANY); + if (IS_ERR(pt_obj)) { + rc = -EINVAL; goto out_put_idev; } - mutex_lock(&ioas->mutex); - hwpt = iommufd_hw_pagetable_alloc(ucmd->ictx, ioas, idev, false); - if (IS_ERR(hwpt)) { - rc = PTR_ERR(hwpt); - goto out_unlock; + if (pt_obj->type == IOMMUFD_OBJ_IOAS) { + struct iommufd_hwpt_paging *hwpt_paging; + + ioas = container_of(pt_obj, struct iommufd_ioas, obj); + mutex_lock(&ioas->mutex); + hwpt_paging = iommufd_hwpt_paging_alloc( + ucmd->ictx, ioas, idev, cmd->flags, false, + user_data.len ? 
&user_data : NULL); + if (IS_ERR(hwpt_paging)) { + rc = PTR_ERR(hwpt_paging); + goto out_unlock; + } + hwpt = &hwpt_paging->common; + } else if (pt_obj->type == IOMMUFD_OBJ_HWPT_PAGING) { + struct iommufd_hwpt_nested *hwpt_nested; + + hwpt_nested = iommufd_hwpt_nested_alloc( + ucmd->ictx, + container_of(pt_obj, struct iommufd_hwpt_paging, + common.obj), + idev, cmd->flags, &user_data); + if (IS_ERR(hwpt_nested)) { + rc = PTR_ERR(hwpt_nested); + goto out_unlock; + } + hwpt = &hwpt_nested->common; + } else { + rc = -EINVAL; + goto out_put_pt; + } + + if (cmd->flags & IOMMU_HWPT_FAULT_ID_VALID) { + struct iommufd_fault *fault; + + fault = iommufd_get_fault(ucmd, cmd->fault_id); + if (IS_ERR(fault)) { + rc = PTR_ERR(fault); + goto out_hwpt; + } + hwpt->fault = fault; + hwpt->domain->iopf_handler = iommufd_fault_iopf_handler; + hwpt->domain->fault_data = hwpt; + refcount_inc(&fault->obj.users); + iommufd_put_object(ucmd->ictx, &fault->obj); } cmd->out_hwpt_id = hwpt->obj.id; @@ -171,9 +340,59 @@ int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd) out_hwpt: iommufd_object_abort_and_destroy(ucmd->ictx, &hwpt->obj); out_unlock: - mutex_unlock(&ioas->mutex); - iommufd_put_object(&ioas->obj); + if (ioas) + mutex_unlock(&ioas->mutex); +out_put_pt: + iommufd_put_object(ucmd->ictx, pt_obj); out_put_idev: - iommufd_put_object(&idev->obj); + iommufd_put_object(ucmd->ictx, &idev->obj); + return rc; +} + +int iommufd_hwpt_set_dirty_tracking(struct iommufd_ucmd *ucmd) +{ + struct iommu_hwpt_set_dirty_tracking *cmd = ucmd->cmd; + struct iommufd_hwpt_paging *hwpt_paging; + struct iommufd_ioas *ioas; + int rc = -EOPNOTSUPP; + bool enable; + + if (cmd->flags & ~IOMMU_HWPT_DIRTY_TRACKING_ENABLE) + return rc; + + hwpt_paging = iommufd_get_hwpt_paging(ucmd, cmd->hwpt_id); + if (IS_ERR(hwpt_paging)) + return PTR_ERR(hwpt_paging); + + ioas = hwpt_paging->ioas; + enable = cmd->flags & IOMMU_HWPT_DIRTY_TRACKING_ENABLE; + + rc = iopt_set_dirty_tracking(&ioas->iopt, hwpt_paging->common.domain, + enable); + + iommufd_put_object(ucmd->ictx, &hwpt_paging->common.obj); + return rc; +} + +int iommufd_hwpt_get_dirty_bitmap(struct iommufd_ucmd *ucmd) +{ + struct iommu_hwpt_get_dirty_bitmap *cmd = ucmd->cmd; + struct iommufd_hwpt_paging *hwpt_paging; + struct iommufd_ioas *ioas; + int rc = -EOPNOTSUPP; + + if ((cmd->flags & ~(IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR)) || + cmd->__reserved) + return -EOPNOTSUPP; + + hwpt_paging = iommufd_get_hwpt_paging(ucmd, cmd->hwpt_id); + if (IS_ERR(hwpt_paging)) + return PTR_ERR(hwpt_paging); + + ioas = hwpt_paging->ioas; + rc = iopt_read_and_clear_dirty_data( + &ioas->iopt, hwpt_paging->common.domain, cmd->flags, cmd); + + iommufd_put_object(ucmd->ictx, &hwpt_paging->common.obj); return rc; } diff --git a/drivers/iommu/iommufd/io_pagetable.c b/drivers/iommu/iommufd/io_pagetable.c index e76b22939994..9f193c933de6 100644 --- a/drivers/iommu/iommufd/io_pagetable.c +++ b/drivers/iommu/iommufd/io_pagetable.c @@ -15,6 +15,7 @@ #include #include #include +#include #include "io_pagetable.h" #include "double_span.h" @@ -432,6 +433,177 @@ int iopt_map_user_pages(struct iommufd_ctx *ictx, struct io_pagetable *iopt, return 0; } +struct iova_bitmap_fn_arg { + unsigned long flags; + struct io_pagetable *iopt; + struct iommu_domain *domain; + struct iommu_dirty_bitmap *dirty; +}; + +static int __iommu_read_and_clear_dirty(struct iova_bitmap *bitmap, + unsigned long iova, size_t length, + void *opaque) +{ + struct iopt_area *area; + struct iopt_area_contig_iter iter; + struct iova_bitmap_fn_arg *arg = opaque; + 
struct iommu_domain *domain = arg->domain; + struct iommu_dirty_bitmap *dirty = arg->dirty; + const struct iommu_dirty_ops *ops = domain->dirty_ops; + unsigned long last_iova = iova + length - 1; + unsigned long flags = arg->flags; + int ret; + + iopt_for_each_contig_area(&iter, area, arg->iopt, iova, last_iova) { + unsigned long last = min(last_iova, iopt_area_last_iova(area)); + + ret = ops->read_and_clear_dirty(domain, iter.cur_iova, + last - iter.cur_iova + 1, flags, + dirty); + if (ret) + return ret; + } + + if (!iopt_area_contig_done(&iter)) + return -EINVAL; + return 0; +} + +static int +iommu_read_and_clear_dirty(struct iommu_domain *domain, + struct io_pagetable *iopt, unsigned long flags, + struct iommu_hwpt_get_dirty_bitmap *bitmap) +{ + const struct iommu_dirty_ops *ops = domain->dirty_ops; + struct iommu_iotlb_gather gather; + struct iommu_dirty_bitmap dirty; + struct iova_bitmap_fn_arg arg; + struct iova_bitmap *iter; + int ret = 0; + + if (!ops || !ops->read_and_clear_dirty) + return -EOPNOTSUPP; + + iter = iova_bitmap_alloc(bitmap->iova, bitmap->length, + bitmap->page_size, + u64_to_user_ptr(bitmap->data)); + if (IS_ERR(iter)) + return -ENOMEM; + + iommu_dirty_bitmap_init(&dirty, iter, &gather); + + arg.flags = flags; + arg.iopt = iopt; + arg.domain = domain; + arg.dirty = &dirty; + iova_bitmap_for_each(iter, &arg, __iommu_read_and_clear_dirty); + + if (!(flags & IOMMU_DIRTY_NO_CLEAR)) + iommu_iotlb_sync(domain, &gather); + + iova_bitmap_free(iter); + + return ret; +} + +int iommufd_check_iova_range(struct io_pagetable *iopt, + struct iommu_hwpt_get_dirty_bitmap *bitmap) +{ + size_t iommu_pgsize = iopt->iova_alignment; + u64 last_iova; + + if (check_add_overflow(bitmap->iova, bitmap->length - 1, &last_iova)) + return -EOVERFLOW; + + if (bitmap->iova > ULONG_MAX || last_iova > ULONG_MAX) + return -EOVERFLOW; + + if ((bitmap->iova & (iommu_pgsize - 1)) || + ((last_iova + 1) & (iommu_pgsize - 1))) + return -EINVAL; + + if (!bitmap->page_size) + return -EINVAL; + + if ((bitmap->iova & (bitmap->page_size - 1)) || + ((last_iova + 1) & (bitmap->page_size - 1))) + return -EINVAL; + + return 0; +} + +int iopt_read_and_clear_dirty_data(struct io_pagetable *iopt, + struct iommu_domain *domain, + unsigned long flags, + struct iommu_hwpt_get_dirty_bitmap *bitmap) +{ + int ret; + + ret = iommufd_check_iova_range(iopt, bitmap); + if (ret) + return ret; + + down_read(&iopt->iova_rwsem); + ret = iommu_read_and_clear_dirty(domain, iopt, flags, bitmap); + up_read(&iopt->iova_rwsem); + + return ret; +} + +static int iopt_clear_dirty_data(struct io_pagetable *iopt, + struct iommu_domain *domain) +{ + const struct iommu_dirty_ops *ops = domain->dirty_ops; + struct iommu_iotlb_gather gather; + struct iommu_dirty_bitmap dirty; + struct iopt_area *area; + int ret = 0; + + lockdep_assert_held_read(&iopt->iova_rwsem); + + iommu_dirty_bitmap_init(&dirty, NULL, &gather); + + for (area = iopt_area_iter_first(iopt, 0, ULONG_MAX); area; + area = iopt_area_iter_next(area, 0, ULONG_MAX)) { + if (!area->pages) + continue; + + ret = ops->read_and_clear_dirty(domain, iopt_area_iova(area), + iopt_area_length(area), 0, + &dirty); + if (ret) + break; + } + + iommu_iotlb_sync(domain, &gather); + return ret; +} + +int iopt_set_dirty_tracking(struct io_pagetable *iopt, + struct iommu_domain *domain, bool enable) +{ + const struct iommu_dirty_ops *ops = domain->dirty_ops; + int ret = 0; + + if (!ops) + return -EOPNOTSUPP; + + down_read(&iopt->iova_rwsem); + + /* Clear dirty bits from PTEs to ensure a clean snapshot 
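+ * before tracking is switched on, so the first IOMMU_HWPT_GET_DIRTY_BITMAP
+ * read reports only pages written after the enable rather than stale dirty
+ * bits left over from earlier use of the domain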
*/ + if (enable) { + ret = iopt_clear_dirty_data(iopt, domain); + if (ret) + goto out_unlock; + } + + ret = ops->set_dirty_tracking(domain, enable); + +out_unlock: + up_read(&iopt->iova_rwsem); + return ret; +} + int iopt_get_pages(struct io_pagetable *iopt, unsigned long iova, unsigned long length, struct list_head *pages_list) { diff --git a/drivers/iommu/iommufd/ioas.c b/drivers/iommu/iommufd/ioas.c index 0407e2b758ef..157a89b993e4 100644 --- a/drivers/iommu/iommufd/ioas.c +++ b/drivers/iommu/iommufd/ioas.c @@ -105,7 +105,7 @@ int iommufd_ioas_iova_ranges(struct iommufd_ucmd *ucmd) rc = -EMSGSIZE; out_put: up_read(&ioas->iopt.iova_rwsem); - iommufd_put_object(&ioas->obj); + iommufd_put_object(ucmd->ictx, &ioas->obj); return rc; } @@ -175,7 +175,7 @@ int iommufd_ioas_allow_iovas(struct iommufd_ucmd *ucmd) interval_tree_remove(node, &allowed_iova); kfree(container_of(node, struct iopt_allowed, node)); } - iommufd_put_object(&ioas->obj); + iommufd_put_object(ucmd->ictx, &ioas->obj); return rc; } @@ -232,7 +232,7 @@ int iommufd_ioas_map(struct iommufd_ucmd *ucmd) cmd->iova = iova; rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd)); out_put: - iommufd_put_object(&ioas->obj); + iommufd_put_object(ucmd->ictx, &ioas->obj); return rc; } @@ -266,7 +266,7 @@ int iommufd_ioas_copy(struct iommufd_ucmd *ucmd) return PTR_ERR(src_ioas); rc = iopt_get_pages(&src_ioas->iopt, cmd->src_iova, cmd->length, &pages_list); - iommufd_put_object(&src_ioas->obj); + iommufd_put_object(ucmd->ictx, &src_ioas->obj); if (rc) return rc; @@ -287,7 +287,7 @@ int iommufd_ioas_copy(struct iommufd_ucmd *ucmd) cmd->dst_iova = iova; rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd)); out_put_dst: - iommufd_put_object(&dst_ioas->obj); + iommufd_put_object(ucmd->ictx, &dst_ioas->obj); out_pages: iopt_free_pages_list(&pages_list); return rc; @@ -323,7 +323,7 @@ int iommufd_ioas_unmap(struct iommufd_ucmd *ucmd) rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd)); out_put: - iommufd_put_object(&ioas->obj); + iommufd_put_object(ucmd->ictx, &ioas->obj); return rc; } @@ -401,6 +401,6 @@ int iommufd_ioas_option(struct iommufd_ucmd *ucmd) rc = -EOPNOTSUPP; } - iommufd_put_object(&ioas->obj); + iommufd_put_object(ucmd->ictx, &ioas->obj); return rc; } diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h index 2c58670011fe..c921e87464cd 100644 --- a/drivers/iommu/iommufd/iommufd_private.h +++ b/drivers/iommu/iommufd/iommufd_private.h @@ -8,6 +8,10 @@ #include #include #include +#include +#include +#include +#include "../iommu-priv.h" struct iommu_domain; struct iommu_group; @@ -18,6 +22,7 @@ struct iommufd_ctx { struct file *file; struct xarray objects; struct xarray groups; + wait_queue_head_t destroy_wait; u8 account_mode; /* Compatibility with VFIO no iommu */ @@ -70,6 +75,13 @@ int iopt_unmap_iova(struct io_pagetable *iopt, unsigned long iova, unsigned long length, unsigned long *unmapped); int iopt_unmap_all(struct io_pagetable *iopt, unsigned long *unmapped); +int iopt_read_and_clear_dirty_data(struct io_pagetable *iopt, + struct iommu_domain *domain, + unsigned long flags, + struct iommu_hwpt_get_dirty_bitmap *bitmap); +int iopt_set_dirty_tracking(struct io_pagetable *iopt, + struct iommu_domain *domain, bool enable); + void iommufd_access_notify_unmap(struct io_pagetable *iopt, unsigned long iova, unsigned long length); int iopt_table_add_domain(struct io_pagetable *iopt, @@ -113,9 +125,11 @@ enum iommufd_object_type { IOMMUFD_OBJ_NONE, IOMMUFD_OBJ_ANY = IOMMUFD_OBJ_NONE, IOMMUFD_OBJ_DEVICE, - 
IOMMUFD_OBJ_HW_PAGETABLE, + IOMMUFD_OBJ_HWPT_PAGING, + IOMMUFD_OBJ_HWPT_NESTED, IOMMUFD_OBJ_IOAS, IOMMUFD_OBJ_ACCESS, + IOMMUFD_OBJ_FAULT, #ifdef CONFIG_IOMMUFD_TEST IOMMUFD_OBJ_SELFTEST, #endif @@ -124,7 +138,7 @@ enum iommufd_object_type { /* Base struct for all objects with a userspace ID handle. */ struct iommufd_object { - struct rw_semaphore destroy_rwsem; + refcount_t shortterm_users; refcount_t users; enum iommufd_object_type type; unsigned int id; @@ -132,10 +146,15 @@ struct iommufd_object { static inline bool iommufd_lock_obj(struct iommufd_object *obj) { - if (!down_read_trylock(&obj->destroy_rwsem)) + if (!refcount_inc_not_zero(&obj->users)) return false; - if (!refcount_inc_not_zero(&obj->users)) { - up_read(&obj->destroy_rwsem); + if (!refcount_inc_not_zero(&obj->shortterm_users)) { + /* + * If the caller doesn't already have a ref on obj this must be + * called under the xa_lock. Otherwise the caller is holding a + * ref on users. Thus it cannot be one before this decrement. + */ + refcount_dec(&obj->users); return false; } return true; @@ -143,10 +162,16 @@ static inline bool iommufd_lock_obj(struct iommufd_object *obj) struct iommufd_object *iommufd_get_object(struct iommufd_ctx *ictx, u32 id, enum iommufd_object_type type); -static inline void iommufd_put_object(struct iommufd_object *obj) +static inline void iommufd_put_object(struct iommufd_ctx *ictx, + struct iommufd_object *obj) { + /* + * Users first, then shortterm so that REMOVE_WAIT_SHORTTERM never sees + * a spurious !0 users with a 0 shortterm_users. + */ refcount_dec(&obj->users); - up_read(&obj->destroy_rwsem); + if (refcount_dec_and_test(&obj->shortterm_users)) + wake_up_interruptible_all(&ictx->destroy_wait); } void iommufd_object_abort(struct iommufd_ctx *ictx, struct iommufd_object *obj); @@ -154,24 +179,56 @@ void iommufd_object_abort_and_destroy(struct iommufd_ctx *ictx, struct iommufd_object *obj); void iommufd_object_finalize(struct iommufd_ctx *ictx, struct iommufd_object *obj); -void __iommufd_object_destroy_user(struct iommufd_ctx *ictx, - struct iommufd_object *obj, bool allow_fail); + +enum { + REMOVE_WAIT_SHORTTERM = 1, +}; +int iommufd_object_remove(struct iommufd_ctx *ictx, + struct iommufd_object *to_destroy, u32 id, + unsigned int flags); + +/* + * The caller holds a users refcount and wants to destroy the object. At this + * point the caller has no shortterm_users reference and at least the xarray + * will be holding one. + */ static inline void iommufd_object_destroy_user(struct iommufd_ctx *ictx, struct iommufd_object *obj) { - __iommufd_object_destroy_user(ictx, obj, false); + int ret; + + ret = iommufd_object_remove(ictx, obj, obj->id, REMOVE_WAIT_SHORTTERM); + + /* + * If there is a bug and we couldn't destroy the object then we did put + * back the caller's users refcount and will eventually try to free it + * again during close. + */ + WARN_ON(ret); } -static inline void iommufd_object_deref_user(struct iommufd_ctx *ictx, - struct iommufd_object *obj) + +/* + * The HWPT allocated by autodomains is used in possibly many devices and + * is automatically destroyed when its refcount reaches zero. + * + * If userspace uses the HWPT manually, even for a short term, then it will + * disrupt this refcounting and the auto-free in the kernel will not work. + * Userspace that tries to use the automatically allocated HWPT must be careful + * to ensure that it is consistently destroyed, eg by not racing accesses + * and by not attaching an automatic HWPT to a device manually. 
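+ * (iommufd_object_put_and_try_destroy() below only frees the object when no
+ * other reference remains; otherwise it merely drops the caller's reference
+ * and leaves the object in place)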
+ */ +static inline void +iommufd_object_put_and_try_destroy(struct iommufd_ctx *ictx, + struct iommufd_object *obj) { - __iommufd_object_destroy_user(ictx, obj, true); + iommufd_object_remove(ictx, obj, obj->id, 0); } struct iommufd_object *_iommufd_object_alloc(struct iommufd_ctx *ictx, size_t size, enum iommufd_object_type type); -#define iommufd_object_alloc(ictx, ptr, type) \ +#define __iommufd_object_alloc(ictx, ptr, type, obj) \ container_of(_iommufd_object_alloc( \ ictx, \ sizeof(*(ptr)) + BUILD_BUG_ON_ZERO( \ @@ -180,6 +237,9 @@ struct iommufd_object *_iommufd_object_alloc(struct iommufd_ctx *ictx, type), \ typeof(*(ptr)), obj) +#define iommufd_object_alloc(ictx, ptr, type) \ + __iommufd_object_alloc(ictx, ptr, type, obj) + /* * The IO Address Space (IOAS) pagetable is a virtual page table backed by the * io_pagetable object. It is a user controlled mapping of IOVA -> PFNs. The @@ -222,6 +282,8 @@ int iommufd_option_rlimit_mode(struct iommu_option *cmd, struct iommufd_ctx *ictx); int iommufd_vfio_ioas(struct iommufd_ucmd *ucmd); +int iommufd_check_iova_range(struct io_pagetable *iopt, + struct iommu_hwpt_get_dirty_bitmap *bitmap); /* * A HW pagetable is called an iommu_domain inside the kernel. This user object @@ -231,35 +293,77 @@ int iommufd_vfio_ioas(struct iommufd_ucmd *ucmd); */ struct iommufd_hw_pagetable { struct iommufd_object obj; - struct iommufd_ioas *ioas; struct iommu_domain *domain; + struct iommufd_fault *fault; +}; + +struct iommufd_hwpt_paging { + struct iommufd_hw_pagetable common; + struct iommufd_ioas *ioas; bool auto_domain : 1; bool enforce_cache_coherency : 1; bool msi_cookie : 1; + bool nest_parent : 1; /* Head at iommufd_ioas::hwpt_list */ struct list_head hwpt_item; }; -struct iommufd_hw_pagetable * -iommufd_hw_pagetable_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas, - struct iommufd_device *idev, bool immediate_attach); +struct iommufd_hwpt_nested { + struct iommufd_hw_pagetable common; + struct iommufd_hwpt_paging *parent; +}; + +static inline bool hwpt_is_paging(struct iommufd_hw_pagetable *hwpt) +{ + return hwpt->obj.type == IOMMUFD_OBJ_HWPT_PAGING; +} + +static inline struct iommufd_hwpt_paging * +to_hwpt_paging(struct iommufd_hw_pagetable *hwpt) +{ + return container_of(hwpt, struct iommufd_hwpt_paging, common); +} + +static inline struct iommufd_hwpt_paging * +iommufd_get_hwpt_paging(struct iommufd_ucmd *ucmd, u32 id) +{ + return container_of(iommufd_get_object(ucmd->ictx, id, + IOMMUFD_OBJ_HWPT_PAGING), + struct iommufd_hwpt_paging, common.obj); +} +int iommufd_hwpt_set_dirty_tracking(struct iommufd_ucmd *ucmd); +int iommufd_hwpt_get_dirty_bitmap(struct iommufd_ucmd *ucmd); + +struct iommufd_hwpt_paging * +iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas, + struct iommufd_device *idev, u32 flags, + bool immediate_attach, + const struct iommu_user_data *user_data); int iommufd_hw_pagetable_enforce_cc(struct iommufd_hw_pagetable *hwpt); int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt, struct iommufd_device *idev); struct iommufd_hw_pagetable * iommufd_hw_pagetable_detach(struct iommufd_device *idev); -void iommufd_hw_pagetable_destroy(struct iommufd_object *obj); -void iommufd_hw_pagetable_abort(struct iommufd_object *obj); +void iommufd_hwpt_paging_destroy(struct iommufd_object *obj); +void iommufd_hwpt_paging_abort(struct iommufd_object *obj); +void iommufd_hwpt_nested_destroy(struct iommufd_object *obj); +void iommufd_hwpt_nested_abort(struct iommufd_object *obj); int 
iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd); static inline void iommufd_hw_pagetable_put(struct iommufd_ctx *ictx, struct iommufd_hw_pagetable *hwpt) { - lockdep_assert_not_held(&hwpt->ioas->mutex); - if (hwpt->auto_domain) - iommufd_object_deref_user(ictx, &hwpt->obj); - else - refcount_dec(&hwpt->obj.users); + if (hwpt->obj.type == IOMMUFD_OBJ_HWPT_PAGING) { + struct iommufd_hwpt_paging *hwpt_paging = to_hwpt_paging(hwpt); + + lockdep_assert_not_held(&hwpt_paging->ioas->mutex); + + if (hwpt_paging->auto_domain) { + iommufd_object_put_and_try_destroy(ictx, &hwpt->obj); + return; + } + } + refcount_dec(&hwpt->obj.users); } struct iommufd_group { @@ -285,6 +389,9 @@ struct iommufd_device { /* always the physical device */ struct device *dev; bool enforce_cache_coherency; + /* protect iopf_enabled counter */ + struct mutex iopf_lock; + unsigned int iopf_enabled; }; static inline struct iommufd_device * @@ -316,6 +423,80 @@ void iopt_remove_access(struct io_pagetable *iopt, u32 iopt_access_list_id); void iommufd_access_destroy_object(struct iommufd_object *obj); +/* + * An iommufd_fault object represents an interface to deliver I/O page faults + * to the user space. These objects are created/destroyed by the user space and + * associated with hardware page table objects during page-table allocation. + */ +struct iommufd_fault { + struct iommufd_object obj; + struct iommufd_ctx *ictx; + struct file *filep; + + /* The lists of outstanding faults protected by below mutex. */ + struct mutex mutex; + struct list_head deliver; + struct xarray response; + + struct wait_queue_head wait_queue; +}; + +struct iommufd_attach_handle { + struct iommu_attach_handle handle; + struct iommufd_device *idev; +}; + +/* Convert an iommu attach handle to iommufd handle. */ +#define to_iommufd_handle(hdl) container_of(hdl, struct iommufd_attach_handle, handle) + +static inline struct iommufd_fault * +iommufd_get_fault(struct iommufd_ucmd *ucmd, u32 id) +{ + return container_of(iommufd_get_object(ucmd->ictx, id, + IOMMUFD_OBJ_FAULT), + struct iommufd_fault, obj); +} + +int iommufd_fault_alloc(struct iommufd_ucmd *ucmd); +void iommufd_fault_destroy(struct iommufd_object *obj); +int iommufd_fault_iopf_handler(struct iopf_group *group); + +int iommufd_fault_domain_attach_dev(struct iommufd_hw_pagetable *hwpt, + struct iommufd_device *idev); +void iommufd_fault_domain_detach_dev(struct iommufd_hw_pagetable *hwpt, + struct iommufd_device *idev); +int iommufd_fault_domain_replace_dev(struct iommufd_device *idev, + struct iommufd_hw_pagetable *hwpt, + struct iommufd_hw_pagetable *old); + +static inline int iommufd_hwpt_attach_device(struct iommufd_hw_pagetable *hwpt, + struct iommufd_device *idev) +{ + if (hwpt->fault) + return iommufd_fault_domain_attach_dev(hwpt, idev); + + return iommu_attach_group(hwpt->domain, idev->igroup->group); +} + +static inline void iommufd_hwpt_detach_device(struct iommufd_hw_pagetable *hwpt, + struct iommufd_device *idev) +{ + if (hwpt->fault) + iommufd_fault_domain_detach_dev(hwpt, idev); + + iommu_detach_group(hwpt->domain, idev->igroup->group); +} + +static inline int iommufd_hwpt_replace_device(struct iommufd_device *idev, + struct iommufd_hw_pagetable *hwpt, + struct iommufd_hw_pagetable *old) +{ + if (old->fault || hwpt->fault) + return iommufd_fault_domain_replace_dev(idev, hwpt, old); + + return iommu_group_replace_domain(idev->igroup->group, hwpt->domain); +} + #ifdef CONFIG_IOMMUFD_TEST int iommufd_test(struct iommufd_ucmd *ucmd); void iommufd_selftest_destroy(struct iommufd_object 
*obj); diff --git a/drivers/iommu/iommufd/iommufd_test.h b/drivers/iommu/iommufd/iommufd_test.h index 3f3644375bf1..1827f11de865 100644 --- a/drivers/iommu/iommufd/iommufd_test.h +++ b/drivers/iommu/iommufd/iommufd_test.h @@ -19,6 +19,9 @@ enum { IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT, IOMMU_TEST_OP_MOCK_DOMAIN_REPLACE, IOMMU_TEST_OP_ACCESS_REPLACE_IOAS, + IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS, + IOMMU_TEST_OP_DIRTY, + IOMMU_TEST_OP_TRIGGER_IOPF, }; enum { @@ -40,6 +43,15 @@ enum { MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES = 1 << 0, }; +enum { + MOCK_FLAGS_DEVICE_NO_DIRTY = 1 << 0, +}; + +enum { + MOCK_NESTED_DOMAIN_IOTLB_ID_MAX = 3, + MOCK_NESTED_DOMAIN_IOTLB_NUM = 4, +}; + struct iommu_test_cmd { __u32 size; __u32 op; @@ -56,6 +68,13 @@ struct iommu_test_cmd { /* out_idev_id is the standard iommufd_bind object */ __u32 out_idev_id; } mock_domain; + struct { + __u32 out_stdev_id; + __u32 out_hwpt_id; + __u32 out_idev_id; + /* Expand mock_domain to set mock device flags */ + __u32 dev_flags; + } mock_domain_flags; struct { __u32 pt_id; } mock_domain_replace; @@ -95,6 +114,21 @@ struct iommu_test_cmd { struct { __u32 ioas_id; } access_replace_ioas; + struct { + __u32 flags; + __aligned_u64 iova; + __aligned_u64 length; + __aligned_u64 page_size; + __aligned_u64 uptr; + __aligned_u64 out_nr_dirty; + } dirty; + struct { + __u32 dev_id; + __u32 pasid; + __u32 grpid; + __u32 perm; + __u64 addr; + } trigger_iopf; }; __u32 last; }; @@ -109,4 +143,17 @@ struct iommu_test_hw_info { __u32 test_reg; }; +/* Should not be equal to any defined value in enum iommu_hwpt_data_type */ +#define IOMMU_HWPT_DATA_SELFTEST 0xdead +#define IOMMU_TEST_IOTLB_DEFAULT 0xbadbeef + +/** + * struct iommu_hwpt_selftest + * + * @iotlb: default mock iotlb value, IOMMU_TEST_IOTLB_DEFAULT + */ +struct iommu_hwpt_selftest { + __u32 iotlb; +}; + #endif diff --git a/drivers/vfio/iova_bitmap.c b/drivers/iommu/iommufd/iova_bitmap.c similarity index 91% rename from drivers/vfio/iova_bitmap.c rename to drivers/iommu/iommufd/iova_bitmap.c index 38b51613ecca..89e6e11157f0 100644 --- a/drivers/vfio/iova_bitmap.c +++ b/drivers/iommu/iommufd/iova_bitmap.c @@ -113,6 +113,9 @@ struct iova_bitmap { /* length of the IOVA range for the whole bitmap */ size_t length; + + /* length of the IOVA range set ahead the pinned pages */ + unsigned long set_ahead_length; }; /* @@ -269,6 +272,7 @@ struct iova_bitmap *iova_bitmap_alloc(unsigned long iova, size_t length, iova_bitmap_free(bitmap); return ERR_PTR(rc); } +EXPORT_SYMBOL_NS_GPL(iova_bitmap_alloc, IOMMUFD); /** * iova_bitmap_free() - Frees an IOVA bitmap object @@ -290,6 +294,7 @@ void iova_bitmap_free(struct iova_bitmap *bitmap) kfree(bitmap); } +EXPORT_SYMBOL_NS_GPL(iova_bitmap_free, IOMMUFD); /* * Returns the remaining bitmap indexes from mapped_total_index to process for @@ -340,6 +345,32 @@ static bool iova_bitmap_done(struct iova_bitmap *bitmap) return bitmap->mapped_base_index >= bitmap->mapped_total_index; } +static int iova_bitmap_set_ahead(struct iova_bitmap *bitmap, + size_t set_ahead_length) +{ + int ret = 0; + + while (set_ahead_length > 0 && !iova_bitmap_done(bitmap)) { + unsigned long length = iova_bitmap_mapped_length(bitmap); + unsigned long iova = iova_bitmap_mapped_iova(bitmap); + + ret = iova_bitmap_get(bitmap); + if (ret) + break; + + length = min(length, set_ahead_length); + iova_bitmap_set(bitmap, iova, length); + + set_ahead_length -= length; + bitmap->mapped_base_index += + iova_bitmap_offset_to_index(bitmap, length - 1) + 1; + iova_bitmap_put(bitmap); + } + + 
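+	/* Carry-over fully handled: recorded above or past the bitmap end */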
bitmap->set_ahead_length = 0; + return ret; +} + /* * Advances to the next range, releases the current pinned * pages and pins the next set of bitmap pages. @@ -356,6 +387,15 @@ static int iova_bitmap_advance(struct iova_bitmap *bitmap) if (iova_bitmap_done(bitmap)) return 0; + /* Iterate, set and skip any bits requested for next iteration */ + if (bitmap->set_ahead_length) { + int ret; + + ret = iova_bitmap_set_ahead(bitmap, bitmap->set_ahead_length); + if (ret) + return ret; + } + /* When advancing the index we pin the next set of bitmap pages */ return iova_bitmap_get(bitmap); } @@ -388,6 +428,7 @@ int iova_bitmap_for_each(struct iova_bitmap *bitmap, void *opaque, return ret; } +EXPORT_SYMBOL_NS_GPL(iova_bitmap_for_each, IOMMUFD); /** * iova_bitmap_set() - Records an IOVA range in bitmap @@ -424,5 +465,10 @@ void iova_bitmap_set(struct iova_bitmap *bitmap, kunmap_local(kaddr); cur_bit += nbits; } while (cur_bit <= last_bit); + + if (unlikely(cur_bit <= last_bit)) { + bitmap->set_ahead_length = + ((last_bit - cur_bit + 1) << bitmap->mapped.pgshift); + } } -EXPORT_SYMBOL_GPL(iova_bitmap_set); +EXPORT_SYMBOL_NS_GPL(iova_bitmap_set, IOMMUFD); diff --git a/drivers/iommu/iommufd/main.c b/drivers/iommu/iommufd/main.c index e71523cbd0de..9ea903a2cb0c 100644 --- a/drivers/iommu/iommufd/main.c +++ b/drivers/iommu/iommufd/main.c @@ -33,7 +33,6 @@ struct iommufd_object *_iommufd_object_alloc(struct iommufd_ctx *ictx, size_t size, enum iommufd_object_type type) { - static struct lock_class_key obj_keys[IOMMUFD_OBJ_MAX]; struct iommufd_object *obj; int rc; @@ -41,15 +40,8 @@ struct iommufd_object *_iommufd_object_alloc(struct iommufd_ctx *ictx, if (!obj) return ERR_PTR(-ENOMEM); obj->type = type; - /* - * In most cases the destroy_rwsem is obtained with try so it doesn't - * interact with lockdep, however on destroy we have to sleep. This - * means if we have to destroy an object while holding a get on another - * object it triggers lockdep. Using one locking class per object type - * is a simple and reasonable way to avoid this. - */ - __init_rwsem(&obj->destroy_rwsem, "iommufd_object::destroy_rwsem", - &obj_keys[type]); + /* Starts out bias'd by 1 until it is removed from the xarray */ + refcount_set(&obj->shortterm_users, 1); refcount_set(&obj->users, 1); /* @@ -129,92 +121,113 @@ struct iommufd_object *iommufd_get_object(struct iommufd_ctx *ictx, u32 id, return obj; } +static int iommufd_object_dec_wait_shortterm(struct iommufd_ctx *ictx, + struct iommufd_object *to_destroy) +{ + if (refcount_dec_and_test(&to_destroy->shortterm_users)) + return 0; + + if (wait_event_timeout(ictx->destroy_wait, + refcount_read(&to_destroy->shortterm_users) == + 0, + msecs_to_jiffies(10000))) + return 0; + + pr_crit("Time out waiting for iommufd object to become free\n"); + refcount_inc(&to_destroy->shortterm_users); + return -EBUSY; +} + /* * Remove the given object id from the xarray if the only reference to the - * object is held by the xarray. The caller must call ops destroy(). + * object is held by the xarray. 
*/ -static struct iommufd_object *iommufd_object_remove(struct iommufd_ctx *ictx, - u32 id, bool extra_put) +int iommufd_object_remove(struct iommufd_ctx *ictx, + struct iommufd_object *to_destroy, u32 id, + unsigned int flags) { struct iommufd_object *obj; XA_STATE(xas, &ictx->objects, id); - - xa_lock(&ictx->objects); - obj = xas_load(&xas); - if (xa_is_zero(obj) || !obj) { - obj = ERR_PTR(-ENOENT); - goto out_xa; - } + bool zerod_shortterm = false; + int ret; /* - * If the caller is holding a ref on obj we put it here under the - * spinlock. + * The purpose of the shortterm_users is to ensure deterministic + * destruction of objects used by external drivers and destroyed by this + * function. Any temporary increment of the refcount must increment + * shortterm_users, such as during ioctl execution. */ - if (extra_put) + if (flags & REMOVE_WAIT_SHORTTERM) { + ret = iommufd_object_dec_wait_shortterm(ictx, to_destroy); + if (ret) { + /* + * We have a bug. Put back the callers reference and + * defer cleaning this object until close. + */ + refcount_dec(&to_destroy->users); + return ret; + } + zerod_shortterm = true; + } + + xa_lock(&ictx->objects); + obj = xas_load(&xas); + if (to_destroy) { + /* + * If the caller is holding a ref on obj we put it here under + * the spinlock. + */ refcount_dec(&obj->users); + if (WARN_ON(obj != to_destroy)) { + ret = -ENOENT; + goto err_xa; + } + } else if (xa_is_zero(obj) || !obj) { + ret = -ENOENT; + goto err_xa; + } + if (!refcount_dec_if_one(&obj->users)) { - obj = ERR_PTR(-EBUSY); - goto out_xa; + ret = -EBUSY; + goto err_xa; } xas_store(&xas, NULL); if (ictx->vfio_ioas == container_of(obj, struct iommufd_ioas, obj)) ictx->vfio_ioas = NULL; - -out_xa: xa_unlock(&ictx->objects); - /* The returned object reference count is zero */ - return obj; -} - -/* - * The caller holds a users refcount and wants to destroy the object. Returns - * true if the object was destroyed. In all cases the caller no longer has a - * reference on obj. - */ -void __iommufd_object_destroy_user(struct iommufd_ctx *ictx, - struct iommufd_object *obj, bool allow_fail) -{ - struct iommufd_object *ret; - - /* - * The purpose of the destroy_rwsem is to ensure deterministic - * destruction of objects used by external drivers and destroyed by this - * function. Any temporary increment of the refcount must hold the read - * side of this, such as during ioctl execution. - */ - down_write(&obj->destroy_rwsem); - ret = iommufd_object_remove(ictx, obj->id, true); - up_write(&obj->destroy_rwsem); - - if (allow_fail && IS_ERR(ret)) - return; - /* - * If there is a bug and we couldn't destroy the object then we did put - * back the caller's refcount and will eventually try to free it again - * during close. + * Since users is zero any positive users_shortterm must be racing + * iommufd_put_object(), or we have a bug. 
*/ - if (WARN_ON(IS_ERR(ret))) - return; + if (!zerod_shortterm) { + ret = iommufd_object_dec_wait_shortterm(ictx, obj); + if (WARN_ON(ret)) + return ret; + } iommufd_object_ops[obj->type].destroy(obj); kfree(obj); + return 0; + +err_xa: + if (zerod_shortterm) { + /* Restore the xarray owned reference */ + refcount_set(&obj->shortterm_users, 1); + } + xa_unlock(&ictx->objects); + + /* The returned object reference count is zero */ + return ret; } static int iommufd_destroy(struct iommufd_ucmd *ucmd) { struct iommu_destroy *cmd = ucmd->cmd; - struct iommufd_object *obj; - obj = iommufd_object_remove(ucmd->ictx, cmd->id, false); - if (IS_ERR(obj)) - return PTR_ERR(obj); - iommufd_object_ops[obj->type].destroy(obj); - kfree(obj); - return 0; + return iommufd_object_remove(ucmd->ictx, NULL, cmd->id, 0); } static int iommufd_fops_open(struct inode *inode, struct file *filp) @@ -238,6 +251,7 @@ static int iommufd_fops_open(struct inode *inode, struct file *filp) xa_init_flags(&ictx->objects, XA_FLAGS_ALLOC1 | XA_FLAGS_ACCOUNT); xa_init(&ictx->groups); ictx->file = filp; + init_waitqueue_head(&ictx->destroy_wait); filp->private_data = ictx; return 0; } @@ -305,8 +319,11 @@ static int iommufd_option(struct iommufd_ucmd *ucmd) union ucmd_buffer { struct iommu_destroy destroy; + struct iommu_fault_alloc fault; struct iommu_hw_info info; struct iommu_hwpt_alloc hwpt; + struct iommu_hwpt_get_dirty_bitmap get_dirty_bitmap; + struct iommu_hwpt_set_dirty_tracking set_dirty_tracking; struct iommu_ioas_alloc alloc; struct iommu_ioas_allow_iovas allow_iovas; struct iommu_ioas_copy ioas_copy; @@ -338,10 +355,16 @@ struct iommufd_ioctl_op { } static const struct iommufd_ioctl_op iommufd_ioctl_ops[] = { IOCTL_OP(IOMMU_DESTROY, iommufd_destroy, struct iommu_destroy, id), + IOCTL_OP(IOMMU_FAULT_QUEUE_ALLOC, iommufd_fault_alloc, struct iommu_fault_alloc, + out_fault_fd), IOCTL_OP(IOMMU_GET_HW_INFO, iommufd_get_hw_info, struct iommu_hw_info, __reserved), IOCTL_OP(IOMMU_HWPT_ALLOC, iommufd_hwpt_alloc, struct iommu_hwpt_alloc, __reserved), + IOCTL_OP(IOMMU_HWPT_GET_DIRTY_BITMAP, iommufd_hwpt_get_dirty_bitmap, + struct iommu_hwpt_get_dirty_bitmap, data), + IOCTL_OP(IOMMU_HWPT_SET_DIRTY_TRACKING, iommufd_hwpt_set_dirty_tracking, + struct iommu_hwpt_set_dirty_tracking, __reserved), IOCTL_OP(IOMMU_IOAS_ALLOC, iommufd_ioas_alloc_ioctl, struct iommu_ioas_alloc, out_ioas_id), IOCTL_OP(IOMMU_IOAS_ALLOW_IOVAS, iommufd_ioas_allow_iovas, @@ -482,9 +505,16 @@ static const struct iommufd_object_ops iommufd_object_ops[] = { [IOMMUFD_OBJ_IOAS] = { .destroy = iommufd_ioas_destroy, }, - [IOMMUFD_OBJ_HW_PAGETABLE] = { - .destroy = iommufd_hw_pagetable_destroy, - .abort = iommufd_hw_pagetable_abort, + [IOMMUFD_OBJ_HWPT_PAGING] = { + .destroy = iommufd_hwpt_paging_destroy, + .abort = iommufd_hwpt_paging_abort, + }, + [IOMMUFD_OBJ_HWPT_NESTED] = { + .destroy = iommufd_hwpt_nested_destroy, + .abort = iommufd_hwpt_nested_abort, + }, + [IOMMUFD_OBJ_FAULT] = { + .destroy = iommufd_fault_destroy, }, #ifdef CONFIG_IOMMUFD_TEST [IOMMUFD_OBJ_SELFTEST] = { @@ -552,5 +582,6 @@ MODULE_ALIAS_MISCDEV(VFIO_MINOR); MODULE_ALIAS("devname:vfio/vfio"); #endif MODULE_IMPORT_NS(IOMMUFD_INTERNAL); +MODULE_IMPORT_NS(IOMMUFD); MODULE_DESCRIPTION("I/O Address Space Management for passthrough devices"); MODULE_LICENSE("GPL"); diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c index 00b794d74e03..00e9fa8256ad 100644 --- a/drivers/iommu/iommufd/selftest.c +++ b/drivers/iommu/iommufd/selftest.c @@ -20,10 +20,26 @@ static 
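The object-destruction rework above replaces the per-type destroy_rwsem with a second refcount: each transient holder (an ioctl that looked the object up) bumps shortterm_users, and a REMOVE_WAIT_SHORTTERM destroy drops the xarray's bias and then sleeps on the context's destroy_wait until the count reaches zero, so destruction stays deterministic for external drivers. The following is only a rough, self-contained sketch of that wait-for-transient-users idiom with generic names (obj_ctx, obj_get, obj_put, obj_wait_idle are placeholders, not the iommufd functions), assuming the last short-term put is what wakes the waiter:

#include <linux/refcount.h>
#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/slab.h>

struct obj_ctx {
	wait_queue_head_t destroy_wait;		/* init_waitqueue_head() at open time */
};

struct obj {
	struct obj_ctx *ctx;
	refcount_t shortterm_users;		/* biased by 1 while published */
};

/* Transient user: taken while the object is looked up for an ioctl. */
static void obj_get(struct obj *obj)
{
	refcount_inc(&obj->shortterm_users);
}

/* Dropping the last short-term reference wakes a pending destroyer. */
static void obj_put(struct obj *obj)
{
	if (refcount_dec_and_test(&obj->shortterm_users))
		wake_up_interruptible_all(&obj->ctx->destroy_wait);
}

/* Destroyer: drop the publication bias, then wait out any stragglers. */
static int obj_wait_idle(struct obj *obj)
{
	if (refcount_dec_and_test(&obj->shortterm_users))
		return 0;

	if (wait_event_timeout(obj->ctx->destroy_wait,
			       refcount_read(&obj->shortterm_users) == 0,
			       msecs_to_jiffies(10000)))
		return 0;

	return -EBUSY;	/* mirrors the "time out waiting" failure path */
}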
DECLARE_FAULT_ATTR(fail_iommufd); static struct dentry *dbgfs_root; static struct platform_device *selftest_iommu_dev; +static const struct iommu_ops mock_ops; +static struct iommu_domain_ops domain_nested_ops; size_t iommufd_test_memory_limit = 65536; +struct mock_bus_type { + struct bus_type bus; + struct notifier_block nb; +}; + +static struct mock_bus_type iommufd_mock_bus_type = { + .bus = { + .name = "iommufd_mock", + }, +}; + +static atomic_t mock_dev_num; + enum { + MOCK_DIRTY_TRACK = 1, MOCK_IO_PAGE_SIZE = PAGE_SIZE / 2, /* @@ -36,6 +52,7 @@ enum { _MOCK_PFN_START = MOCK_PFN_MASK + 1, MOCK_PFN_START_IOVA = _MOCK_PFN_START, MOCK_PFN_LAST_IOVA = _MOCK_PFN_START, + MOCK_PFN_DIRTY_IOVA = _MOCK_PFN_START << 1, }; /* @@ -97,20 +114,28 @@ void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd, if (IS_ERR(ioas)) return; *iova = __iommufd_test_syz_conv_iova(&ioas->iopt, iova); - iommufd_put_object(&ioas->obj); + iommufd_put_object(ucmd->ictx, &ioas->obj); } struct mock_iommu_domain { + unsigned long flags; struct iommu_domain domain; struct xarray pfns; }; +struct mock_iommu_domain_nested { + struct iommu_domain domain; + struct mock_iommu_domain *parent; + u32 iotlb[MOCK_NESTED_DOMAIN_IOTLB_NUM]; +}; + enum selftest_obj_type { TYPE_IDEV, }; struct mock_dev { struct device dev; + unsigned long flags; }; struct selftest_obj { @@ -126,18 +151,18 @@ struct selftest_obj { }; }; -static void mock_domain_blocking_free(struct iommu_domain *domain) -{ -} - static int mock_domain_nop_attach(struct iommu_domain *domain, struct device *dev) { + struct mock_dev *mdev = container_of(dev, struct mock_dev, dev); + + if (domain->dirty_ops && (mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY)) + return -EINVAL; + return 0; } static const struct iommu_domain_ops mock_blocking_ops = { - .free = mock_domain_blocking_free, .attach_dev = mock_domain_nop_attach, }; @@ -161,15 +186,96 @@ static void *mock_domain_hw_info(struct device *dev, u32 *length, u32 *type) return info; } -static struct iommu_domain *mock_domain_alloc(unsigned int iommu_domain_type) +static int mock_domain_set_dirty_tracking(struct iommu_domain *domain, + bool enable) { - struct mock_iommu_domain *mock; + struct mock_iommu_domain *mock = + container_of(domain, struct mock_iommu_domain, domain); + unsigned long flags = mock->flags; - if (iommu_domain_type == IOMMU_DOMAIN_BLOCKED) - return &mock_blocking_domain; + if (enable && !domain->dirty_ops) + return -EINVAL; - if (iommu_domain_type != IOMMU_DOMAIN_UNMANAGED) - return NULL; + /* No change? */ + if (!(enable ^ !!(flags & MOCK_DIRTY_TRACK))) + return 0; + + flags = (enable ? 
flags | MOCK_DIRTY_TRACK : flags & ~MOCK_DIRTY_TRACK); + + mock->flags = flags; + return 0; +} + +static bool mock_test_and_clear_dirty(struct mock_iommu_domain *mock, + unsigned long iova, size_t page_size, + unsigned long flags) +{ + unsigned long cur, end = iova + page_size - 1; + bool dirty = false; + void *ent, *old; + + for (cur = iova; cur < end; cur += MOCK_IO_PAGE_SIZE) { + ent = xa_load(&mock->pfns, cur / MOCK_IO_PAGE_SIZE); + if (!ent || !(xa_to_value(ent) & MOCK_PFN_DIRTY_IOVA)) + continue; + + dirty = true; + /* Clear dirty */ + if (!(flags & IOMMU_DIRTY_NO_CLEAR)) { + unsigned long val; + + val = xa_to_value(ent) & ~MOCK_PFN_DIRTY_IOVA; + old = xa_store(&mock->pfns, cur / MOCK_IO_PAGE_SIZE, + xa_mk_value(val), GFP_KERNEL); + WARN_ON_ONCE(ent != old); + } + } + + return dirty; +} + +static int mock_domain_read_and_clear_dirty(struct iommu_domain *domain, + unsigned long iova, size_t size, + unsigned long flags, + struct iommu_dirty_bitmap *dirty) +{ + struct mock_iommu_domain *mock = + container_of(domain, struct mock_iommu_domain, domain); + unsigned long end = iova + size; + void *ent; + + if (!(mock->flags & MOCK_DIRTY_TRACK) && dirty->bitmap) + return -EINVAL; + + do { + unsigned long pgsize = MOCK_IO_PAGE_SIZE; + unsigned long head; + + ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE); + if (!ent) { + iova += pgsize; + continue; + } + + head = iova & ~(pgsize - 1); + + /* Clear dirty */ + if (mock_test_and_clear_dirty(mock, head, pgsize, flags)) + iommu_dirty_bitmap_record(dirty, head, pgsize); + iova = head + pgsize; + } while (iova < end); + + return 0; +} + +static const struct iommu_dirty_ops dirty_ops = { + .set_dirty_tracking = mock_domain_set_dirty_tracking, + .read_and_clear_dirty = mock_domain_read_and_clear_dirty, +}; + +static struct iommu_domain *mock_domain_alloc_paging(struct device *dev) +{ + struct mock_iommu_domain *mock; mock = kzalloc(sizeof(*mock), GFP_KERNEL); if (!mock) @@ -177,10 +283,87 @@ static struct iommu_domain *mock_domain_alloc(unsigned int iommu_domain_type) mock->domain.geometry.aperture_start = MOCK_APERTURE_START; mock->domain.geometry.aperture_end = MOCK_APERTURE_LAST; mock->domain.pgsize_bitmap = MOCK_IO_PAGE_SIZE; + mock->domain.ops = mock_ops.default_domain_ops; + mock->domain.type = IOMMU_DOMAIN_UNMANAGED; xa_init(&mock->pfns); return &mock->domain; } +static struct iommu_domain * +__mock_domain_alloc_nested(struct mock_iommu_domain *mock_parent, + const struct iommu_hwpt_selftest *user_cfg) +{ + struct mock_iommu_domain_nested *mock_nested; + int i; + + mock_nested = kzalloc(sizeof(*mock_nested), GFP_KERNEL); + if (!mock_nested) + return ERR_PTR(-ENOMEM); + mock_nested->parent = mock_parent; + mock_nested->domain.ops = &domain_nested_ops; + mock_nested->domain.type = IOMMU_DOMAIN_NESTED; + for (i = 0; i < MOCK_NESTED_DOMAIN_IOTLB_NUM; i++) + mock_nested->iotlb[i] = user_cfg->iotlb; + return &mock_nested->domain; +} + +static struct iommu_domain *mock_domain_alloc(unsigned int iommu_domain_type) +{ + if (iommu_domain_type == IOMMU_DOMAIN_BLOCKED) + return &mock_blocking_domain; + if (iommu_domain_type == IOMMU_DOMAIN_UNMANAGED) + return mock_domain_alloc_paging(NULL); + return NULL; +} + +static struct iommu_domain * +mock_domain_alloc_user(struct device *dev, u32 flags, + struct iommu_domain *parent, + const struct iommu_user_data *user_data) +{ + struct mock_iommu_domain *mock_parent; + struct iommu_hwpt_selftest user_cfg; + int rc; + + /* must be mock_domain */ + if (!parent) { + struct mock_dev *mdev = container_of(dev, 
struct mock_dev, dev); + bool has_dirty_flag = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING; + bool no_dirty_ops = mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY; + struct iommu_domain *domain; + + if (flags & (~(IOMMU_HWPT_ALLOC_NEST_PARENT | + IOMMU_HWPT_ALLOC_DIRTY_TRACKING))) + return ERR_PTR(-EOPNOTSUPP); + if (user_data || (has_dirty_flag && no_dirty_ops)) + return ERR_PTR(-EOPNOTSUPP); + domain = mock_domain_alloc_paging(NULL); + if (!domain) + return ERR_PTR(-ENOMEM); + if (has_dirty_flag) + container_of(domain, struct mock_iommu_domain, domain) + ->domain.dirty_ops = &dirty_ops; + return domain; + } + + /* must be mock_domain_nested */ + if (user_data->type != IOMMU_HWPT_DATA_SELFTEST || flags) + return ERR_PTR(-EOPNOTSUPP); + if (!parent || parent->ops != mock_ops.default_domain_ops) + return ERR_PTR(-EINVAL); + + mock_parent = container_of(parent, struct mock_iommu_domain, domain); + if (!mock_parent) + return ERR_PTR(-EINVAL); + + rc = iommu_copy_struct_from_user(&user_cfg, user_data, + IOMMU_HWPT_DATA_SELFTEST, iotlb); + if (rc) + return ERR_PTR(rc); + + return __mock_domain_alloc_nested(mock_parent, &user_cfg); +} + static void mock_domain_free(struct iommu_domain *domain) { struct mock_iommu_domain *mock = @@ -258,7 +441,7 @@ static size_t mock_domain_unmap_pages(struct iommu_domain *domain, for (cur = 0; cur != pgsize; cur += MOCK_IO_PAGE_SIZE) { ent = xa_erase(&mock->pfns, iova / MOCK_IO_PAGE_SIZE); - WARN_ON(!ent); + /* * iommufd generates unmaps that must be a strict * superset of the map's performend So every starting @@ -268,13 +451,13 @@ static size_t mock_domain_unmap_pages(struct iommu_domain *domain, * passed to map_pages */ if (first) { - WARN_ON(!(xa_to_value(ent) & - MOCK_PFN_START_IOVA)); + WARN_ON(ent && !(xa_to_value(ent) & + MOCK_PFN_START_IOVA)); first = false; } if (pgcount == 1 && cur + MOCK_IO_PAGE_SIZE == pgsize) - WARN_ON(!(xa_to_value(ent) & - MOCK_PFN_LAST_IOVA)); + WARN_ON(ent && !(xa_to_value(ent) & + MOCK_PFN_LAST_IOVA)); iova += MOCK_IO_PAGE_SIZE; ret += MOCK_IO_PAGE_SIZE; @@ -298,34 +481,74 @@ static phys_addr_t mock_domain_iova_to_phys(struct iommu_domain *domain, static bool mock_domain_capable(struct device *dev, enum iommu_cap cap) { - return cap == IOMMU_CAP_CACHE_COHERENCY; -} + struct mock_dev *mdev = container_of(dev, struct mock_dev, dev); -static void mock_domain_set_plaform_dma_ops(struct device *dev) -{ - /* - * mock doesn't setup default domains because we can't hook into the - * normal probe path - */ + switch (cap) { + case IOMMU_CAP_CACHE_COHERENCY: + return true; + case IOMMU_CAP_DIRTY_TRACKING: + return !(mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY); + default: + break; + } + + return false; } +static struct iopf_queue *mock_iommu_iopf_queue; + static struct iommu_device mock_iommu_device = { }; static struct iommu_device *mock_probe_device(struct device *dev) { + if (dev->bus != &iommufd_mock_bus_type.bus) + return ERR_PTR(-ENODEV); return &mock_iommu_device; } +static void mock_domain_page_response(struct device *dev, struct iopf_fault *evt, + struct iommu_page_response *msg) +{ +} + +static int mock_dev_enable_feat(struct device *dev, enum iommu_dev_features feat) +{ + if (feat != IOMMU_DEV_FEAT_IOPF || !mock_iommu_iopf_queue) + return -ENODEV; + + return iopf_queue_add_device(mock_iommu_iopf_queue, dev); +} + +static int mock_dev_disable_feat(struct device *dev, enum iommu_dev_features feat) +{ + if (feat != IOMMU_DEV_FEAT_IOPF || !mock_iommu_iopf_queue) + return -ENODEV; + + iopf_queue_remove_device(mock_iommu_iopf_queue, dev); + + 
return 0; +} + static const struct iommu_ops mock_ops = { + /* + * IOMMU_DOMAIN_BLOCKED cannot be returned from def_domain_type() + * because it is zero. + */ + .default_domain = &mock_blocking_domain, + .blocked_domain = &mock_blocking_domain, .owner = THIS_MODULE, .pgsize_bitmap = MOCK_IO_PAGE_SIZE, .hw_info = mock_domain_hw_info, - .domain_alloc = mock_domain_alloc, + .domain_alloc_paging = mock_domain_alloc_paging, + .domain_alloc_user = mock_domain_alloc_user, .capable = mock_domain_capable, - .set_platform_dma_ops = mock_domain_set_plaform_dma_ops, .device_group = generic_device_group, .probe_device = mock_probe_device, + .page_response = mock_domain_page_response, + .dev_enable_feat = mock_dev_enable_feat, + .dev_disable_feat = mock_dev_disable_feat, + .user_pasid_table = true, .default_domain_ops = &(struct iommu_domain_ops){ .free = mock_domain_free, @@ -336,38 +559,66 @@ static const struct iommu_ops mock_ops = { }, }; +static void mock_domain_free_nested(struct iommu_domain *domain) +{ + struct mock_iommu_domain_nested *mock_nested = + container_of(domain, struct mock_iommu_domain_nested, domain); + + kfree(mock_nested); +} + +static struct iommu_domain_ops domain_nested_ops = { + .free = mock_domain_free_nested, + .attach_dev = mock_domain_nop_attach, +}; + static inline struct iommufd_hw_pagetable * -get_md_pagetable(struct iommufd_ucmd *ucmd, u32 mockpt_id, - struct mock_iommu_domain **mock) +__get_md_pagetable(struct iommufd_ucmd *ucmd, u32 mockpt_id, u32 hwpt_type) { - struct iommufd_hw_pagetable *hwpt; struct iommufd_object *obj; - obj = iommufd_get_object(ucmd->ictx, mockpt_id, - IOMMUFD_OBJ_HW_PAGETABLE); + obj = iommufd_get_object(ucmd->ictx, mockpt_id, hwpt_type); if (IS_ERR(obj)) return ERR_CAST(obj); - hwpt = container_of(obj, struct iommufd_hw_pagetable, obj); - if (hwpt->domain->ops != mock_ops.default_domain_ops) { - iommufd_put_object(&hwpt->obj); + return container_of(obj, struct iommufd_hw_pagetable, obj); +} + +static inline struct iommufd_hw_pagetable * +get_md_pagetable(struct iommufd_ucmd *ucmd, u32 mockpt_id, + struct mock_iommu_domain **mock) +{ + struct iommufd_hw_pagetable *hwpt; + + hwpt = __get_md_pagetable(ucmd, mockpt_id, IOMMUFD_OBJ_HWPT_PAGING); + if (IS_ERR(hwpt)) + return hwpt; + if (hwpt->domain->type != IOMMU_DOMAIN_UNMANAGED || + hwpt->domain->ops != mock_ops.default_domain_ops) { + iommufd_put_object(ucmd->ictx, &hwpt->obj); return ERR_PTR(-EINVAL); } *mock = container_of(hwpt->domain, struct mock_iommu_domain, domain); return hwpt; } -struct mock_bus_type { - struct bus_type bus; - struct notifier_block nb; -}; - -static struct mock_bus_type iommufd_mock_bus_type = { - .bus = { - .name = "iommufd_mock", - }, -}; +static inline struct iommufd_hw_pagetable * +get_md_pagetable_nested(struct iommufd_ucmd *ucmd, u32 mockpt_id, + struct mock_iommu_domain_nested **mock_nested) +{ + struct iommufd_hw_pagetable *hwpt; -static atomic_t mock_dev_num; + hwpt = __get_md_pagetable(ucmd, mockpt_id, IOMMUFD_OBJ_HWPT_NESTED); + if (IS_ERR(hwpt)) + return hwpt; + if (hwpt->domain->type != IOMMU_DOMAIN_NESTED || + hwpt->domain->ops != &domain_nested_ops) { + iommufd_put_object(ucmd->ictx, &hwpt->obj); + return ERR_PTR(-EINVAL); + } + *mock_nested = container_of(hwpt->domain, + struct mock_iommu_domain_nested, domain); + return hwpt; +} static void mock_dev_release(struct device *dev) { @@ -377,16 +628,20 @@ static void mock_dev_release(struct device *dev) kfree(mdev); } -static struct mock_dev *mock_dev_create(void) +static struct mock_dev 
*mock_dev_create(unsigned long dev_flags) { struct mock_dev *mdev; int rc; + if (dev_flags & ~(MOCK_FLAGS_DEVICE_NO_DIRTY)) + return ERR_PTR(-EINVAL); + mdev = kzalloc(sizeof(*mdev), GFP_KERNEL); if (!mdev) return ERR_PTR(-ENOMEM); device_initialize(&mdev->dev); + mdev->flags = dev_flags; mdev->dev.release = mock_dev_release; mdev->dev.bus = &iommufd_mock_bus_type.bus; @@ -395,7 +650,7 @@ static struct mock_dev *mock_dev_create(void) if (rc) goto err_put; - rc = device_add(&mdev->dev); + rc = iommu_mock_device_add(&mdev->dev, &mock_iommu.iommu_dev); if (rc) goto err_put; return mdev; @@ -422,6 +677,7 @@ static int iommufd_test_mock_domain(struct iommufd_ucmd *ucmd, struct iommufd_device *idev; struct selftest_obj *sobj; u32 pt_id = cmd->id; + u32 dev_flags = 0; u32 idev_id; int rc; @@ -432,7 +688,10 @@ static int iommufd_test_mock_domain(struct iommufd_ucmd *ucmd, sobj->idev.ictx = ucmd->ictx; sobj->type = TYPE_IDEV; - sobj->idev.mock_dev = mock_dev_create(); + if (cmd->op == IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS) + dev_flags = cmd->mock_domain_flags.dev_flags; + + sobj->idev.mock_dev = mock_dev_create(dev_flags); if (IS_ERR(sobj->idev.mock_dev)) { rc = PTR_ERR(sobj->idev.mock_dev); goto out_sobj; @@ -503,7 +762,7 @@ static int iommufd_test_mock_domain_replace(struct iommufd_ucmd *ucmd, rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd)); out_dev_obj: - iommufd_put_object(dev_obj); + iommufd_put_object(ucmd->ictx, dev_obj); return rc; } @@ -521,7 +780,7 @@ static int iommufd_test_add_reserved(struct iommufd_ucmd *ucmd, down_write(&ioas->iopt.iova_rwsem); rc = iopt_reserve_iova(&ioas->iopt, start, start + length - 1, NULL); up_write(&ioas->iopt.iova_rwsem); - iommufd_put_object(&ioas->obj); + iommufd_put_object(ucmd->ictx, &ioas->obj); return rc; } @@ -576,7 +835,7 @@ static int iommufd_test_md_check_pa(struct iommufd_ucmd *ucmd, rc = 0; out_put: - iommufd_put_object(&hwpt->obj); + iommufd_put_object(ucmd->ictx, &hwpt->obj); return rc; } @@ -992,6 +1251,98 @@ static_assert((unsigned int)MOCK_ACCESS_RW_WRITE == IOMMUFD_ACCESS_RW_WRITE); static_assert((unsigned int)MOCK_ACCESS_RW_SLOW_PATH == __IOMMUFD_ACCESS_RW_SLOW_PATH); +static int iommufd_test_dirty(struct iommufd_ucmd *ucmd, unsigned int mockpt_id, + unsigned long iova, size_t length, + unsigned long page_size, void __user *uptr, + u32 flags) +{ + unsigned long bitmap_size, i, max; + struct iommu_test_cmd *cmd = ucmd->cmd; + struct iommufd_hw_pagetable *hwpt; + struct mock_iommu_domain *mock; + int rc, count = 0; + void *tmp; + + if (!page_size || !length || iova % page_size || length % page_size || + !uptr) + return -EINVAL; + + hwpt = get_md_pagetable(ucmd, mockpt_id, &mock); + if (IS_ERR(hwpt)) + return PTR_ERR(hwpt); + + if (!(mock->flags & MOCK_DIRTY_TRACK)) { + rc = -EINVAL; + goto out_put; + } + + max = length / page_size; + bitmap_size = max / BITS_PER_BYTE; + + tmp = kvzalloc(bitmap_size, GFP_KERNEL_ACCOUNT); + if (!tmp) { + rc = -ENOMEM; + goto out_put; + } + + if (copy_from_user(tmp, uptr, bitmap_size)) { + rc = -EFAULT; + goto out_free; + } + + for (i = 0; i < max; i++) { + unsigned long cur = iova + i * page_size; + void *ent, *old; + + if (!test_bit(i, (unsigned long *)tmp)) + continue; + + ent = xa_load(&mock->pfns, cur / page_size); + if (ent) { + unsigned long val; + + val = xa_to_value(ent) | MOCK_PFN_DIRTY_IOVA; + old = xa_store(&mock->pfns, cur / page_size, + xa_mk_value(val), GFP_KERNEL); + WARN_ON_ONCE(ent != old); + count++; + } + } + + cmd->dirty.out_nr_dirty = count; + rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd)); 
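Two halves of the mock dirty-tracking path meet in the xarray that backs the mock domain: IOMMU_TEST_OP_DIRTY (just above) seeds MOCK_PFN_DIRTY_IOVA into existing PFN entries according to a user-supplied bitmap, and mock_domain_read_and_clear_dirty() later harvests and clears those bits into the iova_bitmap handed in by IOMMU_HWPT_GET_DIRTY_BITMAP. A minimal sketch of that tag-and-harvest idiom, assuming a plain xarray of value entries and a hypothetical DIRTY_TAG bit rather than the mock driver's exact constants:

#include <linux/xarray.h>
#include <linux/bits.h>

#define DIRTY_TAG	BIT(1)	/* stand-in for MOCK_PFN_DIRTY_IOVA */

/* Seed: mark the entry at @index dirty, preserving the stored pfn bits. */
static void tag_dirty(struct xarray *xa, unsigned long index)
{
	void *ent = xa_load(xa, index);

	if (ent)
		xa_store(xa, index,
			 xa_mk_value(xa_to_value(ent) | DIRTY_TAG), GFP_KERNEL);
}

/* Harvest: test-and-clear, as the read_and_clear_dirty() path does per page. */
static bool harvest_dirty(struct xarray *xa, unsigned long index)
{
	void *ent = xa_load(xa, index);
	unsigned long val;

	if (!ent || !(xa_to_value(ent) & DIRTY_TAG))
		return false;

	val = xa_to_value(ent) & ~DIRTY_TAG;
	xa_store(xa, index, xa_mk_value(val), GFP_KERNEL);
	return true;
}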
+out_free: + kvfree(tmp); +out_put: + iommufd_put_object(ucmd->ictx, &hwpt->obj); + return rc; +} + +static int iommufd_test_trigger_iopf(struct iommufd_ucmd *ucmd, + struct iommu_test_cmd *cmd) +{ + struct iopf_fault event = { }; + struct iommufd_device *idev; + + idev = iommufd_get_device(ucmd, cmd->trigger_iopf.dev_id); + if (IS_ERR(idev)) + return PTR_ERR(idev); + + event.fault.prm.flags = IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE; + if (cmd->trigger_iopf.pasid != IOMMU_NO_PASID) + event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID; + event.fault.type = IOMMU_FAULT_PAGE_REQ; + event.fault.prm.addr = cmd->trigger_iopf.addr; + event.fault.prm.pasid = cmd->trigger_iopf.pasid; + event.fault.prm.grpid = cmd->trigger_iopf.grpid; + event.fault.prm.perm = cmd->trigger_iopf.perm; + + iommu_report_device_fault(idev->dev, &event); + iommufd_put_object(ucmd->ictx, &idev->obj); + + return 0; +} + void iommufd_selftest_destroy(struct iommufd_object *obj) { struct selftest_obj *sobj = container_of(obj, struct selftest_obj, obj); @@ -1015,6 +1366,7 @@ int iommufd_test(struct iommufd_ucmd *ucmd) cmd->add_reserved.start, cmd->add_reserved.length); case IOMMU_TEST_OP_MOCK_DOMAIN: + case IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS: return iommufd_test_mock_domain(ucmd, cmd); case IOMMU_TEST_OP_MOCK_DOMAIN_REPLACE: return iommufd_test_mock_domain_replace( @@ -1056,6 +1408,14 @@ int iommufd_test(struct iommufd_ucmd *ucmd) return -EINVAL; iommufd_test_memory_limit = cmd->memory_limit.limit; return 0; + case IOMMU_TEST_OP_DIRTY: + return iommufd_test_dirty(ucmd, cmd->id, cmd->dirty.iova, + cmd->dirty.length, + cmd->dirty.page_size, + u64_to_user_ptr(cmd->dirty.uptr), + cmd->dirty.flags); + case IOMMU_TEST_OP_TRIGGER_IOPF: + return iommufd_test_trigger_iopf(ucmd, cmd); default: return -EOPNOTSUPP; } @@ -1097,6 +1457,9 @@ int __init iommufd_test_init(void) &iommufd_mock_bus_type.nb); if (rc) goto err_sysfs; + + mock_iommu_iopf_queue = iopf_queue_alloc("mock-iopfq"); + return 0; err_sysfs: @@ -1112,6 +1475,11 @@ int __init iommufd_test_init(void) void iommufd_test_exit(void) { + if (mock_iommu_iopf_queue) { + iopf_queue_free(mock_iommu_iopf_queue); + mock_iommu_iopf_queue = NULL; + } + iommu_device_sysfs_remove(&mock_iommu_device); iommu_device_unregister_bus(&mock_iommu_device, &iommufd_mock_bus_type.bus, diff --git a/drivers/iommu/iommufd/vfio_compat.c b/drivers/iommu/iommufd/vfio_compat.c index 6c810bf80f99..a3ad5f0b6c59 100644 --- a/drivers/iommu/iommufd/vfio_compat.c +++ b/drivers/iommu/iommufd/vfio_compat.c @@ -41,7 +41,7 @@ int iommufd_vfio_compat_ioas_get_id(struct iommufd_ctx *ictx, u32 *out_ioas_id) if (IS_ERR(ioas)) return PTR_ERR(ioas); *out_ioas_id = ioas->obj.id; - iommufd_put_object(&ioas->obj); + iommufd_put_object(ictx, &ioas->obj); return 0; } EXPORT_SYMBOL_NS_GPL(iommufd_vfio_compat_ioas_get_id, IOMMUFD_VFIO); @@ -98,7 +98,7 @@ int iommufd_vfio_compat_ioas_create(struct iommufd_ctx *ictx) if (ictx->vfio_ioas && iommufd_lock_obj(&ictx->vfio_ioas->obj)) { ret = 0; - iommufd_put_object(&ictx->vfio_ioas->obj); + iommufd_put_object(ictx, &ictx->vfio_ioas->obj); goto out_abort; } ictx->vfio_ioas = ioas; @@ -133,7 +133,7 @@ int iommufd_vfio_ioas(struct iommufd_ucmd *ucmd) if (IS_ERR(ioas)) return PTR_ERR(ioas); cmd->ioas_id = ioas->obj.id; - iommufd_put_object(&ioas->obj); + iommufd_put_object(ucmd->ictx, &ioas->obj); return iommufd_ucmd_respond(ucmd, sizeof(*cmd)); case IOMMU_VFIO_IOAS_SET: @@ -143,7 +143,7 @@ int iommufd_vfio_ioas(struct iommufd_ucmd *ucmd) xa_lock(&ucmd->ictx->objects); 
ucmd->ictx->vfio_ioas = ioas; xa_unlock(&ucmd->ictx->objects); - iommufd_put_object(&ioas->obj); + iommufd_put_object(ucmd->ictx, &ioas->obj); return 0; case IOMMU_VFIO_IOAS_CLEAR: @@ -190,7 +190,7 @@ static int iommufd_vfio_map_dma(struct iommufd_ctx *ictx, unsigned int cmd, iova = map.iova; rc = iopt_map_user_pages(ictx, &ioas->iopt, &iova, u64_to_user_ptr(map.vaddr), map.size, iommu_prot, 0); - iommufd_put_object(&ioas->obj); + iommufd_put_object(ictx, &ioas->obj); return rc; } @@ -249,13 +249,13 @@ static int iommufd_vfio_unmap_dma(struct iommufd_ctx *ictx, unsigned int cmd, rc = -EFAULT; err_put: - iommufd_put_object(&ioas->obj); + iommufd_put_object(ictx, &ioas->obj); return rc; } static int iommufd_vfio_cc_iommu(struct iommufd_ctx *ictx) { - struct iommufd_hw_pagetable *hwpt; + struct iommufd_hwpt_paging *hwpt_paging; struct iommufd_ioas *ioas; int rc = 1; @@ -264,15 +264,15 @@ static int iommufd_vfio_cc_iommu(struct iommufd_ctx *ictx) return PTR_ERR(ioas); mutex_lock(&ioas->mutex); - list_for_each_entry(hwpt, &ioas->hwpt_list, hwpt_item) { - if (!hwpt->enforce_cache_coherency) { + list_for_each_entry(hwpt_paging, &ioas->hwpt_list, hwpt_item) { + if (!hwpt_paging->enforce_cache_coherency) { rc = 0; break; } } mutex_unlock(&ioas->mutex); - iommufd_put_object(&ioas->obj); + iommufd_put_object(ictx, &ioas->obj); return rc; } @@ -349,7 +349,7 @@ static int iommufd_vfio_set_iommu(struct iommufd_ctx *ictx, unsigned long type) */ if (type == VFIO_TYPE1_IOMMU) rc = iopt_disable_large_pages(&ioas->iopt); - iommufd_put_object(&ioas->obj); + iommufd_put_object(ictx, &ioas->obj); return rc; } @@ -511,7 +511,7 @@ static int iommufd_vfio_iommu_get_info(struct iommufd_ctx *ictx, out_put: up_read(&ioas->iopt.iova_rwsem); - iommufd_put_object(&ioas->obj); + iommufd_put_object(ictx, &ioas->obj); return rc; } diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 65ff69477c43..eaabae761577 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -295,6 +295,18 @@ static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain, mmu->utlb_ctx[utlb] = domain->context_id; } +/* + * Disable MMU translation for the microTLB. 
+ */ +static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain, + unsigned int utlb) +{ + struct ipmmu_vmsa_device *mmu = domain->mmu; + + ipmmu_imuctr_write(mmu, utlb, 0); + mmu->utlb_ctx[utlb] = IPMMU_CTX_INVALID; +} + static void ipmmu_tlb_flush_all(void *cookie) { struct ipmmu_vmsa_domain *domain = cookie; @@ -551,13 +563,10 @@ static irqreturn_t ipmmu_irq(int irq, void *dev) * IOMMU Operations */ -static struct iommu_domain *ipmmu_domain_alloc(unsigned type) +static struct iommu_domain *ipmmu_domain_alloc_paging(struct device *dev) { struct ipmmu_vmsa_domain *domain; - if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA) - return NULL; - domain = kzalloc(sizeof(*domain), GFP_KERNEL); if (!domain) return NULL; @@ -627,6 +636,36 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain, return 0; } +static int ipmmu_iommu_identity_attach(struct iommu_domain *identity_domain, + struct device *dev) +{ + struct iommu_domain *io_domain = iommu_get_domain_for_dev(dev); + struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); + struct ipmmu_vmsa_domain *domain; + unsigned int i; + + if (io_domain == identity_domain || !io_domain) + return 0; + + domain = to_vmsa_domain(io_domain); + for (i = 0; i < fwspec->num_ids; ++i) + ipmmu_utlb_disable(domain, fwspec->ids[i]); + + /* + * TODO: Optimize by disabling the context when no device is attached. + */ + return 0; +} + +static struct iommu_domain_ops ipmmu_iommu_identity_ops = { + .attach_dev = ipmmu_iommu_identity_attach, +}; + +static struct iommu_domain ipmmu_iommu_identity_domain = { + .type = IOMMU_DOMAIN_IDENTITY, + .ops = &ipmmu_iommu_identity_ops, +}; + static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova, phys_addr_t paddr, size_t pgsize, size_t pgcount, int prot, gfp_t gfp, size_t *mapped) @@ -849,7 +888,8 @@ static struct iommu_group *ipmmu_find_group(struct device *dev) } static const struct iommu_ops ipmmu_ops = { - .domain_alloc = ipmmu_domain_alloc, + .identity_domain = &ipmmu_iommu_identity_domain, + .domain_alloc_paging = ipmmu_domain_alloc_paging, .probe_device = ipmmu_probe_device, .release_device = ipmmu_release_device, .probe_finalize = ipmmu_probe_finalize, diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c index 79d89bad5132..a163cee0b724 100644 --- a/drivers/iommu/msm_iommu.c +++ b/drivers/iommu/msm_iommu.c @@ -302,13 +302,10 @@ static void __program_context(void __iomem *base, int ctx, SET_M(base, ctx, 1); } -static struct iommu_domain *msm_iommu_domain_alloc(unsigned type) +static struct iommu_domain *msm_iommu_domain_alloc_paging(struct device *dev) { struct msm_priv *priv; - if (type != IOMMU_DOMAIN_UNMANAGED) - return NULL; - priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) goto fail_nomem; @@ -443,15 +440,20 @@ static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev) return ret; } -static void msm_iommu_set_platform_dma(struct device *dev) +static int msm_iommu_identity_attach(struct iommu_domain *identity_domain, + struct device *dev) { struct iommu_domain *domain = iommu_get_domain_for_dev(dev); - struct msm_priv *priv = to_msm_priv(domain); + struct msm_priv *priv; unsigned long flags; struct msm_iommu_dev *iommu; struct msm_iommu_ctx_dev *master; - int ret; + int ret = 0; + + if (domain == identity_domain || !domain) + return 0; + priv = to_msm_priv(domain); free_io_pgtable_ops(priv->iop); spin_lock_irqsave(&msm_iommu_lock, flags); @@ -468,8 +470,18 @@ static void msm_iommu_set_platform_dma(struct device *dev) } fail: 
spin_unlock_irqrestore(&msm_iommu_lock, flags); + return ret; } +static struct iommu_domain_ops msm_iommu_identity_ops = { + .attach_dev = msm_iommu_identity_attach, +}; + +static struct iommu_domain msm_iommu_identity_domain = { + .type = IOMMU_DOMAIN_IDENTITY, + .ops = &msm_iommu_identity_ops, +}; + static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t pa, size_t pgsize, size_t pgcount, int prot, gfp_t gfp, size_t *mapped) @@ -675,10 +687,10 @@ irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id) } static struct iommu_ops msm_iommu_ops = { - .domain_alloc = msm_iommu_domain_alloc, + .identity_domain = &msm_iommu_identity_domain, + .domain_alloc_paging = msm_iommu_domain_alloc_paging, .probe_device = msm_iommu_probe_device, .device_group = generic_device_group, - .set_platform_dma_ops = msm_iommu_set_platform_dma, .pgsize_bitmap = MSM_IOMMU_PGSIZES, .of_xlate = qcom_iommu_of_xlate, .default_domain_ops = &(const struct iommu_domain_ops) { diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index 06c0770ff894..d773bd26097d 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -688,13 +688,10 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom, return 0; } -static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type) +static struct iommu_domain *mtk_iommu_domain_alloc_paging(struct device *dev) { struct mtk_iommu_domain *dom; - if (type != IOMMU_DOMAIN_DMA && type != IOMMU_DOMAIN_UNMANAGED) - return NULL; - dom = kzalloc(sizeof(*dom), GFP_KERNEL); if (!dom) return NULL; @@ -776,6 +773,28 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain, return ret; } +static int mtk_iommu_identity_attach(struct iommu_domain *identity_domain, + struct device *dev) +{ + struct iommu_domain *domain = iommu_get_domain_for_dev(dev); + struct mtk_iommu_data *data = dev_iommu_priv_get(dev); + + if (domain == identity_domain || !domain) + return 0; + + mtk_iommu_config(data, dev, false, 0); + return 0; +} + +static struct iommu_domain_ops mtk_iommu_identity_ops = { + .attach_dev = mtk_iommu_identity_attach, +}; + +static struct iommu_domain mtk_iommu_identity_domain = { + .type = IOMMU_DOMAIN_IDENTITY, + .ops = &mtk_iommu_identity_ops, +}; + static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t pgsize, size_t pgcount, int prot, gfp_t gfp, size_t *mapped) @@ -843,16 +862,11 @@ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain, static struct iommu_device *mtk_iommu_probe_device(struct device *dev) { struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); - struct mtk_iommu_data *data; + struct mtk_iommu_data *data = dev_iommu_priv_get(dev); struct device_link *link; struct device *larbdev; unsigned int larbid, larbidx, i; - if (!fwspec || fwspec->ops != &mtk_iommu_ops) - return ERR_PTR(-ENODEV); /* Not a iommu client device */ - - data = dev_iommu_priv_get(dev); - if (!MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) return &data->iommu; @@ -995,7 +1009,8 @@ static void mtk_iommu_get_resv_regions(struct device *dev, } static const struct iommu_ops mtk_iommu_ops = { - .domain_alloc = mtk_iommu_domain_alloc, + .identity_domain = &mtk_iommu_identity_domain, + .domain_alloc_paging = mtk_iommu_domain_alloc_paging, .probe_device = mtk_iommu_probe_device, .release_device = mtk_iommu_release_device, .device_group = mtk_iommu_device_group, diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c index f1754efcfe74..32cc8341d372 
100644 --- a/drivers/iommu/mtk_iommu_v1.c +++ b/drivers/iommu/mtk_iommu_v1.c @@ -270,13 +270,10 @@ static int mtk_iommu_v1_domain_finalise(struct mtk_iommu_v1_data *data) return 0; } -static struct iommu_domain *mtk_iommu_v1_domain_alloc(unsigned type) +static struct iommu_domain *mtk_iommu_v1_domain_alloc_paging(struct device *dev) { struct mtk_iommu_v1_domain *dom; - if (type != IOMMU_DOMAIN_UNMANAGED) - return NULL; - dom = kzalloc(sizeof(*dom), GFP_KERNEL); if (!dom) return NULL; @@ -319,13 +316,24 @@ static int mtk_iommu_v1_attach_device(struct iommu_domain *domain, struct device return 0; } -static void mtk_iommu_v1_set_platform_dma(struct device *dev) +static int mtk_iommu_v1_identity_attach(struct iommu_domain *identity_domain, + struct device *dev) { struct mtk_iommu_v1_data *data = dev_iommu_priv_get(dev); mtk_iommu_v1_config(data, dev, false); + return 0; } +static struct iommu_domain_ops mtk_iommu_v1_identity_ops = { + .attach_dev = mtk_iommu_v1_identity_attach, +}; + +static struct iommu_domain mtk_iommu_v1_identity_domain = { + .type = IOMMU_DOMAIN_IDENTITY, + .ops = &mtk_iommu_v1_identity_ops, +}; + static int mtk_iommu_v1_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t pgsize, size_t pgcount, int prot, gfp_t gfp, size_t *mapped) @@ -441,11 +449,6 @@ static int mtk_iommu_v1_create_mapping(struct device *dev, struct of_phandle_arg return 0; } -static int mtk_iommu_v1_def_domain_type(struct device *dev) -{ - return IOMMU_DOMAIN_UNMANAGED; -} - static struct iommu_device *mtk_iommu_v1_probe_device(struct device *dev) { struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); @@ -478,9 +481,6 @@ static struct iommu_device *mtk_iommu_v1_probe_device(struct device *dev) idx++; } - if (!fwspec || fwspec->ops != &mtk_iommu_v1_ops) - return ERR_PTR(-ENODEV); /* Not a iommu client device */ - data = dev_iommu_priv_get(dev); /* Link the consumer device with the smi-larb device(supplier) */ @@ -578,14 +578,13 @@ static int mtk_iommu_v1_hw_init(const struct mtk_iommu_v1_data *data) } static const struct iommu_ops mtk_iommu_v1_ops = { - .domain_alloc = mtk_iommu_v1_domain_alloc, + .identity_domain = &mtk_iommu_v1_identity_domain, + .domain_alloc_paging = mtk_iommu_v1_domain_alloc_paging, .probe_device = mtk_iommu_v1_probe_device, .probe_finalize = mtk_iommu_v1_probe_finalize, .release_device = mtk_iommu_v1_release_device, - .def_domain_type = mtk_iommu_v1_def_domain_type, .device_group = generic_device_group, .pgsize_bitmap = MT2701_IOMMU_PAGE_SIZE, - .set_platform_dma_ops = mtk_iommu_v1_set_platform_dma, .owner = THIS_MODULE, .default_domain_ops = &(const struct iommu_domain_ops) { .attach_dev = mtk_iommu_v1_attach_device, diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index 537e402f9bba..4efa5a348862 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -1234,24 +1234,25 @@ static int omap_iommu_probe(struct platform_device *pdev) if (err) goto out_group; - err = iommu_device_register(&obj->iommu, &omap_iommu_ops, &pdev->dev); - if (err) - goto out_sysfs; + obj->has_iommu_driver = true; } + err = iommu_device_register(&obj->iommu, &omap_iommu_ops, &pdev->dev); + if (err) + goto out_sysfs; + pm_runtime_enable(obj->dev); omap_iommu_debugfs_add(obj); dev_info(&pdev->dev, "%s registered\n", obj->name); - /* Re-probe bus to probe device attached to this IOMMU */ - bus_iommu_probe(&platform_bus_type); - return 0; out_sysfs: iommu_device_sysfs_remove(&obj->iommu); + if (obj->has_iommu_driver) + 
iommu_device_sysfs_remove(&obj->iommu); out_group: iommu_group_put(obj->group); return err; @@ -1261,13 +1262,10 @@ static void omap_iommu_remove(struct platform_device *pdev) { struct omap_iommu *obj = platform_get_drvdata(pdev); - if (obj->group) { - iommu_group_put(obj->group); - obj->group = NULL; - + if (obj->has_iommu_driver) iommu_device_sysfs_remove(&obj->iommu); - iommu_device_unregister(&obj->iommu); - } + + iommu_device_unregister(&obj->iommu); omap_iommu_debugfs_remove(obj); @@ -1555,23 +1553,35 @@ static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain, omap_domain->dev = NULL; } -static void omap_iommu_set_platform_dma(struct device *dev) +static int omap_iommu_identity_attach(struct iommu_domain *identity_domain, + struct device *dev) { struct iommu_domain *domain = iommu_get_domain_for_dev(dev); - struct omap_iommu_domain *omap_domain = to_omap_domain(domain); + struct omap_iommu_domain *omap_domain; + + if (domain == identity_domain || !domain) + return 0; + omap_domain = to_omap_domain(domain); spin_lock(&omap_domain->lock); _omap_iommu_detach_dev(omap_domain, dev); spin_unlock(&omap_domain->lock); + return 0; } -static struct iommu_domain *omap_iommu_domain_alloc(unsigned type) +static struct iommu_domain_ops omap_iommu_identity_ops = { + .attach_dev = omap_iommu_identity_attach, +}; + +static struct iommu_domain omap_iommu_identity_domain = { + .type = IOMMU_DOMAIN_IDENTITY, + .ops = &omap_iommu_identity_ops, +}; + +static struct iommu_domain *omap_iommu_domain_alloc_paging(struct device *dev) { struct omap_iommu_domain *omap_domain; - if (type != IOMMU_DOMAIN_UNMANAGED) - return NULL; - omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL); if (!omap_domain) return NULL; @@ -1731,12 +1741,19 @@ static struct iommu_group *omap_iommu_device_group(struct device *dev) return group; } +static int omap_iommu_of_xlate(struct device *dev, const struct of_phandle_args *args) +{ + /* TODO: collect args->np to save re-parsing in probe above */ + return 0; +} + static const struct iommu_ops omap_iommu_ops = { - .domain_alloc = omap_iommu_domain_alloc, + .identity_domain = &omap_iommu_identity_domain, + .domain_alloc_paging = omap_iommu_domain_alloc_paging, .probe_device = omap_iommu_probe_device, .release_device = omap_iommu_release_device, .device_group = omap_iommu_device_group, - .set_platform_dma_ops = omap_iommu_set_platform_dma, + .of_xlate = omap_iommu_of_xlate, .pgsize_bitmap = OMAP_IOMMU_PGSIZES, .default_domain_ops = &(const struct iommu_domain_ops) { .attach_dev = omap_iommu_attach_dev, diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index 36fec26d2a04..527a4395a1b4 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c @@ -989,13 +989,8 @@ static int rk_iommu_identity_attach(struct iommu_domain *identity_domain, return 0; } -static void rk_iommu_identity_free(struct iommu_domain *domain) -{ -} - static struct iommu_domain_ops rk_identity_ops = { .attach_dev = rk_iommu_identity_attach, - .free = rk_iommu_identity_free, }; static struct iommu_domain rk_identity_domain = { @@ -1003,13 +998,6 @@ static struct iommu_domain rk_identity_domain = { .ops = &rk_identity_ops, }; -#ifdef CONFIG_ARM -static void rk_iommu_set_platform_dma(struct device *dev) -{ - WARN_ON(rk_iommu_identity_attach(&rk_identity_domain, dev)); -} -#endif - static int rk_iommu_attach_device(struct iommu_domain *domain, struct device *dev) { @@ -1055,16 +1043,10 @@ static int rk_iommu_attach_device(struct iommu_domain *domain, 
return ret; } -static struct iommu_domain *rk_iommu_domain_alloc(unsigned type) +static struct iommu_domain *rk_iommu_domain_alloc_paging(struct device *dev) { struct rk_iommu_domain *rk_domain; - if (type == IOMMU_DOMAIN_IDENTITY) - return &rk_identity_domain; - - if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA) - return NULL; - if (!dma_dev) return NULL; @@ -1185,13 +1167,11 @@ static int rk_iommu_of_xlate(struct device *dev, } static const struct iommu_ops rk_iommu_ops = { - .domain_alloc = rk_iommu_domain_alloc, + .identity_domain = &rk_identity_domain, + .domain_alloc_paging = rk_iommu_domain_alloc_paging, .probe_device = rk_iommu_probe_device, .release_device = rk_iommu_release_device, .device_group = rk_iommu_device_group, -#ifdef CONFIG_ARM - .set_platform_dma_ops = rk_iommu_set_platform_dma, -#endif .pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP, .of_xlate = rk_iommu_of_xlate, .default_domain_ops = &(const struct iommu_domain_ops) { diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c index fbf59a8db29b..5695ad71d60e 100644 --- a/drivers/iommu/s390-iommu.c +++ b/drivers/iommu/s390-iommu.c @@ -39,13 +39,10 @@ static bool s390_iommu_capable(struct device *dev, enum iommu_cap cap) } } -static struct iommu_domain *s390_domain_alloc(unsigned domain_type) +static struct iommu_domain *s390_domain_alloc_paging(struct device *dev) { struct s390_domain *s390_domain; - if (domain_type != IOMMU_DOMAIN_UNMANAGED) - return NULL; - s390_domain = kzalloc(sizeof(*s390_domain), GFP_KERNEL); if (!s390_domain) return NULL; @@ -142,14 +139,31 @@ static int s390_iommu_attach_device(struct iommu_domain *domain, return 0; } -static void s390_iommu_set_platform_dma(struct device *dev) +/* + * Switch control over the IOMMU to S390's internal dma_api ops + */ +static int s390_iommu_platform_attach(struct iommu_domain *platform_domain, + struct device *dev) { struct zpci_dev *zdev = to_zpci_dev(dev); + if (!zdev->s390_domain) + return 0; + __s390_iommu_detach_device(zdev); zpci_dma_init_device(zdev); + return 0; } +static struct iommu_domain_ops s390_iommu_platform_ops = { + .attach_dev = s390_iommu_platform_attach, +}; + +static struct iommu_domain s390_iommu_platform_domain = { + .type = IOMMU_DOMAIN_PLATFORM, + .ops = &s390_iommu_platform_ops, +}; + static void s390_iommu_get_resv_regions(struct device *dev, struct list_head *list) { @@ -428,12 +442,12 @@ void zpci_destroy_iommu(struct zpci_dev *zdev) } static const struct iommu_ops s390_iommu_ops = { + .default_domain = &s390_iommu_platform_domain, .capable = s390_iommu_capable, - .domain_alloc = s390_domain_alloc, + .domain_alloc_paging = s390_domain_alloc_paging, .probe_device = s390_iommu_probe_device, .release_device = s390_iommu_release_device, .device_group = generic_device_group, - .set_platform_dma_ops = s390_iommu_set_platform_dma, .pgsize_bitmap = SZ_4K, .get_resv_regions = s390_iommu_get_resv_regions, .default_domain_ops = &(const struct iommu_domain_ops) { diff --git a/drivers/iommu/sprd-iommu.c b/drivers/iommu/sprd-iommu.c index c8e79a2d8b4c..525ff3610d39 100644 --- a/drivers/iommu/sprd-iommu.c +++ b/drivers/iommu/sprd-iommu.c @@ -134,13 +134,10 @@ sprd_iommu_pgt_size(struct iommu_domain *domain) SPRD_IOMMU_PAGE_SHIFT) * sizeof(u32); } -static struct iommu_domain *sprd_iommu_domain_alloc(unsigned int domain_type) +static struct iommu_domain *sprd_iommu_domain_alloc_paging(struct device *dev) { struct sprd_iommu_domain *dom; - if (domain_type != IOMMU_DOMAIN_DMA && domain_type != IOMMU_DOMAIN_UNMANAGED) - return NULL; 
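A recurring conversion across these driver updates: instead of special-casing IOMMU_DOMAIN_IDENTITY inside domain_alloc() or relying on the old set_platform_dma_ops callback, a driver now exposes one statically allocated identity domain whose attach_dev only tears down whatever translation the previously attached paging domain installed, and allocates paging domains through domain_alloc_paging(). Below is a condensed sketch of the shape shared by the msm, mtk, omap, rockchip and sun50i hunks above; the foo_* names are placeholders and the hardware teardown is a stub, so this is illustrative only, not any driver's actual code:

#include <linux/iommu.h>
#include <linux/slab.h>

static void foo_hw_disable_translation(struct device *dev)
{
	/* stub: make @dev's traffic pass through untranslated */
}

static int foo_identity_attach(struct iommu_domain *identity_domain,
			       struct device *dev)
{
	struct iommu_domain *old = iommu_get_domain_for_dev(dev);

	/* First attach, or already identity: nothing to undo */
	if (!old || old == identity_domain)
		return 0;

	foo_hw_disable_translation(dev);
	return 0;
}

static struct iommu_domain_ops foo_identity_ops = {
	.attach_dev = foo_identity_attach,
};

static struct iommu_domain foo_identity_domain = {
	.type = IOMMU_DOMAIN_IDENTITY,
	.ops = &foo_identity_ops,
};

static struct iommu_domain *foo_domain_alloc_paging(struct device *dev)
{
	/* a real driver embeds this in its own domain struct and sets ops */
	return kzalloc(sizeof(struct iommu_domain), GFP_KERNEL);
}

static const struct iommu_ops foo_iommu_ops = {
	.identity_domain = &foo_identity_domain,
	.domain_alloc_paging = foo_domain_alloc_paging,
};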
- dom = kzalloc(sizeof(*dom), GFP_KERNEL); if (!dom) return NULL; @@ -388,13 +385,7 @@ static phys_addr_t sprd_iommu_iova_to_phys(struct iommu_domain *domain, static struct iommu_device *sprd_iommu_probe_device(struct device *dev) { - struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); - struct sprd_iommu_device *sdev; - - if (!fwspec || fwspec->ops != &sprd_iommu_ops) - return ERR_PTR(-ENODEV); - - sdev = dev_iommu_priv_get(dev); + struct sprd_iommu_device *sdev = dev_iommu_priv_get(dev); return &sdev->iommu; } @@ -421,7 +412,7 @@ static int sprd_iommu_of_xlate(struct device *dev, struct of_phandle_args *args) static const struct iommu_ops sprd_iommu_ops = { - .domain_alloc = sprd_iommu_domain_alloc, + .domain_alloc_paging = sprd_iommu_domain_alloc_paging, .probe_device = sprd_iommu_probe_device, .device_group = sprd_iommu_device_group, .of_xlate = sprd_iommu_of_xlate, diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c index 94bd7f25f6f2..85ce3dc0bdd5 100644 --- a/drivers/iommu/sun50i-iommu.c +++ b/drivers/iommu/sun50i-iommu.c @@ -668,14 +668,11 @@ static phys_addr_t sun50i_iommu_iova_to_phys(struct iommu_domain *domain, sun50i_iova_get_page_offset(iova); } -static struct iommu_domain *sun50i_iommu_domain_alloc(unsigned type) +static struct iommu_domain * +sun50i_iommu_domain_alloc_paging(struct device *dev) { struct sun50i_iommu_domain *sun50i_domain; - if (type != IOMMU_DOMAIN_DMA && - type != IOMMU_DOMAIN_UNMANAGED) - return NULL; - sun50i_domain = kzalloc(sizeof(*sun50i_domain), GFP_KERNEL); if (!sun50i_domain) return NULL; @@ -758,21 +755,32 @@ static void sun50i_iommu_detach_domain(struct sun50i_iommu *iommu, iommu->domain = NULL; } -static void sun50i_iommu_detach_device(struct iommu_domain *domain, - struct device *dev) +static int sun50i_iommu_identity_attach(struct iommu_domain *identity_domain, + struct device *dev) { - struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain); struct sun50i_iommu *iommu = dev_iommu_priv_get(dev); + struct sun50i_iommu_domain *sun50i_domain; dev_dbg(dev, "Detaching from IOMMU domain\n"); - if (iommu->domain != domain) - return; + if (iommu->domain == identity_domain) + return 0; + sun50i_domain = to_sun50i_domain(iommu->domain); if (refcount_dec_and_test(&sun50i_domain->refcnt)) sun50i_iommu_detach_domain(iommu, sun50i_domain); + return 0; } +static struct iommu_domain_ops sun50i_iommu_identity_ops = { + .attach_dev = sun50i_iommu_identity_attach, +}; + +static struct iommu_domain sun50i_iommu_identity_domain = { + .type = IOMMU_DOMAIN_IDENTITY, + .ops = &sun50i_iommu_identity_ops, +}; + static int sun50i_iommu_attach_device(struct iommu_domain *domain, struct device *dev) { @@ -790,8 +798,7 @@ static int sun50i_iommu_attach_device(struct iommu_domain *domain, if (iommu->domain == domain) return 0; - if (iommu->domain) - sun50i_iommu_detach_device(iommu->domain, dev); + sun50i_iommu_identity_attach(&sun50i_iommu_identity_domain, dev); sun50i_iommu_attach_domain(iommu, sun50i_domain); @@ -828,9 +835,10 @@ static int sun50i_iommu_of_xlate(struct device *dev, } static const struct iommu_ops sun50i_iommu_ops = { + .identity_domain = &sun50i_iommu_identity_domain, .pgsize_bitmap = SZ_4K, .device_group = sun50i_iommu_device_group, - .domain_alloc = sun50i_iommu_domain_alloc, + .domain_alloc_paging = sun50i_iommu_domain_alloc_paging, .of_xlate = sun50i_iommu_of_xlate, .probe_device = sun50i_iommu_probe_device, .default_domain_ops = &(const struct iommu_domain_ops) { @@ -986,6 +994,7 @@ static int 
sun50i_iommu_probe(struct platform_device *pdev) if (!iommu) return -ENOMEM; spin_lock_init(&iommu->iommu_lock); + iommu->domain = &sun50i_iommu_identity_domain; platform_set_drvdata(pdev, iommu); iommu->dev = &pdev->dev; diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c deleted file mode 100644 index a482ff838b53..000000000000 --- a/drivers/iommu/tegra-gart.c +++ /dev/null @@ -1,371 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * IOMMU API for Graphics Address Relocation Table on Tegra20 - * - * Copyright (c) 2010-2012, NVIDIA CORPORATION. All rights reserved. - * - * Author: Hiroshi DOYU - */ - -#define dev_fmt(fmt) "gart: " fmt - -#include -#include -#include -#include -#include -#include -#include - -#include - -#define GART_REG_BASE 0x24 -#define GART_CONFIG (0x24 - GART_REG_BASE) -#define GART_ENTRY_ADDR (0x28 - GART_REG_BASE) -#define GART_ENTRY_DATA (0x2c - GART_REG_BASE) - -#define GART_ENTRY_PHYS_ADDR_VALID BIT(31) - -#define GART_PAGE_SHIFT 12 -#define GART_PAGE_SIZE (1 << GART_PAGE_SHIFT) -#define GART_PAGE_MASK GENMASK(30, GART_PAGE_SHIFT) - -/* bitmap of the page sizes currently supported */ -#define GART_IOMMU_PGSIZES (GART_PAGE_SIZE) - -struct gart_device { - void __iomem *regs; - u32 *savedata; - unsigned long iovmm_base; /* offset to vmm_area start */ - unsigned long iovmm_end; /* offset to vmm_area end */ - spinlock_t pte_lock; /* for pagetable */ - spinlock_t dom_lock; /* for active domain */ - unsigned int active_devices; /* number of active devices */ - struct iommu_domain *active_domain; /* current active domain */ - struct iommu_device iommu; /* IOMMU Core handle */ - struct device *dev; -}; - -static struct gart_device *gart_handle; /* unique for a system */ - -static bool gart_debug; - -/* - * Any interaction between any block on PPSB and a block on APB or AHB - * must have these read-back to ensure the APB/AHB bus transaction is - * complete before initiating activity on the PPSB block. - */ -#define FLUSH_GART_REGS(gart) readl_relaxed((gart)->regs + GART_CONFIG) - -#define for_each_gart_pte(gart, iova) \ - for (iova = gart->iovmm_base; \ - iova < gart->iovmm_end; \ - iova += GART_PAGE_SIZE) - -static inline void gart_set_pte(struct gart_device *gart, - unsigned long iova, unsigned long pte) -{ - writel_relaxed(iova, gart->regs + GART_ENTRY_ADDR); - writel_relaxed(pte, gart->regs + GART_ENTRY_DATA); -} - -static inline unsigned long gart_read_pte(struct gart_device *gart, - unsigned long iova) -{ - unsigned long pte; - - writel_relaxed(iova, gart->regs + GART_ENTRY_ADDR); - pte = readl_relaxed(gart->regs + GART_ENTRY_DATA); - - return pte; -} - -static void do_gart_setup(struct gart_device *gart, const u32 *data) -{ - unsigned long iova; - - for_each_gart_pte(gart, iova) - gart_set_pte(gart, iova, data ? 
*(data++) : 0); - - writel_relaxed(1, gart->regs + GART_CONFIG); - FLUSH_GART_REGS(gart); -} - -static inline bool gart_iova_range_invalid(struct gart_device *gart, - unsigned long iova, size_t bytes) -{ - return unlikely(iova < gart->iovmm_base || bytes != GART_PAGE_SIZE || - iova + bytes > gart->iovmm_end); -} - -static inline bool gart_pte_valid(struct gart_device *gart, unsigned long iova) -{ - return !!(gart_read_pte(gart, iova) & GART_ENTRY_PHYS_ADDR_VALID); -} - -static int gart_iommu_attach_dev(struct iommu_domain *domain, - struct device *dev) -{ - struct gart_device *gart = gart_handle; - int ret = 0; - - spin_lock(&gart->dom_lock); - - if (gart->active_domain && gart->active_domain != domain) { - ret = -EINVAL; - } else if (dev_iommu_priv_get(dev) != domain) { - dev_iommu_priv_set(dev, domain); - gart->active_domain = domain; - gart->active_devices++; - } - - spin_unlock(&gart->dom_lock); - - return ret; -} - -static void gart_iommu_set_platform_dma(struct device *dev) -{ - struct iommu_domain *domain = iommu_get_domain_for_dev(dev); - struct gart_device *gart = gart_handle; - - spin_lock(&gart->dom_lock); - - if (dev_iommu_priv_get(dev) == domain) { - dev_iommu_priv_set(dev, NULL); - - if (--gart->active_devices == 0) - gart->active_domain = NULL; - } - - spin_unlock(&gart->dom_lock); -} - -static struct iommu_domain *gart_iommu_domain_alloc(unsigned type) -{ - struct iommu_domain *domain; - - if (type != IOMMU_DOMAIN_UNMANAGED) - return NULL; - - domain = kzalloc(sizeof(*domain), GFP_KERNEL); - if (domain) { - domain->geometry.aperture_start = gart_handle->iovmm_base; - domain->geometry.aperture_end = gart_handle->iovmm_end - 1; - domain->geometry.force_aperture = true; - } - - return domain; -} - -static void gart_iommu_domain_free(struct iommu_domain *domain) -{ - WARN_ON(gart_handle->active_domain == domain); - kfree(domain); -} - -static inline int __gart_iommu_map(struct gart_device *gart, unsigned long iova, - unsigned long pa) -{ - if (unlikely(gart_debug && gart_pte_valid(gart, iova))) { - dev_err(gart->dev, "Page entry is in-use\n"); - return -EINVAL; - } - - gart_set_pte(gart, iova, GART_ENTRY_PHYS_ADDR_VALID | pa); - - return 0; -} - -static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova, - phys_addr_t pa, size_t bytes, int prot, gfp_t gfp) -{ - struct gart_device *gart = gart_handle; - int ret; - - if (gart_iova_range_invalid(gart, iova, bytes)) - return -EINVAL; - - spin_lock(&gart->pte_lock); - ret = __gart_iommu_map(gart, iova, (unsigned long)pa); - spin_unlock(&gart->pte_lock); - - return ret; -} - -static inline int __gart_iommu_unmap(struct gart_device *gart, - unsigned long iova) -{ - if (unlikely(gart_debug && !gart_pte_valid(gart, iova))) { - dev_err(gart->dev, "Page entry is invalid\n"); - return -EINVAL; - } - - gart_set_pte(gart, iova, 0); - - return 0; -} - -static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova, - size_t bytes, struct iommu_iotlb_gather *gather) -{ - struct gart_device *gart = gart_handle; - int err; - - if (gart_iova_range_invalid(gart, iova, bytes)) - return 0; - - spin_lock(&gart->pte_lock); - err = __gart_iommu_unmap(gart, iova); - spin_unlock(&gart->pte_lock); - - return err ? 
0 : bytes; -} - -static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain, - dma_addr_t iova) -{ - struct gart_device *gart = gart_handle; - unsigned long pte; - - if (gart_iova_range_invalid(gart, iova, GART_PAGE_SIZE)) - return -EINVAL; - - spin_lock(&gart->pte_lock); - pte = gart_read_pte(gart, iova); - spin_unlock(&gart->pte_lock); - - return pte & GART_PAGE_MASK; -} - -static struct iommu_device *gart_iommu_probe_device(struct device *dev) -{ - if (!dev_iommu_fwspec_get(dev)) - return ERR_PTR(-ENODEV); - - return &gart_handle->iommu; -} - -static int gart_iommu_of_xlate(struct device *dev, - struct of_phandle_args *args) -{ - return 0; -} - -static void gart_iommu_sync_map(struct iommu_domain *domain, unsigned long iova, - size_t size) -{ - FLUSH_GART_REGS(gart_handle); -} - -static void gart_iommu_sync(struct iommu_domain *domain, - struct iommu_iotlb_gather *gather) -{ - size_t length = gather->end - gather->start + 1; - - gart_iommu_sync_map(domain, gather->start, length); -} - -static const struct iommu_ops gart_iommu_ops = { - .domain_alloc = gart_iommu_domain_alloc, - .probe_device = gart_iommu_probe_device, - .device_group = generic_device_group, - .set_platform_dma_ops = gart_iommu_set_platform_dma, - .pgsize_bitmap = GART_IOMMU_PGSIZES, - .of_xlate = gart_iommu_of_xlate, - .default_domain_ops = &(const struct iommu_domain_ops) { - .attach_dev = gart_iommu_attach_dev, - .map = gart_iommu_map, - .unmap = gart_iommu_unmap, - .iova_to_phys = gart_iommu_iova_to_phys, - .iotlb_sync_map = gart_iommu_sync_map, - .iotlb_sync = gart_iommu_sync, - .free = gart_iommu_domain_free, - } -}; - -int tegra_gart_suspend(struct gart_device *gart) -{ - u32 *data = gart->savedata; - unsigned long iova; - - /* - * All GART users shall be suspended at this point. Disable - * address translation to trap all GART accesses as invalid - * memory accesses. 
- */ - writel_relaxed(0, gart->regs + GART_CONFIG); - FLUSH_GART_REGS(gart); - - for_each_gart_pte(gart, iova) - *(data++) = gart_read_pte(gart, iova); - - return 0; -} - -int tegra_gart_resume(struct gart_device *gart) -{ - do_gart_setup(gart, gart->savedata); - - return 0; -} - -struct gart_device *tegra_gart_probe(struct device *dev, struct tegra_mc *mc) -{ - struct gart_device *gart; - struct resource *res; - int err; - - BUILD_BUG_ON(PAGE_SHIFT != GART_PAGE_SHIFT); - - /* the GART memory aperture is required */ - res = platform_get_resource(to_platform_device(dev), IORESOURCE_MEM, 1); - if (!res) { - dev_err(dev, "Memory aperture resource unavailable\n"); - return ERR_PTR(-ENXIO); - } - - gart = kzalloc(sizeof(*gart), GFP_KERNEL); - if (!gart) - return ERR_PTR(-ENOMEM); - - gart_handle = gart; - - gart->dev = dev; - gart->regs = mc->regs + GART_REG_BASE; - gart->iovmm_base = res->start; - gart->iovmm_end = res->end + 1; - spin_lock_init(&gart->pte_lock); - spin_lock_init(&gart->dom_lock); - - do_gart_setup(gart, NULL); - - err = iommu_device_sysfs_add(&gart->iommu, dev, NULL, "gart"); - if (err) - goto free_gart; - - err = iommu_device_register(&gart->iommu, &gart_iommu_ops, dev); - if (err) - goto remove_sysfs; - - gart->savedata = vmalloc(resource_size(res) / GART_PAGE_SIZE * - sizeof(u32)); - if (!gart->savedata) { - err = -ENOMEM; - goto unregister_iommu; - } - - return gart; - -unregister_iommu: - iommu_device_unregister(&gart->iommu); -remove_sysfs: - iommu_device_sysfs_remove(&gart->iommu); -free_gart: - kfree(gart); - - return ERR_PTR(err); -} - -module_param(gart_debug, bool, 0644); -MODULE_PARM_DESC(gart_debug, "Enable GART debugging"); diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c index e445f80d0226..1764a63347b0 100644 --- a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c @@ -272,13 +272,10 @@ static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id) clear_bit(id, smmu->asids); } -static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type) +static struct iommu_domain *tegra_smmu_domain_alloc_paging(struct device *dev) { struct tegra_smmu_as *as; - if (type != IOMMU_DOMAIN_UNMANAGED) - return NULL; - as = kzalloc(sizeof(*as), GFP_KERNEL); if (!as) return NULL; @@ -511,23 +508,39 @@ static int tegra_smmu_attach_dev(struct iommu_domain *domain, return err; } -static void tegra_smmu_set_platform_dma(struct device *dev) +static int tegra_smmu_identity_attach(struct iommu_domain *identity_domain, + struct device *dev) { struct iommu_domain *domain = iommu_get_domain_for_dev(dev); struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); - struct tegra_smmu_as *as = to_smmu_as(domain); - struct tegra_smmu *smmu = as->smmu; + struct tegra_smmu_as *as; + struct tegra_smmu *smmu; unsigned int index; if (!fwspec) - return; + return -ENODEV; + + if (domain == identity_domain || !domain) + return 0; + as = to_smmu_as(domain); + smmu = as->smmu; for (index = 0; index < fwspec->num_ids; index++) { tegra_smmu_disable(smmu, fwspec->ids[index], as->id); tegra_smmu_as_unprepare(smmu, as); } + return 0; } +static struct iommu_domain_ops tegra_smmu_identity_ops = { + .attach_dev = tegra_smmu_identity_attach, +}; + +static struct iommu_domain tegra_smmu_identity_domain = { + .type = IOMMU_DOMAIN_IDENTITY, + .ops = &tegra_smmu_identity_ops, +}; + static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova, u32 value) { @@ -962,11 +975,22 @@ static int tegra_smmu_of_xlate(struct device *dev, return iommu_fwspec_add_ids(dev, 
&id, 1); } +static int tegra_smmu_def_domain_type(struct device *dev) +{ + /* + * FIXME: For now we want to run all translation in IDENTITY mode, due + * to some device quirks. Better would be to just quirk the troubled + * devices. + */ + return IOMMU_DOMAIN_IDENTITY; +} + static const struct iommu_ops tegra_smmu_ops = { - .domain_alloc = tegra_smmu_domain_alloc, + .identity_domain = &tegra_smmu_identity_domain, + .def_domain_type = &tegra_smmu_def_domain_type, + .domain_alloc_paging = tegra_smmu_domain_alloc_paging, .probe_device = tegra_smmu_probe_device, .device_group = tegra_smmu_device_group, - .set_platform_dma_ops = tegra_smmu_set_platform_dma, .of_xlate = tegra_smmu_of_xlate, .pgsize_bitmap = SZ_4K, .default_domain_ops = &(const struct iommu_domain_ops) { diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c index 17dcd826f5c2..bb2e795a80d0 100644 --- a/drivers/iommu/virtio-iommu.c +++ b/drivers/iommu/virtio-iommu.c @@ -969,9 +969,6 @@ static struct iommu_device *viommu_probe_device(struct device *dev) struct viommu_dev *viommu = NULL; struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); - if (!fwspec || fwspec->ops != &viommu_ops) - return ERR_PTR(-ENODEV); - viommu = viommu_get_by_fwnode(fwspec->iommu_fwnode); if (!viommu) return ERR_PTR(-ENODEV); diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c index 67d6e70b4eab..a083921a8968 100644 --- a/drivers/memory/tegra/mc.c +++ b/drivers/memory/tegra/mc.c @@ -979,35 +979,6 @@ static int tegra_mc_probe(struct platform_device *pdev) } } - if (IS_ENABLED(CONFIG_TEGRA_IOMMU_GART) && !mc->soc->smmu) { - mc->gart = tegra_gart_probe(&pdev->dev, mc); - if (IS_ERR(mc->gart)) { - dev_err(&pdev->dev, "failed to probe GART: %ld\n", - PTR_ERR(mc->gart)); - mc->gart = NULL; - } - } - - return 0; -} - -static int __maybe_unused tegra_mc_suspend(struct device *dev) -{ - struct tegra_mc *mc = dev_get_drvdata(dev); - - if (mc->soc->ops && mc->soc->ops->suspend) - return mc->soc->ops->suspend(mc); - - return 0; -} - -static int __maybe_unused tegra_mc_resume(struct device *dev) -{ - struct tegra_mc *mc = dev_get_drvdata(dev); - - if (mc->soc->ops && mc->soc->ops->resume) - return mc->soc->ops->resume(mc); - return 0; } @@ -1020,15 +991,10 @@ static void tegra_mc_sync_state(struct device *dev) icc_sync_state(dev); } -static const struct dev_pm_ops tegra_mc_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(tegra_mc_suspend, tegra_mc_resume) -}; - static struct platform_driver tegra_mc_driver = { .driver = { .name = "tegra-mc", .of_match_table = tegra_mc_of_match, - .pm = &tegra_mc_pm_ops, .suppress_bind_attrs = true, .sync_state = tegra_mc_sync_state, }, diff --git a/drivers/memory/tegra/tegra20.c b/drivers/memory/tegra/tegra20.c index 544bfd216a22..aa4b97d5e732 100644 --- a/drivers/memory/tegra/tegra20.c +++ b/drivers/memory/tegra/tegra20.c @@ -688,32 +688,6 @@ static int tegra20_mc_probe(struct tegra_mc *mc) return 0; } -static int tegra20_mc_suspend(struct tegra_mc *mc) -{ - int err; - - if (IS_ENABLED(CONFIG_TEGRA_IOMMU_GART) && mc->gart) { - err = tegra_gart_suspend(mc->gart); - if (err < 0) - return err; - } - - return 0; -} - -static int tegra20_mc_resume(struct tegra_mc *mc) -{ - int err; - - if (IS_ENABLED(CONFIG_TEGRA_IOMMU_GART) && mc->gart) { - err = tegra_gart_resume(mc->gart); - if (err < 0) - return err; - } - - return 0; -} - static irqreturn_t tegra20_mc_handle_irq(int irq, void *data) { struct tegra_mc *mc = data; @@ -789,8 +763,6 @@ static irqreturn_t tegra20_mc_handle_irq(int irq, void *data) static const 
struct tegra_mc_ops tegra20_mc_ops = { .probe = tegra20_mc_probe, - .suspend = tegra20_mc_suspend, - .resume = tegra20_mc_resume, .handle_irq = tegra20_mc_handle_irq, }; diff --git a/drivers/vfio/Makefile b/drivers/vfio/Makefile index c82ea032d352..68c05705200f 100644 --- a/drivers/vfio/Makefile +++ b/drivers/vfio/Makefile @@ -1,8 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_VFIO) += vfio.o -vfio-y += vfio_main.o \ - iova_bitmap.o +vfio-y += vfio_main.o vfio-$(CONFIG_VFIO_DEVICE_CDEV) += device_cdev.o vfio-$(CONFIG_VFIO_GROUP) += group.o vfio-$(CONFIG_IOMMUFD) += iommufd.o diff --git a/drivers/vfio/pci/mlx5/Kconfig b/drivers/vfio/pci/mlx5/Kconfig index 7088edc4fb28..c3ced56b7787 100644 --- a/drivers/vfio/pci/mlx5/Kconfig +++ b/drivers/vfio/pci/mlx5/Kconfig @@ -3,6 +3,7 @@ config MLX5_VFIO_PCI tristate "VFIO support for MLX5 PCI devices" depends on MLX5_CORE select VFIO_PCI_CORE + select IOMMUFD_DRIVER help This provides migration support for MLX5 devices using the VFIO framework. diff --git a/drivers/vfio/pci/mlx5/main.c b/drivers/vfio/pci/mlx5/main.c index 42ec574a8622..5cf2b491d15a 100644 --- a/drivers/vfio/pci/mlx5/main.c +++ b/drivers/vfio/pci/mlx5/main.c @@ -1376,6 +1376,7 @@ static struct pci_driver mlx5vf_pci_driver = { module_pci_driver(mlx5vf_pci_driver); +MODULE_IMPORT_NS(IOMMUFD); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Max Gurtovoy "); MODULE_AUTHOR("Yishai Hadas "); diff --git a/drivers/vfio/pci/pds/Kconfig b/drivers/vfio/pci/pds/Kconfig index 6eceef7b028a..fec9b167c7b9 100644 --- a/drivers/vfio/pci/pds/Kconfig +++ b/drivers/vfio/pci/pds/Kconfig @@ -5,6 +5,7 @@ config PDS_VFIO_PCI tristate "VFIO support for PDS PCI devices" depends on PDS_CORE && PCI_IOV select VFIO_PCI_CORE + select IOMMUFD_DRIVER help This provides generic PCI support for PDS devices using the VFIO framework. 
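The Makefile, Kconfig and MODULE_IMPORT_NS() hunks in this area move the iova_bitmap helpers out of vfio and into the iommufd module, exported under the IOMMUFD symbol namespace. A driver that keeps calling them therefore needs both "select IOMMUFD_DRIVER" in Kconfig and a namespace import in the module, as the mlx5, pds and vfio hunks here do. Below is a minimal sketch of what that looks like for a hypothetical variant driver; "my_vfio" and the callback are invented names, and only the namespace plumbing mirrors this patch.

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical "my_vfio" variant driver; its Kconfig entry would add
 * "select IOMMUFD_DRIVER" next to "select VFIO_PCI_CORE". */
#include <linux/module.h>
#include <linux/iova_bitmap.h>

/* Record a dirty IOVA range during migration.  iova_bitmap_set() is now
 * provided by iommufd (or by a stub when IOMMUFD_DRIVER is disabled). */
static void __maybe_unused my_vfio_mark_dirty(struct iova_bitmap *bitmap,
					      unsigned long iova, size_t length)
{
	iova_bitmap_set(bitmap, iova, length);
}

/* Without this import, modpost errors out on the namespaced iommufd symbols. */
MODULE_IMPORT_NS(IOMMUFD);
MODULE_LICENSE("GPL");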
diff --git a/drivers/vfio/pci/pds/pci_drv.c b/drivers/vfio/pci/pds/pci_drv.c index caffa1a2cf59..a34dda516629 100644 --- a/drivers/vfio/pci/pds/pci_drv.c +++ b/drivers/vfio/pci/pds/pci_drv.c @@ -204,6 +204,7 @@ static struct pci_driver pds_vfio_pci_driver = { module_pci_driver(pds_vfio_pci_driver); +MODULE_IMPORT_NS(IOMMUFD); MODULE_DESCRIPTION(PDS_VFIO_DRV_DESCRIPTION); MODULE_AUTHOR("Brett Creeley "); MODULE_LICENSE("GPL"); diff --git a/drivers/vfio/vfio_main.c b/drivers/vfio/vfio_main.c index edb631e5e7ec..b78dde0805b4 100644 --- a/drivers/vfio/vfio_main.c +++ b/drivers/vfio/vfio_main.c @@ -1694,6 +1694,7 @@ static void __exit vfio_cleanup(void) module_init(vfio_init); module_exit(vfio_cleanup); +MODULE_IMPORT_NS(IOMMUFD); MODULE_VERSION(DRIVER_VERSION); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR(DRIVER_AUTHOR); diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index d9c20ae23b63..a117d8274667 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -624,6 +624,8 @@ struct acpi_pci_root { /* helper */ +struct iommu_ops; + bool acpi_dma_supported(const struct acpi_device *adev); enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev); int acpi_iommu_fwspec_init(struct device *dev, u32 id, diff --git a/include/linux/device.h b/include/linux/device.h index 3627b26b243e..72bae73b1edd 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -42,7 +42,6 @@ struct class; struct subsys_private; struct device_node; struct fwnode_handle; -struct iommu_ops; struct iommu_group; struct dev_pin_info; struct dev_iommu; diff --git a/include/linux/device/bus.h b/include/linux/device/bus.h index ae10c4322754..e25aab08f873 100644 --- a/include/linux/device/bus.h +++ b/include/linux/device/bus.h @@ -62,9 +62,6 @@ struct fwnode_handle; * this bus. * @pm: Power management operations of this bus, callback the specific * device driver's pm-ops. - * @iommu_ops: IOMMU specific operations for this bus, used to attach IOMMU - * driver implementations to a bus and allow the driver to do - * bus-specific setup * @need_parent_lock: When probing or removing a device on this bus, the * device core should lock the device's parent. 
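With @iommu_ops gone from struct bus_type in the hunk around here, ops are supplied per IOMMU instance through iommu_device_register(), and several drivers converted in this series (spapr, tegra-smmu) publish statically allocated identity/blocked/platform domains straight from struct iommu_ops instead of allocating them in domain_alloc(). The sketch below condenses that pattern with invented "my_iommu" names; only the structure follows the conversions in this patch.

#include <linux/iommu.h>

/* Statically allocated identity domain, attachable at any time. */
static int my_iommu_identity_attach(struct iommu_domain *identity_domain,
				    struct device *dev)
{
	/* Program the hardware to bypass translation for @dev. */
	return 0;
}

static struct iommu_domain_ops my_iommu_identity_ops = {
	.attach_dev = my_iommu_identity_attach,
};

static struct iommu_domain my_iommu_identity_domain = {
	.type = IOMMU_DOMAIN_IDENTITY,
	.ops = &my_iommu_identity_ops,
};

static const struct iommu_ops my_iommu_ops = {
	.identity_domain = &my_iommu_identity_domain,
	/* .domain_alloc_paging, .probe_device, .device_group, ... elided */
};

/* Ops are tied to the instance at registration time, not to a bus. */
static int my_iommu_probe_one(struct iommu_device *iommu, struct device *dev)
{
	return iommu_device_register(iommu, &my_iommu_ops, dev);
}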
* @@ -104,8 +101,6 @@ struct bus_type { const struct dev_pm_ops *pm; - const struct iommu_ops *iommu_ops; - bool need_parent_lock; }; diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h index f2fc203fb8a1..a52e508d1869 100644 --- a/include/linux/dma-map-ops.h +++ b/include/linux/dma-map-ops.h @@ -11,6 +11,7 @@ #include struct cma; +struct iommu_ops; /* * Values for struct dma_map_ops.flags: diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h index 1b7a44b35616..25142a0e2fc2 100644 --- a/include/linux/io-pgtable.h +++ b/include/linux/io-pgtable.h @@ -166,6 +166,10 @@ struct io_pgtable_ops { struct iommu_iotlb_gather *gather); phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops, unsigned long iova); + int (*read_and_clear_dirty)(struct io_pgtable_ops *ops, + unsigned long iova, size_t size, + unsigned long flags, + struct iommu_dirty_bitmap *dirty); }; /** diff --git a/include/linux/iommu.h b/include/linux/iommu.h index b6ef263e85c0..0379be56dc91 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -13,7 +13,7 @@ #include #include #include -#include +#include #define IOMMU_READ (1 << 0) #define IOMMU_WRITE (1 << 1) @@ -37,10 +37,117 @@ struct bus_type; struct device; struct iommu_domain; struct iommu_domain_ops; +struct iommu_dirty_ops; struct notifier_block; struct iommu_sva; -struct iommu_fault_event; struct iommu_dma_cookie; +struct iommu_fault_param; + +#define IOMMU_FAULT_PERM_READ (1 << 0) /* read */ +#define IOMMU_FAULT_PERM_WRITE (1 << 1) /* write */ +#define IOMMU_FAULT_PERM_EXEC (1 << 2) /* exec */ +#define IOMMU_FAULT_PERM_PRIV (1 << 3) /* privileged */ + +/* Generic fault types, can be expanded IRQ remapping fault */ +enum iommu_fault_type { + IOMMU_FAULT_PAGE_REQ = 1, /* page request fault */ +}; + +/** + * struct iommu_fault_page_request - Page Request data + * @flags: encodes whether the corresponding fields are valid and whether this + * is the last page in group (IOMMU_FAULT_PAGE_REQUEST_* values). + * When IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID is set, the page response + * must have the same PASID value as the page request. When it is clear, + * the page response should not have a PASID. + * @pasid: Process Address Space ID + * @grpid: Page Request Group Index + * @perm: requested page permissions (IOMMU_FAULT_PERM_* values) + * @addr: page address + * @private_data: device-specific private information + */ +struct iommu_fault_page_request { +#define IOMMU_FAULT_PAGE_REQUEST_PASID_VALID (1 << 0) +#define IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE (1 << 1) +#define IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA (1 << 2) +#define IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID (1 << 3) + u32 flags; + u32 pasid; + u32 grpid; + u32 perm; + u64 addr; + u64 private_data[2]; +}; + +/** + * struct iommu_fault - Generic fault data + * @type: fault type from &enum iommu_fault_type + * @prm: Page Request message, when @type is %IOMMU_FAULT_PAGE_REQ + */ +struct iommu_fault { + u32 type; + struct iommu_fault_page_request prm; +}; + +/** + * enum iommu_page_response_code - Return status of fault handlers + * @IOMMU_PAGE_RESP_SUCCESS: Fault has been handled and the page tables + * populated, retry the access. This is "Success" in PCI PRI. + * @IOMMU_PAGE_RESP_FAILURE: General error. Drop all subsequent faults from + * this device if possible. This is "Response Failure" in PCI PRI. + * @IOMMU_PAGE_RESP_INVALID: Could not handle this fault, don't retry the + * access. This is "Invalid Request" in PCI PRI. 
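struct iommu_fault and struct iommu_fault_page_request are now kernel-internal, so an IOMMU driver reports a hardware page request by filling them in and handing them to iommu_report_device_fault(), declared further down under CONFIG_IOMMU_IOPF (the iopf_fault wrapper is defined just below). A hedged sketch of the producer side; the my_* name and the hardware-derived parameters are invented, while the flag and field names are the ones added here.

#include <linux/device.h>
#include <linux/iommu.h>

/* Called from the driver's PRI/event-queue interrupt path.  @last marks the
 * final request of a Page Request Group. */
static void my_iommu_report_prq(struct device *dev, u64 addr, u32 pasid,
				u32 grpid, bool last)
{
	struct iopf_fault evt = {
		.fault = {
			.type = IOMMU_FAULT_PAGE_REQ,
			.prm = {
				.flags = IOMMU_FAULT_PAGE_REQUEST_PASID_VALID |
					 (last ? IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE : 0),
				.pasid = pasid,
				.grpid = grpid,
				.perm = IOMMU_FAULT_PERM_READ,
				.addr = addr,
			},
		},
	};

	/* Queues the fault on the device's iommu_fault_param and, once the
	 * last request of the group arrives, dispatches an iopf_group. */
	if (iommu_report_device_fault(dev, &evt))
		dev_err(dev, "failed to report page request\n");
}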
+ */ +enum iommu_page_response_code { + IOMMU_PAGE_RESP_SUCCESS = 0, + IOMMU_PAGE_RESP_INVALID, + IOMMU_PAGE_RESP_FAILURE, +}; + +/** + * struct iommu_page_response - Generic page response information + * @pasid: Process Address Space ID + * @grpid: Page Request Group Index + * @code: response code from &enum iommu_page_response_code + */ +struct iommu_page_response { + u32 pasid; + u32 grpid; + u32 code; +}; + +struct iopf_fault { + struct iommu_fault fault; + /* node for pending lists */ + struct list_head list; +}; + +struct iopf_group { + struct iopf_fault last_fault; + struct list_head faults; + size_t fault_count; + /* list node for iommu_fault_param::faults */ + struct list_head pending_node; + struct work_struct work; + struct iommu_attach_handle *attach_handle; + /* The device's fault data parameter. */ + struct iommu_fault_param *fault_param; + /* Used by handler provider to hook the group on its own lists. */ + struct list_head node; + u32 cookie; +}; + +/** + * struct iopf_queue - IO Page Fault queue + * @wq: the fault workqueue + * @devices: devices attached to this queue + * @lock: protects the device list + */ +struct iopf_queue { + struct workqueue_struct *wq; + struct list_head devices; + struct mutex lock; +}; /* iommu fault flags */ #define IOMMU_FAULT_READ 0x0 @@ -48,7 +155,6 @@ struct iommu_dma_cookie; typedef int (*iommu_fault_handler_t)(struct iommu_domain *, struct device *, unsigned long, int, void *); -typedef int (*iommu_dev_fault_handler_t)(struct iommu_fault *, void *); struct iommu_domain_geometry { dma_addr_t aperture_start; /* First address that can be mapped */ @@ -64,6 +170,10 @@ struct iommu_domain_geometry { #define __IOMMU_DOMAIN_DMA_FQ (1U << 3) /* DMA-API uses flush queue */ #define __IOMMU_DOMAIN_SVA (1U << 4) /* Shared process address space */ +#define __IOMMU_DOMAIN_PLATFORM (1U << 5) + +#define __IOMMU_DOMAIN_NESTED (1U << 6) /* User-managed address space nested + on a stage-2 translation */ #define IOMMU_DOMAIN_ALLOC_FLAGS ~__IOMMU_DOMAIN_DMA_FQ /* @@ -81,6 +191,8 @@ struct iommu_domain_geometry { * invalidation. * IOMMU_DOMAIN_SVA - DMA addresses are shared process addresses * represented by mm_struct's. + * IOMMU_DOMAIN_PLATFORM - Legacy domain for drivers that do their own + * dma_api stuff. Do not use in new drivers. */ #define IOMMU_DOMAIN_BLOCKED (0U) #define IOMMU_DOMAIN_IDENTITY (__IOMMU_DOMAIN_PT) @@ -91,15 +203,18 @@ struct iommu_domain_geometry { __IOMMU_DOMAIN_DMA_API | \ __IOMMU_DOMAIN_DMA_FQ) #define IOMMU_DOMAIN_SVA (__IOMMU_DOMAIN_SVA) +#define IOMMU_DOMAIN_PLATFORM (__IOMMU_DOMAIN_PLATFORM) +#define IOMMU_DOMAIN_NESTED (__IOMMU_DOMAIN_NESTED) struct iommu_domain { unsigned type; const struct iommu_domain_ops *ops; + const struct iommu_dirty_ops *dirty_ops; + const struct iommu_ops *owner; /* Whose domain_alloc we came from */ unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */ struct iommu_domain_geometry geometry; struct iommu_dma_cookie *iova_cookie; - enum iommu_page_response_code (*iopf_handler)(struct iommu_fault *fault, - void *data); + int (*iopf_handler)(struct iopf_group *group); void *fault_data; union { struct { @@ -109,6 +224,11 @@ struct iommu_domain { struct { /* IOMMU_DOMAIN_SVA */ struct mm_struct *mm; int users; + /* + * Next iommu_domain in mm->iommu_mm->sva-domains list + * protected by iommu_sva_lock. + */ + struct list_head next; }; }; }; @@ -133,6 +253,7 @@ enum iommu_cap { * usefully support the non-strict DMA flush queue. 
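On the consumer side, a domain that opts into recoverable faults receives them as an iopf_group through the reworked domain->iopf_handler shown above. The sketch below handles a group synchronously and acknowledges it with the helpers declared later under CONFIG_IOMMU_IOPF; the real SVA path instead defers this work to group->work on the iopf_queue workqueue. resolve_prm() is an invented stand-in for whatever actually services the fault.

#include <linux/iommu.h>
#include <linux/list.h>

/* Placeholder: fault in the page described by @prm, return true on success. */
static bool resolve_prm(struct iommu_fault_page_request *prm)
{
	return true;
}

static int my_domain_iopf_handler(struct iopf_group *group)
{
	enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS;
	struct iopf_fault *iopf;

	list_for_each_entry(iopf, &group->faults, list) {
		/* Each entry carries one struct iommu_fault_page_request. */
		if (!resolve_prm(&iopf->fault.prm)) {
			status = IOMMU_PAGE_RESP_INVALID;
			break;
		}
	}

	/* Send the PRI response for the whole group, then release it. */
	iopf_group_response(group, status);
	iopf_free_group(group);
	return 0;
}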
*/ IOMMU_CAP_DEFERRED_FLUSH, + IOMMU_CAP_DIRTY_TRACKING, /* IOMMU supports dirty tracking */ }; /* These are the possible reserved region types */ @@ -227,6 +348,90 @@ struct iommu_iotlb_gather { bool queued; }; +/** + * struct iommu_dirty_bitmap - Dirty IOVA bitmap state + * @bitmap: IOVA bitmap + * @gather: Range information for a pending IOTLB flush + */ +struct iommu_dirty_bitmap { + struct iova_bitmap *bitmap; + struct iommu_iotlb_gather *gather; +}; + +/* Read but do not clear any dirty bits */ +#define IOMMU_DIRTY_NO_CLEAR (1 << 0) + +/** + * struct iommu_dirty_ops - domain specific dirty tracking operations + * @set_dirty_tracking: Enable or Disable dirty tracking on the iommu domain + * @read_and_clear_dirty: Walk IOMMU page tables for dirtied PTEs marshalled + * into a bitmap, with a bit represented as a page. + * Reads the dirty PTE bits and clears it from IO + * pagetables. + */ +struct iommu_dirty_ops { + int (*set_dirty_tracking)(struct iommu_domain *domain, bool enabled); + int (*read_and_clear_dirty)(struct iommu_domain *domain, + unsigned long iova, size_t size, + unsigned long flags, + struct iommu_dirty_bitmap *dirty); +}; + +/** + * struct iommu_user_data - iommu driver specific user space data info + * @type: The data type of the user buffer + * @uptr: Pointer to the user buffer for copy_from_user() + * @len: The length of the user buffer in bytes + * + * A user space data is an uAPI that is defined in include/uapi/linux/iommufd.h + * @type, @uptr and @len should be just copied from an iommufd core uAPI struct. + */ +struct iommu_user_data { + unsigned int type; + void __user *uptr; + size_t len; +}; + +/** + * __iommu_copy_struct_from_user - Copy iommu driver specific user space data + * @dst_data: Pointer to an iommu driver specific user data that is defined in + * include/uapi/linux/iommufd.h + * @src_data: Pointer to a struct iommu_user_data for user space data info + * @data_type: The data type of the @dst_data. Must match with @src_data.type + * @data_len: Length of current user data structure, i.e. sizeof(struct _dst) + * @min_len: Initial length of user data structure for backward compatibility. + * This should be offsetofend using the last member in the user data + * struct that was initially added to include/uapi/linux/iommufd.h + */ +static inline int __iommu_copy_struct_from_user( + void *dst_data, const struct iommu_user_data *src_data, + unsigned int data_type, size_t data_len, size_t min_len) +{ + if (WARN_ON(!dst_data || !src_data)) + return -EINVAL; + if (src_data->type != data_type) + return -EINVAL; + if (src_data->len < min_len || data_len < src_data->len) + return -EINVAL; + return copy_struct_from_user(dst_data, data_len, src_data->uptr, + src_data->len); +} + +/** + * iommu_copy_struct_from_user - Copy iommu driver specific user space data + * @kdst: Pointer to an iommu driver specific user data that is defined in + * include/uapi/linux/iommufd.h + * @user_data: Pointer to a struct iommu_user_data for user space data info + * @data_type: The data type of the @kdst. Must match with @user_data->type + * @min_last: The last member of the data structure @kdst points in the initial + * version. + * Return 0 for success, otherwise -error. 
+ */ +#define iommu_copy_struct_from_user(kdst, user_data, data_type, min_last) \ + __iommu_copy_struct_from_user(kdst, user_data, data_type, \ + sizeof(*kdst), \ + offsetofend(typeof(*kdst), min_last)) + /** * struct iommu_ops - iommu ops and capabilities * @capable: check capability @@ -234,7 +439,22 @@ struct iommu_iotlb_gather { * op is allocated in the iommu driver and freed by the caller after * use. The information type is one of enum iommu_hw_info_type defined * in include/uapi/linux/iommufd.h. - * @domain_alloc: allocate iommu domain + * @domain_alloc: allocate and return an iommu domain if success. Otherwise + * NULL is returned. The domain is not fully initialized until + * the caller iommu_domain_alloc() returns. + * @domain_alloc_paging: Allocate an iommu_domain that can be used for + * UNMANAGED, DMA, and DMA_FQ domain types. + * @domain_alloc_sva: Allocate an iommu_domain for Shared Virtual Addressing. + * @domain_alloc_user: Allocate an iommu domain corresponding to the input + * parameters as defined in include/uapi/linux/iommufd.h. + * Unlike @domain_alloc, it is called only by IOMMUFD and + * must fully initialize the new domain before return. + * Upon success, if the @user_data is valid and the @parent + * points to a kernel-managed domain, the new domain must be + * IOMMU_DOMAIN_NESTED type; otherwise, the @parent must be + * NULL while the @user_data can be optionally provided, the + * new domain must support __IOMMU_DOMAIN_PAGING. + * Upon failure, ERR_PTR must be returned. * @probe_device: Add device to iommu driver handling * @release_device: Remove device from iommu driver handling * @probe_finalize: Do final setup work after the device is added to an IOMMU @@ -260,6 +480,17 @@ struct iommu_iotlb_gather { * will be blocked by the hardware. * @pgsize_bitmap: bitmap of all possible supported page sizes * @owner: Driver module providing these ops + * @identity_domain: An always available, always attachable identity + * translation. + * @blocked_domain: An always available, always attachable blocking + * translation. + * @default_domain: If not NULL this will always be set as the default domain. + * This should be an IDENTITY/BLOCKED/PLATFORM domain. + * Do not use in new drivers. + * @user_pasid_table: IOMMU driver supports user-managed PASID table. There is + * no user domain for each PASID and the I/O page faults are + * forwarded through the user domain attached to the device + * RID. 
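The iommu_copy_struct_from_user() helper above is what a driver's new domain_alloc_user op is expected to use to pull in its vendor-specific data while staying compatible with older, shorter userspace structs. A hedged sketch follows; struct iommu_hwpt_mydrv, IOMMU_HWPT_DATA_MYDRV and the driver name are invented (real drivers define theirs in include/uapi/linux/iommufd.h), and only the nested-allocation branch is shown.

#include <linux/iommu.h>
#include <linux/err.h>

/* Invented vendor uAPI: the user/guest page-table description. */
struct iommu_hwpt_mydrv {
	__aligned_u64 pgtbl_addr;
	__u32 flags;
	__u32 __reserved;
};
#define IOMMU_HWPT_DATA_MYDRV 100	/* invented data_type value */

static struct iommu_domain *
mydrv_domain_alloc_user(struct device *dev, u32 flags,
			struct iommu_domain *parent,
			const struct iommu_user_data *user_data)
{
	struct iommu_hwpt_mydrv arg = {};
	int ret;

	if (!parent || !user_data)
		return ERR_PTR(-EOPNOTSUPP);	/* sketch: nested only */

	/* min_last = __reserved, the last member of the v1 uAPI struct. */
	ret = iommu_copy_struct_from_user(&arg, user_data,
					  IOMMU_HWPT_DATA_MYDRV, __reserved);
	if (ret)
		return ERR_PTR(ret);

	/* Allocate an IOMMU_DOMAIN_NESTED domain, fully initialize it from
	 * @arg and @parent, and return it; elided in this sketch. */
	return ERR_PTR(-EOPNOTSUPP);
}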
*/ struct iommu_ops { bool (*capable)(struct device *dev, enum iommu_cap); @@ -267,6 +498,12 @@ struct iommu_ops { /* Domain allocation and freeing by the iommu driver */ struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type); + struct iommu_domain *(*domain_alloc_user)( + struct device *dev, u32 flags, struct iommu_domain *parent, + const struct iommu_user_data *user_data); + struct iommu_domain *(*domain_alloc_paging)(struct device *dev); + struct iommu_domain *(*domain_alloc_sva)(struct device *dev, + struct mm_struct *mm); struct iommu_device *(*probe_device)(struct device *dev); void (*release_device)(struct device *dev); @@ -284,16 +521,20 @@ struct iommu_ops { int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f); int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f); - int (*page_response)(struct device *dev, - struct iommu_fault_event *evt, - struct iommu_page_response *msg); + void (*page_response)(struct device *dev, struct iopf_fault *evt, + struct iommu_page_response *msg); int (*def_domain_type)(struct device *dev); - void (*remove_dev_pasid)(struct device *dev, ioasid_t pasid); + void (*remove_dev_pasid)(struct device *dev, ioasid_t pasid, + struct iommu_domain *domain); const struct iommu_domain_ops *default_domain_ops; unsigned long pgsize_bitmap; struct module *owner; + struct iommu_domain *identity_domain; + struct iommu_domain *blocked_domain; + struct iommu_domain *default_domain; + u8 user_pasid_table:1; }; /** @@ -378,39 +619,35 @@ struct iommu_device { u32 max_pasids; }; -/** - * struct iommu_fault_event - Generic fault event - * - * Can represent recoverable faults such as a page requests or - * unrecoverable faults such as DMA or IRQ remapping faults. - * - * @fault: fault descriptor - * @list: pending fault event list, used for tracking responses - */ -struct iommu_fault_event { - struct iommu_fault fault; - struct list_head list; -}; - /** * struct iommu_fault_param - per-device IOMMU fault data - * @handler: Callback function to handle IOMMU faults at device level - * @data: handler private data - * @faults: holds the pending faults which needs response * @lock: protect pending faults list + * @users: user counter to manage the lifetime of the data + * @rcu: rcu head for kfree_rcu() + * @dev: the device that owns this param + * @queue: IOPF queue + * @queue_list: index into queue->devices + * @partial: faults that are part of a Page Request Group for which the last + * request hasn't been submitted yet. 
+ * @faults: holds the pending faults which need response */ struct iommu_fault_param { - iommu_dev_fault_handler_t handler; - void *data; - struct list_head faults; struct mutex lock; + refcount_t users; + struct rcu_head rcu; + + struct device *dev; + struct iopf_queue *queue; + struct list_head queue_list; + + struct list_head partial; + struct list_head faults; }; /** * struct dev_iommu - Collection of per-device IOMMU data * * @fault_param: IOMMU detected device fault reporting data - * @iopf_param: I/O Page Fault queue and data * @fwspec: IOMMU fwspec data * @iommu_dev: IOMMU device this device is linked to * @priv: IOMMU Driver private data @@ -424,8 +661,7 @@ struct iommu_fault_param { */ struct dev_iommu { struct mutex lock; - struct iommu_fault_param *fault_param; - struct iopf_device_param *iopf_param; + struct iommu_fault_param __rcu *fault_param; struct iommu_fwspec *fwspec; struct iommu_device *iommu_dev; void *priv; @@ -518,16 +754,6 @@ extern int iommu_group_for_each_dev(struct iommu_group *group, void *data, extern struct iommu_group *iommu_group_get(struct device *dev); extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group); extern void iommu_group_put(struct iommu_group *group); -extern int iommu_register_device_fault_handler(struct device *dev, - iommu_dev_fault_handler_t handler, - void *data); - -extern int iommu_unregister_device_fault_handler(struct device *dev); - -extern int iommu_report_device_fault(struct device *dev, - struct iommu_fault_event *evt); -extern int iommu_page_response(struct device *dev, - struct iommu_page_response *msg); extern int iommu_group_id(struct iommu_group *group); extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *); @@ -632,6 +858,28 @@ static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather) return gather && gather->queued; } +static inline void iommu_dirty_bitmap_init(struct iommu_dirty_bitmap *dirty, + struct iova_bitmap *bitmap, + struct iommu_iotlb_gather *gather) +{ + if (gather) + iommu_iotlb_gather_init(gather); + + dirty->bitmap = bitmap; + dirty->gather = gather; +} + +static inline void iommu_dirty_bitmap_record(struct iommu_dirty_bitmap *dirty, + unsigned long iova, + unsigned long length) +{ + if (dirty->bitmap) + iova_bitmap_set(dirty->bitmap, iova, length); + + if (dirty->gather) + iommu_iotlb_gather_add_range(dirty->gather, iova, length); +} + /* PCI device grouping function */ extern struct iommu_group *pci_device_group(struct device *dev); /* Generic device grouping function */ @@ -662,12 +910,28 @@ struct iommu_fwspec { /* ATS is supported */ #define IOMMU_FWSPEC_PCI_RC_ATS (1 << 0) +/* + * An iommu attach handle represents a relationship between an iommu domain + * and a PASID or RID of a device. It is allocated and managed by the component + * that manages the domain and is stored in the iommu group during the time the + * domain is attached. 
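Tying the dirty-tracking additions together: a driver's iommu_dirty_ops read_and_clear_dirty() op walks its page tables and reports dirtied ranges through the iommu_dirty_bitmap_record() helper above, which sets the bits in the iova_bitmap and accumulates an IOTLB flush range in the gather. A condensed sketch with an invented my_* page-table accessor; only the helper calls and the IOMMU_DIRTY_NO_CLEAR handling follow this patch. A driver would point domain->dirty_ops at this table when it allocates a domain with dirty tracking enforced.

#include <linux/iommu.h>
#include <linux/sizes.h>

/* Placeholder PTE accessor: test (and optionally clear) the hardware dirty
 * bit for one 4K IOVA.  A real driver reads its own io-pgtable here. */
static bool my_pte_test_dirty(struct iommu_domain *domain, unsigned long iova,
			      bool clear)
{
	return false;
}

static int my_read_and_clear_dirty(struct iommu_domain *domain,
				   unsigned long iova, size_t size,
				   unsigned long flags,
				   struct iommu_dirty_bitmap *dirty)
{
	bool clear = !(flags & IOMMU_DIRTY_NO_CLEAR);
	unsigned long end = iova + size;

	for (; iova < end; iova += SZ_4K)
		if (my_pte_test_dirty(domain, iova, clear))
			iommu_dirty_bitmap_record(dirty, iova, SZ_4K);

	return 0;
}

static const struct iommu_dirty_ops my_dirty_ops = {
	.read_and_clear_dirty	= my_read_and_clear_dirty,
	/* .set_dirty_tracking elided */
};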
+ */ +struct iommu_attach_handle { + struct iommu_domain *domain; +}; + /** * struct iommu_sva - handle to a device-mm bond */ struct iommu_sva { + struct iommu_attach_handle handle; struct device *dev; - struct iommu_domain *domain; + refcount_t users; +}; + +struct iommu_mm_data { + u32 pasid; + struct list_head sva_domains; }; int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode, @@ -719,15 +983,11 @@ bool iommu_group_dma_owner_claimed(struct iommu_group *group); int iommu_device_claim_dma_owner(struct device *dev, void *owner); void iommu_device_release_dma_owner(struct device *dev); -struct iommu_domain *iommu_sva_domain_alloc(struct device *dev, - struct mm_struct *mm); int iommu_attach_device_pasid(struct iommu_domain *domain, - struct device *dev, ioasid_t pasid); + struct device *dev, ioasid_t pasid, + struct iommu_attach_handle *handle); void iommu_detach_device_pasid(struct iommu_domain *domain, struct device *dev, ioasid_t pasid); -struct iommu_domain * -iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid, - unsigned int type); ioasid_t iommu_alloc_global_pasid(struct device *dev); void iommu_free_global_pasid(ioasid_t pasid); #else /* CONFIG_IOMMU_API */ @@ -738,6 +998,8 @@ struct iommu_fwspec {}; struct iommu_device {}; struct iommu_fault_param {}; struct iommu_iotlb_gather {}; +struct iommu_dirty_bitmap {}; +struct iommu_dirty_ops {}; static inline bool iommu_present(const struct bus_type *bus) { @@ -907,31 +1169,6 @@ static inline void iommu_group_put(struct iommu_group *group) { } -static inline -int iommu_register_device_fault_handler(struct device *dev, - iommu_dev_fault_handler_t handler, - void *data) -{ - return -ENODEV; -} - -static inline int iommu_unregister_device_fault_handler(struct device *dev) -{ - return 0; -} - -static inline -int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt) -{ - return -ENODEV; -} - -static inline int iommu_page_response(struct device *dev, - struct iommu_page_response *msg) -{ - return -ENODEV; -} - static inline int iommu_group_id(struct iommu_group *group) { return -ENODEV; @@ -970,6 +1207,18 @@ static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather) return false; } +static inline void iommu_dirty_bitmap_init(struct iommu_dirty_bitmap *dirty, + struct iova_bitmap *bitmap, + struct iommu_iotlb_gather *gather) +{ +} + +static inline void iommu_dirty_bitmap_record(struct iommu_dirty_bitmap *dirty, + unsigned long iova, + unsigned long length) +{ +} + static inline void iommu_device_unregister(struct iommu_device *iommu) { } @@ -1068,14 +1317,9 @@ static inline int iommu_device_claim_dma_owner(struct device *dev, void *owner) return -ENODEV; } -static inline struct iommu_domain * -iommu_sva_domain_alloc(struct device *dev, struct mm_struct *mm) -{ - return NULL; -} - static inline int iommu_attach_device_pasid(struct iommu_domain *domain, - struct device *dev, ioasid_t pasid) + struct device *dev, ioasid_t pasid, + struct iommu_attach_handle *handle) { return -ENODEV; } @@ -1085,13 +1329,6 @@ static inline void iommu_detach_device_pasid(struct iommu_domain *domain, { } -static inline struct iommu_domain * -iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid, - unsigned int type) -{ - return NULL; -} - static inline ioasid_t iommu_alloc_global_pasid(struct device *dev) { return IOMMU_PASID_INVALID; @@ -1100,6 +1337,14 @@ static inline ioasid_t iommu_alloc_global_pasid(struct device *dev) static inline void 
iommu_free_global_pasid(ioasid_t pasid) {} #endif /* CONFIG_IOMMU_API */ +#if IS_ENABLED(CONFIG_LOCKDEP) && IS_ENABLED(CONFIG_IOMMU_API) +void iommu_group_mutex_assert(struct device *dev); +#else +static inline void iommu_group_mutex_assert(struct device *dev) +{ +} +#endif + /** * iommu_map_sgtable - Map the given buffer to the IOMMU domain * @domain: The IOMMU domain to perform the mapping @@ -1181,15 +1426,33 @@ static inline bool tegra_dev_iommu_get_stream_id(struct device *dev, u32 *stream return false; } -#ifdef CONFIG_IOMMU_SVA +#ifdef CONFIG_IOMMU_MM_DATA static inline void mm_pasid_init(struct mm_struct *mm) { - mm->pasid = IOMMU_PASID_INVALID; + /* + * During dup_mm(), a new mm will be memcpy'd from an old one and that makes + * the new mm and the old one point to a same iommu_mm instance. When either + * one of the two mms gets released, the iommu_mm instance is freed, leaving + * the other mm running into a use-after-free/double-free problem. To avoid + * the problem, zeroing the iommu_mm pointer of a new mm is needed here. + */ + mm->iommu_mm = NULL; } + static inline bool mm_valid_pasid(struct mm_struct *mm) { - return mm->pasid != IOMMU_PASID_INVALID; + return READ_ONCE(mm->iommu_mm); +} + +static inline u32 mm_get_enqcmd_pasid(struct mm_struct *mm) +{ + struct iommu_mm_data *iommu_mm = READ_ONCE(mm->iommu_mm); + + if (!iommu_mm) + return IOMMU_PASID_INVALID; + return iommu_mm->pasid; } + void mm_pasid_drop(struct mm_struct *mm); struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm); @@ -1212,7 +1475,70 @@ static inline u32 iommu_sva_get_pasid(struct iommu_sva *handle) } static inline void mm_pasid_init(struct mm_struct *mm) {} static inline bool mm_valid_pasid(struct mm_struct *mm) { return false; } + +static inline u32 mm_get_enqcmd_pasid(struct mm_struct *mm) +{ + return IOMMU_PASID_INVALID; +} + static inline void mm_pasid_drop(struct mm_struct *mm) {} #endif /* CONFIG_IOMMU_SVA */ +#ifdef CONFIG_IOMMU_IOPF +int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev); +void iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev); +int iopf_queue_flush_dev(struct device *dev); +struct iopf_queue *iopf_queue_alloc(const char *name); +void iopf_queue_free(struct iopf_queue *queue); +int iopf_queue_discard_partial(struct iopf_queue *queue); +void iopf_free_group(struct iopf_group *group); +int iommu_report_device_fault(struct device *dev, struct iopf_fault *evt); +void iopf_group_response(struct iopf_group *group, + enum iommu_page_response_code status); +#else +static inline int +iopf_queue_add_device(struct iopf_queue *queue, struct device *dev) +{ + return -ENODEV; +} + +static inline void +iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev) +{ +} + +static inline int iopf_queue_flush_dev(struct device *dev) +{ + return -ENODEV; +} + +static inline struct iopf_queue *iopf_queue_alloc(const char *name) +{ + return NULL; +} + +static inline void iopf_queue_free(struct iopf_queue *queue) +{ +} + +static inline int iopf_queue_discard_partial(struct iopf_queue *queue) +{ + return -ENODEV; +} + +static inline void iopf_free_group(struct iopf_group *group) +{ +} + +static inline int +iommu_report_device_fault(struct device *dev, struct iopf_fault *evt) +{ + return -ENODEV; +} + +static inline void iopf_group_response(struct iopf_group *group, + enum iommu_page_response_code status) +{ +} +#endif /* CONFIG_IOMMU_IOPF */ #endif /* __LINUX_IOMMU_H */ diff --git a/include/linux/iova_bitmap.h 
b/include/linux/iova_bitmap.h index c006cf0a25f3..1c338f5e5b7a 100644 --- a/include/linux/iova_bitmap.h +++ b/include/linux/iova_bitmap.h @@ -7,6 +7,7 @@ #define _IOVA_BITMAP_H_ #include +#include struct iova_bitmap; @@ -14,6 +15,7 @@ typedef int (*iova_bitmap_fn_t)(struct iova_bitmap *bitmap, unsigned long iova, size_t length, void *opaque); +#if IS_ENABLED(CONFIG_IOMMUFD_DRIVER) struct iova_bitmap *iova_bitmap_alloc(unsigned long iova, size_t length, unsigned long page_size, u64 __user *data); @@ -22,5 +24,29 @@ int iova_bitmap_for_each(struct iova_bitmap *bitmap, void *opaque, iova_bitmap_fn_t fn); void iova_bitmap_set(struct iova_bitmap *bitmap, unsigned long iova, size_t length); +#else +static inline struct iova_bitmap *iova_bitmap_alloc(unsigned long iova, + size_t length, + unsigned long page_size, + u64 __user *data) +{ + return NULL; +} + +static inline void iova_bitmap_free(struct iova_bitmap *bitmap) +{ +} + +static inline int iova_bitmap_for_each(struct iova_bitmap *bitmap, void *opaque, + iova_bitmap_fn_t fn) +{ + return -EOPNOTSUPP; +} + +static inline void iova_bitmap_set(struct iova_bitmap *bitmap, + unsigned long iova, size_t length) +{ +} +#endif #endif diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index e77d4a5c0bac..7c15db924154 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -730,6 +730,7 @@ struct mm_cid { #endif struct kioctx_table; +struct iommu_mm_data; struct mm_struct { struct { /* @@ -941,8 +942,8 @@ struct mm_struct { #endif struct work_struct async_put_work; -#ifdef CONFIG_IOMMU_SVA - u32 pasid; +#ifdef CONFIG_IOMMU_MM_DATA + struct iommu_mm_data *iommu_mm; #endif #ifdef CONFIG_KSM /* diff --git a/include/linux/sched.h b/include/linux/sched.h index cb38eee732fd..a084bc96e8d0 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -953,7 +953,7 @@ struct task_struct { /* Recursion prevention for eventfd_signal() */ unsigned in_eventfd:1; #endif -#ifdef CONFIG_IOMMU_SVA +#ifdef CONFIG_ARCH_HAS_CPU_PASID unsigned pasid_activated:1; #endif #ifdef CONFIG_CPU_SUP_INTEL diff --git a/include/soc/tegra/mc.h b/include/soc/tegra/mc.h index a5ef84944a06..71ae37d3bedd 100644 --- a/include/soc/tegra/mc.h +++ b/include/soc/tegra/mc.h @@ -96,7 +96,6 @@ struct tegra_smmu_soc { struct tegra_mc; struct tegra_smmu; -struct gart_device; #ifdef CONFIG_TEGRA_IOMMU_SMMU struct tegra_smmu *tegra_smmu_probe(struct device *dev, @@ -116,28 +115,6 @@ static inline void tegra_smmu_remove(struct tegra_smmu *smmu) } #endif -#ifdef CONFIG_TEGRA_IOMMU_GART -struct gart_device *tegra_gart_probe(struct device *dev, struct tegra_mc *mc); -int tegra_gart_suspend(struct gart_device *gart); -int tegra_gart_resume(struct gart_device *gart); -#else -static inline struct gart_device * -tegra_gart_probe(struct device *dev, struct tegra_mc *mc) -{ - return ERR_PTR(-ENODEV); -} - -static inline int tegra_gart_suspend(struct gart_device *gart) -{ - return -ENODEV; -} - -static inline int tegra_gart_resume(struct gart_device *gart) -{ - return -ENODEV; -} -#endif - struct tegra_mc_reset { const char *name; unsigned long id; @@ -185,8 +162,6 @@ struct tegra_mc_ops { */ int (*probe)(struct tegra_mc *mc); void (*remove)(struct tegra_mc *mc); - int (*suspend)(struct tegra_mc *mc); - int (*resume)(struct tegra_mc *mc); irqreturn_t (*handle_irq)(int irq, void *data); int (*probe_device)(struct tegra_mc *mc, struct device *dev); }; @@ -225,7 +200,6 @@ struct tegra_mc { struct tegra_bpmp *bpmp; struct device *dev; struct tegra_smmu *smmu; - struct gart_device 
*gart; void __iomem *regs; void __iomem *bcast_ch_regs; void __iomem **ch_regs; diff --git a/include/uapi/linux/iommu.h b/include/uapi/linux/iommu.h deleted file mode 100644 index 65d8b0234f69..000000000000 --- a/include/uapi/linux/iommu.h +++ /dev/null @@ -1,161 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -/* - * IOMMU user API definitions - */ - -#ifndef _UAPI_IOMMU_H -#define _UAPI_IOMMU_H - -#include - -#define IOMMU_FAULT_PERM_READ (1 << 0) /* read */ -#define IOMMU_FAULT_PERM_WRITE (1 << 1) /* write */ -#define IOMMU_FAULT_PERM_EXEC (1 << 2) /* exec */ -#define IOMMU_FAULT_PERM_PRIV (1 << 3) /* privileged */ - -/* Generic fault types, can be expanded IRQ remapping fault */ -enum iommu_fault_type { - IOMMU_FAULT_DMA_UNRECOV = 1, /* unrecoverable fault */ - IOMMU_FAULT_PAGE_REQ, /* page request fault */ -}; - -enum iommu_fault_reason { - IOMMU_FAULT_REASON_UNKNOWN = 0, - - /* Could not access the PASID table (fetch caused external abort) */ - IOMMU_FAULT_REASON_PASID_FETCH, - - /* PASID entry is invalid or has configuration errors */ - IOMMU_FAULT_REASON_BAD_PASID_ENTRY, - - /* - * PASID is out of range (e.g. exceeds the maximum PASID - * supported by the IOMMU) or disabled. - */ - IOMMU_FAULT_REASON_PASID_INVALID, - - /* - * An external abort occurred fetching (or updating) a translation - * table descriptor - */ - IOMMU_FAULT_REASON_WALK_EABT, - - /* - * Could not access the page table entry (Bad address), - * actual translation fault - */ - IOMMU_FAULT_REASON_PTE_FETCH, - - /* Protection flag check failed */ - IOMMU_FAULT_REASON_PERMISSION, - - /* access flag check failed */ - IOMMU_FAULT_REASON_ACCESS, - - /* Output address of a translation stage caused Address Size fault */ - IOMMU_FAULT_REASON_OOR_ADDRESS, -}; - -/** - * struct iommu_fault_unrecoverable - Unrecoverable fault data - * @reason: reason of the fault, from &enum iommu_fault_reason - * @flags: parameters of this fault (IOMMU_FAULT_UNRECOV_* values) - * @pasid: Process Address Space ID - * @perm: requested permission access using by the incoming transaction - * (IOMMU_FAULT_PERM_* values) - * @addr: offending page address - * @fetch_addr: address that caused a fetch abort, if any - */ -struct iommu_fault_unrecoverable { - __u32 reason; -#define IOMMU_FAULT_UNRECOV_PASID_VALID (1 << 0) -#define IOMMU_FAULT_UNRECOV_ADDR_VALID (1 << 1) -#define IOMMU_FAULT_UNRECOV_FETCH_ADDR_VALID (1 << 2) - __u32 flags; - __u32 pasid; - __u32 perm; - __u64 addr; - __u64 fetch_addr; -}; - -/** - * struct iommu_fault_page_request - Page Request data - * @flags: encodes whether the corresponding fields are valid and whether this - * is the last page in group (IOMMU_FAULT_PAGE_REQUEST_* values). - * When IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID is set, the page response - * must have the same PASID value as the page request. When it is clear, - * the page response should not have a PASID. 
- * @pasid: Process Address Space ID - * @grpid: Page Request Group Index - * @perm: requested page permissions (IOMMU_FAULT_PERM_* values) - * @addr: page address - * @private_data: device-specific private information - */ -struct iommu_fault_page_request { -#define IOMMU_FAULT_PAGE_REQUEST_PASID_VALID (1 << 0) -#define IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE (1 << 1) -#define IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA (1 << 2) -#define IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID (1 << 3) - __u32 flags; - __u32 pasid; - __u32 grpid; - __u32 perm; - __u64 addr; - __u64 private_data[2]; -}; - -/** - * struct iommu_fault - Generic fault data - * @type: fault type from &enum iommu_fault_type - * @padding: reserved for future use (should be zero) - * @event: fault event, when @type is %IOMMU_FAULT_DMA_UNRECOV - * @prm: Page Request message, when @type is %IOMMU_FAULT_PAGE_REQ - * @padding2: sets the fault size to allow for future extensions - */ -struct iommu_fault { - __u32 type; - __u32 padding; - union { - struct iommu_fault_unrecoverable event; - struct iommu_fault_page_request prm; - __u8 padding2[56]; - }; -}; - -/** - * enum iommu_page_response_code - Return status of fault handlers - * @IOMMU_PAGE_RESP_SUCCESS: Fault has been handled and the page tables - * populated, retry the access. This is "Success" in PCI PRI. - * @IOMMU_PAGE_RESP_FAILURE: General error. Drop all subsequent faults from - * this device if possible. This is "Response Failure" in PCI PRI. - * @IOMMU_PAGE_RESP_INVALID: Could not handle this fault, don't retry the - * access. This is "Invalid Request" in PCI PRI. - */ -enum iommu_page_response_code { - IOMMU_PAGE_RESP_SUCCESS = 0, - IOMMU_PAGE_RESP_INVALID, - IOMMU_PAGE_RESP_FAILURE, -}; - -/** - * struct iommu_page_response - Generic page response information - * @argsz: User filled size of this data - * @version: API version of this structure - * @flags: encodes whether the corresponding fields are valid - * (IOMMU_FAULT_PAGE_RESPONSE_* values) - * @pasid: Process Address Space ID - * @grpid: Page Request Group Index - * @code: response code from &enum iommu_page_response_code - */ -struct iommu_page_response { - __u32 argsz; -#define IOMMU_PAGE_RESP_VERSION_1 1 - __u32 version; -#define IOMMU_PAGE_RESP_PASID_VALID (1 << 0) - __u32 flags; - __u32 pasid; - __u32 grpid; - __u32 code; -}; - -#endif /* _UAPI_IOMMU_H */ diff --git a/include/uapi/linux/iommufd.h b/include/uapi/linux/iommufd.h index b4ba0c0cbab6..cd47af424f7e 100644 --- a/include/uapi/linux/iommufd.h +++ b/include/uapi/linux/iommufd.h @@ -47,6 +47,9 @@ enum { IOMMUFD_CMD_VFIO_IOAS, IOMMUFD_CMD_HWPT_ALLOC, IOMMUFD_CMD_GET_HW_INFO, + IOMMUFD_CMD_HWPT_SET_DIRTY_TRACKING, + IOMMUFD_CMD_HWPT_GET_DIRTY_BITMAP, + IOMMUFD_CMD_FAULT_QUEUE_ALLOC, }; /** @@ -347,20 +350,62 @@ struct iommu_vfio_ioas { }; #define IOMMU_VFIO_IOAS _IO(IOMMUFD_TYPE, IOMMUFD_CMD_VFIO_IOAS) +/** + * enum iommufd_hwpt_alloc_flags - Flags for HWPT allocation + * @IOMMU_HWPT_ALLOC_NEST_PARENT: If set, allocate a HWPT that can serve as + * the parent HWPT in a nesting configuration. + * @IOMMU_HWPT_ALLOC_DIRTY_TRACKING: Dirty tracking support for device IOMMU is + * enforced on device attachment + * @IOMMU_HWPT_FAULT_ID_VALID: The fault_id field of hwpt allocation data is + * valid. 
+ */ +enum iommufd_hwpt_alloc_flags { + IOMMU_HWPT_ALLOC_NEST_PARENT = 1 << 0, + IOMMU_HWPT_ALLOC_DIRTY_TRACKING = 1 << 1, + IOMMU_HWPT_FAULT_ID_VALID = 1 << 2, +}; + +/** + * enum iommu_hwpt_data_type - IOMMU HWPT Data Type + * @IOMMU_HWPT_DATA_NONE: no data + */ +enum iommu_hwpt_data_type { + IOMMU_HWPT_DATA_NONE, +}; + /** * struct iommu_hwpt_alloc - ioctl(IOMMU_HWPT_ALLOC) * @size: sizeof(struct iommu_hwpt_alloc) - * @flags: Must be 0 + * @flags: Combination of enum iommufd_hwpt_alloc_flags * @dev_id: The device to allocate this HWPT for - * @pt_id: The IOAS to connect this HWPT to + * @pt_id: The IOAS or HWPT to connect this HWPT to * @out_hwpt_id: The ID of the new HWPT * @__reserved: Must be 0 + * @data_type: One of enum iommu_hwpt_data_type + * @data_len: Length of the type specific data + * @data_uptr: User pointer to the type specific data + * @fault_id: The ID of IOMMUFD_FAULT object. Valid only if flags field of + * IOMMU_HWPT_FAULT_ID_VALID is set. + * @__reserved2: Padding to 64-bit alignment. Must be 0. * * Explicitly allocate a hardware page table object. This is the same object * type that is returned by iommufd_device_attach() and represents the * underlying iommu driver's iommu_domain kernel object. * - * A HWPT will be created with the IOVA mappings from the given IOAS. + * A kernel-managed HWPT will be created with the mappings from the given + * IOAS via the @pt_id. The @data_type for this allocation must be set to + * IOMMU_HWPT_DATA_NONE. The HWPT can be allocated as a parent HWPT for a + * nesting configuration by passing IOMMU_HWPT_ALLOC_NEST_PARENT via @flags. + * + * A user-managed nested HWPT will be created from a given parent HWPT via + * @pt_id, in which the parent HWPT must be allocated previously via the + * same ioctl from a given IOAS (@pt_id). In this case, the @data_type + * must be set to a pre-defined type corresponding to an I/O page table + * type supported by the underlying IOMMU hardware. + * + * If the @data_type is set to IOMMU_HWPT_DATA_NONE, @data_len and + * @data_uptr should be zero. Otherwise, both @data_len and @data_uptr + * must be given. */ struct iommu_hwpt_alloc { __u32 size; @@ -369,6 +414,11 @@ struct iommu_hwpt_alloc { __u32 pt_id; __u32 out_hwpt_id; __u32 __reserved; + __u32 data_type; + __u32 data_len; + __aligned_u64 data_uptr; + __u32 fault_id; + __u32 __reserved2; }; #define IOMMU_HWPT_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_HWPT_ALLOC) @@ -404,6 +454,20 @@ enum iommu_hw_info_type { IOMMU_HW_INFO_TYPE_INTEL_VTD, }; +/** + * enum iommufd_hw_capabilities + * @IOMMU_HW_CAP_DIRTY_TRACKING: IOMMU hardware support for dirty tracking + * If available, it means the following APIs + * are supported: + * + * IOMMU_HWPT_GET_DIRTY_BITMAP + * IOMMU_HWPT_SET_DIRTY_TRACKING + * + */ +enum iommufd_hw_capabilities { + IOMMU_HW_CAP_DIRTY_TRACKING = 1 << 0, +}; + /** * struct iommu_hw_info - ioctl(IOMMU_GET_HW_INFO) * @size: sizeof(struct iommu_hw_info) @@ -415,6 +479,8 @@ enum iommu_hw_info_type { * the iommu type specific hardware information data * @out_data_type: Output the iommu hardware info type as defined in the enum * iommu_hw_info_type. + * @out_capabilities: Output the generic iommu capability info type as defined + * in the enum iommu_hw_capabilities. 
* @__reserved: Must be 0 * * Query an iommu type specific hardware information data from an iommu behind @@ -439,6 +505,180 @@ struct iommu_hw_info { __aligned_u64 data_uptr; __u32 out_data_type; __u32 __reserved; + __aligned_u64 out_capabilities; }; #define IOMMU_GET_HW_INFO _IO(IOMMUFD_TYPE, IOMMUFD_CMD_GET_HW_INFO) + +/* + * enum iommufd_hwpt_set_dirty_tracking_flags - Flags for steering dirty + * tracking + * @IOMMU_HWPT_DIRTY_TRACKING_ENABLE: Enable dirty tracking + */ +enum iommufd_hwpt_set_dirty_tracking_flags { + IOMMU_HWPT_DIRTY_TRACKING_ENABLE = 1, +}; + +/** + * struct iommu_hwpt_set_dirty_tracking - ioctl(IOMMU_HWPT_SET_DIRTY_TRACKING) + * @size: sizeof(struct iommu_hwpt_set_dirty_tracking) + * @flags: Combination of enum iommufd_hwpt_set_dirty_tracking_flags + * @hwpt_id: HW pagetable ID that represents the IOMMU domain + * @__reserved: Must be 0 + * + * Toggle dirty tracking on an HW pagetable. + */ +struct iommu_hwpt_set_dirty_tracking { + __u32 size; + __u32 flags; + __u32 hwpt_id; + __u32 __reserved; +}; +#define IOMMU_HWPT_SET_DIRTY_TRACKING _IO(IOMMUFD_TYPE, \ + IOMMUFD_CMD_HWPT_SET_DIRTY_TRACKING) + +/** + * enum iommufd_hwpt_get_dirty_bitmap_flags - Flags for getting dirty bits + * @IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR: Just read the PTEs without clearing + * any dirty bits metadata. This flag + * can be passed in the expectation + * where the next operation is an unmap + * of the same IOVA range. + * + */ +enum iommufd_hwpt_get_dirty_bitmap_flags { + IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR = 1, +}; + +/** + * struct iommu_hwpt_get_dirty_bitmap - ioctl(IOMMU_HWPT_GET_DIRTY_BITMAP) + * @size: sizeof(struct iommu_hwpt_get_dirty_bitmap) + * @hwpt_id: HW pagetable ID that represents the IOMMU domain + * @flags: Combination of enum iommufd_hwpt_get_dirty_bitmap_flags + * @__reserved: Must be 0 + * @iova: base IOVA of the bitmap first bit + * @length: IOVA range size + * @page_size: page size granularity of each bit in the bitmap + * @data: bitmap where to set the dirty bits. The bitmap bits each + * represent a page_size which you deviate from an arbitrary iova. + * + * Checking a given IOVA is dirty: + * + * data[(iova / page_size) / 64] & (1ULL << ((iova / page_size) % 64)) + * + * Walk the IOMMU pagetables for a given IOVA range to return a bitmap + * with the dirty IOVAs. In doing so it will also by default clear any + * dirty bit metadata set in the IOPTE. + */ +struct iommu_hwpt_get_dirty_bitmap { + __u32 size; + __u32 hwpt_id; + __u32 flags; + __u32 __reserved; + __aligned_u64 iova; + __aligned_u64 length; + __aligned_u64 page_size; + __aligned_u64 data; +}; +#define IOMMU_HWPT_GET_DIRTY_BITMAP _IO(IOMMUFD_TYPE, \ + IOMMUFD_CMD_HWPT_GET_DIRTY_BITMAP) + +/** + * enum iommu_hwpt_pgfault_flags - flags for struct iommu_hwpt_pgfault + * @IOMMU_PGFAULT_FLAGS_PASID_VALID: The pasid field of the fault data is + * valid. + * @IOMMU_PGFAULT_FLAGS_LAST_PAGE: It's the last fault of a fault group. + */ +enum iommu_hwpt_pgfault_flags { + IOMMU_PGFAULT_FLAGS_PASID_VALID = (1 << 0), + IOMMU_PGFAULT_FLAGS_LAST_PAGE = (1 << 1), +}; + +/** + * enum iommu_hwpt_pgfault_perm - perm bits for struct iommu_hwpt_pgfault + * @IOMMU_PGFAULT_PERM_READ: request for read permission + * @IOMMU_PGFAULT_PERM_WRITE: request for write permission + * @IOMMU_PGFAULT_PERM_EXEC: (PCIE 10.4.1) request with a PASID that has the + * Execute Requested bit set in PASID TLP Prefix. 
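Putting the new dirty-tracking uAPI together from the userspace side: query IOMMU_GET_HW_INFO for IOMMU_HW_CAP_DIRTY_TRACKING, allocate the HWPT with IOMMU_HWPT_ALLOC_DIRTY_TRACKING, toggle it with IOMMU_HWPT_SET_DIRTY_TRACKING, read the bitmap back with IOMMU_HWPT_GET_DIRTY_BITMAP, and test bits with the indexing rule quoted above. A rough sketch; it assumes the device is already bound to the iommufd, the IOAS already populated, the caller-provided bitmap large enough for length / page_size bits, and all error handling elided.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/iommufd.h>

static int track_dirty(int iommufd, uint32_t dev_id, uint32_t ioas_id,
		       uint64_t iova, uint64_t length, uint64_t page_size,
		       uint64_t *bitmap)
{
	struct iommu_hw_info info = {
		.size = sizeof(info), .dev_id = dev_id,
	};
	struct iommu_hwpt_alloc alloc = {
		.size = sizeof(alloc),
		.flags = IOMMU_HWPT_ALLOC_DIRTY_TRACKING,
		.dev_id = dev_id,
		.pt_id = ioas_id,		/* IOMMU_HWPT_DATA_NONE */
	};
	struct iommu_hwpt_set_dirty_tracking set = {
		.size = sizeof(set),
		.flags = IOMMU_HWPT_DIRTY_TRACKING_ENABLE,
	};
	struct iommu_hwpt_get_dirty_bitmap get = {
		.size = sizeof(get),
		.iova = iova, .length = length,
		.page_size = page_size,
		.data = (uintptr_t)bitmap,
	};

	ioctl(iommufd, IOMMU_GET_HW_INFO, &info);
	if (!(info.out_capabilities & IOMMU_HW_CAP_DIRTY_TRACKING))
		return -1;

	ioctl(iommufd, IOMMU_HWPT_ALLOC, &alloc);
	set.hwpt_id = alloc.out_hwpt_id;
	ioctl(iommufd, IOMMU_HWPT_SET_DIRTY_TRACKING, &set);

	/* ... attach the device to alloc.out_hwpt_id and let it DMA ... */

	get.hwpt_id = alloc.out_hwpt_id;
	ioctl(iommufd, IOMMU_HWPT_GET_DIRTY_BITMAP, &get);

	/* Test one IOVA using the indexing rule from the comment above. */
	return !!(bitmap[(iova / page_size) / 64] &
		  (1ULL << ((iova / page_size) % 64)));
}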
+ * @IOMMU_PGFAULT_PERM_PRIV: (PCIE 10.4.1) request with a PASID that has the + * Privileged Mode Requested bit set in PASID TLP + * Prefix. + */ +enum iommu_hwpt_pgfault_perm { + IOMMU_PGFAULT_PERM_READ = (1 << 0), + IOMMU_PGFAULT_PERM_WRITE = (1 << 1), + IOMMU_PGFAULT_PERM_EXEC = (1 << 2), + IOMMU_PGFAULT_PERM_PRIV = (1 << 3), +}; + +/** + * struct iommu_hwpt_pgfault - iommu page fault data + * @flags: Combination of enum iommu_hwpt_pgfault_flags + * @dev_id: id of the originated device + * @pasid: Process Address Space ID + * @grpid: Page Request Group Index + * @perm: Combination of enum iommu_hwpt_pgfault_perm + * @addr: Fault address + * @length: a hint of how much data the requestor is expecting to fetch. For + * example, if the PRI initiator knows it is going to do a 10MB + * transfer, it could fill in 10MB and the OS could pre-fault in + * 10MB of IOVA. It's default to 0 if there's no such hint. + * @cookie: kernel-managed cookie identifying a group of fault messages. The + * cookie number encoded in the last page fault of the group should + * be echoed back in the response message. + */ +struct iommu_hwpt_pgfault { + __u32 flags; + __u32 dev_id; + __u32 pasid; + __u32 grpid; + __u32 perm; + __u64 addr; + __u32 length; + __u32 cookie; +}; + +/** + * enum iommufd_page_response_code - Return status of fault handlers + * @IOMMUFD_PAGE_RESP_SUCCESS: Fault has been handled and the page tables + * populated, retry the access. This is the + * "Success" defined in PCI 10.4.2.1. + * @IOMMUFD_PAGE_RESP_INVALID: Could not handle this fault, don't retry the + * access. This is the "Invalid Request" in PCI + * 10.4.2.1. + * @IOMMUFD_PAGE_RESP_FAILURE: General error. Drop all subsequent faults from + * this device if possible. This is the "Response + * Failure" in PCI 10.4.2.1. + */ +enum iommufd_page_response_code { + IOMMUFD_PAGE_RESP_SUCCESS = 0, + IOMMUFD_PAGE_RESP_INVALID, + IOMMUFD_PAGE_RESP_FAILURE, +}; + +/** + * struct iommu_hwpt_page_response - IOMMU page fault response + * @cookie: The kernel-managed cookie reported in the fault message. + * @code: One of response code in enum iommufd_page_response_code. + */ +struct iommu_hwpt_page_response { + __u32 cookie; + __u32 code; +}; + +/** + * struct iommu_fault_alloc - ioctl(IOMMU_FAULT_QUEUE_ALLOC) + * @size: sizeof(struct iommu_fault_alloc) + * @flags: Must be 0 + * @out_fault_id: The ID of the new FAULT + * @out_fault_fd: The fd of the new FAULT + * + * Explicitly allocate a fault handling object. 
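For completeness, the userspace side of the fault queue: allocate a fault object (the ioctl and struct follow just below), pass its ID via @fault_id with IOMMU_HWPT_FAULT_ID_VALID when allocating the HWPT that should report faults, then read struct iommu_hwpt_pgfault records from the fault fd and acknowledge each group by writing back a struct iommu_hwpt_page_response carrying the echoed cookie. A rough sketch; whether the faulting HWPT is a plain paging one or a nested one with driver data is driver specific (the selftest further down uses a nested one), and error handling is elided.

#include <stdint.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/iommufd.h>

static void handle_one_fault_group(int iommufd, uint32_t dev_id, uint32_t pt_id)
{
	struct iommu_fault_alloc fault = { .size = sizeof(fault) };
	struct iommu_hwpt_alloc alloc = {
		.size = sizeof(alloc),
		.flags = IOMMU_HWPT_FAULT_ID_VALID,
		.dev_id = dev_id,
		.pt_id = pt_id,
		/* For a nested HWPT, data_type/data_len/data_uptr go here. */
	};
	struct iommu_hwpt_pgfault pgfault;
	struct iommu_hwpt_page_response resp = {
		.code = IOMMUFD_PAGE_RESP_SUCCESS,
	};

	ioctl(iommufd, IOMMU_FAULT_QUEUE_ALLOC, &fault);
	alloc.fault_id = fault.out_fault_id;
	ioctl(iommufd, IOMMU_HWPT_ALLOC, &alloc);
	/* ... attach the device to alloc.out_hwpt_id and start DMA ... */

	/* Drain one Page Request Group from the fault fd. */
	do {
		read(fault.out_fault_fd, &pgfault, sizeof(pgfault));
		/* ... make pgfault.addr resolvable for pgfault.pasid ... */
	} while (!(pgfault.flags & IOMMU_PGFAULT_FLAGS_LAST_PAGE));

	/* Echo the cookie from the last message of the group back. */
	resp.cookie = pgfault.cookie;
	write(fault.out_fault_fd, &resp, sizeof(resp));
}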
+ */ +struct iommu_fault_alloc { + __u32 size; + __u32 flags; + __u32 out_fault_id; + __u32 out_fault_fd; +}; +#define IOMMU_FAULT_QUEUE_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_FAULT_QUEUE_ALLOC) #endif diff --git a/kernel/fork.c b/kernel/fork.c index 7966c9a1c163..1b78ddd2cd70 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1185,7 +1185,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node) tsk->use_memdelay = 0; #endif -#ifdef CONFIG_IOMMU_SVA +#ifdef CONFIG_ARCH_HAS_CPU_PASID tsk->pasid_activated = 0; #endif diff --git a/mm/Kconfig b/mm/Kconfig index c11cd01169e8..22f79ce92a2a 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -1282,6 +1282,9 @@ config LOCK_MM_AND_FIND_VMA bool depends on !STACK_GROWSUP +config IOMMU_MM_DATA + bool + source "mm/damon/Kconfig" endmenu diff --git a/mm/init-mm.c b/mm/init-mm.c index cfd367822cdd..24c809379274 100644 --- a/mm/init-mm.c +++ b/mm/init-mm.c @@ -44,9 +44,6 @@ struct mm_struct init_mm = { #endif .user_ns = &init_user_ns, .cpu_bitmap = CPU_BITS_NONE, -#ifdef CONFIG_IOMMU_SVA - .pasid = IOMMU_PASID_INVALID, -#endif INIT_MM_CONTEXT(init_mm) }; diff --git a/tools/testing/selftests/iommu/iommufd.c b/tools/testing/selftests/iommu/iommufd.c index 890a81f4ff61..4e9329e09e93 100644 --- a/tools/testing/selftests/iommu/iommufd.c +++ b/tools/testing/selftests/iommu/iommufd.c @@ -12,6 +12,7 @@ static unsigned long HUGEPAGE_SIZE; #define MOCK_PAGE_SIZE (PAGE_SIZE / 2) +#define MOCK_HUGE_PAGE_SIZE (512 * MOCK_PAGE_SIZE) static unsigned long get_huge_page_size(void) { @@ -114,6 +115,7 @@ TEST_F(iommufd, cmd_length) TEST_LENGTH(iommu_destroy, IOMMU_DESTROY); TEST_LENGTH(iommu_hw_info, IOMMU_GET_HW_INFO); + TEST_LENGTH(iommu_hwpt_alloc, IOMMU_HWPT_ALLOC); TEST_LENGTH(iommu_ioas_alloc, IOMMU_IOAS_ALLOC); TEST_LENGTH(iommu_ioas_iova_ranges, IOMMU_IOAS_IOVA_RANGES); TEST_LENGTH(iommu_ioas_allow_iovas, IOMMU_IOAS_ALLOW_IOVAS); @@ -260,6 +262,143 @@ TEST_F(iommufd_ioas, ioas_destroy) } } +TEST_F(iommufd_ioas, alloc_hwpt_nested) +{ + const uint32_t min_data_len = + offsetofend(struct iommu_hwpt_selftest, iotlb); + struct iommu_hwpt_selftest data = { + .iotlb = IOMMU_TEST_IOTLB_DEFAULT, + }; + uint32_t nested_hwpt_id[2] = {}; + uint32_t parent_hwpt_id = 0; + uint32_t parent_hwpt_id_not_work = 0; + uint32_t test_hwpt_id = 0; + uint32_t iopf_hwpt_id; + uint32_t fault_id; + uint32_t fault_fd; + + if (self->device_id) { + /* Negative tests */ + test_err_hwpt_alloc(ENOENT, self->ioas_id, self->device_id, 0, + &test_hwpt_id); + test_err_hwpt_alloc(EINVAL, self->device_id, self->device_id, 0, + &test_hwpt_id); + + test_cmd_hwpt_alloc(self->device_id, self->ioas_id, + IOMMU_HWPT_ALLOC_NEST_PARENT, + &parent_hwpt_id); + + test_cmd_hwpt_alloc(self->device_id, self->ioas_id, 0, + &parent_hwpt_id_not_work); + + /* Negative nested tests */ + test_err_hwpt_alloc_nested(EINVAL, self->device_id, + parent_hwpt_id, 0, + &nested_hwpt_id[0], + IOMMU_HWPT_DATA_NONE, &data, + sizeof(data)); + test_err_hwpt_alloc_nested(EOPNOTSUPP, self->device_id, + parent_hwpt_id, 0, + &nested_hwpt_id[0], + IOMMU_HWPT_DATA_SELFTEST + 1, &data, + sizeof(data)); + test_err_hwpt_alloc_nested(EINVAL, self->device_id, + parent_hwpt_id, 0, + &nested_hwpt_id[0], + IOMMU_HWPT_DATA_SELFTEST, &data, + min_data_len - 1); + test_err_hwpt_alloc_nested(EFAULT, self->device_id, + parent_hwpt_id, 0, + &nested_hwpt_id[0], + IOMMU_HWPT_DATA_SELFTEST, NULL, + sizeof(data)); + test_err_hwpt_alloc_nested( + EOPNOTSUPP, self->device_id, parent_hwpt_id, + IOMMU_HWPT_ALLOC_NEST_PARENT, &nested_hwpt_id[0], + 
IOMMU_HWPT_DATA_SELFTEST, &data, sizeof(data)); + test_err_hwpt_alloc_nested(EINVAL, self->device_id, + parent_hwpt_id_not_work, 0, + &nested_hwpt_id[0], + IOMMU_HWPT_DATA_SELFTEST, &data, + sizeof(data)); + + /* Allocate two nested hwpts sharing one common parent hwpt */ + test_ioctl_fault_alloc(&fault_id, &fault_fd); + test_cmd_hwpt_alloc_nested(self->device_id, parent_hwpt_id, 0, + &nested_hwpt_id[0], + IOMMU_HWPT_DATA_SELFTEST, &data, + sizeof(data)); + test_cmd_hwpt_alloc_nested(self->device_id, parent_hwpt_id, 0, + &nested_hwpt_id[1], + IOMMU_HWPT_DATA_SELFTEST, &data, + sizeof(data)); + test_err_hwpt_alloc_iopf(ENOENT, self->device_id, parent_hwpt_id, + UINT32_MAX, IOMMU_HWPT_FAULT_ID_VALID, + &iopf_hwpt_id, IOMMU_HWPT_DATA_SELFTEST, + &data, sizeof(data)); + test_cmd_hwpt_alloc_iopf(self->device_id, parent_hwpt_id, fault_id, + IOMMU_HWPT_FAULT_ID_VALID, &iopf_hwpt_id, + IOMMU_HWPT_DATA_SELFTEST, &data, + sizeof(data)); + + /* Negative test: a nested hwpt on top of a nested hwpt */ + test_err_hwpt_alloc_nested(EINVAL, self->device_id, + nested_hwpt_id[0], 0, &test_hwpt_id, + IOMMU_HWPT_DATA_SELFTEST, &data, + sizeof(data)); + /* Negative test: parent hwpt now cannot be freed */ + EXPECT_ERRNO(EBUSY, + _test_ioctl_destroy(self->fd, parent_hwpt_id)); + + /* Attach device to nested_hwpt_id[0], which will then be busy */ + test_cmd_mock_domain_replace(self->stdev_id, nested_hwpt_id[0]); + EXPECT_ERRNO(EBUSY, + _test_ioctl_destroy(self->fd, nested_hwpt_id[0])); + + /* Switch from nested_hwpt_id[0] to nested_hwpt_id[1] */ + test_cmd_mock_domain_replace(self->stdev_id, nested_hwpt_id[1]); + EXPECT_ERRNO(EBUSY, + _test_ioctl_destroy(self->fd, nested_hwpt_id[1])); + test_ioctl_destroy(nested_hwpt_id[0]); + + /* Switch from nested_hwpt_id[1] to iopf_hwpt_id */ + test_cmd_mock_domain_replace(self->stdev_id, iopf_hwpt_id); + EXPECT_ERRNO(EBUSY, + _test_ioctl_destroy(self->fd, iopf_hwpt_id)); + /* Trigger an IOPF on the device */ + test_cmd_trigger_iopf(self->device_id, fault_fd); + + /* Detach from iopf_hwpt_id, then destroy nested_hwpt_id[1] and iopf_hwpt_id */ + test_cmd_mock_domain_replace(self->stdev_id, parent_hwpt_id); + test_ioctl_destroy(nested_hwpt_id[1]); + test_ioctl_destroy(iopf_hwpt_id); + + /* Detach from the parent hw_pagetable and destroy it */ + test_cmd_mock_domain_replace(self->stdev_id, self->ioas_id); + test_ioctl_destroy(parent_hwpt_id); + test_ioctl_destroy(parent_hwpt_id_not_work); + close(fault_fd); + test_ioctl_destroy(fault_id); + } else { + test_err_hwpt_alloc(ENOENT, self->device_id, self->ioas_id, 0, + &parent_hwpt_id); + test_err_hwpt_alloc_nested(ENOENT, self->device_id, + parent_hwpt_id, 0, + &nested_hwpt_id[0], + IOMMU_HWPT_DATA_SELFTEST, &data, + sizeof(data)); + test_err_hwpt_alloc_nested(ENOENT, self->device_id, + parent_hwpt_id, 0, + &nested_hwpt_id[1], + IOMMU_HWPT_DATA_SELFTEST, &data, + sizeof(data)); + test_err_mock_domain_replace(ENOENT, self->stdev_id, + nested_hwpt_id[0]); + test_err_mock_domain_replace(ENOENT, self->stdev_id, + nested_hwpt_id[1]); + } +} + TEST_F(iommufd_ioas, hwpt_attach) { /* Create a device attached directly to a hwpt */ @@ -1404,16 +1543,301 @@ TEST_F(iommufd_mock_domain, alloc_hwpt) int i; for (i = 0; i != variant->mock_domains; i++) { + uint32_t hwpt_id[2]; uint32_t stddev_id; - uint32_t hwpt_id; - test_cmd_hwpt_alloc(self->idev_ids[0], self->ioas_id, &hwpt_id); - test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL); + test_err_hwpt_alloc(EOPNOTSUPP, + self->idev_ids[i], self->ioas_id, + ~IOMMU_HWPT_ALLOC_NEST_PARENT, &hwpt_id[0]); + 
test_cmd_hwpt_alloc(self->idev_ids[i], self->ioas_id, + 0, &hwpt_id[0]); + test_cmd_hwpt_alloc(self->idev_ids[i], self->ioas_id, + IOMMU_HWPT_ALLOC_NEST_PARENT, &hwpt_id[1]); + + /* Do a hw_pagetable rotation test */ + test_cmd_mock_domain_replace(self->stdev_ids[i], hwpt_id[0]); + EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, hwpt_id[0])); + test_cmd_mock_domain_replace(self->stdev_ids[i], hwpt_id[1]); + EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, hwpt_id[1])); + test_cmd_mock_domain_replace(self->stdev_ids[i], self->ioas_id); + test_ioctl_destroy(hwpt_id[1]); + + test_cmd_mock_domain(hwpt_id[0], &stddev_id, NULL, NULL); test_ioctl_destroy(stddev_id); - test_ioctl_destroy(hwpt_id); + test_ioctl_destroy(hwpt_id[0]); } } +FIXTURE(iommufd_dirty_tracking) +{ + int fd; + uint32_t ioas_id; + uint32_t hwpt_id; + uint32_t stdev_id; + uint32_t idev_id; + unsigned long page_size; + unsigned long bitmap_size; + void *bitmap; + void *buffer; +}; + +FIXTURE_VARIANT(iommufd_dirty_tracking) +{ + unsigned long buffer_size; + bool hugepages; +}; + +FIXTURE_SETUP(iommufd_dirty_tracking) +{ + size_t mmap_buffer_size; + int mmap_flags; + void *vrc; + int rc; + + self->fd = open("/dev/iommu", O_RDWR); + ASSERT_NE(-1, self->fd); + + mmap_flags = MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED; + mmap_buffer_size = variant->buffer_size; + if (variant->hugepages) { + /* + * MAP_POPULATE will cause the kernel to fail mmap if THPs are + * not available. + */ + mmap_flags |= MAP_HUGETLB | MAP_POPULATE; + + /* + * Allocation must be aligned to the HUGEPAGE_SIZE, because the + * following mmap() will automatically align the length to be a + * multiple of the underlying huge page size. Failing to do the + * same at this allocation will result in a memory overwrite by + * the mmap(). 
+ */ + if (mmap_buffer_size < HUGEPAGE_SIZE) + mmap_buffer_size = HUGEPAGE_SIZE; + } + + rc = posix_memalign(&self->buffer, HUGEPAGE_SIZE, mmap_buffer_size); + if (rc || !self->buffer) { + SKIP(return, "Skipping buffer_size=%lu due to errno=%d", + mmap_buffer_size, rc); + } + assert((uintptr_t)self->buffer % HUGEPAGE_SIZE == 0); + vrc = mmap(self->buffer, mmap_buffer_size, PROT_READ | PROT_WRITE, + mmap_flags, -1, 0); + assert(vrc == self->buffer); + + self->page_size = MOCK_PAGE_SIZE; + self->bitmap_size = + variant->buffer_size / self->page_size / BITS_PER_BYTE; + + /* Provision with an extra (PAGE_SIZE) for the unaligned case */ + rc = posix_memalign(&self->bitmap, PAGE_SIZE, + self->bitmap_size + PAGE_SIZE); + assert(!rc); + assert(self->bitmap); + assert((uintptr_t)self->bitmap % PAGE_SIZE == 0); + + test_ioctl_ioas_alloc(&self->ioas_id); + /* Enable 1M mock IOMMU hugepages */ + if (variant->hugepages) { + test_cmd_mock_domain_flags(self->ioas_id, + MOCK_FLAGS_DEVICE_HUGE_IOVA, + &self->stdev_id, &self->hwpt_id, + &self->idev_id); + } else { + test_cmd_mock_domain(self->ioas_id, &self->stdev_id, + &self->hwpt_id, &self->idev_id); + } +} + +FIXTURE_TEARDOWN(iommufd_dirty_tracking) +{ + free(self->buffer); + free(self->bitmap); + teardown_iommufd(self->fd, _metadata); +} + +FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128k) +{ + /* one u32 index bitmap */ + .buffer_size = 128UL * 1024UL, +}; + +FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty256k) +{ + /* one u64 index bitmap */ + .buffer_size = 256UL * 1024UL, +}; + +FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty640k) +{ + /* two u64 index and trailing end bitmap */ + .buffer_size = 640UL * 1024UL, +}; + +FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128M) +{ + /* 4K bitmap (128M IOVA range) */ + .buffer_size = 128UL * 1024UL * 1024UL, +}; + +FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128M_huge) +{ + /* 4K bitmap (128M IOVA range) */ + .buffer_size = 128UL * 1024UL * 1024UL, + .hugepages = true, +}; + +FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty256M) +{ + /* 8K bitmap (256M IOVA range) */ + .buffer_size = 256UL * 1024UL * 1024UL, +}; + +FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty256M_huge) +{ + /* 8K bitmap (256M IOVA range) */ + .buffer_size = 256UL * 1024UL * 1024UL, + .hugepages = true, +}; + +TEST_F(iommufd_dirty_tracking, enforce_dirty) +{ + uint32_t ioas_id, stddev_id, idev_id; + uint32_t hwpt_id, _hwpt_id; + uint32_t dev_flags; + + /* Regular case */ + dev_flags = MOCK_FLAGS_DEVICE_NO_DIRTY; + test_cmd_hwpt_alloc(self->idev_id, self->ioas_id, + IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id); + test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL); + test_err_mock_domain_flags(EINVAL, hwpt_id, dev_flags, &stddev_id, + NULL); + test_ioctl_destroy(stddev_id); + test_ioctl_destroy(hwpt_id); + + /* IOMMU device does not support dirty tracking */ + test_ioctl_ioas_alloc(&ioas_id); + test_cmd_mock_domain_flags(ioas_id, dev_flags, &stddev_id, &_hwpt_id, + &idev_id); + test_err_hwpt_alloc(EOPNOTSUPP, idev_id, ioas_id, + IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id); + test_ioctl_destroy(stddev_id); +} + +TEST_F(iommufd_dirty_tracking, set_dirty_tracking) +{ + uint32_t stddev_id; + uint32_t hwpt_id; + + test_cmd_hwpt_alloc(self->idev_id, self->ioas_id, + IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id); + test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL); + test_cmd_set_dirty_tracking(hwpt_id, true); + test_cmd_set_dirty_tracking(hwpt_id, false); + + 
test_ioctl_destroy(stddev_id); + test_ioctl_destroy(hwpt_id); +} + +TEST_F(iommufd_dirty_tracking, device_dirty_capability) +{ + uint32_t caps = 0; + uint32_t stddev_id; + uint32_t hwpt_id; + + test_cmd_hwpt_alloc(self->idev_id, self->ioas_id, 0, &hwpt_id); + test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL); + test_cmd_get_hw_capabilities(self->idev_id, caps, + IOMMU_HW_CAP_DIRTY_TRACKING); + ASSERT_EQ(IOMMU_HW_CAP_DIRTY_TRACKING, + caps & IOMMU_HW_CAP_DIRTY_TRACKING); + + test_ioctl_destroy(stddev_id); + test_ioctl_destroy(hwpt_id); +} + +TEST_F(iommufd_dirty_tracking, get_dirty_bitmap) +{ + uint32_t page_size = MOCK_PAGE_SIZE; + uint32_t hwpt_id; + uint32_t ioas_id; + + if (variant->hugepages) + page_size = MOCK_HUGE_PAGE_SIZE; + + test_ioctl_ioas_alloc(&ioas_id); + test_ioctl_ioas_map_fixed_id(ioas_id, self->buffer, + variant->buffer_size, MOCK_APERTURE_START); + + test_cmd_hwpt_alloc(self->idev_id, ioas_id, + IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id); + + test_cmd_set_dirty_tracking(hwpt_id, true); + + test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size, + MOCK_APERTURE_START, self->page_size, page_size, + self->bitmap, self->bitmap_size, 0, _metadata); + + /* PAGE_SIZE unaligned bitmap */ + test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size, + MOCK_APERTURE_START, self->page_size, page_size, + self->bitmap + MOCK_PAGE_SIZE, + self->bitmap_size, 0, _metadata); + + /* u64 unaligned bitmap */ + test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size, + MOCK_APERTURE_START, self->page_size, page_size, + self->bitmap + 0xff1, self->bitmap_size, 0, + _metadata); + + test_ioctl_destroy(hwpt_id); +} + +TEST_F(iommufd_dirty_tracking, get_dirty_bitmap_no_clear) +{ + uint32_t page_size = MOCK_PAGE_SIZE; + uint32_t hwpt_id; + uint32_t ioas_id; + + if (variant->hugepages) + page_size = MOCK_HUGE_PAGE_SIZE; + + test_ioctl_ioas_alloc(&ioas_id); + test_ioctl_ioas_map_fixed_id(ioas_id, self->buffer, + variant->buffer_size, MOCK_APERTURE_START); + + test_cmd_hwpt_alloc(self->idev_id, ioas_id, + IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id); + + test_cmd_set_dirty_tracking(hwpt_id, true); + + test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size, + MOCK_APERTURE_START, self->page_size, page_size, + self->bitmap, self->bitmap_size, + IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR, + _metadata); + + /* Unaligned bitmap */ + test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size, + MOCK_APERTURE_START, self->page_size, page_size, + self->bitmap + MOCK_PAGE_SIZE, + self->bitmap_size, + IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR, + _metadata); + + /* u64 unaligned bitmap */ + test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size, + MOCK_APERTURE_START, self->page_size, page_size, + self->bitmap + 0xff1, self->bitmap_size, + IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR, + _metadata); + + test_ioctl_destroy(hwpt_id); +} + /* VFIO compatibility IOCTLs */ TEST_F(iommufd, simple_ioctls) diff --git a/tools/testing/selftests/iommu/iommufd_fail_nth.c b/tools/testing/selftests/iommu/iommufd_fail_nth.c index a220ca2a689d..4ee39af6b883 100644 --- a/tools/testing/selftests/iommu/iommufd_fail_nth.c +++ b/tools/testing/selftests/iommu/iommufd_fail_nth.c @@ -612,10 +612,11 @@ TEST_FAIL_NTH(basic_fail_nth, device) &idev_id)) return -1; - if (_test_cmd_get_hw_info(self->fd, idev_id, &info, sizeof(info))) + if (_test_cmd_get_hw_info(self->fd, idev_id, &info, sizeof(info), NULL)) return -1; - if (_test_cmd_hwpt_alloc(self->fd, idev_id, ioas_id, &hwpt_id)) + if (_test_cmd_hwpt_alloc(self->fd, idev_id, ioas_id, 0, 0, &hwpt_id, + 
IOMMU_HWPT_DATA_NONE, 0, 0)) return -1; if (_test_cmd_mock_domain_replace(self->fd, stdev_id, ioas_id2, NULL)) diff --git a/tools/testing/selftests/iommu/iommufd_utils.h b/tools/testing/selftests/iommu/iommufd_utils.h index e0753d03ecaa..d9df1f323333 100644 --- a/tools/testing/selftests/iommu/iommufd_utils.h +++ b/tools/testing/selftests/iommu/iommufd_utils.h @@ -16,6 +16,25 @@ /* Hack to make assertions more readable */ #define _IOMMU_TEST_CMD(x) IOMMU_TEST_CMD +/* Imported from include/asm-generic/bitops/generic-non-atomic.h */ +#define BITS_PER_BYTE 8 +#define BITS_PER_LONG __BITS_PER_LONG +#define BIT_MASK(nr) (1UL << ((nr) % __BITS_PER_LONG)) +#define BIT_WORD(nr) ((nr) / __BITS_PER_LONG) + +static inline void set_bit(unsigned int nr, unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + + *p |= mask; +} + +static inline bool test_bit(unsigned int nr, unsigned long *addr) +{ + return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1))); +} + static void *buffer; static unsigned long BUFFER_SIZE; @@ -74,6 +93,38 @@ static int _test_cmd_mock_domain(int fd, unsigned int ioas_id, __u32 *stdev_id, EXPECT_ERRNO(_errno, _test_cmd_mock_domain(self->fd, ioas_id, \ stdev_id, hwpt_id, NULL)) +static int _test_cmd_mock_domain_flags(int fd, unsigned int ioas_id, + __u32 stdev_flags, __u32 *stdev_id, + __u32 *hwpt_id, __u32 *idev_id) +{ + struct iommu_test_cmd cmd = { + .size = sizeof(cmd), + .op = IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS, + .id = ioas_id, + .mock_domain_flags = { .dev_flags = stdev_flags }, + }; + int ret; + + ret = ioctl(fd, IOMMU_TEST_CMD, &cmd); + if (ret) + return ret; + if (stdev_id) + *stdev_id = cmd.mock_domain_flags.out_stdev_id; + assert(cmd.id != 0); + if (hwpt_id) + *hwpt_id = cmd.mock_domain_flags.out_hwpt_id; + if (idev_id) + *idev_id = cmd.mock_domain_flags.out_idev_id; + return 0; +} +#define test_cmd_mock_domain_flags(ioas_id, flags, stdev_id, hwpt_id, idev_id) \ + ASSERT_EQ(0, _test_cmd_mock_domain_flags(self->fd, ioas_id, flags, \ + stdev_id, hwpt_id, idev_id)) +#define test_err_mock_domain_flags(_errno, ioas_id, flags, stdev_id, hwpt_id) \ + EXPECT_ERRNO(_errno, \ + _test_cmd_mock_domain_flags(self->fd, ioas_id, flags, \ + stdev_id, hwpt_id, NULL)) + static int _test_cmd_mock_domain_replace(int fd, __u32 stdev_id, __u32 pt_id, __u32 *hwpt_id) { @@ -102,13 +153,19 @@ static int _test_cmd_mock_domain_replace(int fd, __u32 stdev_id, __u32 pt_id, EXPECT_ERRNO(_errno, _test_cmd_mock_domain_replace(self->fd, stdev_id, \ pt_id, NULL)) -static int _test_cmd_hwpt_alloc(int fd, __u32 device_id, __u32 pt_id, - __u32 *hwpt_id) +static int _test_cmd_hwpt_alloc(int fd, __u32 device_id, __u32 pt_id, __u32 ft_id, + __u32 flags, __u32 *hwpt_id, __u32 data_type, + void *data, size_t data_len) { struct iommu_hwpt_alloc cmd = { .size = sizeof(cmd), + .flags = flags, .dev_id = device_id, .pt_id = pt_id, + .data_type = data_type, + .data_len = data_len, + .data_uptr = (uint64_t)data, + .fault_id = ft_id, }; int ret; @@ -120,8 +177,36 @@ static int _test_cmd_hwpt_alloc(int fd, __u32 device_id, __u32 pt_id, return 0; } -#define test_cmd_hwpt_alloc(device_id, pt_id, hwpt_id) \ - ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, hwpt_id)) +#define test_cmd_hwpt_alloc(device_id, pt_id, flags, hwpt_id) \ + ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, 0, flags, \ + hwpt_id, IOMMU_HWPT_DATA_NONE, NULL, \ + 0)) +#define test_err_hwpt_alloc(_errno, device_id, pt_id, flags, hwpt_id) \ + 
EXPECT_ERRNO(_errno, _test_cmd_hwpt_alloc( \ + self->fd, device_id, pt_id, 0, flags, \ + hwpt_id, IOMMU_HWPT_DATA_NONE, NULL, 0)) + +#define test_cmd_hwpt_alloc_nested(device_id, pt_id, flags, hwpt_id, \ + data_type, data, data_len) \ + ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, 0, flags, \ + hwpt_id, data_type, data, data_len)) +#define test_err_hwpt_alloc_nested(_errno, device_id, pt_id, flags, hwpt_id, \ + data_type, data, data_len) \ + EXPECT_ERRNO(_errno, \ + _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, 0, flags, \ + hwpt_id, data_type, data, data_len)) + +#define test_cmd_hwpt_alloc_iopf(device_id, pt_id, fault_id, flags, hwpt_id, \ + data_type, data, data_len) \ + ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, fault_id, \ + flags, hwpt_id, data_type, data, \ + data_len)) +#define test_err_hwpt_alloc_iopf(_errno, device_id, pt_id, fault_id, flags, \ + hwpt_id, data_type, data, data_len) \ + EXPECT_ERRNO(_errno, \ + _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, fault_id, \ + flags, hwpt_id, data_type, data, \ + data_len)) static int _test_cmd_access_replace_ioas(int fd, __u32 access_id, unsigned int ioas_id) @@ -142,6 +227,136 @@ static int _test_cmd_access_replace_ioas(int fd, __u32 access_id, #define test_cmd_access_replace_ioas(access_id, ioas_id) \ ASSERT_EQ(0, _test_cmd_access_replace_ioas(self->fd, access_id, ioas_id)) +static int _test_cmd_set_dirty_tracking(int fd, __u32 hwpt_id, bool enabled) +{ + struct iommu_hwpt_set_dirty_tracking cmd = { + .size = sizeof(cmd), + .flags = enabled ? IOMMU_HWPT_DIRTY_TRACKING_ENABLE : 0, + .hwpt_id = hwpt_id, + }; + int ret; + + ret = ioctl(fd, IOMMU_HWPT_SET_DIRTY_TRACKING, &cmd); + if (ret) + return -errno; + return 0; +} +#define test_cmd_set_dirty_tracking(hwpt_id, enabled) \ + ASSERT_EQ(0, _test_cmd_set_dirty_tracking(self->fd, hwpt_id, enabled)) + +static int _test_cmd_get_dirty_bitmap(int fd, __u32 hwpt_id, size_t length, + __u64 iova, size_t page_size, + __u64 *bitmap, __u32 flags) +{ + struct iommu_hwpt_get_dirty_bitmap cmd = { + .size = sizeof(cmd), + .hwpt_id = hwpt_id, + .flags = flags, + .iova = iova, + .length = length, + .page_size = page_size, + .data = (uintptr_t)bitmap, + }; + int ret; + + ret = ioctl(fd, IOMMU_HWPT_GET_DIRTY_BITMAP, &cmd); + if (ret) + return ret; + return 0; +} + +#define test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size, \ + bitmap, flags) \ + ASSERT_EQ(0, _test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, \ + page_size, bitmap, flags)) + +static int _test_cmd_mock_domain_set_dirty(int fd, __u32 hwpt_id, size_t length, + __u64 iova, size_t page_size, + __u64 *bitmap, __u64 *dirty) +{ + struct iommu_test_cmd cmd = { + .size = sizeof(cmd), + .op = IOMMU_TEST_OP_DIRTY, + .id = hwpt_id, + .dirty = { + .iova = iova, + .length = length, + .page_size = page_size, + .uptr = (uintptr_t)bitmap, + } + }; + int ret; + + ret = ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_DIRTY), &cmd); + if (ret) + return -ret; + if (dirty) + *dirty = cmd.dirty.out_nr_dirty; + return 0; +} + +#define test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, page_size, \ + bitmap, nr) \ + ASSERT_EQ(0, \ + _test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, \ + page_size, bitmap, nr)) + +static int _test_mock_dirty_bitmaps(int fd, __u32 hwpt_id, size_t length, + __u64 iova, size_t page_size, + size_t pte_page_size, __u64 *bitmap, + __u64 bitmap_size, __u32 flags, + struct __test_metadata *_metadata) +{ + unsigned long npte = pte_page_size / page_size, pteset = 2 * npte; + 
unsigned long nbits = bitmap_size * BITS_PER_BYTE; + unsigned long j, i, nr = nbits / pteset ?: 1; + __u64 out_dirty = 0; + + /* Dirty the first bit of every other PTE in the mock domain */ + memset(bitmap, 0, bitmap_size); + for (i = 0; i < nbits; i += pteset) + set_bit(i, (unsigned long *)bitmap); + + test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, page_size, + bitmap, &out_dirty); + ASSERT_EQ(nr, out_dirty); + + /* Expect every other PTE to be reported fully dirty in the user bitmap */ + memset(bitmap, 0, bitmap_size); + test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size, bitmap, + flags); + /* Beware ASSERT_EQ() is two statements -- braces are not redundant! */ + for (i = 0; i < nbits; i += pteset) { + for (j = 0; j < pteset; j++) { + ASSERT_EQ(j < npte, + test_bit(i + j, (unsigned long *)bitmap)); + } + ASSERT_EQ(!(i % pteset), test_bit(i, (unsigned long *)bitmap)); + } + + memset(bitmap, 0, bitmap_size); + test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size, bitmap, + flags); + + /* It was read (and cleared) already -- expect all zeroes unless NO_CLEAR is set */ + for (i = 0; i < nbits; i += pteset) { + for (j = 0; j < pteset; j++) { + ASSERT_EQ( + (j < npte) && + (flags & + IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR), + test_bit(i + j, (unsigned long *)bitmap)); + } + } + + return 0; +} +#define test_mock_dirty_bitmaps(hwpt_id, length, iova, page_size, pte_size,\ + bitmap, bitmap_size, flags, _metadata) \ + ASSERT_EQ(0, _test_mock_dirty_bitmaps(self->fd, hwpt_id, length, iova, \ + page_size, pte_size, bitmap, \ + bitmap_size, flags, _metadata)) + static int _test_cmd_create_access(int fd, unsigned int ioas_id, __u32 *access_id, unsigned int flags) { @@ -266,6 +481,17 @@ static int _test_ioctl_ioas_map(int fd, unsigned int ioas_id, void *buffer, IOMMU_IOAS_MAP_READABLE)); \ }) +#define test_ioctl_ioas_map_fixed_id(ioas_id, buffer, length, iova) \ + ({ \ + __u64 __iova = iova; \ + ASSERT_EQ(0, \ + _test_ioctl_ioas_map( \ + self->fd, ioas_id, buffer, length, &__iova, \ + IOMMU_IOAS_MAP_FIXED_IOVA | \ + IOMMU_IOAS_MAP_WRITEABLE | \ + IOMMU_IOAS_MAP_READABLE)); \ + }) + #define test_err_ioctl_ioas_map_fixed(_errno, buffer, length, iova) \ ({ \ __u64 __iova = iova; \ @@ -354,8 +580,8 @@ static void teardown_iommufd(int fd, struct __test_metadata *_metadata) #endif /* @data can be NULL */ -static int _test_cmd_get_hw_info(int fd, __u32 device_id, - void *data, size_t data_len) +static int _test_cmd_get_hw_info(int fd, __u32 device_id, void *data, + size_t data_len, uint32_t *capabilities) { struct iommu_test_hw_info *info = (struct iommu_test_hw_info *)data; struct iommu_hw_info cmd = { @@ -363,6 +589,7 @@ static int _test_cmd_get_hw_info(int fd, __u32 device_id, .dev_id = device_id, .data_len = data_len, .data_uptr = (uint64_t)data, + .out_capabilities = 0, }; int ret; @@ -399,14 +626,82 @@ static int _test_cmd_get_hw_info(int fd, __u32 device_id, assert(!info->flags); } + if (capabilities) + *capabilities = cmd.out_capabilities; + + return 0; +} + +#define test_cmd_get_hw_info(device_id, data, data_len) \ + ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, data, \ + data_len, NULL)) + +#define test_err_get_hw_info(_errno, device_id, data, data_len) \ + EXPECT_ERRNO(_errno, _test_cmd_get_hw_info(self->fd, device_id, data, \ + data_len, NULL)) + +#define test_cmd_get_hw_capabilities(device_id, caps, mask) \ + ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, NULL, 0, &caps)) + +static int _test_ioctl_fault_alloc(int fd, __u32 *fault_id, __u32 *fault_fd) +{ + struct iommu_fault_alloc cmd = { + .size = sizeof(cmd), + };
+ int ret; + + ret = ioctl(fd, IOMMU_FAULT_QUEUE_ALLOC, &cmd); + if (ret) + return ret; + *fault_id = cmd.out_fault_id; + *fault_fd = cmd.out_fault_fd; return 0; } -#define test_cmd_get_hw_info(device_id, data, data_len) \ - ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, \ - data, data_len)) +#define test_ioctl_fault_alloc(fault_id, fault_fd) \ + ({ \ + ASSERT_EQ(0, _test_ioctl_fault_alloc(self->fd, fault_id, \ + fault_fd)); \ + ASSERT_NE(0, *(fault_id)); \ + ASSERT_NE(0, *(fault_fd)); \ + }) + +static int _test_cmd_trigger_iopf(int fd, __u32 device_id, __u32 fault_fd) +{ + struct iommu_test_cmd trigger_iopf_cmd = { + .size = sizeof(trigger_iopf_cmd), + .op = IOMMU_TEST_OP_TRIGGER_IOPF, + .trigger_iopf = { + .dev_id = device_id, + .pasid = 0x1, + .grpid = 0x2, + .perm = IOMMU_PGFAULT_PERM_READ | IOMMU_PGFAULT_PERM_WRITE, + .addr = 0xdeadbeaf, + }, + }; + struct iommu_hwpt_page_response response = { + .code = IOMMUFD_PAGE_RESP_SUCCESS, + }; + struct iommu_hwpt_pgfault fault = {}; + ssize_t bytes; + int ret; + + ret = ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_TRIGGER_IOPF), &trigger_iopf_cmd); + if (ret) + return ret; + + bytes = read(fault_fd, &fault, sizeof(fault)); + if (bytes <= 0) + return -EIO; + + response.cookie = fault.cookie; + + bytes = write(fault_fd, &response, sizeof(response)); + if (bytes <= 0) + return -EIO; + + return 0; +} -#define test_err_get_hw_info(_errno, device_id, data, data_len) \ - EXPECT_ERRNO(_errno, \ - _test_cmd_get_hw_info(self->fd, device_id, \ - data, data_len)) +#define test_cmd_trigger_iopf(device_id, fault_fd) \ + ASSERT_EQ(0, _test_cmd_trigger_iopf(self->fd, device_id, fault_fd))
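For reference, the sketch below is not part of the patch; it shows the user-space side of the fault queue uAPI documented above, mirroring what the selftest helper _test_cmd_trigger_iopf() exercises: read one struct iommu_hwpt_pgfault record from the fault fd and echo its cookie back in a struct iommu_hwpt_page_response. It assumes the fd came from IOMMU_FAULT_QUEUE_ALLOC and that the faulting hwpt was allocated with IOMMU_HWPT_FAULT_ID_VALID; handle_one_fault() is an illustrative name, not something added by this series.

/* Hedged usage sketch only -- assumes the <linux/iommufd.h> additions above. */
#include <errno.h>
#include <unistd.h>
#include <linux/iommufd.h>

static int handle_one_fault(int fault_fd)
{
	struct iommu_hwpt_pgfault fault = {};
	struct iommu_hwpt_page_response resp = {
		.code = IOMMUFD_PAGE_RESP_SUCCESS,
	};
	ssize_t bytes;

	/* Each read() returns whole struct iommu_hwpt_pgfault records */
	bytes = read(fault_fd, &fault, sizeof(fault));
	if (bytes != (ssize_t)sizeof(fault))
		return -EIO;

	/*
	 * Resolve fault.addr/fault.perm for fault.dev_id here; report
	 * IOMMUFD_PAGE_RESP_INVALID instead if the fault cannot be handled.
	 */

	/* The cookie of the last fault in the group must be echoed back */
	resp.cookie = fault.cookie;
	bytes = write(fault_fd, &resp, sizeof(resp));
	if (bytes != (ssize_t)sizeof(resp))
		return -EIO;
	return 0;
}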
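Similarly, a minimal sketch of the dirty tracking flow that the iommufd_dirty_tracking selftests above drive, assuming hwpt_id came from IOMMU_HWPT_ALLOC with IOMMU_HWPT_ALLOC_DIRTY_TRACKING and that bitmap provides one bit per page_size unit of the [iova, iova + length) range; read_dirty_bitmap() is an illustrative name.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/iommufd.h>

static int read_dirty_bitmap(int iommufd, uint32_t hwpt_id, uint64_t iova,
			     uint64_t length, uint64_t page_size,
			     uint64_t *bitmap)
{
	struct iommu_hwpt_set_dirty_tracking set = {
		.size = sizeof(set),
		.flags = IOMMU_HWPT_DIRTY_TRACKING_ENABLE,
		.hwpt_id = hwpt_id,
	};
	struct iommu_hwpt_get_dirty_bitmap get = {
		.size = sizeof(get),
		.hwpt_id = hwpt_id,
		.iova = iova,
		.length = length,
		.page_size = page_size,
		.data = (uintptr_t)bitmap,
	};

	/* Enable tracking, then pull (and clear) the accumulated dirty bits */
	if (ioctl(iommufd, IOMMU_HWPT_SET_DIRTY_TRACKING, &set))
		return -1;
	return ioctl(iommufd, IOMMU_HWPT_GET_DIRTY_BITMAP, &get);
}

Passing IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR in get.flags would read the bits without clearing them, which is what the get_dirty_bitmap_no_clear selftest variant checks.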