Diffstat (limited to 'include')
140 files changed, 3278 insertions, 800 deletions
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index 0dc1ea0b52f5..48f0fd499274 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -365,8 +365,6 @@ struct acpi_device { acpi_handle handle; /* no handle for fixed hardware */ struct fwnode_handle fwnode; struct acpi_device *parent; - struct list_head children; - struct list_head node; struct list_head wakeup_list; struct list_head del_list; struct acpi_device_status status; @@ -379,7 +377,6 @@ struct acpi_device { struct acpi_device_data data; struct acpi_scan_handler *handler; struct acpi_hotplug_context *hp; - struct acpi_driver *driver; const struct acpi_gpio_mapping *driver_gpios; void *driver_data; struct device dev; @@ -483,6 +480,9 @@ extern struct bus_type acpi_bus_type; int acpi_bus_for_each_dev(int (*fn)(struct device *, void *), void *data); int acpi_dev_for_each_child(struct acpi_device *adev, int (*fn)(struct acpi_device *, void *), void *data); +int acpi_dev_for_each_child_reverse(struct acpi_device *adev, + int (*fn)(struct acpi_device *, void *), + void *data); /* * Events @@ -521,6 +521,7 @@ const char *acpi_power_state_string(int state); int acpi_device_set_power(struct acpi_device *device, int state); int acpi_bus_init_power(struct acpi_device *device); int acpi_device_fix_up_power(struct acpi_device *device); +void acpi_device_fix_up_power_extended(struct acpi_device *adev); int acpi_bus_update_power(acpi_handle handle, int *state_p); int acpi_device_update_power(struct acpi_device *device, int *state_p); bool acpi_bus_power_manageable(acpi_handle handle); @@ -622,6 +623,8 @@ static inline int acpi_dma_configure(struct device *dev, } struct acpi_device *acpi_find_child_device(struct acpi_device *parent, u64 address, bool check_children); +struct acpi_device *acpi_find_child_by_adr(struct acpi_device *adev, + acpi_bus_address adr); int acpi_is_root_bridge(acpi_handle); struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle); diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h index c6108581d97d..f73d357ecdf5 100644 --- a/include/acpi/cppc_acpi.h +++ b/include/acpi/cppc_acpi.h @@ -17,7 +17,7 @@ #include <acpi/pcc.h> #include <acpi/processor.h> -/* Support CPPCv2 and CPPCv3 */ +/* CPPCv2 and CPPCv3 support */ #define CPPC_V2_REV 2 #define CPPC_V3_REV 3 #define CPPC_V2_NUM_ENT 21 @@ -145,6 +145,7 @@ extern bool cppc_allow_fast_switch(void); extern int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data); extern unsigned int cppc_get_transition_latency(int cpu); extern bool cpc_ffh_supported(void); +extern bool cpc_supported_by_cpu(void); extern int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val); extern int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val); #else /* !CONFIG_ACPI_CPPC_LIB */ diff --git a/include/acpi/processor.h b/include/acpi/processor.h index 194027371928..9fa49686957a 100644 --- a/include/acpi/processor.h +++ b/include/acpi/processor.h @@ -441,9 +441,12 @@ static inline int acpi_processor_hotplug(struct acpi_processor *pr) #endif /* CONFIG_ACPI_PROCESSOR_IDLE */ /* in processor_thermal.c */ -int acpi_processor_get_limit_info(struct acpi_processor *pr); +int acpi_processor_thermal_init(struct acpi_processor *pr, + struct acpi_device *device); +void acpi_processor_thermal_exit(struct acpi_processor *pr, + struct acpi_device *device); extern const struct thermal_cooling_device_ops processor_cooling_ops; -#if defined(CONFIG_ACPI_CPU_FREQ_PSS) & defined(CONFIG_CPU_FREQ) +#ifdef CONFIG_CPU_FREQ void acpi_thermal_cpufreq_init(struct 
cpufreq_policy *policy); void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy); #else @@ -455,6 +458,6 @@ static inline void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy) { return; } -#endif /* CONFIG_ACPI_CPU_FREQ_PSS */ +#endif /* CONFIG_CPU_FREQ */ #endif diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h index fd7e8fbaeef1..961f4d88f9ef 100644 --- a/include/asm-generic/barrier.h +++ b/include/asm-generic/barrier.h @@ -38,6 +38,10 @@ #define wmb() do { kcsan_wmb(); __wmb(); } while (0) #endif +#ifdef __dma_mb +#define dma_mb() do { kcsan_mb(); __dma_mb(); } while (0) +#endif + #ifdef __dma_rmb #define dma_rmb() do { kcsan_rmb(); __dma_rmb(); } while (0) #endif @@ -65,6 +69,10 @@ #define wmb() mb() #endif +#ifndef dma_mb +#define dma_mb() mb() +#endif + #ifndef dma_rmb #define dma_rmb() rmb() #endif diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h index 7ce93aaf69f8..72974cb81343 100644 --- a/include/asm-generic/io.h +++ b/include/asm-generic/io.h @@ -964,7 +964,34 @@ static inline void iounmap(volatile void __iomem *addr) #elif defined(CONFIG_GENERIC_IOREMAP) #include <linux/pgtable.h> -void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot); +/* + * Arch code can implement the following two hooks when using GENERIC_IOREMAP + * ioremap_allowed() return a bool, + * - true means continue to remap + * - false means skip remap and return directly + * iounmap_allowed() return a bool, + * - true means continue to vunmap + * - false means skip vunmap and return directly + */ +#ifndef ioremap_allowed +#define ioremap_allowed ioremap_allowed +static inline bool ioremap_allowed(phys_addr_t phys_addr, size_t size, + unsigned long prot) +{ + return true; +} +#endif + +#ifndef iounmap_allowed +#define iounmap_allowed iounmap_allowed +static inline bool iounmap_allowed(void *addr) +{ + return true; +} +#endif + +void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size, + unsigned long prot); void iounmap(volatile void __iomem *addr); static inline void __iomem *ioremap(phys_addr_t addr, size_t size) @@ -1125,9 +1152,7 @@ static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer, } #endif -#ifndef CONFIG_GENERIC_DEVMEM_IS_ALLOWED extern int devmem_is_allowed(unsigned long pfn); -#endif #endif /* __KERNEL__ */ diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index ff3e82553a76..492dce43236e 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -158,9 +158,24 @@ * Useful if your architecture doesn't use IPIs for remote TLB invalidates * and therefore doesn't naturally serialize with software page-table walkers. * + * MMU_GATHER_NO_FLUSH_CACHE + * + * Indicates the architecture has flush_cache_range() but it needs *NOT* be called + * before unmapping a VMA. + * + * NOTE: strictly speaking we shouldn't have this knob and instead rely on + * flush_cache_range() being a NOP, except Sparc64 seems to be + * different here. + * + * MMU_GATHER_MERGE_VMAS + * + * Indicates the architecture wants to merge ranges over VMAs; typical when + * multiple range invalidates are more expensive than a full invalidate. + * * MMU_GATHER_NO_RANGE * - * Use this if your architecture lacks an efficient flush_tlb_range(). + * Use this if your architecture lacks an efficient flush_tlb_range(). This + * option implies MMU_GATHER_MERGE_VMAS above. 
* * MMU_GATHER_NO_GATHER * @@ -288,6 +303,7 @@ struct mmu_gather { */ unsigned int vma_exec : 1; unsigned int vma_huge : 1; + unsigned int vma_pfn : 1; unsigned int batch_count; @@ -334,8 +350,8 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb) #ifdef CONFIG_MMU_GATHER_NO_RANGE -#if defined(tlb_flush) || defined(tlb_start_vma) || defined(tlb_end_vma) -#error MMU_GATHER_NO_RANGE relies on default tlb_flush(), tlb_start_vma() and tlb_end_vma() +#if defined(tlb_flush) +#error MMU_GATHER_NO_RANGE relies on default tlb_flush() #endif /* @@ -352,20 +368,9 @@ static inline void tlb_flush(struct mmu_gather *tlb) flush_tlb_mm(tlb->mm); } -static inline void -tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { } - -#define tlb_end_vma tlb_end_vma -static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { } - #else /* CONFIG_MMU_GATHER_NO_RANGE */ #ifndef tlb_flush - -#if defined(tlb_start_vma) || defined(tlb_end_vma) -#error Default tlb_flush() relies on default tlb_start_vma() and tlb_end_vma() -#endif - /* * When an architecture does not provide its own tlb_flush() implementation * but does have a reasonably efficient flush_vma_range() implementation @@ -385,6 +390,9 @@ static inline void tlb_flush(struct mmu_gather *tlb) flush_tlb_range(&vma, tlb->start, tlb->end); } } +#endif + +#endif /* CONFIG_MMU_GATHER_NO_RANGE */ static inline void tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) @@ -402,17 +410,9 @@ tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) */ tlb->vma_huge = is_vm_hugetlb_page(vma); tlb->vma_exec = !!(vma->vm_flags & VM_EXEC); + tlb->vma_pfn = !!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)); } -#else - -static inline void -tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { } - -#endif - -#endif /* CONFIG_MMU_GATHER_NO_RANGE */ - static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb) { /* @@ -486,32 +486,36 @@ static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb) * case where we're doing a full MM flush. When we're doing a munmap, * the vmas are adjusted to only cover the region to be torn down. */ -#ifndef tlb_start_vma static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { if (tlb->fullmm) return; tlb_update_vma_flags(tlb, vma); +#ifndef CONFIG_MMU_GATHER_NO_FLUSH_CACHE flush_cache_range(vma, vma->vm_start, vma->vm_end); -} #endif +} -#ifndef tlb_end_vma static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { if (tlb->fullmm) return; /* - * Do a TLB flush and reset the range at VMA boundaries; this avoids - * the ranges growing with the unused space between consecutive VMAs, - * but also the mmu_gather::vma_* flags from tlb_start_vma() rely on - * this. + * VM_PFNMAP is more fragile because the core mm will not track the + * page mapcount -- there might not be page-frames for these PFNs after + * all. Force flush TLBs for such ranges to avoid munmap() vs + * unmap_mapping_range() races. */ - tlb_flush_mmu_tlbonly(tlb); + if (tlb->vma_pfn || !IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS)) { + /* + * Do a TLB flush and reset the range at VMA boundaries; this avoids + * the ranges growing with the unused space between consecutive VMAs. 
+ */ + tlb_flush_mmu_tlbonly(tlb); + } } -#endif /* * tlb_flush_{pte|pmd|pud|p4d}_range() adjust the tlb->start and tlb->end, diff --git a/include/clocksource/timer-ti-dm.h b/include/clocksource/timer-ti-dm.h index f6da8a132639..b0f80cfd2a26 100644 --- a/include/clocksource/timer-ti-dm.h +++ b/include/clocksource/timer-ti-dm.h @@ -247,148 +247,4 @@ int omap_dm_timers_active(void); #define OMAP_TIMER_TICK_INT_MASK_COUNT_REG \ (_OMAP_TIMER_TICK_INT_MASK_COUNT_OFFSET | (WP_TOWR << WPSHIFT)) -/* - * The below are inlined to optimize code size for system timers. Other code - * should not need these at all. - */ -#if defined(CONFIG_ARCH_OMAP1) || defined(CONFIG_ARCH_OMAP2PLUS) -static inline u32 __omap_dm_timer_read(struct omap_dm_timer *timer, u32 reg, - int posted) -{ - if (posted) - while (readl_relaxed(timer->pend) & (reg >> WPSHIFT)) - cpu_relax(); - - return readl_relaxed(timer->func_base + (reg & 0xff)); -} - -static inline void __omap_dm_timer_write(struct omap_dm_timer *timer, - u32 reg, u32 val, int posted) -{ - if (posted) - while (readl_relaxed(timer->pend) & (reg >> WPSHIFT)) - cpu_relax(); - - writel_relaxed(val, timer->func_base + (reg & 0xff)); -} - -static inline void __omap_dm_timer_init_regs(struct omap_dm_timer *timer) -{ - u32 tidr; - - /* Assume v1 ip if bits [31:16] are zero */ - tidr = readl_relaxed(timer->io_base); - if (!(tidr >> 16)) { - timer->revision = 1; - timer->irq_stat = timer->io_base + OMAP_TIMER_V1_STAT_OFFSET; - timer->irq_ena = timer->io_base + OMAP_TIMER_V1_INT_EN_OFFSET; - timer->irq_dis = timer->io_base + OMAP_TIMER_V1_INT_EN_OFFSET; - timer->pend = timer->io_base + _OMAP_TIMER_WRITE_PEND_OFFSET; - timer->func_base = timer->io_base; - } else { - timer->revision = 2; - timer->irq_stat = timer->io_base + OMAP_TIMER_V2_IRQSTATUS; - timer->irq_ena = timer->io_base + OMAP_TIMER_V2_IRQENABLE_SET; - timer->irq_dis = timer->io_base + OMAP_TIMER_V2_IRQENABLE_CLR; - timer->pend = timer->io_base + - _OMAP_TIMER_WRITE_PEND_OFFSET + - OMAP_TIMER_V2_FUNC_OFFSET; - timer->func_base = timer->io_base + OMAP_TIMER_V2_FUNC_OFFSET; - } -} - -/* - * __omap_dm_timer_enable_posted - enables write posted mode - * @timer: pointer to timer instance handle - * - * Enables the write posted mode for the timer. When posted mode is enabled - * writes to certain timer registers are immediately acknowledged by the - * internal bus and hence prevents stalling the CPU waiting for the write to - * complete. Enabling this feature can improve performance for writing to the - * timer registers. - */ -static inline void __omap_dm_timer_enable_posted(struct omap_dm_timer *timer) -{ - if (timer->posted) - return; - - if (timer->errata & OMAP_TIMER_ERRATA_I103_I767) { - timer->posted = OMAP_TIMER_NONPOSTED; - __omap_dm_timer_write(timer, OMAP_TIMER_IF_CTRL_REG, 0, 0); - return; - } - - __omap_dm_timer_write(timer, OMAP_TIMER_IF_CTRL_REG, - OMAP_TIMER_CTRL_POSTED, 0); - timer->context.tsicr = OMAP_TIMER_CTRL_POSTED; - timer->posted = OMAP_TIMER_POSTED; -} - -/** - * __omap_dm_timer_override_errata - override errata flags for a timer - * @timer: pointer to timer handle - * @errata: errata flags to be ignored - * - * For a given timer, override a timer errata by clearing the flags - * specified by the errata argument. A specific erratum should only be - * overridden for a timer if the timer is used in such a way the erratum - * has no impact. 
- */ -static inline void __omap_dm_timer_override_errata(struct omap_dm_timer *timer, - u32 errata) -{ - timer->errata &= ~errata; -} - -static inline void __omap_dm_timer_stop(struct omap_dm_timer *timer, - int posted, unsigned long rate) -{ - u32 l; - - l = __omap_dm_timer_read(timer, OMAP_TIMER_CTRL_REG, posted); - if (l & OMAP_TIMER_CTRL_ST) { - l &= ~0x1; - __omap_dm_timer_write(timer, OMAP_TIMER_CTRL_REG, l, posted); -#ifdef CONFIG_ARCH_OMAP2PLUS - /* Readback to make sure write has completed */ - __omap_dm_timer_read(timer, OMAP_TIMER_CTRL_REG, posted); - /* - * Wait for functional clock period x 3.5 to make sure that - * timer is stopped - */ - udelay(3500000 / rate + 1); -#endif - } - - /* Ack possibly pending interrupt */ - writel_relaxed(OMAP_TIMER_INT_OVERFLOW, timer->irq_stat); -} - -static inline void __omap_dm_timer_load_start(struct omap_dm_timer *timer, - u32 ctrl, unsigned int load, - int posted) -{ - __omap_dm_timer_write(timer, OMAP_TIMER_COUNTER_REG, load, posted); - __omap_dm_timer_write(timer, OMAP_TIMER_CTRL_REG, ctrl, posted); -} - -static inline void __omap_dm_timer_int_enable(struct omap_dm_timer *timer, - unsigned int value) -{ - writel_relaxed(value, timer->irq_ena); - __omap_dm_timer_write(timer, OMAP_TIMER_WAKEUP_EN_REG, value, 0); -} - -static inline unsigned int -__omap_dm_timer_read_counter(struct omap_dm_timer *timer, int posted) -{ - return __omap_dm_timer_read(timer, OMAP_TIMER_COUNTER_REG, posted); -} - -static inline void __omap_dm_timer_write_status(struct omap_dm_timer *timer, - unsigned int value) -{ - writel_relaxed(value, timer->irq_stat); -} -#endif /* CONFIG_ARCH_OMAP1 || CONFIG_ARCH_OMAP2PLUS */ #endif /* __CLOCKSOURCE_DMTIMER_H */ diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h index 0fca8f38bee4..addb135eeea6 100644 --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h @@ -28,7 +28,7 @@ #include <linux/dma-fence.h> #include <linux/completion.h> #include <linux/xarray.h> -#include <linux/irq_work.h> +#include <linux/workqueue.h> #define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000) @@ -295,7 +295,7 @@ struct drm_sched_job { */ union { struct dma_fence_cb finish_cb; - struct irq_work work; + struct work_struct work; }; uint64_t id; diff --git a/include/dt-bindings/clock/exynos7885.h b/include/dt-bindings/clock/exynos7885.h index 1f8701691d62..8256e7430b63 100644 --- a/include/dt-bindings/clock/exynos7885.h +++ b/include/dt-bindings/clock/exynos7885.h @@ -54,17 +54,39 @@ #define CLK_GOUT_PERI_USI0 43 #define CLK_GOUT_PERI_USI1 44 #define CLK_GOUT_PERI_USI2 45 -#define TOP_NR_CLK 46 +#define CLK_MOUT_FSYS_BUS 46 +#define CLK_MOUT_FSYS_MMC_CARD 47 +#define CLK_MOUT_FSYS_MMC_EMBD 48 +#define CLK_MOUT_FSYS_MMC_SDIO 49 +#define CLK_MOUT_FSYS_USB30DRD 50 +#define CLK_DOUT_FSYS_BUS 51 +#define CLK_DOUT_FSYS_MMC_CARD 52 +#define CLK_DOUT_FSYS_MMC_EMBD 53 +#define CLK_DOUT_FSYS_MMC_SDIO 54 +#define CLK_DOUT_FSYS_USB30DRD 55 +#define CLK_GOUT_FSYS_BUS 56 +#define CLK_GOUT_FSYS_MMC_CARD 57 +#define CLK_GOUT_FSYS_MMC_EMBD 58 +#define CLK_GOUT_FSYS_MMC_SDIO 59 +#define CLK_GOUT_FSYS_USB30DRD 60 +#define TOP_NR_CLK 61 /* CMU_CORE */ -#define CLK_MOUT_CORE_BUS_USER 1 -#define CLK_MOUT_CORE_CCI_USER 2 -#define CLK_MOUT_CORE_G3D_USER 3 -#define CLK_MOUT_CORE_GIC 4 -#define CLK_DOUT_CORE_BUSP 5 -#define CLK_GOUT_CCI_ACLK 6 -#define CLK_GOUT_GIC400_CLK 7 -#define CORE_NR_CLK 8 +#define CLK_MOUT_CORE_BUS_USER 1 +#define CLK_MOUT_CORE_CCI_USER 2 +#define CLK_MOUT_CORE_G3D_USER 3 +#define CLK_MOUT_CORE_GIC 4 +#define 
CLK_DOUT_CORE_BUSP 5 +#define CLK_GOUT_CCI_ACLK 6 +#define CLK_GOUT_GIC400_CLK 7 +#define CLK_GOUT_TREX_D_CORE_ACLK 8 +#define CLK_GOUT_TREX_D_CORE_GCLK 9 +#define CLK_GOUT_TREX_D_CORE_PCLK 10 +#define CLK_GOUT_TREX_P_CORE_ACLK_P_CORE 11 +#define CLK_GOUT_TREX_P_CORE_CCLK_P_CORE 12 +#define CLK_GOUT_TREX_P_CORE_PCLK 13 +#define CLK_GOUT_TREX_P_CORE_PCLK_P_CORE 14 +#define CORE_NR_CLK 15 /* CMU_PERI */ #define CLK_MOUT_PERI_BUS_USER 1 @@ -112,4 +134,18 @@ #define CLK_GOUT_WDT1_PCLK 43 #define PERI_NR_CLK 44 +/* CMU_FSYS */ +#define CLK_MOUT_FSYS_BUS_USER 1 +#define CLK_MOUT_FSYS_MMC_CARD_USER 2 +#define CLK_MOUT_FSYS_MMC_EMBD_USER 3 +#define CLK_MOUT_FSYS_MMC_SDIO_USER 4 +#define CLK_MOUT_FSYS_USB30DRD_USER 4 +#define CLK_GOUT_MMC_CARD_ACLK 5 +#define CLK_GOUT_MMC_CARD_SDCLKIN 6 +#define CLK_GOUT_MMC_EMBD_ACLK 7 +#define CLK_GOUT_MMC_EMBD_SDCLKIN 8 +#define CLK_GOUT_MMC_SDIO_ACLK 9 +#define CLK_GOUT_MMC_SDIO_SDCLKIN 10 +#define FSYS_NR_CLK 11 + #endif /* _DT_BINDINGS_CLOCK_EXYNOS_7885_H */ diff --git a/include/dt-bindings/clock/nuvoton,npcm845-clk.h b/include/dt-bindings/clock/nuvoton,npcm845-clk.h new file mode 100644 index 000000000000..e5cce08b00e1 --- /dev/null +++ b/include/dt-bindings/clock/nuvoton,npcm845-clk.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (C) 2021 Nuvoton Technologies. + * Author: Tomer Maimon <tomer.maimon@nuvoton.com> + * + * Device Tree binding constants for NPCM8XX clock controller. + */ + +#ifndef __DT_BINDINGS_CLOCK_NPCM8XX_H +#define __DT_BINDINGS_CLOCK_NPCM8XX_H + +#define NPCM8XX_CLK_CPU 0 +#define NPCM8XX_CLK_GFX_PIXEL 1 +#define NPCM8XX_CLK_MC 2 +#define NPCM8XX_CLK_ADC 3 +#define NPCM8XX_CLK_AHB 4 +#define NPCM8XX_CLK_TIMER 5 +#define NPCM8XX_CLK_UART 6 +#define NPCM8XX_CLK_UART2 7 +#define NPCM8XX_CLK_MMC 8 +#define NPCM8XX_CLK_SPI3 9 +#define NPCM8XX_CLK_PCI 10 +#define NPCM8XX_CLK_AXI 11 +#define NPCM8XX_CLK_APB4 12 +#define NPCM8XX_CLK_APB3 13 +#define NPCM8XX_CLK_APB2 14 +#define NPCM8XX_CLK_APB1 15 +#define NPCM8XX_CLK_APB5 16 +#define NPCM8XX_CLK_CLKOUT 17 +#define NPCM8XX_CLK_GFX 18 +#define NPCM8XX_CLK_SU 19 +#define NPCM8XX_CLK_SU48 20 +#define NPCM8XX_CLK_SDHC 21 +#define NPCM8XX_CLK_SPI0 22 +#define NPCM8XX_CLK_SPI1 23 +#define NPCM8XX_CLK_SPIX 24 +#define NPCM8XX_CLK_RG 25 +#define NPCM8XX_CLK_RCP 26 +#define NPCM8XX_CLK_PRE_ADC 27 +#define NPCM8XX_CLK_ATB 28 +#define NPCM8XX_CLK_PRE_CLK 29 +#define NPCM8XX_CLK_TH 30 +#define NPCM8XX_CLK_REFCLK 31 +#define NPCM8XX_CLK_SYSBYPCK 32 +#define NPCM8XX_CLK_MCBYPCK 33 + +#define NPCM8XX_NUM_CLOCKS (NPCM8XX_CLK_MCBYPCK + 1) + +#endif diff --git a/include/dt-bindings/clock/qcom,dispcc-sm8350.h b/include/dt-bindings/clock/qcom,dispcc-sm8350.h new file mode 120000 index 000000000000..0312b4544acb --- /dev/null +++ b/include/dt-bindings/clock/qcom,dispcc-sm8350.h @@ -0,0 +1 @@ +qcom,dispcc-sm8250.h
\ No newline at end of file diff --git a/include/dt-bindings/clock/qcom,gcc-ipq8074.h b/include/dt-bindings/clock/qcom,gcc-ipq8074.h index 8e2bec1c91bf..55f8322a1e50 100644 --- a/include/dt-bindings/clock/qcom,gcc-ipq8074.h +++ b/include/dt-bindings/clock/qcom,gcc-ipq8074.h @@ -367,4 +367,7 @@ #define GCC_PCIE1_AXI_MASTER_STICKY_ARES 130 #define GCC_PCIE0_AXI_SLAVE_STICKY_ARES 131 +#define USB0_GDSC 0 +#define USB1_GDSC 1 + #endif diff --git a/include/dt-bindings/clock/qcom,gpucc-sm8350.h b/include/dt-bindings/clock/qcom,gpucc-sm8350.h new file mode 100644 index 000000000000..2ca857f5bfd2 --- /dev/null +++ b/include/dt-bindings/clock/qcom,gpucc-sm8350.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2022, The Linux Foundation. All rights reserved. + */ + +#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SM8350_H +#define _DT_BINDINGS_CLK_QCOM_GPU_CC_SM8350_H + +/* GPU_CC clocks */ +#define GPU_CC_AHB_CLK 0 +#define GPU_CC_CB_CLK 1 +#define GPU_CC_CRC_AHB_CLK 2 +#define GPU_CC_CX_APB_CLK 3 +#define GPU_CC_CX_GMU_CLK 4 +#define GPU_CC_CX_QDSS_AT_CLK 5 +#define GPU_CC_CX_QDSS_TRIG_CLK 6 +#define GPU_CC_CX_QDSS_TSCTR_CLK 7 +#define GPU_CC_CX_SNOC_DVM_CLK 8 +#define GPU_CC_CXO_AON_CLK 9 +#define GPU_CC_CXO_CLK 10 +#define GPU_CC_FREQ_MEASURE_CLK 11 +#define GPU_CC_GMU_CLK_SRC 12 +#define GPU_CC_GX_GMU_CLK 13 +#define GPU_CC_GX_QDSS_TSCTR_CLK 14 +#define GPU_CC_GX_VSENSE_CLK 15 +#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 16 +#define GPU_CC_HUB_AHB_DIV_CLK_SRC 17 +#define GPU_CC_HUB_AON_CLK 18 +#define GPU_CC_HUB_CLK_SRC 19 +#define GPU_CC_HUB_CX_INT_CLK 20 +#define GPU_CC_HUB_CX_INT_DIV_CLK_SRC 21 +#define GPU_CC_MND1X_0_GFX3D_CLK 22 +#define GPU_CC_MND1X_1_GFX3D_CLK 23 +#define GPU_CC_PLL0 24 +#define GPU_CC_PLL1 25 +#define GPU_CC_SLEEP_CLK 26 + +/* GPU_CC resets */ +#define GPUCC_GPU_CC_ACD_BCR 0 +#define GPUCC_GPU_CC_CB_BCR 1 +#define GPUCC_GPU_CC_CX_BCR 2 +#define GPUCC_GPU_CC_FAST_HUB_BCR 3 +#define GPUCC_GPU_CC_GFX3D_AON_BCR 4 +#define GPUCC_GPU_CC_GMU_BCR 5 +#define GPUCC_GPU_CC_GX_BCR 6 +#define GPUCC_GPU_CC_XO_BCR 7 + +/* GPU_CC GDSCRs */ +#define GPU_CX_GDSC 0 +#define GPU_GX_GDSC 1 + +#endif diff --git a/include/dt-bindings/clock/qcom,sm8450-camcc.h b/include/dt-bindings/clock/qcom,sm8450-camcc.h new file mode 100644 index 000000000000..7ff67acf301a --- /dev/null +++ b/include/dt-bindings/clock/qcom,sm8450-camcc.h @@ -0,0 +1,159 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DT_BINDINGS_CLK_QCOM_CAM_CC_SM8450_H +#define _DT_BINDINGS_CLK_QCOM_CAM_CC_SM8450_H + +/* CAM_CC clocks */ +#define CAM_CC_BPS_AHB_CLK 0 +#define CAM_CC_BPS_CLK 1 +#define CAM_CC_BPS_CLK_SRC 2 +#define CAM_CC_BPS_FAST_AHB_CLK 3 +#define CAM_CC_CAMNOC_AXI_CLK 4 +#define CAM_CC_CAMNOC_AXI_CLK_SRC 5 +#define CAM_CC_CAMNOC_DCD_XO_CLK 6 +#define CAM_CC_CCI_0_CLK 7 +#define CAM_CC_CCI_0_CLK_SRC 8 +#define CAM_CC_CCI_1_CLK 9 +#define CAM_CC_CCI_1_CLK_SRC 10 +#define CAM_CC_CORE_AHB_CLK 11 +#define CAM_CC_CPAS_AHB_CLK 12 +#define CAM_CC_CPAS_BPS_CLK 13 +#define CAM_CC_CPAS_FAST_AHB_CLK 14 +#define CAM_CC_CPAS_IFE_0_CLK 15 +#define CAM_CC_CPAS_IFE_1_CLK 16 +#define CAM_CC_CPAS_IFE_2_CLK 17 +#define CAM_CC_CPAS_IFE_LITE_CLK 18 +#define CAM_CC_CPAS_IPE_NPS_CLK 19 +#define CAM_CC_CPAS_SBI_CLK 20 +#define CAM_CC_CPAS_SFE_0_CLK 21 +#define CAM_CC_CPAS_SFE_1_CLK 22 +#define CAM_CC_CPHY_RX_CLK_SRC 23 +#define CAM_CC_CSI0PHYTIMER_CLK 24 +#define CAM_CC_CSI0PHYTIMER_CLK_SRC 25 +#define CAM_CC_CSI1PHYTIMER_CLK 26 +#define CAM_CC_CSI1PHYTIMER_CLK_SRC 27 +#define CAM_CC_CSI2PHYTIMER_CLK 28 +#define CAM_CC_CSI2PHYTIMER_CLK_SRC 29 +#define CAM_CC_CSI3PHYTIMER_CLK 30 +#define CAM_CC_CSI3PHYTIMER_CLK_SRC 31 +#define CAM_CC_CSI4PHYTIMER_CLK 32 +#define CAM_CC_CSI4PHYTIMER_CLK_SRC 33 +#define CAM_CC_CSI5PHYTIMER_CLK 34 +#define CAM_CC_CSI5PHYTIMER_CLK_SRC 35 +#define CAM_CC_CSID_CLK 36 +#define CAM_CC_CSID_CLK_SRC 37 +#define CAM_CC_CSID_CSIPHY_RX_CLK 38 +#define CAM_CC_CSIPHY0_CLK 39 +#define CAM_CC_CSIPHY1_CLK 40 +#define CAM_CC_CSIPHY2_CLK 41 +#define CAM_CC_CSIPHY3_CLK 42 +#define CAM_CC_CSIPHY4_CLK 43 +#define CAM_CC_CSIPHY5_CLK 44 +#define CAM_CC_FAST_AHB_CLK_SRC 45 +#define CAM_CC_GDSC_CLK 46 +#define CAM_CC_ICP_AHB_CLK 47 +#define CAM_CC_ICP_CLK 48 +#define CAM_CC_ICP_CLK_SRC 49 +#define CAM_CC_IFE_0_CLK 50 +#define CAM_CC_IFE_0_CLK_SRC 51 +#define CAM_CC_IFE_0_DSP_CLK 52 +#define CAM_CC_IFE_0_FAST_AHB_CLK 53 +#define CAM_CC_IFE_1_CLK 54 +#define CAM_CC_IFE_1_CLK_SRC 55 +#define CAM_CC_IFE_1_DSP_CLK 56 +#define CAM_CC_IFE_1_FAST_AHB_CLK 57 +#define CAM_CC_IFE_2_CLK 58 +#define CAM_CC_IFE_2_CLK_SRC 59 +#define CAM_CC_IFE_2_DSP_CLK 60 +#define CAM_CC_IFE_2_FAST_AHB_CLK 61 +#define CAM_CC_IFE_LITE_AHB_CLK 62 +#define CAM_CC_IFE_LITE_CLK 63 +#define CAM_CC_IFE_LITE_CLK_SRC 64 +#define CAM_CC_IFE_LITE_CPHY_RX_CLK 65 +#define CAM_CC_IFE_LITE_CSID_CLK 66 +#define CAM_CC_IFE_LITE_CSID_CLK_SRC 67 +#define CAM_CC_IPE_NPS_AHB_CLK 68 +#define CAM_CC_IPE_NPS_CLK 69 +#define CAM_CC_IPE_NPS_CLK_SRC 70 +#define CAM_CC_IPE_NPS_FAST_AHB_CLK 71 +#define CAM_CC_IPE_PPS_CLK 72 +#define CAM_CC_IPE_PPS_FAST_AHB_CLK 73 +#define CAM_CC_JPEG_CLK 74 +#define CAM_CC_JPEG_CLK_SRC 75 +#define CAM_CC_MCLK0_CLK 76 +#define CAM_CC_MCLK0_CLK_SRC 77 +#define CAM_CC_MCLK1_CLK 78 +#define CAM_CC_MCLK1_CLK_SRC 79 +#define CAM_CC_MCLK2_CLK 80 +#define CAM_CC_MCLK2_CLK_SRC 81 +#define CAM_CC_MCLK3_CLK 82 +#define CAM_CC_MCLK3_CLK_SRC 83 +#define CAM_CC_MCLK4_CLK 84 +#define CAM_CC_MCLK4_CLK_SRC 85 +#define CAM_CC_MCLK5_CLK 86 +#define CAM_CC_MCLK5_CLK_SRC 87 +#define CAM_CC_MCLK6_CLK 88 +#define CAM_CC_MCLK6_CLK_SRC 89 +#define CAM_CC_MCLK7_CLK 90 +#define CAM_CC_MCLK7_CLK_SRC 91 +#define CAM_CC_PLL0 92 +#define CAM_CC_PLL0_OUT_EVEN 93 +#define CAM_CC_PLL0_OUT_ODD 94 +#define CAM_CC_PLL1 95 +#define CAM_CC_PLL1_OUT_EVEN 96 +#define CAM_CC_PLL2 97 +#define CAM_CC_PLL3 98 +#define CAM_CC_PLL3_OUT_EVEN 99 +#define CAM_CC_PLL4 100 +#define CAM_CC_PLL4_OUT_EVEN 101 +#define CAM_CC_PLL5 102 +#define CAM_CC_PLL5_OUT_EVEN 103 +#define 
CAM_CC_PLL6 104 +#define CAM_CC_PLL6_OUT_EVEN 105 +#define CAM_CC_PLL7 106 +#define CAM_CC_PLL7_OUT_EVEN 107 +#define CAM_CC_PLL8 108 +#define CAM_CC_PLL8_OUT_EVEN 109 +#define CAM_CC_QDSS_DEBUG_CLK 110 +#define CAM_CC_QDSS_DEBUG_CLK_SRC 111 +#define CAM_CC_QDSS_DEBUG_XO_CLK 112 +#define CAM_CC_SBI_AHB_CLK 113 +#define CAM_CC_SBI_CLK 114 +#define CAM_CC_SFE_0_CLK 115 +#define CAM_CC_SFE_0_CLK_SRC 116 +#define CAM_CC_SFE_0_FAST_AHB_CLK 117 +#define CAM_CC_SFE_1_CLK 118 +#define CAM_CC_SFE_1_CLK_SRC 119 +#define CAM_CC_SFE_1_FAST_AHB_CLK 120 +#define CAM_CC_SLEEP_CLK 121 +#define CAM_CC_SLEEP_CLK_SRC 122 +#define CAM_CC_SLOW_AHB_CLK_SRC 123 +#define CAM_CC_XO_CLK_SRC 124 + +/* CAM_CC resets */ +#define CAM_CC_BPS_BCR 0 +#define CAM_CC_ICP_BCR 1 +#define CAM_CC_IFE_0_BCR 2 +#define CAM_CC_IFE_1_BCR 3 +#define CAM_CC_IFE_2_BCR 4 +#define CAM_CC_IPE_0_BCR 5 +#define CAM_CC_QDSS_DEBUG_BCR 6 +#define CAM_CC_SBI_BCR 7 +#define CAM_CC_SFE_0_BCR 8 +#define CAM_CC_SFE_1_BCR 9 + +/* CAM_CC GDSCRs */ +#define BPS_GDSC 0 +#define IPE_0_GDSC 1 +#define SBI_GDSC 2 +#define IFE_0_GDSC 3 +#define IFE_1_GDSC 4 +#define IFE_2_GDSC 5 +#define SFE_0_GDSC 6 +#define SFE_1_GDSC 7 +#define TITAN_TOP_GDSC 8 + +#endif diff --git a/include/dt-bindings/clock/sunplus,sp7021-clkc.h b/include/dt-bindings/clock/sunplus,sp7021-clkc.h new file mode 100644 index 000000000000..cd84321eb2b5 --- /dev/null +++ b/include/dt-bindings/clock/sunplus,sp7021-clkc.h @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (C) Sunplus Technology Co., Ltd. + * All rights reserved. + */ +#ifndef _DT_BINDINGS_CLOCK_SUNPLUS_SP7021_H +#define _DT_BINDINGS_CLOCK_SUNPLUS_SP7021_H + +/* gates */ +#define CLK_RTC 0 +#define CLK_OTPRX 1 +#define CLK_NOC 2 +#define CLK_BR 3 +#define CLK_SPIFL 4 +#define CLK_PERI0 5 +#define CLK_PERI1 6 +#define CLK_STC0 7 +#define CLK_STC_AV0 8 +#define CLK_STC_AV1 9 +#define CLK_STC_AV2 10 +#define CLK_UA0 11 +#define CLK_UA1 12 +#define CLK_UA2 13 +#define CLK_UA3 14 +#define CLK_UA4 15 +#define CLK_HWUA 16 +#define CLK_DDC0 17 +#define CLK_UADMA 18 +#define CLK_CBDMA0 19 +#define CLK_CBDMA1 20 +#define CLK_SPI_COMBO_0 21 +#define CLK_SPI_COMBO_1 22 +#define CLK_SPI_COMBO_2 23 +#define CLK_SPI_COMBO_3 24 +#define CLK_AUD 25 +#define CLK_USBC0 26 +#define CLK_USBC1 27 +#define CLK_UPHY0 28 +#define CLK_UPHY1 29 +#define CLK_I2CM0 30 +#define CLK_I2CM1 31 +#define CLK_I2CM2 32 +#define CLK_I2CM3 33 +#define CLK_PMC 34 +#define CLK_CARD_CTL0 35 +#define CLK_CARD_CTL1 36 +#define CLK_CARD_CTL4 37 +#define CLK_BCH 38 +#define CLK_DDFCH 39 +#define CLK_CSIIW0 40 +#define CLK_CSIIW1 41 +#define CLK_MIPICSI0 42 +#define CLK_MIPICSI1 43 +#define CLK_HDMI_TX 44 +#define CLK_VPOST 45 +#define CLK_TGEN 46 +#define CLK_DMIX 47 +#define CLK_TCON 48 +#define CLK_GPIO 49 +#define CLK_MAILBOX 50 +#define CLK_SPIND 51 +#define CLK_I2C2CBUS 52 +#define CLK_SEC 53 +#define CLK_DVE 54 +#define CLK_GPOST0 55 +#define CLK_OSD0 56 +#define CLK_DISP_PWM 57 +#define CLK_UADBG 58 +#define CLK_FIO_CTL 59 +#define CLK_FPGA 60 +#define CLK_L2SW 61 +#define CLK_ICM 62 +#define CLK_AXI_GLOBAL 63 + +/* plls */ +#define PLL_A 64 +#define PLL_E 65 +#define PLL_E_2P5 66 +#define PLL_E_25 67 +#define PLL_E_112P5 68 +#define PLL_F 69 +#define PLL_TV 70 +#define PLL_TV_A 71 +#define PLL_SYS 72 + +#define CLK_MAX 73 + +#endif diff --git a/include/dt-bindings/clock/tegra234-clock.h b/include/dt-bindings/clock/tegra234-clock.h index bd4c3086a2da..173364a93381 100644 --- 
a/include/dt-bindings/clock/tegra234-clock.h +++ b/include/dt-bindings/clock/tegra234-clock.h @@ -38,6 +38,8 @@ * throughput and memory controller power. */ #define TEGRA234_CLK_EMC 31U +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_HOST1X */ +#define TEGRA234_CLK_HOST1X 46U /** @brief output of gate CLK_ENB_FUSE */ #define TEGRA234_CLK_FUSE 40U /** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C1 */ @@ -132,6 +134,8 @@ #define TEGRA234_CLK_UARTA 155U /** @brief output of gate CLK_ENB_PEX1_CORE_6 */ #define TEGRA234_CLK_PEX1_C6_CORE 161U +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_VIC */ +#define TEGRA234_CLK_VIC 167U /** @brief output of gate CLK_ENB_PEX2_CORE_7 */ #define TEGRA234_CLK_PEX2_C7_CORE 171U /** @brief output of gate CLK_ENB_PEX2_CORE_8 */ @@ -164,10 +168,111 @@ #define TEGRA234_CLK_PEX1_C5_CORE 225U /** @brief PLL controlled by CLK_RST_CONTROLLER_PLLC4_BASE */ #define TEGRA234_CLK_PLLC4 237U +/** @brief RX clock recovered from MGBE0 lane input */ +#define TEGRA234_CLK_MGBE0_RX_INPUT 248U +/** @brief RX clock recovered from MGBE1 lane input */ +#define TEGRA234_CLK_MGBE1_RX_INPUT 249U +/** @brief RX clock recovered from MGBE2 lane input */ +#define TEGRA234_CLK_MGBE2_RX_INPUT 250U +/** @brief RX clock recovered from MGBE3 lane input */ +#define TEGRA234_CLK_MGBE3_RX_INPUT 251U /** @brief 32K input clock provided by PMIC */ #define TEGRA234_CLK_CLK_32K 289U +/** @brief Monitored branch of MBGE0 RX input clock */ +#define TEGRA234_CLK_MGBE0_RX_INPUT_M 357U +/** @brief Monitored branch of MBGE1 RX input clock */ +#define TEGRA234_CLK_MGBE1_RX_INPUT_M 358U +/** @brief Monitored branch of MBGE2 RX input clock */ +#define TEGRA234_CLK_MGBE2_RX_INPUT_M 359U +/** @brief Monitored branch of MBGE3 RX input clock */ +#define TEGRA234_CLK_MGBE3_RX_INPUT_M 360U +/** @brief Monitored branch of MGBE0 RX PCS mux output */ +#define TEGRA234_CLK_MGBE0_RX_PCS_M 361U +/** @brief Monitored branch of MGBE1 RX PCS mux output */ +#define TEGRA234_CLK_MGBE1_RX_PCS_M 362U +/** @brief Monitored branch of MGBE2 RX PCS mux output */ +#define TEGRA234_CLK_MGBE2_RX_PCS_M 363U +/** @brief Monitored branch of MGBE3 RX PCS mux output */ +#define TEGRA234_CLK_MGBE3_RX_PCS_M 364U +/** @brief RX PCS clock recovered from MGBE0 lane input */ +#define TEGRA234_CLK_MGBE0_RX_PCS_INPUT 369U +/** @brief RX PCS clock recovered from MGBE1 lane input */ +#define TEGRA234_CLK_MGBE1_RX_PCS_INPUT 370U +/** @brief RX PCS clock recovered from MGBE2 lane input */ +#define TEGRA234_CLK_MGBE2_RX_PCS_INPUT 371U +/** @brief RX PCS clock recovered from MGBE3 lane input */ +#define TEGRA234_CLK_MGBE3_RX_PCS_INPUT 372U +/** @brief output of mux controlled by GBE_UPHY_MGBE0_RX_PCS_CLK_SRC_SEL */ +#define TEGRA234_CLK_MGBE0_RX_PCS 373U +/** @brief GBE_UPHY_MGBE0_TX_CLK divider gated output */ +#define TEGRA234_CLK_MGBE0_TX 374U +/** @brief GBE_UPHY_MGBE0_TX_PCS_CLK divider gated output */ +#define TEGRA234_CLK_MGBE0_TX_PCS 375U +/** @brief GBE_UPHY_MGBE0_MAC_CLK divider output */ +#define TEGRA234_CLK_MGBE0_MAC_DIVIDER 376U +/** @brief GBE_UPHY_MGBE0_MAC_CLK gate output */ +#define TEGRA234_CLK_MGBE0_MAC 377U +/** @brief GBE_UPHY_MGBE0_MACSEC_CLK gate output */ +#define TEGRA234_CLK_MGBE0_MACSEC 378U +/** @brief GBE_UPHY_MGBE0_EEE_PCS_CLK gate output */ +#define TEGRA234_CLK_MGBE0_EEE_PCS 379U +/** @brief GBE_UPHY_MGBE0_APP_CLK gate output */ +#define TEGRA234_CLK_MGBE0_APP 380U +/** @brief GBE_UPHY_MGBE0_PTP_REF_CLK divider gated output */ +#define 
TEGRA234_CLK_MGBE0_PTP_REF 381U +/** @brief output of mux controlled by GBE_UPHY_MGBE1_RX_PCS_CLK_SRC_SEL */ +#define TEGRA234_CLK_MGBE1_RX_PCS 382U +/** @brief GBE_UPHY_MGBE1_TX_CLK divider gated output */ +#define TEGRA234_CLK_MGBE1_TX 383U +/** @brief GBE_UPHY_MGBE1_TX_PCS_CLK divider gated output */ +#define TEGRA234_CLK_MGBE1_TX_PCS 384U +/** @brief GBE_UPHY_MGBE1_MAC_CLK divider output */ +#define TEGRA234_CLK_MGBE1_MAC_DIVIDER 385U +/** @brief GBE_UPHY_MGBE1_MAC_CLK gate output */ +#define TEGRA234_CLK_MGBE1_MAC 386U +/** @brief GBE_UPHY_MGBE1_EEE_PCS_CLK gate output */ +#define TEGRA234_CLK_MGBE1_EEE_PCS 388U +/** @brief GBE_UPHY_MGBE1_APP_CLK gate output */ +#define TEGRA234_CLK_MGBE1_APP 389U +/** @brief GBE_UPHY_MGBE1_PTP_REF_CLK divider gated output */ +#define TEGRA234_CLK_MGBE1_PTP_REF 390U +/** @brief output of mux controlled by GBE_UPHY_MGBE2_RX_PCS_CLK_SRC_SEL */ +#define TEGRA234_CLK_MGBE2_RX_PCS 391U +/** @brief GBE_UPHY_MGBE2_TX_CLK divider gated output */ +#define TEGRA234_CLK_MGBE2_TX 392U +/** @brief GBE_UPHY_MGBE2_TX_PCS_CLK divider gated output */ +#define TEGRA234_CLK_MGBE2_TX_PCS 393U +/** @brief GBE_UPHY_MGBE2_MAC_CLK divider output */ +#define TEGRA234_CLK_MGBE2_MAC_DIVIDER 394U +/** @brief GBE_UPHY_MGBE2_MAC_CLK gate output */ +#define TEGRA234_CLK_MGBE2_MAC 395U +/** @brief GBE_UPHY_MGBE2_EEE_PCS_CLK gate output */ +#define TEGRA234_CLK_MGBE2_EEE_PCS 397U +/** @brief GBE_UPHY_MGBE2_APP_CLK gate output */ +#define TEGRA234_CLK_MGBE2_APP 398U +/** @brief GBE_UPHY_MGBE2_PTP_REF_CLK divider gated output */ +#define TEGRA234_CLK_MGBE2_PTP_REF 399U +/** @brief output of mux controlled by GBE_UPHY_MGBE3_RX_PCS_CLK_SRC_SEL */ +#define TEGRA234_CLK_MGBE3_RX_PCS 400U +/** @brief GBE_UPHY_MGBE3_TX_CLK divider gated output */ +#define TEGRA234_CLK_MGBE3_TX 401U +/** @brief GBE_UPHY_MGBE3_TX_PCS_CLK divider gated output */ +#define TEGRA234_CLK_MGBE3_TX_PCS 402U +/** @brief GBE_UPHY_MGBE3_MAC_CLK divider output */ +#define TEGRA234_CLK_MGBE3_MAC_DIVIDER 403U +/** @brief GBE_UPHY_MGBE3_MAC_CLK gate output */ +#define TEGRA234_CLK_MGBE3_MAC 404U +/** @brief GBE_UPHY_MGBE3_MACSEC_CLK gate output */ +#define TEGRA234_CLK_MGBE3_MACSEC 405U +/** @brief GBE_UPHY_MGBE3_EEE_PCS_CLK gate output */ +#define TEGRA234_CLK_MGBE3_EEE_PCS 406U +/** @brief GBE_UPHY_MGBE3_APP_CLK gate output */ +#define TEGRA234_CLK_MGBE3_APP 407U +/** @brief GBE_UPHY_MGBE3_PTP_REF_CLK divider gated output */ +#define TEGRA234_CLK_MGBE3_PTP_REF 408U /** @brief CLK_RST_CONTROLLER_AZA2XBITCLK_OUT_SWITCH_DIVIDER switch divider output (aza_2xbitclk) */ #define TEGRA234_CLK_AZA_2XBIT 457U /** @brief aza_2xbitclk / 2 (aza_bitclk) */ #define TEGRA234_CLK_AZA_BIT 458U + #endif diff --git a/include/dt-bindings/mailbox/qcom-ipcc.h b/include/dt-bindings/mailbox/qcom-ipcc.h index 9296d0bb5f34..fbfa3febc66d 100644 --- a/include/dt-bindings/mailbox/qcom-ipcc.h +++ b/include/dt-bindings/mailbox/qcom-ipcc.h @@ -30,6 +30,7 @@ #define IPCC_CLIENT_PCIE1 14 #define IPCC_CLIENT_PCIE2 15 #define IPCC_CLIENT_SPSS 16 +#define IPCC_CLIENT_NSP1 18 #define IPCC_CLIENT_TME 23 #define IPCC_CLIENT_WPSS 24 diff --git a/include/dt-bindings/memory/tegra234-mc.h b/include/dt-bindings/memory/tegra234-mc.h index e3b0e9da295d..62987b47ce81 100644 --- a/include/dt-bindings/memory/tegra234-mc.h +++ b/include/dt-bindings/memory/tegra234-mc.h @@ -11,11 +11,16 @@ /* NISO0 stream IDs */ #define TEGRA234_SID_APE 0x02 #define TEGRA234_SID_HDA 0x03 +#define TEGRA234_SID_GPCDMA 0x04 +#define TEGRA234_SID_MGBE 0x06 #define TEGRA234_SID_PCIE0 0x12 
#define TEGRA234_SID_PCIE4 0x13 #define TEGRA234_SID_PCIE5 0x14 #define TEGRA234_SID_PCIE6 0x15 #define TEGRA234_SID_PCIE9 0x1f +#define TEGRA234_SID_MGBE_VF1 0x49 +#define TEGRA234_SID_MGBE_VF2 0x4a +#define TEGRA234_SID_MGBE_VF3 0x4b /* NISO1 stream IDs */ #define TEGRA234_SID_SDMMC4 0x02 @@ -26,6 +31,8 @@ #define TEGRA234_SID_PCIE8 0x09 #define TEGRA234_SID_PCIE10 0x0b #define TEGRA234_SID_BPMP 0x10 +#define TEGRA234_SID_HOST1X 0x27 +#define TEGRA234_SID_VIC 0x34 /* * memory client IDs @@ -33,6 +40,7 @@ /* High-definition audio (HDA) read clients */ #define TEGRA234_MEMORY_CLIENT_HDAR 0x15 +#define TEGRA234_MEMORY_CLIENT_HOST1XDMAR 0x16 /* PCIE6 read clients */ #define TEGRA234_MEMORY_CLIENT_PCIE6AR 0x28 /* PCIE6 write clients */ @@ -61,10 +69,28 @@ #define TEGRA234_MEMORY_CLIENT_PCIE10AR1 0x48 /* PCIE7r1 read clients */ #define TEGRA234_MEMORY_CLIENT_PCIE7AR1 0x49 +/* MGBE0 read client */ +#define TEGRA234_MEMORY_CLIENT_MGBEARD 0x58 +/* MGBEB read client */ +#define TEGRA234_MEMORY_CLIENT_MGBEBRD 0x59 +/* MGBEC read client */ +#define TEGRA234_MEMORY_CLIENT_MGBECRD 0x5a +/* MGBED read client */ +#define TEGRA234_MEMORY_CLIENT_MGBEDRD 0x5b +/* MGBE0 write client */ +#define TEGRA234_MEMORY_CLIENT_MGBEAWR 0x5c +/* MGBEB write client */ +#define TEGRA234_MEMORY_CLIENT_MGBEBWR 0x5f +/* MGBEC write client */ +#define TEGRA234_MEMORY_CLIENT_MGBECWR 0x61 /* sdmmcd memory read client */ #define TEGRA234_MEMORY_CLIENT_SDMMCRAB 0x63 +/* MGBED write client */ +#define TEGRA234_MEMORY_CLIENT_MGBEDWR 0x65 /* sdmmcd memory write client */ #define TEGRA234_MEMORY_CLIENT_SDMMCWAB 0x67 +#define TEGRA234_MEMORY_CLIENT_VICSRD 0x6c +#define TEGRA234_MEMORY_CLIENT_VICSWR 0x6d /* BPMP read client */ #define TEGRA234_MEMORY_CLIENT_BPMPR 0x93 /* BPMP write client */ diff --git a/include/dt-bindings/net/pcs-rzn1-miic.h b/include/dt-bindings/net/pcs-rzn1-miic.h new file mode 100644 index 000000000000..784782eaec9e --- /dev/null +++ b/include/dt-bindings/net/pcs-rzn1-miic.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (C) 2022 Schneider-Electric + * + * Clément Léger <clement.leger@bootlin.com> + */ + +#ifndef _DT_BINDINGS_PCS_RZN1_MIIC +#define _DT_BINDINGS_PCS_RZN1_MIIC + +/* + * Reefer to the datasheet [1] section 8.2.1, Internal Connection of Ethernet + * Ports to check the available combination + * + * [1] REN_r01uh0750ej0140-rzn1-introduction_MAT_20210228.pdf + */ + +#define MIIC_GMAC1_PORT 0 +#define MIIC_GMAC2_PORT 1 +#define MIIC_RTOS_PORT 2 +#define MIIC_SERCOS_PORTA 3 +#define MIIC_SERCOS_PORTB 4 +#define MIIC_ETHERCAT_PORTA 5 +#define MIIC_ETHERCAT_PORTB 6 +#define MIIC_ETHERCAT_PORTC 7 +#define MIIC_SWITCH_PORTA 8 +#define MIIC_SWITCH_PORTB 9 +#define MIIC_SWITCH_PORTC 10 +#define MIIC_SWITCH_PORTD 11 +#define MIIC_HSR_PORTA 12 +#define MIIC_HSR_PORTB 13 + +#endif diff --git a/include/dt-bindings/power/mt6795-power.h b/include/dt-bindings/power/mt6795-power.h new file mode 100644 index 000000000000..b0fc26cb1da4 --- /dev/null +++ b/include/dt-bindings/power/mt6795-power.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +#ifndef _DT_BINDINGS_POWER_MT6795_POWER_H +#define _DT_BINDINGS_POWER_MT6795_POWER_H + +#define MT6795_POWER_DOMAIN_MM 0 +#define MT6795_POWER_DOMAIN_VDEC 1 +#define MT6795_POWER_DOMAIN_VENC 2 +#define MT6795_POWER_DOMAIN_ISP 3 +#define MT6795_POWER_DOMAIN_MJC 4 +#define MT6795_POWER_DOMAIN_AUDIO 5 +#define MT6795_POWER_DOMAIN_MFG_ASYNC 6 +#define MT6795_POWER_DOMAIN_MFG_2D 7 +#define 
MT6795_POWER_DOMAIN_MFG 8 +#define MT6795_POWER_DOMAIN_MODEM 9 + +#endif /* _DT_BINDINGS_POWER_MT6795_POWER_H */ diff --git a/include/dt-bindings/power/qcom-rpmpd.h b/include/dt-bindings/power/qcom-rpmpd.h index 6cce5b7aa940..d81de63ae31c 100644 --- a/include/dt-bindings/power/qcom-rpmpd.h +++ b/include/dt-bindings/power/qcom-rpmpd.h @@ -187,6 +187,13 @@ #define MSM8916_VDDMX 3 #define MSM8916_VDDMX_AO 4 +/* MSM8909 Power Domain Indexes */ +#define MSM8909_VDDCX MSM8916_VDDCX +#define MSM8909_VDDCX_AO MSM8916_VDDCX_AO +#define MSM8909_VDDCX_VFC MSM8916_VDDCX_VFC +#define MSM8909_VDDMX MSM8916_VDDMX +#define MSM8909_VDDMX_AO MSM8916_VDDMX_AO + /* MSM8953 Power Domain Indexes */ #define MSM8953_VDDMD 0 #define MSM8953_VDDMD_AO 1 diff --git a/include/dt-bindings/power/tegra234-powergate.h b/include/dt-bindings/power/tegra234-powergate.h index f610eee9bce8..ae9286cef85c 100644 --- a/include/dt-bindings/power/tegra234-powergate.h +++ b/include/dt-bindings/power/tegra234-powergate.h @@ -18,5 +18,7 @@ #define TEGRA234_POWER_DOMAIN_MGBEA 17U #define TEGRA234_POWER_DOMAIN_MGBEB 18U #define TEGRA234_POWER_DOMAIN_MGBEC 19U +#define TEGRA234_POWER_DOMAIN_MGBED 20U +#define TEGRA234_POWER_DOMAIN_VIC 29U #endif diff --git a/include/dt-bindings/reset/sunplus,sp7021-reset.h b/include/dt-bindings/reset/sunplus,sp7021-reset.h new file mode 100644 index 000000000000..ab486707387f --- /dev/null +++ b/include/dt-bindings/reset/sunplus,sp7021-reset.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (C) Sunplus Technology Co., Ltd. + * All rights reserved. + */ +#ifndef _DT_BINDINGS_RST_SUNPLUS_SP7021_H +#define _DT_BINDINGS_RST_SUNPLUS_SP7021_H + +#define RST_SYSTEM 0 +#define RST_RTC 1 +#define RST_IOCTL 2 +#define RST_IOP 3 +#define RST_OTPRX 4 +#define RST_NOC 5 +#define RST_BR 6 +#define RST_RBUS_L00 7 +#define RST_SPIFL 8 +#define RST_SDCTRL0 9 +#define RST_PERI0 10 +#define RST_A926 11 +#define RST_UMCTL2 12 +#define RST_PERI1 13 +#define RST_DDR_PHY0 14 +#define RST_ACHIP 15 +#define RST_STC0 16 +#define RST_STC_AV0 17 +#define RST_STC_AV1 18 +#define RST_STC_AV2 19 +#define RST_UA0 20 +#define RST_UA1 21 +#define RST_UA2 22 +#define RST_UA3 23 +#define RST_UA4 24 +#define RST_HWUA 25 +#define RST_DDC0 26 +#define RST_UADMA 27 +#define RST_CBDMA0 28 +#define RST_CBDMA1 29 +#define RST_SPI_COMBO_0 30 +#define RST_SPI_COMBO_1 31 +#define RST_SPI_COMBO_2 32 +#define RST_SPI_COMBO_3 33 +#define RST_AUD 34 +#define RST_USBC0 35 +#define RST_USBC1 36 +#define RST_UPHY0 37 +#define RST_UPHY1 38 +#define RST_I2CM0 39 +#define RST_I2CM1 40 +#define RST_I2CM2 41 +#define RST_I2CM3 42 +#define RST_PMC 43 +#define RST_CARD_CTL0 44 +#define RST_CARD_CTL1 45 +#define RST_CARD_CTL4 46 +#define RST_BCH 47 +#define RST_DDFCH 48 +#define RST_CSIIW0 49 +#define RST_CSIIW1 50 +#define RST_MIPICSI0 51 +#define RST_MIPICSI1 52 +#define RST_HDMI_TX 53 +#define RST_VPOST 54 +#define RST_TGEN 55 +#define RST_DMIX 56 +#define RST_TCON 57 +#define RST_INTERRUPT 58 +#define RST_RGST 59 +#define RST_GPIO 60 +#define RST_RBUS_TOP 61 +#define RST_MAILBOX 62 +#define RST_SPIND 63 +#define RST_I2C2CBUS 64 +#define RST_SEC 65 +#define RST_DVE 66 +#define RST_GPOST0 67 +#define RST_OSD0 68 +#define RST_DISP_PWM 69 +#define RST_UADBG 70 +#define RST_DUMMY_MASTER 71 +#define RST_FIO_CTL 72 +#define RST_FPGA 73 +#define RST_L2SW 74 +#define RST_ICM 75 +#define RST_AXI_GLOBAL 76 + +#endif diff --git a/include/dt-bindings/reset/tegra234-reset.h b/include/dt-bindings/reset/tegra234-reset.h index 
547ca3b60caa..d48d22b2bc7f 100644 --- a/include/dt-bindings/reset/tegra234-reset.h +++ b/include/dt-bindings/reset/tegra234-reset.h @@ -15,6 +15,7 @@ #define TEGRA234_RESET_PEX1_COMMON_APB 13U #define TEGRA234_RESET_PEX2_CORE_7 14U #define TEGRA234_RESET_PEX2_CORE_7_APB 15U +#define TEGRA234_RESET_GPCDMA 18U #define TEGRA234_RESET_HDA 20U #define TEGRA234_RESET_HDACODEC 21U #define TEGRA234_RESET_I2C1 24U @@ -29,6 +30,12 @@ #define TEGRA234_RESET_I2C7 33U #define TEGRA234_RESET_I2C8 34U #define TEGRA234_RESET_I2C9 35U +#define TEGRA234_RESET_MGBE0_PCS 45U +#define TEGRA234_RESET_MGBE0_MAC 46U +#define TEGRA234_RESET_MGBE1_PCS 49U +#define TEGRA234_RESET_MGBE1_MAC 50U +#define TEGRA234_RESET_MGBE2_PCS 53U +#define TEGRA234_RESET_MGBE2_MAC 54U #define TEGRA234_RESET_PEX2_CORE_10 56U #define TEGRA234_RESET_PEX2_CORE_10_APB 57U #define TEGRA234_RESET_PEX2_COMMON_APB 58U @@ -43,7 +50,10 @@ #define TEGRA234_RESET_QSPI0 76U #define TEGRA234_RESET_QSPI1 77U #define TEGRA234_RESET_SDMMC4 85U +#define TEGRA234_RESET_MGBE3_PCS 87U +#define TEGRA234_RESET_MGBE3_MAC 88U #define TEGRA234_RESET_UARTA 100U +#define TEGRA234_RESET_VIC 113U #define TEGRA234_RESET_PEX0_CORE_0 116U #define TEGRA234_RESET_PEX0_CORE_1 117U #define TEGRA234_RESET_PEX0_CORE_2 118U diff --git a/include/dt-bindings/soc/samsung,boot-mode.h b/include/dt-bindings/soc/samsung,boot-mode.h new file mode 100644 index 000000000000..47ef1cdd3916 --- /dev/null +++ b/include/dt-bindings/soc/samsung,boot-mode.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2022 Samsung Electronics Co., Ltd. + * Author: Chanho Park <chanho61.park@samsung.com> + * + * Device Tree bindings for Samsung Boot Mode. + */ + +#ifndef __DT_BINDINGS_SAMSUNG_BOOT_MODE_H +#define __DT_BINDINGS_SAMSUNG_BOOT_MODE_H + +/* Boot mode definitions for Exynos Auto v9 SoC */ + +#define EXYNOSAUTOV9_BOOT_FASTBOOT 0xfa +#define EXYNOSAUTOV9_BOOT_BOOTLOADER 0xfc +#define EXYNOSAUTOV9_BOOT_RECOVERY 0xff + +#endif /* __DT_BINDINGS_SAMSUNG_BOOT_MODE_H */ diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 4f82a5bc6d98..7e7a33b6c8d7 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -105,6 +105,7 @@ enum acpi_irq_model_id { ACPI_IRQ_MODEL_IOSAPIC, ACPI_IRQ_MODEL_PLATFORM, ACPI_IRQ_MODEL_GIC, + ACPI_IRQ_MODEL_LPIC, ACPI_IRQ_MODEL_COUNT }; @@ -356,7 +357,8 @@ int acpi_gsi_to_irq (u32 gsi, unsigned int *irq); int acpi_isa_irq_to_gsi (unsigned isa_irq, u32 *gsi); void acpi_set_irq_model(enum acpi_irq_model_id model, - struct fwnode_handle *fwnode); + struct fwnode_handle *(*)(u32)); +void acpi_set_gsi_to_irq_fallback(u32 (*)(u32)); struct irq_domain *acpi_irq_create_hierarchy(unsigned int flags, unsigned int size, @@ -584,7 +586,7 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context); extern bool osc_sb_apei_support_acked; extern bool osc_pc_lpi_support_confirmed; extern bool osc_sb_native_usb4_support_confirmed; -extern bool osc_sb_cppc_not_supported; +extern bool osc_sb_cppc2_support_acked; extern bool osc_cpc_flexible_adr_space_confirmed; /* USB4 Capabilities */ diff --git a/include/linux/acpi_viot.h b/include/linux/acpi_viot.h index 1eb8ee5b0e5f..a5a122431563 100644 --- a/include/linux/acpi_viot.h +++ b/include/linux/acpi_viot.h @@ -6,9 +6,11 @@ #include <linux/acpi.h> #ifdef CONFIG_ACPI_VIOT +void __init acpi_viot_early_init(void); void __init acpi_viot_init(void); int viot_iommu_configure(struct device *dev); #else +static inline void acpi_viot_early_init(void) {} static inline 
void acpi_viot_init(void) {} static inline int viot_iommu_configure(struct device *dev) { diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 1bfcfb1af352..187b54a9567e 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -264,7 +264,8 @@ struct css_set { * List of csets participating in the on-going migration either as * source or destination. Protected by cgroup_mutex. */ - struct list_head mg_preload_node; + struct list_head mg_src_preload_node; + struct list_head mg_dst_preload_node; struct list_head mg_node; /* @@ -287,6 +288,10 @@ struct css_set { struct cgroup_base_stat { struct task_cputime cputime; + +#ifdef CONFIG_SCHED_CORE + u64 forceidle_sum; +#endif }; /* diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index d08dfcb0ac68..4f2a819fd60a 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -24,6 +24,7 @@ static inline void __chk_io_ptr(const volatile void __iomem *ptr) { } /* context/locking */ # define __must_hold(x) __attribute__((context(x,1,1))) # define __acquires(x) __attribute__((context(x,0,1))) +# define __cond_acquires(x) __attribute__((context(x,0,-1))) # define __releases(x) __attribute__((context(x,1,0))) # define __acquire(x) __context__(x,1) # define __release(x) __context__(x,-1) @@ -50,6 +51,7 @@ static inline void __chk_io_ptr(const volatile void __iomem *ptr) { } /* context/locking */ # define __must_hold(x) # define __acquires(x) +# define __cond_acquires(x) # define __releases(x) # define __acquire(x) (void)0 # define __release(x) (void)0 diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 2c7477354744..314802f98b9d 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -68,6 +68,8 @@ extern ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, extern ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf); +extern ssize_t cpu_show_retbleed(struct device *dev, + struct device_attribute *attr, char *buf); extern __printf(4, 5) struct device *cpu_device_create(struct device *parent, void *drvdata, diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 19f0dbfdd7fe..f61447913db9 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -130,7 +130,6 @@ enum cpuhp_state { CPUHP_ZCOMP_PREPARE, CPUHP_TIMERS_PREPARE, CPUHP_MIPS_SOC_PREPARE, - CPUHP_LOONGARCH_SOC_PREPARE, CPUHP_BP_PREPARE_DYN, CPUHP_BP_PREPARE_DYN_END = CPUHP_BP_PREPARE_DYN + 20, CPUHP_BRINGUP_CPU, @@ -151,6 +150,7 @@ enum cpuhp_state { CPUHP_AP_IRQ_BCM2836_STARTING, CPUHP_AP_IRQ_MIPS_GIC_STARTING, CPUHP_AP_IRQ_RISCV_STARTING, + CPUHP_AP_IRQ_LOONGARCH_STARTING, CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING, CPUHP_AP_ARM_MVEBU_COHERENCY, CPUHP_AP_MICROCODE_LOADER, @@ -230,6 +230,7 @@ enum cpuhp_state { CPUHP_AP_PERF_ARM_HISI_PA_ONLINE, CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE, CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE, + CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE, CPUHP_AP_PERF_ARM_L2X0_ONLINE, CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE, CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE, diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h index dc10bee75a72..34aab4dd336c 100644 --- a/include/linux/devfreq.h +++ b/include/linux/devfreq.h @@ -148,6 +148,8 @@ struct devfreq_stats { * reevaluate operable frequencies. Devfreq users may use * devfreq.nb to the corresponding register notifier call chain. * @work: delayed work for load monitoring. + * @freq_table: current frequency table used by the devfreq driver. 
+ * @max_state: count of entry present in the frequency table. * @previous_freq: previously configured frequency value. * @last_status: devfreq user device info, performance statistics * @data: Private data of the governor. The devfreq framework does not @@ -185,6 +187,9 @@ struct devfreq { struct notifier_block nb; struct delayed_work work; + unsigned long *freq_table; + unsigned int max_state; + unsigned long previous_freq; struct devfreq_dev_status last_status; diff --git a/include/linux/dim.h b/include/linux/dim.h index b698266d0035..6c5733981563 100644 --- a/include/linux/dim.h +++ b/include/linux/dim.h @@ -21,7 +21,7 @@ * We consider 10% difference as significant. */ #define IS_SIGNIFICANT_DIFF(val, ref) \ - (((100UL * abs((val) - (ref))) / (ref)) > 10) + ((ref) && (((100UL * abs((val) - (ref))) / (ref)) > 10)) /* * Calculate the gap between two values. diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h index 8419bffb4398..b9caa01dfac4 100644 --- a/include/linux/energy_model.h +++ b/include/linux/energy_model.h @@ -62,7 +62,7 @@ struct em_perf_domain { /* * em_perf_domain flags: * - * EM_PERF_DOMAIN_MILLIWATTS: The power values are in milli-Watts or some + * EM_PERF_DOMAIN_MICROWATTS: The power values are in micro-Watts or some * other scale. * * EM_PERF_DOMAIN_SKIP_INEFFICIENCIES: Skip inefficient states when estimating @@ -71,7 +71,7 @@ struct em_perf_domain { * EM_PERF_DOMAIN_ARTIFICIAL: The power values are artificial and might be * created by platform missing real power information */ -#define EM_PERF_DOMAIN_MILLIWATTS BIT(0) +#define EM_PERF_DOMAIN_MICROWATTS BIT(0) #define EM_PERF_DOMAIN_SKIP_INEFFICIENCIES BIT(1) #define EM_PERF_DOMAIN_ARTIFICIAL BIT(2) @@ -79,22 +79,44 @@ struct em_perf_domain { #define em_is_artificial(em) ((em)->flags & EM_PERF_DOMAIN_ARTIFICIAL) #ifdef CONFIG_ENERGY_MODEL -#define EM_MAX_POWER 0xFFFF +/* + * The max power value in micro-Watts. The limit of 64 Watts is set as + * a safety net to not overflow multiplications on 32bit platforms. The + * 32bit value limit for total Perf Domain power implies a limit of + * maximum CPUs in such domain to 64. + */ +#define EM_MAX_POWER (64000000) /* 64 Watts */ + +/* + * To avoid possible energy estimation overflow on 32bit machines add + * limits to number of CPUs in the Perf. Domain. + * We are safe on 64bit machine, thus some big number. + */ +#ifdef CONFIG_64BIT +#define EM_MAX_NUM_CPUS 4096 +#else +#define EM_MAX_NUM_CPUS 16 +#endif /* - * Increase resolution of energy estimation calculations for 64-bit - * architectures. The extra resolution improves decision made by EAS for the - * task placement when two Performance Domains might provide similar energy - * estimation values (w/o better resolution the values could be equal). + * To avoid an overflow on 32bit machines while calculating the energy + * use a different order in the operation. First divide by the 'cpu_scale' + * which would reduce big value stored in the 'cost' field, then multiply by + * the 'sum_util'. This would allow to handle existing platforms, which have + * e.g. power ~1.3 Watt at max freq, so the 'cost' value > 1mln micro-Watts. + * In such scenario, where there are 4 CPUs in the Perf. Domain the 'sum_util' + * could be 4096, then multiplication: 'cost' * 'sum_util' would overflow. + * This reordering of operations has some limitations, we lose small + * precision in the estimation (comparing to 64bit platform w/o reordering). 
* - * We increase resolution only if we have enough bits to allow this increased - * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit - * are pretty high and the returns do not justify the increased costs. + * We are safe on 64bit machine. */ #ifdef CONFIG_64BIT -#define em_scale_power(p) ((p) * 1000) +#define em_estimate_energy(cost, sum_util, scale_cpu) \ + (((cost) * (sum_util)) / (scale_cpu)) #else -#define em_scale_power(p) (p) +#define em_estimate_energy(cost, sum_util, scale_cpu) \ + (((cost) / (scale_cpu)) * (sum_util)) #endif struct em_data_callback { @@ -112,7 +134,7 @@ struct em_data_callback { * and frequency. * * In case of CPUs, the power is the one of a single CPU in the domain, - * expressed in milli-Watts or an abstract scale. It is expected to + * expressed in micro-Watts or an abstract scale. It is expected to * fit in the [0, EM_MAX_POWER] range. * * Return 0 on success. @@ -148,7 +170,7 @@ struct em_perf_domain *em_cpu_get(int cpu); struct em_perf_domain *em_pd_get(struct device *dev); int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states, struct em_data_callback *cb, cpumask_t *span, - bool milliwatts); + bool microwatts); void em_dev_unregister_perf_domain(struct device *dev); /** @@ -273,7 +295,7 @@ static inline unsigned long em_cpu_energy(struct em_perf_domain *pd, * pd_nrg = ------------------------ (4) * scale_cpu */ - return ps->cost * sum_util / scale_cpu; + return em_estimate_energy(ps->cost, sum_util, scale_cpu); } /** @@ -297,7 +319,7 @@ struct em_data_callback {}; static inline int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states, struct em_data_callback *cb, cpumask_t *span, - bool milliwatts) + bool microwatts) { return -EINVAL; } diff --git a/include/linux/evm.h b/include/linux/evm.h index 4c374be70247..aa63e0b3c0a2 100644 --- a/include/linux/evm.h +++ b/include/linux/evm.h @@ -21,7 +21,8 @@ extern enum integrity_status evm_verifyxattr(struct dentry *dentry, void *xattr_value, size_t xattr_value_len, struct integrity_iint_cache *iint); -extern int evm_inode_setattr(struct dentry *dentry, struct iattr *attr); +extern int evm_inode_setattr(struct user_namespace *mnt_userns, + struct dentry *dentry, struct iattr *attr); extern void evm_inode_post_setattr(struct dentry *dentry, int ia_valid); extern int evm_inode_setxattr(struct user_namespace *mnt_userns, struct dentry *dentry, const char *name, @@ -68,7 +69,8 @@ static inline enum integrity_status evm_verifyxattr(struct dentry *dentry, } #endif -static inline int evm_inode_setattr(struct dentry *dentry, struct iattr *attr) +static inline int evm_inode_setattr(struct user_namespace *mnt_userns, + struct dentry *dentry, struct iattr *attr) { return 0; } diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h index edc28555814c..8ad743def6f3 100644 --- a/include/linux/fanotify.h +++ b/include/linux/fanotify.h @@ -59,15 +59,19 @@ #define FANOTIFY_MARK_TYPE_BITS (FAN_MARK_INODE | FAN_MARK_MOUNT | \ FAN_MARK_FILESYSTEM) +#define FANOTIFY_MARK_CMD_BITS (FAN_MARK_ADD | FAN_MARK_REMOVE | \ + FAN_MARK_FLUSH) + +#define FANOTIFY_MARK_IGNORE_BITS (FAN_MARK_IGNORED_MASK | \ + FAN_MARK_IGNORE) + #define FANOTIFY_MARK_FLAGS (FANOTIFY_MARK_TYPE_BITS | \ - FAN_MARK_ADD | \ - FAN_MARK_REMOVE | \ + FANOTIFY_MARK_CMD_BITS | \ + FANOTIFY_MARK_IGNORE_BITS | \ FAN_MARK_DONT_FOLLOW | \ FAN_MARK_ONLYDIR | \ - FAN_MARK_IGNORED_MASK | \ FAN_MARK_IGNORED_SURV_MODIFY | \ - FAN_MARK_EVICTABLE | \ - FAN_MARK_FLUSH) + FAN_MARK_EVICTABLE) /* * Events that can be 
reported with data type FSNOTIFY_EVENT_PATH. @@ -111,6 +115,10 @@ FANOTIFY_PERM_EVENTS | \ FAN_Q_OVERFLOW | FAN_ONDIR) +/* Events and flags relevant only for directories */ +#define FANOTIFY_DIRONLY_EVENT_BITS (FANOTIFY_DIRENT_EVENTS | \ + FAN_EVENT_ON_CHILD | FAN_ONDIR) + #define ALL_FANOTIFY_EVENT_BITS (FANOTIFY_OUTGOING_EVENTS | \ FANOTIFY_EVENT_FLAGS) diff --git a/include/linux/fbcon.h b/include/linux/fbcon.h index ff5596dd30f8..2382dec6d6ab 100644 --- a/include/linux/fbcon.h +++ b/include/linux/fbcon.h @@ -15,6 +15,8 @@ void fbcon_new_modelist(struct fb_info *info); void fbcon_get_requirement(struct fb_info *info, struct fb_blit_caps *caps); void fbcon_fb_blanked(struct fb_info *info, int blank); +int fbcon_modechange_possible(struct fb_info *info, + struct fb_var_screeninfo *var); void fbcon_update_vcs(struct fb_info *info, bool all); void fbcon_remap_all(struct fb_info *info); int fbcon_set_con2fb_map_ioctl(void __user *argp); @@ -33,6 +35,8 @@ static inline void fbcon_new_modelist(struct fb_info *info) {} static inline void fbcon_get_requirement(struct fb_info *info, struct fb_blit_caps *caps) {} static inline void fbcon_fb_blanked(struct fb_info *info, int blank) {} +static inline int fbcon_modechange_possible(struct fb_info *info, + struct fb_var_screeninfo *var) { return 0; } static inline void fbcon_update_vcs(struct fb_info *info, bool all) {} static inline void fbcon_remap_all(struct fb_info *info) {} static inline int fbcon_set_con2fb_map_ioctl(void __user *argp) { return 0; } diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h index 1ec73d5352c3..cbde3b1fa414 100644 --- a/include/linux/firmware/xlnx-zynqmp.h +++ b/include/linux/firmware/xlnx-zynqmp.h @@ -34,6 +34,7 @@ #define PM_API_VERSION_2 2 /* ATF only commands */ +#define TF_A_PM_REGISTER_SGI 0xa04 #define PM_GET_TRUSTZONE_VERSION 0xa03 #define PM_SET_SUSPEND_MODE 0xa02 #define GET_CALLBACK_DATA 0xa01 @@ -468,6 +469,7 @@ int zynqmp_pm_feature(const u32 api_id); int zynqmp_pm_is_function_supported(const u32 api_id, const u32 id); int zynqmp_pm_set_feature_config(enum pm_feature_config_id id, u32 value); int zynqmp_pm_get_feature_config(enum pm_feature_config_id id, u32 *payload); +int zynqmp_pm_register_sgi(u32 sgi_num, u32 reset); #else static inline int zynqmp_pm_get_api_version(u32 *version) { @@ -733,6 +735,11 @@ static inline int zynqmp_pm_get_feature_config(enum pm_feature_config_id id, { return -ENODEV; } + +static inline int zynqmp_pm_register_sgi(u32 sgi_num, u32 reset) +{ + return -ENODEV; +} #endif #endif /* __FIRMWARE_ZYNQMP_H__ */ diff --git a/include/linux/fs.h b/include/linux/fs.h index 9ad5e3520fae..9f131e559d05 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -180,6 +180,9 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, /* File supports async buffered reads */ #define FMODE_BUF_RASYNC ((__force fmode_t)0x40000000) +/* File supports async nowait buffered writes */ +#define FMODE_BUF_WASYNC ((__force fmode_t)0x80000000) + /* * Attribute flags. These should be or-ed together to figure out what * has been changed! @@ -221,8 +224,26 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, struct iattr { unsigned int ia_valid; umode_t ia_mode; - kuid_t ia_uid; - kgid_t ia_gid; + /* + * The two anonymous unions wrap structures with the same member. + * + * Filesystems raising FS_ALLOW_IDMAP need to use ia_vfs{g,u}id which + * are a dedicated type requiring the filesystem to use the dedicated + * helpers. 
Other filesystem can continue to use ia_{g,u}id until they + * have been ported. + * + * They always contain the same value. In other words FS_ALLOW_IDMAP + * pass down the same value on idmapped mounts as they would on regular + * mounts. + */ + union { + kuid_t ia_uid; + vfsuid_t ia_vfsuid; + }; + union { + kgid_t ia_gid; + vfsgid_t ia_vfsgid; + }; loff_t ia_size; struct timespec64 ia_atime; struct timespec64 ia_mtime; @@ -1600,13 +1621,68 @@ static inline void i_gid_write(struct inode *inode, gid_t gid) * @mnt_userns: user namespace of the mount the inode was found from * @inode: inode to map * + * Note, this will eventually be removed completely in favor of the type-safe + * i_uid_into_vfsuid(). + * * Return: the inode's i_uid mapped down according to @mnt_userns. * If the inode's i_uid has no mapping INVALID_UID is returned. */ static inline kuid_t i_uid_into_mnt(struct user_namespace *mnt_userns, const struct inode *inode) { - return mapped_kuid_fs(mnt_userns, i_user_ns(inode), inode->i_uid); + return AS_KUIDT(make_vfsuid(mnt_userns, i_user_ns(inode), inode->i_uid)); +} + +/** + * i_uid_into_vfsuid - map an inode's i_uid down into a mnt_userns + * @mnt_userns: user namespace of the mount the inode was found from + * @inode: inode to map + * + * Return: whe inode's i_uid mapped down according to @mnt_userns. + * If the inode's i_uid has no mapping INVALID_VFSUID is returned. + */ +static inline vfsuid_t i_uid_into_vfsuid(struct user_namespace *mnt_userns, + const struct inode *inode) +{ + return make_vfsuid(mnt_userns, i_user_ns(inode), inode->i_uid); +} + +/** + * i_uid_needs_update - check whether inode's i_uid needs to be updated + * @mnt_userns: user namespace of the mount the inode was found from + * @attr: the new attributes of @inode + * @inode: the inode to update + * + * Check whether the $inode's i_uid field needs to be updated taking idmapped + * mounts into account if the filesystem supports it. + * + * Return: true if @inode's i_uid field needs to be updated, false if not. + */ +static inline bool i_uid_needs_update(struct user_namespace *mnt_userns, + const struct iattr *attr, + const struct inode *inode) +{ + return ((attr->ia_valid & ATTR_UID) && + !vfsuid_eq(attr->ia_vfsuid, + i_uid_into_vfsuid(mnt_userns, inode))); +} + +/** + * i_uid_update - update @inode's i_uid field + * @mnt_userns: user namespace of the mount the inode was found from + * @attr: the new attributes of @inode + * @inode: the inode to update + * + * Safely update @inode's i_uid field translating the vfsuid of any idmapped + * mount into the filesystem kuid. + */ +static inline void i_uid_update(struct user_namespace *mnt_userns, + const struct iattr *attr, + struct inode *inode) +{ + if (attr->ia_valid & ATTR_UID) + inode->i_uid = from_vfsuid(mnt_userns, i_user_ns(inode), + attr->ia_vfsuid); } /** @@ -1614,13 +1690,68 @@ static inline kuid_t i_uid_into_mnt(struct user_namespace *mnt_userns, * @mnt_userns: user namespace of the mount the inode was found from * @inode: inode to map * + * Note, this will eventually be removed completely in favor of the type-safe + * i_gid_into_vfsgid(). + * * Return: the inode's i_gid mapped down according to @mnt_userns. * If the inode's i_gid has no mapping INVALID_GID is returned. 
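[Editor's note] The i_uid_needs_update()/i_uid_update() pair added here (and the i_gid_* counterparts later in this hunk) let a filesystem's setattr path stop comparing and assigning raw k{u,g}ids. A minimal, hedged sketch of how a filesystem might consume them; example_setattr() is an invented name and the usual setattr_prepare() checks are assumed to have run already:

	static int example_setattr(struct user_namespace *mnt_userns,
				   struct dentry *dentry, struct iattr *attr)
	{
		struct inode *inode = d_inode(dentry);

		/* Only rewrite i_uid/i_gid when the requested vfsuid/vfsgid differs. */
		if (i_uid_needs_update(mnt_userns, attr, inode) ||
		    i_gid_needs_update(mnt_userns, attr, inode)) {
			i_uid_update(mnt_userns, attr, inode);
			i_gid_update(mnt_userns, attr, inode);
			mark_inode_dirty(inode);
		}
		return 0;
	}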
*/ static inline kgid_t i_gid_into_mnt(struct user_namespace *mnt_userns, const struct inode *inode) { - return mapped_kgid_fs(mnt_userns, i_user_ns(inode), inode->i_gid); + return AS_KGIDT(make_vfsgid(mnt_userns, i_user_ns(inode), inode->i_gid)); +} + +/** + * i_gid_into_vfsgid - map an inode's i_gid down into a mnt_userns + * @mnt_userns: user namespace of the mount the inode was found from + * @inode: inode to map + * + * Return: the inode's i_gid mapped down according to @mnt_userns. + * If the inode's i_gid has no mapping INVALID_VFSGID is returned. + */ +static inline vfsgid_t i_gid_into_vfsgid(struct user_namespace *mnt_userns, + const struct inode *inode) +{ + return make_vfsgid(mnt_userns, i_user_ns(inode), inode->i_gid); +} + +/** + * i_gid_needs_update - check whether inode's i_gid needs to be updated + * @mnt_userns: user namespace of the mount the inode was found from + * @attr: the new attributes of @inode + * @inode: the inode to update + * + * Check whether the $inode's i_gid field needs to be updated taking idmapped + * mounts into account if the filesystem supports it. + * + * Return: true if @inode's i_gid field needs to be updated, false if not. + */ +static inline bool i_gid_needs_update(struct user_namespace *mnt_userns, + const struct iattr *attr, + const struct inode *inode) +{ + return ((attr->ia_valid & ATTR_GID) && + !vfsgid_eq(attr->ia_vfsgid, + i_gid_into_vfsgid(mnt_userns, inode))); +} + +/** + * i_gid_update - update @inode's i_gid field + * @mnt_userns: user namespace of the mount the inode was found from + * @attr: the new attributes of @inode + * @inode: the inode to update + * + * Safely update @inode's i_gid field translating the vfsgid of any idmapped + * mount into the filesystem kgid. + */ +static inline void i_gid_update(struct user_namespace *mnt_userns, + const struct iattr *attr, + struct inode *inode) +{ + if (attr->ia_valid & ATTR_GID) + inode->i_gid = from_vfsgid(mnt_userns, i_user_ns(inode), + attr->ia_vfsgid); } /** @@ -2195,8 +2326,8 @@ static inline bool sb_rdonly(const struct super_block *sb) { return sb->s_flags static inline bool HAS_UNMAPPED_ID(struct user_namespace *mnt_userns, struct inode *inode) { - return !uid_valid(i_uid_into_mnt(mnt_userns, inode)) || - !gid_valid(i_gid_into_mnt(mnt_userns, inode)); + return !vfsuid_valid(i_uid_into_vfsuid(mnt_userns, inode)) || + !vfsgid_valid(i_gid_into_vfsgid(mnt_userns, inode)); } static inline int iocb_flags(struct file *file); @@ -2387,6 +2518,7 @@ static inline void file_accessed(struct file *file) } extern int file_modified(struct file *file); +int kiocb_modified(struct kiocb *iocb); int sync_inode_metadata(struct inode *inode, int wait); diff --git a/include/linux/fscache.h b/include/linux/fscache.h index 72585c9729a2..b86265664879 100644 --- a/include/linux/fscache.h +++ b/include/linux/fscache.h @@ -130,6 +130,7 @@ struct fscache_cookie { #define FSCACHE_COOKIE_DO_PREP_TO_WRITE 12 /* T if cookie needs write preparation */ #define FSCACHE_COOKIE_HAVE_DATA 13 /* T if this cookie has data stored */ #define FSCACHE_COOKIE_IS_HASHED 14 /* T if this cookie is hashed */ +#define FSCACHE_COOKIE_DO_INVALIDATE 15 /* T if cookie needs invalidation */ enum fscache_cookie_state state; u8 advice; /* FSCACHE_ADV_* */ diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 9560734759fa..d7d96c806bff 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -518,8 +518,8 @@ struct fsnotify_mark { struct hlist_node obj_list; /* Head of list of 
marks for an object [mark ref] */ struct fsnotify_mark_connector *connector; - /* Events types to ignore [mark->lock, group->mark_mutex] */ - __u32 ignored_mask; + /* Events types and flags to ignore [mark->lock, group->mark_mutex] */ + __u32 ignore_mask; /* General fsnotify mark flags */ #define FSNOTIFY_MARK_FLAG_ALIVE 0x0001 #define FSNOTIFY_MARK_FLAG_ATTACHED 0x0002 @@ -529,6 +529,7 @@ struct fsnotify_mark { /* fanotify mark flags */ #define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY 0x0100 #define FSNOTIFY_MARK_FLAG_NO_IREF 0x0200 +#define FSNOTIFY_MARK_FLAG_HAS_IGNORE_FLAGS 0x0400 unsigned int flags; /* flags [mark->lock] */ }; @@ -655,15 +656,91 @@ extern void fsnotify_remove_queued_event(struct fsnotify_group *group, /* functions used to manipulate the marks attached to inodes */ -/* Get mask for calculating object interest taking ignored mask into account */ +/* + * Canonical "ignore mask" including event flags. + * + * Note the subtle semantic difference from the legacy ->ignored_mask. + * ->ignored_mask traditionally only meant which events should be ignored, + * while ->ignore_mask also includes flags regarding the type of objects on + * which events should be ignored. + */ +static inline __u32 fsnotify_ignore_mask(struct fsnotify_mark *mark) +{ + __u32 ignore_mask = mark->ignore_mask; + + /* The event flags in ignore mask take effect */ + if (mark->flags & FSNOTIFY_MARK_FLAG_HAS_IGNORE_FLAGS) + return ignore_mask; + + /* + * Legacy behavior: + * - Always ignore events on dir + * - Ignore events on child if parent is watching children + */ + ignore_mask |= FS_ISDIR; + ignore_mask &= ~FS_EVENT_ON_CHILD; + ignore_mask |= mark->mask & FS_EVENT_ON_CHILD; + + return ignore_mask; +} + +/* Legacy ignored_mask - only event types to ignore */ +static inline __u32 fsnotify_ignored_events(struct fsnotify_mark *mark) +{ + return mark->ignore_mask & ALL_FSNOTIFY_EVENTS; +} + +/* + * Check if mask (or ignore mask) should be applied depending if victim is a + * directory and whether it is reported to a watching parent. + */ +static inline bool fsnotify_mask_applicable(__u32 mask, bool is_dir, + int iter_type) +{ + /* Should mask be applied to a directory? */ + if (is_dir && !(mask & FS_ISDIR)) + return false; + + /* Should mask be applied to a child? */ + if (iter_type == FSNOTIFY_ITER_TYPE_PARENT && + !(mask & FS_EVENT_ON_CHILD)) + return false; + + return true; +} + +/* + * Effective ignore mask taking into account if event victim is a + * directory and whether it is reported to a watching parent. 
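[Editor's note] fsnotify_mask_applicable() above encodes the two gates that decide whether a mask (or ignore mask) counts for a given victim. A quick hedged illustration using the standard fsnotify event bits:

	static void example_mask_applicability(void)
	{
		__u32 mask = FS_OPEN | FS_EVENT_ON_CHILD;	/* note: no FS_ISDIR */

		/* Directory victim: not applicable, the mask lacks FS_ISDIR. */
		WARN_ON(fsnotify_mask_applicable(mask, true,
						 FSNOTIFY_ITER_TYPE_INODE));

		/* Reported to a watching parent: applicable, FS_EVENT_ON_CHILD is set. */
		WARN_ON(!fsnotify_mask_applicable(mask, false,
						  FSNOTIFY_ITER_TYPE_PARENT));
	}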
+ */ +static inline __u32 fsnotify_effective_ignore_mask(struct fsnotify_mark *mark, + bool is_dir, int iter_type) +{ + __u32 ignore_mask = fsnotify_ignored_events(mark); + + if (!ignore_mask) + return 0; + + /* For non-dir and non-child, no need to consult the event flags */ + if (!is_dir && iter_type != FSNOTIFY_ITER_TYPE_PARENT) + return ignore_mask; + + ignore_mask = fsnotify_ignore_mask(mark); + if (!fsnotify_mask_applicable(ignore_mask, is_dir, iter_type)) + return 0; + + return ignore_mask & ALL_FSNOTIFY_EVENTS; +} + +/* Get mask for calculating object interest taking ignore mask into account */ static inline __u32 fsnotify_calc_mask(struct fsnotify_mark *mark) { __u32 mask = mark->mask; - if (!mark->ignored_mask) + if (!fsnotify_ignored_events(mark)) return mask; - /* Interest in FS_MODIFY may be needed for clearing ignored mask */ + /* Interest in FS_MODIFY may be needed for clearing ignore mask */ if (!(mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY)) mask |= FS_MODIFY; @@ -671,7 +748,7 @@ static inline __u32 fsnotify_calc_mask(struct fsnotify_mark *mark) * If mark is interested in ignoring events on children, the object must * show interest in those events for fsnotify_parent() to notice it. */ - return mask | (mark->ignored_mask & ALL_FSNOTIFY_EVENTS); + return mask | mark->ignore_mask; } /* Get mask of events for a list of marks */ diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 2d2ccae933c2..0ace7759acd2 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -348,7 +348,7 @@ struct vm_area_struct; #define GFP_DMA32 __GFP_DMA32 #define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM) #define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE | \ - __GFP_SKIP_KASAN_POISON) + __GFP_SKIP_KASAN_POISON | __GFP_SKIP_KASAN_UNPOISON) #define GFP_TRANSHUGE_LIGHT ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \ __GFP_NOMEMALLOC | __GFP_NOWARN) & ~__GFP_RECLAIM) #define GFP_TRANSHUGE (GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM) diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index 54c3c6506503..6aeea1071b1b 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -12,6 +12,8 @@ #include <linux/property.h> #include <linux/types.h> +#include <asm/msi.h> + struct gpio_desc; struct of_phandle_args; struct device_node; @@ -23,6 +25,13 @@ enum gpio_lookup_flags; struct gpio_chip; +union gpio_irq_fwspec { + struct irq_fwspec fwspec; +#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN + msi_alloc_info_t msiinfo; +#endif +}; + #define GPIO_LINE_DIRECTION_IN 1 #define GPIO_LINE_DIRECTION_OUT 0 @@ -103,9 +112,10 @@ struct gpio_irq_chip { * variant named &gpiochip_populate_parent_fwspec_fourcell is also * available. 
*/ - void *(*populate_parent_alloc_arg)(struct gpio_chip *gc, - unsigned int parent_hwirq, - unsigned int parent_type); + int (*populate_parent_alloc_arg)(struct gpio_chip *gc, + union gpio_irq_fwspec *fwspec, + unsigned int parent_hwirq, + unsigned int parent_type); /** * @child_offset_to_irq: @@ -649,28 +659,14 @@ struct bgpio_pdata { #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY -void *gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *gc, +int gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *gc, + union gpio_irq_fwspec *gfwspec, + unsigned int parent_hwirq, + unsigned int parent_type); +int gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *gc, + union gpio_irq_fwspec *gfwspec, unsigned int parent_hwirq, unsigned int parent_type); -void *gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *gc, - unsigned int parent_hwirq, - unsigned int parent_type); - -#else - -static inline void *gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *gc, - unsigned int parent_hwirq, - unsigned int parent_type) -{ - return NULL; -} - -static inline void *gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *gc, - unsigned int parent_hwirq, - unsigned int parent_type) -{ - return NULL; -} #endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */ diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 3af34de54330..56d6a0196534 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -149,19 +149,19 @@ static inline void *kmap_local_folio(struct folio *folio, size_t offset); * It is used in atomic context when code wants to access the contents of a * page that might be allocated from high memory (see __GFP_HIGHMEM), for * example a page in the pagecache. The API has two functions, and they - * can be used in a manner similar to the following: + * can be used in a manner similar to the following:: * - * -- Find the page of interest. -- - * struct page *page = find_get_page(mapping, offset); + * // Find the page of interest. + * struct page *page = find_get_page(mapping, offset); * - * -- Gain access to the contents of that page. -- - * void *vaddr = kmap_atomic(page); + * // Gain access to the contents of that page. + * void *vaddr = kmap_atomic(page); * - * -- Do something to the contents of that page. -- - * memset(vaddr, 0, PAGE_SIZE); + * // Do something to the contents of that page. + * memset(vaddr, 0, PAGE_SIZE); * - * -- Unmap that page. -- - * kunmap_atomic(vaddr); + * // Unmap that page. + * kunmap_atomic(vaddr); * * Note that the kunmap_atomic() call takes the result of the kmap_atomic() * call, not the argument. 
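[Editor's note] With the gpio_irq_chip change above, populate_parent_alloc_arg() no longer allocates: it fills a caller-provided union gpio_irq_fwspec and returns an errno. A hedged sketch of a hierarchical driver's callback under the new signature (the function name is invented); most two-cell parents can simply forward to the generic helper:

	static int example_populate_parent_alloc_arg(struct gpio_chip *gc,
						     union gpio_irq_fwspec *fwspec,
						     unsigned int parent_hwirq,
						     unsigned int parent_type)
	{
		/* Fill fwspec->fwspec for a two-cell parent irqdomain. */
		return gpiochip_populate_parent_fwspec_twocell(gc, fwspec,
							       parent_hwirq,
							       parent_type);
	}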
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index de29821231c9..4ddaf6ad73ef 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -461,4 +461,16 @@ static inline int split_folio_to_list(struct folio *folio, return split_huge_page_to_list(&folio->page, list); } +/* + * archs that select ARCH_WANTS_THP_SWAP but don't support THP_SWP due to + * limitations in the implementation like arm64 MTE can override this to + * false + */ +#ifndef arch_thp_swp_supported +static inline bool arch_thp_swp_supported(void) +{ + return true; +} +#endif + #endif /* _LINUX_HUGE_MM_H */ diff --git a/include/linux/ima.h b/include/linux/ima.h index 426b1744215e..81708ca0ebc7 100644 --- a/include/linux/ima.h +++ b/include/linux/ima.h @@ -140,6 +140,11 @@ static inline int ima_measure_critical_data(const char *event_label, #endif /* CONFIG_IMA */ +#ifdef CONFIG_HAVE_IMA_KEXEC +int __init ima_free_kexec_buffer(void); +int __init ima_get_kexec_buffer(void **addr, size_t *size); +#endif + #ifdef CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT extern bool arch_ima_get_secureboot(void); extern const char * const *arch_get_ima_policy(void); diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 4f29139bbfc3..5fcf89faa31a 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -612,7 +612,6 @@ struct intel_iommu { struct device_domain_info { struct list_head link; /* link to domain siblings */ struct list_head global; /* link to global list */ - struct list_head table; /* link to pasid table */ u32 segment; /* PCI segment number */ u8 bus; /* PCI bus number */ u8 devfn; /* PCI devfn number */ @@ -729,8 +728,6 @@ extern int dmar_ir_support(void); void *alloc_pgtable_page(int node); void free_pgtable_page(void *vaddr); struct intel_iommu *domain_get_iommu(struct dmar_domain *domain); -int for_each_device_domain(int (*fn)(struct device_domain_info *info, - void *data), void *data); void iommu_flush_write_buffer(struct intel_iommu *iommu); int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev); struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn); diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h new file mode 100644 index 000000000000..f7fab3758cb9 --- /dev/null +++ b/include/linux/io_uring_types.h @@ -0,0 +1,574 @@ +#ifndef IO_URING_TYPES_H +#define IO_URING_TYPES_H + +#include <linux/blkdev.h> +#include <linux/task_work.h> +#include <linux/bitmap.h> +#include <linux/llist.h> +#include <uapi/linux/io_uring.h> + +struct io_wq_work_node { + struct io_wq_work_node *next; +}; + +struct io_wq_work_list { + struct io_wq_work_node *first; + struct io_wq_work_node *last; +}; + +struct io_wq_work { + struct io_wq_work_node list; + unsigned flags; + /* place it here instead of io_kiocb as it fills padding and saves 4B */ + int cancel_seq; +}; + +struct io_fixed_file { + /* file * with additional FFS_* flags */ + unsigned long file_ptr; +}; + +struct io_file_table { + struct io_fixed_file *files; + unsigned long *bitmap; + unsigned int alloc_hint; +}; + +struct io_notif; +struct io_notif_slot; + +struct io_hash_bucket { + spinlock_t lock; + struct hlist_head list; +} ____cacheline_aligned_in_smp; + +struct io_hash_table { + struct io_hash_bucket *hbs; + unsigned hash_bits; +}; + +/* + * Arbitrary limit, can be raised if need be + */ +#define IO_RINGFD_REG_MAX 16 + +struct io_uring_task { + /* submission side */ + int cached_refs; + const struct io_ring_ctx *last; + struct io_wq *io_wq; + struct 
file *registered_rings[IO_RINGFD_REG_MAX]; + + struct xarray xa; + struct wait_queue_head wait; + atomic_t in_idle; + atomic_t inflight_tracked; + struct percpu_counter inflight; + + struct { /* task_work */ + struct llist_head task_list; + struct callback_head task_work; + } ____cacheline_aligned_in_smp; +}; + +struct io_uring { + u32 head ____cacheline_aligned_in_smp; + u32 tail ____cacheline_aligned_in_smp; +}; + +/* + * This data is shared with the application through the mmap at offsets + * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING. + * + * The offsets to the member fields are published through struct + * io_sqring_offsets when calling io_uring_setup. + */ +struct io_rings { + /* + * Head and tail offsets into the ring; the offsets need to be + * masked to get valid indices. + * + * The kernel controls head of the sq ring and the tail of the cq ring, + * and the application controls tail of the sq ring and the head of the + * cq ring. + */ + struct io_uring sq, cq; + /* + * Bitmasks to apply to head and tail offsets (constant, equals + * ring_entries - 1) + */ + u32 sq_ring_mask, cq_ring_mask; + /* Ring sizes (constant, power of 2) */ + u32 sq_ring_entries, cq_ring_entries; + /* + * Number of invalid entries dropped by the kernel due to + * invalid index stored in array + * + * Written by the kernel, shouldn't be modified by the + * application (i.e. get number of "new events" by comparing to + * cached value). + * + * After a new SQ head value was read by the application this + * counter includes all submissions that were dropped reaching + * the new SQ head (and possibly more). + */ + u32 sq_dropped; + /* + * Runtime SQ flags + * + * Written by the kernel, shouldn't be modified by the + * application. + * + * The application needs a full memory barrier before checking + * for IORING_SQ_NEED_WAKEUP after updating the sq tail. + */ + atomic_t sq_flags; + /* + * Runtime CQ flags + * + * Written by the application, shouldn't be modified by the + * kernel. + */ + u32 cq_flags; + /* + * Number of completion events lost because the queue was full; + * this should be avoided by the application by making sure + * there are not more requests pending than there is space in + * the completion queue. + * + * Written by the kernel, shouldn't be modified by the + * application (i.e. get number of "new events" by comparing to + * cached value). + * + * As completion events come in out of order this counter is not + * ordered with any other data. + */ + u32 cq_overflow; + /* + * Ring buffer of completion events. + * + * The kernel writes completion events fresh every time they are + * produced, so the application is allowed to modify pending + * entries. 
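[Editor's note] As the comments above spell out, the shared sq/cq indices are free-running and must be masked before indexing the rings. A minimal, hedged illustration of peeking at one completion in the shared area; the memory barriers real code needs (smp_load_acquire()/smp_store_release()) are deliberately elided and example_peek_cqe() is an invented name:

	static struct io_uring_cqe *example_peek_cqe(struct io_rings *rings)
	{
		u32 head = rings->cq.head;	/* consumer-owned index */
		u32 tail = rings->cq.tail;	/* kernel-owned index   */

		if (head == tail)
			return NULL;		/* ring is empty */

		/* Mask the free-running index into the CQE array. */
		return &rings->cqes[head & rings->cq_ring_mask];
	}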
+ */ + struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp; +}; + +struct io_restriction { + DECLARE_BITMAP(register_op, IORING_REGISTER_LAST); + DECLARE_BITMAP(sqe_op, IORING_OP_LAST); + u8 sqe_flags_allowed; + u8 sqe_flags_required; + bool registered; +}; + +struct io_submit_link { + struct io_kiocb *head; + struct io_kiocb *last; +}; + +struct io_submit_state { + /* inline/task_work completion list, under ->uring_lock */ + struct io_wq_work_node free_list; + /* batch completion logic */ + struct io_wq_work_list compl_reqs; + struct io_submit_link link; + + bool plug_started; + bool need_plug; + unsigned short submit_nr; + struct blk_plug plug; +}; + +struct io_ev_fd { + struct eventfd_ctx *cq_ev_fd; + unsigned int eventfd_async: 1; + struct rcu_head rcu; +}; + +struct io_alloc_cache { + struct hlist_head list; + unsigned int nr_cached; +}; + +struct io_ring_ctx { + /* const or read-mostly hot data */ + struct { + struct percpu_ref refs; + + struct io_rings *rings; + unsigned int flags; + enum task_work_notify_mode notify_method; + unsigned int compat: 1; + unsigned int drain_next: 1; + unsigned int restricted: 1; + unsigned int off_timeout_used: 1; + unsigned int drain_active: 1; + unsigned int drain_disabled: 1; + unsigned int has_evfd: 1; + unsigned int syscall_iopoll: 1; + } ____cacheline_aligned_in_smp; + + /* submission data */ + struct { + struct mutex uring_lock; + + /* + * Ring buffer of indices into array of io_uring_sqe, which is + * mmapped by the application using the IORING_OFF_SQES offset. + * + * This indirection could e.g. be used to assign fixed + * io_uring_sqe entries to operations and only submit them to + * the queue when needed. + * + * The kernel modifies neither the indices array nor the entries + * array. + */ + u32 *sq_array; + struct io_uring_sqe *sq_sqes; + unsigned cached_sq_head; + unsigned sq_entries; + + /* + * Fixed resources fast path, should be accessed only under + * uring_lock, and updated through io_uring_register(2) + */ + struct io_rsrc_node *rsrc_node; + int rsrc_cached_refs; + atomic_t cancel_seq; + struct io_file_table file_table; + unsigned nr_user_files; + unsigned nr_user_bufs; + struct io_mapped_ubuf **user_bufs; + struct io_notif_slot *notif_slots; + unsigned nr_notif_slots; + + struct io_submit_state submit_state; + + struct io_buffer_list *io_bl; + struct xarray io_bl_xa; + struct list_head io_buffers_cache; + + struct io_hash_table cancel_table_locked; + struct list_head cq_overflow_list; + struct io_alloc_cache apoll_cache; + struct io_alloc_cache netmsg_cache; + } ____cacheline_aligned_in_smp; + + /* IRQ completion list, under ->completion_lock */ + struct io_wq_work_list locked_free_list; + unsigned int locked_free_nr; + + const struct cred *sq_creds; /* cred used for __io_sq_thread() */ + struct io_sq_data *sq_data; /* if using sq thread polling */ + + struct wait_queue_head sqo_sq_wait; + struct list_head sqd_list; + + unsigned long check_cq; + + unsigned int file_alloc_start; + unsigned int file_alloc_end; + + struct xarray personalities; + u32 pers_next; + + struct { + /* + * We cache a range of free CQEs we can use, once exhausted it + * should go through a slower range setup, see __io_get_cqe() + */ + struct io_uring_cqe *cqe_cached; + struct io_uring_cqe *cqe_sentinel; + + unsigned cached_cq_tail; + unsigned cq_entries; + struct io_ev_fd __rcu *io_ev_fd; + struct wait_queue_head cq_wait; + unsigned cq_extra; + } ____cacheline_aligned_in_smp; + + struct { + spinlock_t completion_lock; + + /* + * ->iopoll_list is protected 
by the ctx->uring_lock for + * io_uring instances that don't use IORING_SETUP_SQPOLL. + * For SQPOLL, only the single threaded io_sq_thread() will + * manipulate the list, hence no extra locking is needed there. + */ + struct io_wq_work_list iopoll_list; + struct io_hash_table cancel_table; + bool poll_multi_queue; + + struct list_head io_buffers_comp; + } ____cacheline_aligned_in_smp; + + /* timeouts */ + struct { + spinlock_t timeout_lock; + atomic_t cq_timeouts; + struct list_head timeout_list; + struct list_head ltimeout_list; + unsigned cq_last_tm_flush; + } ____cacheline_aligned_in_smp; + + /* Keep this last, we don't need it for the fast path */ + + struct io_restriction restrictions; + struct task_struct *submitter_task; + + /* slow path rsrc auxilary data, used by update/register */ + struct io_rsrc_node *rsrc_backup_node; + struct io_mapped_ubuf *dummy_ubuf; + struct io_rsrc_data *file_data; + struct io_rsrc_data *buf_data; + + struct delayed_work rsrc_put_work; + struct llist_head rsrc_put_llist; + struct list_head rsrc_ref_list; + spinlock_t rsrc_ref_lock; + + struct list_head io_buffers_pages; + + #if defined(CONFIG_UNIX) + struct socket *ring_sock; + #endif + /* hashed buffered write serialization */ + struct io_wq_hash *hash_map; + + /* Only used for accounting purposes */ + struct user_struct *user; + struct mm_struct *mm_account; + + /* ctx exit and cancelation */ + struct llist_head fallback_llist; + struct delayed_work fallback_work; + struct work_struct exit_work; + struct list_head tctx_list; + struct completion ref_comp; + + /* io-wq management, e.g. thread count */ + u32 iowq_limits[2]; + bool iowq_limits_set; + + struct list_head defer_list; + unsigned sq_thread_idle; + /* protected by ->completion_lock */ + unsigned evfd_last_cq_tail; +}; + +enum { + REQ_F_FIXED_FILE_BIT = IOSQE_FIXED_FILE_BIT, + REQ_F_IO_DRAIN_BIT = IOSQE_IO_DRAIN_BIT, + REQ_F_LINK_BIT = IOSQE_IO_LINK_BIT, + REQ_F_HARDLINK_BIT = IOSQE_IO_HARDLINK_BIT, + REQ_F_FORCE_ASYNC_BIT = IOSQE_ASYNC_BIT, + REQ_F_BUFFER_SELECT_BIT = IOSQE_BUFFER_SELECT_BIT, + REQ_F_CQE_SKIP_BIT = IOSQE_CQE_SKIP_SUCCESS_BIT, + + /* first byte is taken by user flags, shift it to not overlap */ + REQ_F_FAIL_BIT = 8, + REQ_F_INFLIGHT_BIT, + REQ_F_CUR_POS_BIT, + REQ_F_NOWAIT_BIT, + REQ_F_LINK_TIMEOUT_BIT, + REQ_F_NEED_CLEANUP_BIT, + REQ_F_POLLED_BIT, + REQ_F_BUFFER_SELECTED_BIT, + REQ_F_BUFFER_RING_BIT, + REQ_F_REISSUE_BIT, + REQ_F_CREDS_BIT, + REQ_F_REFCOUNT_BIT, + REQ_F_ARM_LTIMEOUT_BIT, + REQ_F_ASYNC_DATA_BIT, + REQ_F_SKIP_LINK_CQES_BIT, + REQ_F_SINGLE_POLL_BIT, + REQ_F_DOUBLE_POLL_BIT, + REQ_F_PARTIAL_IO_BIT, + REQ_F_CQE32_INIT_BIT, + REQ_F_APOLL_MULTISHOT_BIT, + REQ_F_CLEAR_POLLIN_BIT, + REQ_F_HASH_LOCKED_BIT, + /* keep async read/write and isreg together and in order */ + REQ_F_SUPPORT_NOWAIT_BIT, + REQ_F_ISREG_BIT, + + /* not a real bit, just to check we're not overflowing the space */ + __REQ_F_LAST_BIT, +}; + +enum { + /* ctx owns file */ + REQ_F_FIXED_FILE = BIT(REQ_F_FIXED_FILE_BIT), + /* drain existing IO first */ + REQ_F_IO_DRAIN = BIT(REQ_F_IO_DRAIN_BIT), + /* linked sqes */ + REQ_F_LINK = BIT(REQ_F_LINK_BIT), + /* doesn't sever on completion < 0 */ + REQ_F_HARDLINK = BIT(REQ_F_HARDLINK_BIT), + /* IOSQE_ASYNC */ + REQ_F_FORCE_ASYNC = BIT(REQ_F_FORCE_ASYNC_BIT), + /* IOSQE_BUFFER_SELECT */ + REQ_F_BUFFER_SELECT = BIT(REQ_F_BUFFER_SELECT_BIT), + /* IOSQE_CQE_SKIP_SUCCESS */ + REQ_F_CQE_SKIP = BIT(REQ_F_CQE_SKIP_BIT), + + /* fail rest of links */ + REQ_F_FAIL = BIT(REQ_F_FAIL_BIT), + /* on inflight list, should be 
cancelled and waited on exit reliably */ + REQ_F_INFLIGHT = BIT(REQ_F_INFLIGHT_BIT), + /* read/write uses file position */ + REQ_F_CUR_POS = BIT(REQ_F_CUR_POS_BIT), + /* must not punt to workers */ + REQ_F_NOWAIT = BIT(REQ_F_NOWAIT_BIT), + /* has or had linked timeout */ + REQ_F_LINK_TIMEOUT = BIT(REQ_F_LINK_TIMEOUT_BIT), + /* needs cleanup */ + REQ_F_NEED_CLEANUP = BIT(REQ_F_NEED_CLEANUP_BIT), + /* already went through poll handler */ + REQ_F_POLLED = BIT(REQ_F_POLLED_BIT), + /* buffer already selected */ + REQ_F_BUFFER_SELECTED = BIT(REQ_F_BUFFER_SELECTED_BIT), + /* buffer selected from ring, needs commit */ + REQ_F_BUFFER_RING = BIT(REQ_F_BUFFER_RING_BIT), + /* caller should reissue async */ + REQ_F_REISSUE = BIT(REQ_F_REISSUE_BIT), + /* supports async reads/writes */ + REQ_F_SUPPORT_NOWAIT = BIT(REQ_F_SUPPORT_NOWAIT_BIT), + /* regular file */ + REQ_F_ISREG = BIT(REQ_F_ISREG_BIT), + /* has creds assigned */ + REQ_F_CREDS = BIT(REQ_F_CREDS_BIT), + /* skip refcounting if not set */ + REQ_F_REFCOUNT = BIT(REQ_F_REFCOUNT_BIT), + /* there is a linked timeout that has to be armed */ + REQ_F_ARM_LTIMEOUT = BIT(REQ_F_ARM_LTIMEOUT_BIT), + /* ->async_data allocated */ + REQ_F_ASYNC_DATA = BIT(REQ_F_ASYNC_DATA_BIT), + /* don't post CQEs while failing linked requests */ + REQ_F_SKIP_LINK_CQES = BIT(REQ_F_SKIP_LINK_CQES_BIT), + /* single poll may be active */ + REQ_F_SINGLE_POLL = BIT(REQ_F_SINGLE_POLL_BIT), + /* double poll may active */ + REQ_F_DOUBLE_POLL = BIT(REQ_F_DOUBLE_POLL_BIT), + /* request has already done partial IO */ + REQ_F_PARTIAL_IO = BIT(REQ_F_PARTIAL_IO_BIT), + /* fast poll multishot mode */ + REQ_F_APOLL_MULTISHOT = BIT(REQ_F_APOLL_MULTISHOT_BIT), + /* ->extra1 and ->extra2 are initialised */ + REQ_F_CQE32_INIT = BIT(REQ_F_CQE32_INIT_BIT), + /* recvmsg special flag, clear EPOLLIN */ + REQ_F_CLEAR_POLLIN = BIT(REQ_F_CLEAR_POLLIN_BIT), + /* hashed into ->cancel_hash_locked, protected by ->uring_lock */ + REQ_F_HASH_LOCKED = BIT(REQ_F_HASH_LOCKED_BIT), +}; + +typedef void (*io_req_tw_func_t)(struct io_kiocb *req, bool *locked); + +struct io_task_work { + struct llist_node node; + io_req_tw_func_t func; +}; + +struct io_cqe { + __u64 user_data; + __s32 res; + /* fd initially, then cflags for completion */ + union { + __u32 flags; + int fd; + }; +}; + +/* + * Each request type overlays its private data structure on top of this one. + * They must not exceed this one in size. + */ +struct io_cmd_data { + struct file *file; + /* each command gets 56 bytes of data */ + __u8 data[56]; +}; + +#define io_kiocb_to_cmd(req) ((void *) &(req)->cmd) +#define cmd_to_io_kiocb(ptr) ((struct io_kiocb *) ptr) + +struct io_kiocb { + union { + /* + * NOTE! Each of the io_kiocb union members has the file pointer + * as the first entry in their struct definition. So you can + * access the file pointer through any of the sub-structs, + * or directly as just 'file' in this struct. + */ + struct file *file; + struct io_cmd_data cmd; + }; + + u8 opcode; + /* polled IO has completed */ + u8 iopoll_completed; + /* + * Can be either a fixed buffer index, or used with provided buffers. + * For the latter, before issue it points to the buffer group ID, + * and after selection it points to the buffer ID itself. 
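[Editor's note] The io_cmd_data comment above describes the overlay contract: each opcode's private struct leads with the file pointer and must fit within the 56-byte data area. A hedged sketch of a hypothetical op-private type using io_kiocb_to_cmd() exactly as defined in this header (io_example and io_example_prep are invented names):

	struct io_example {
		struct file	*file;		/* must be first, like every sub-struct */
		u64		user_arg;
	};
	static_assert(sizeof(struct io_example) <= sizeof(struct io_cmd_data));

	static int io_example_prep(struct io_kiocb *req,
				   const struct io_uring_sqe *sqe)
	{
		struct io_example *ex = io_kiocb_to_cmd(req);

		ex->user_arg = READ_ONCE(sqe->addr);
		return 0;
	}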
+ */ + u16 buf_index; + unsigned int flags; + + struct io_cqe cqe; + + struct io_ring_ctx *ctx; + struct task_struct *task; + + struct io_rsrc_node *rsrc_node; + + union { + /* store used ubuf, so we can prevent reloading */ + struct io_mapped_ubuf *imu; + + /* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */ + struct io_buffer *kbuf; + + /* + * stores buffer ID for ring provided buffers, valid IFF + * REQ_F_BUFFER_RING is set. + */ + struct io_buffer_list *buf_list; + }; + + union { + /* used by request caches, completion batching and iopoll */ + struct io_wq_work_node comp_list; + /* cache ->apoll->events */ + __poll_t apoll_events; + }; + atomic_t refs; + atomic_t poll_refs; + struct io_task_work io_task_work; + /* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */ + union { + struct hlist_node hash_node; + struct { + u64 extra1; + u64 extra2; + }; + }; + /* internal polling, see IORING_FEAT_FAST_POLL */ + struct async_poll *apoll; + /* opcode allocated if it needs to store data for async defer */ + void *async_data; + /* linked requests, IFF REQ_F_HARDLINK or REQ_F_LINK are set */ + struct io_kiocb *link; + /* custom credentials, valid IFF REQ_F_CREDS is set */ + const struct cred *creds; + struct io_wq_work work; +}; + +struct io_overflow_cqe { + struct list_head list; + struct io_uring_cqe cqe; +}; + +#endif diff --git a/include/linux/irq.h b/include/linux/irq.h index 505308253d23..c3eb89606c2b 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -151,7 +151,9 @@ struct irq_common_data { #endif void *handler_data; struct msi_desc *msi_desc; +#ifdef CONFIG_SMP cpumask_var_t affinity; +#endif #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK cpumask_var_t effective_affinity; #endif @@ -879,21 +881,34 @@ static inline int irq_data_get_node(struct irq_data *d) return irq_common_data_get_node(d->common); } -static inline struct cpumask *irq_get_affinity_mask(int irq) +static inline +const struct cpumask *irq_data_get_affinity_mask(struct irq_data *d) { - struct irq_data *d = irq_get_irq_data(irq); +#ifdef CONFIG_SMP + return d->common->affinity; +#else + return cpumask_of(0); +#endif +} - return d ? d->common->affinity : NULL; +static inline void irq_data_update_affinity(struct irq_data *d, + const struct cpumask *m) +{ +#ifdef CONFIG_SMP + cpumask_copy(d->common->affinity, m); +#endif } -static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d) +static inline const struct cpumask *irq_get_affinity_mask(int irq) { - return d->common->affinity; + struct irq_data *d = irq_get_irq_data(irq); + + return d ? 
irq_data_get_affinity_mask(d) : NULL; } #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK static inline -struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d) +const struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d) { return d->common->effective_affinity; } @@ -908,13 +923,14 @@ static inline void irq_data_update_effective_affinity(struct irq_data *d, { } static inline -struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d) +const struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d) { - return d->common->affinity; + return irq_data_get_affinity_mask(d); } #endif -static inline struct cpumask *irq_get_effective_affinity_mask(unsigned int irq) +static inline +const struct cpumask *irq_get_effective_affinity_mask(unsigned int irq) { struct irq_data *d = irq_get_irq_data(irq); @@ -1121,6 +1137,7 @@ int irq_gc_set_wake(struct irq_data *d, unsigned int on); /* Setup functions for irq_chip_generic */ int irq_map_generic_chip(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw_irq); +void irq_unmap_generic_chip(struct irq_domain *d, unsigned int virq); struct irq_chip_generic * irq_alloc_generic_chip(const char *name, int nr_ct, unsigned int irq_base, void __iomem *reg_base, irq_flow_handler_t handler); diff --git a/include/linux/irqchip/mmp.h b/include/linux/irqchip/mmp.h index cb8455c87c8a..aa1813749a4f 100644 --- a/include/linux/irqchip/mmp.h +++ b/include/linux/irqchip/mmp.h @@ -4,4 +4,7 @@ extern struct irq_chip icu_irq_chip; +extern void icu_init_irq(void); +extern void mmp2_init_icu(void); + #endif /* __IRQCHIP_MMP_H */ diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index a77584593f7d..1cd4e36890fb 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -209,14 +209,15 @@ static inline void irq_set_handler_locked(struct irq_data *data, * Must be called with irq_desc locked and valid parameters. 
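[Editor's note] The affinity accessors reworked above now hand out const masks and hide ->common->affinity behind CONFIG_SMP, so writers must go through irq_data_update_affinity(). A hedged sketch of an irqchip ->irq_set_affinity() callback in the new style; example_hw_route_irq() is a placeholder for the actual hardware programming:

	static int example_irq_set_affinity(struct irq_data *d,
					    const struct cpumask *mask, bool force)
	{
		unsigned int cpu = cpumask_first(mask);

		example_hw_route_irq(irqd_to_hwirq(d), cpu);	/* hypothetical */
		irq_data_update_affinity(d, mask);
		irq_data_update_effective_affinity(d, cpumask_of(cpu));
		return IRQ_SET_MASK_OK;
	}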
*/ static inline void -irq_set_chip_handler_name_locked(struct irq_data *data, struct irq_chip *chip, +irq_set_chip_handler_name_locked(struct irq_data *data, + const struct irq_chip *chip, irq_flow_handler_t handler, const char *name) { struct irq_desc *desc = irq_data_to_desc(data); desc->handle_irq = handler; desc->name = name; - data->chip = chip; + data->chip = (struct irq_chip *)chip; } bool irq_check_status_bit(unsigned int irq, unsigned int bitmask); diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index bf1eef337a07..570831ca9951 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -220,8 +220,6 @@ extern void jump_label_lock(void); extern void jump_label_unlock(void); extern void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type); -extern void arch_jump_label_transform_static(struct jump_entry *entry, - enum jump_label_type type); extern bool arch_jump_label_transform_queue(struct jump_entry *entry, enum jump_label_type type); extern void arch_jump_label_transform_apply(void); @@ -230,12 +228,12 @@ extern void static_key_slow_inc(struct static_key *key); extern void static_key_slow_dec(struct static_key *key); extern void static_key_slow_inc_cpuslocked(struct static_key *key); extern void static_key_slow_dec_cpuslocked(struct static_key *key); -extern void jump_label_apply_nops(struct module *mod); extern int static_key_count(struct static_key *key); extern void static_key_enable(struct static_key *key); extern void static_key_disable(struct static_key *key); extern void static_key_enable_cpuslocked(struct static_key *key); extern void static_key_disable_cpuslocked(struct static_key *key); +extern enum jump_label_type jump_label_init_type(struct jump_entry *entry); /* * We should be using ATOMIC_INIT() for initializing .enabled, but @@ -303,11 +301,6 @@ static inline int jump_label_text_reserved(void *start, void *end) static inline void jump_label_lock(void) {} static inline void jump_label_unlock(void) {} -static inline int jump_label_apply_nops(struct module *mod) -{ - return 0; -} - static inline void static_key_enable(struct static_key *key) { STATIC_KEY_CHECK_USE(key); diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index 69ae6b278464..ddb5a358fd82 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h @@ -28,6 +28,9 @@ enum cpu_usage_stat { CPUTIME_STEAL, CPUTIME_GUEST, CPUTIME_GUEST_NICE, +#ifdef CONFIG_SCHED_CORE + CPUTIME_FORCEIDLE, +#endif NR_STATS, }; @@ -115,4 +118,8 @@ extern void account_process_tick(struct task_struct *, int user); extern void account_idle_ticks(unsigned long ticks); +#ifdef CONFIG_SCHED_CORE +extern void __account_forceidle_time(struct task_struct *tsk, u64 delta); +#endif + #endif /* _LINUX_KERNEL_STAT_H */ diff --git a/include/linux/kexec.h b/include/linux/kexec.h index ce6536f1d269..475683cd67f1 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -452,6 +452,12 @@ static inline int kexec_crash_loaded(void) { return 0; } #define kexec_in_progress false #endif /* CONFIG_KEXEC_CORE */ +#ifdef CONFIG_KEXEC_SIG +void set_kexec_sig_enforced(void); +#else +static inline void set_kexec_sig_enforced(void) {} +#endif + #endif /* !defined(__ASSEBMLY__) */ #endif /* LINUX_KEXEC_H */ diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index c20f2d55840c..90a45ef7203b 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -1513,7 +1513,7 @@ static inline void kvm_arch_end_assignment(struct kvm *kvm) { 
} -static inline bool kvm_arch_has_assigned_device(struct kvm *kvm) +static __always_inline bool kvm_arch_has_assigned_device(struct kvm *kvm) { return false; } @@ -1822,6 +1822,15 @@ struct _kvm_stats_desc { STATS_DESC_PEAK(SCOPE, name, KVM_STATS_UNIT_NONE, \ KVM_STATS_BASE_POW10, 0) +/* Instantaneous boolean value, read only */ +#define STATS_DESC_IBOOLEAN(SCOPE, name) \ + STATS_DESC_INSTANT(SCOPE, name, KVM_STATS_UNIT_BOOLEAN, \ + KVM_STATS_BASE_POW10, 0) +/* Peak (sticky) boolean value, read/write */ +#define STATS_DESC_PBOOLEAN(SCOPE, name) \ + STATS_DESC_PEAK(SCOPE, name, KVM_STATS_UNIT_BOOLEAN, \ + KVM_STATS_BASE_POW10, 0) + /* Cumulative time in nanosecond */ #define STATS_DESC_TIME_NSEC(SCOPE, name) \ STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_SECONDS, \ @@ -1853,7 +1862,7 @@ struct _kvm_stats_desc { HALT_POLL_HIST_COUNT), \ STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_wait_hist, \ HALT_POLL_HIST_COUNT), \ - STATS_DESC_ICOUNTER(VCPU_GENERIC, blocking) + STATS_DESC_IBOOLEAN(VCPU_GENERIC, blocking) extern struct dentry *kvm_debugfs_dir; diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index b6829b970093..1f1099dac3f0 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -188,7 +188,7 @@ static inline void lockdep_init_map_waits(struct lockdep_map *lock, const char *name, struct lock_class_key *key, int subclass, u8 inner, u8 outer) { - lockdep_init_map_type(lock, name, key, subclass, inner, LD_WAIT_INV, LD_LOCK_NORMAL); + lockdep_init_map_type(lock, name, key, subclass, inner, outer, LD_LOCK_NORMAL); } static inline void @@ -211,24 +211,28 @@ static inline void lockdep_init_map(struct lockdep_map *lock, const char *name, * or they are too narrow (they suffer from a false class-split): */ #define lockdep_set_class(lock, key) \ - lockdep_init_map_waits(&(lock)->dep_map, #key, key, 0, \ - (lock)->dep_map.wait_type_inner, \ - (lock)->dep_map.wait_type_outer) + lockdep_init_map_type(&(lock)->dep_map, #key, key, 0, \ + (lock)->dep_map.wait_type_inner, \ + (lock)->dep_map.wait_type_outer, \ + (lock)->dep_map.lock_type) #define lockdep_set_class_and_name(lock, key, name) \ - lockdep_init_map_waits(&(lock)->dep_map, name, key, 0, \ - (lock)->dep_map.wait_type_inner, \ - (lock)->dep_map.wait_type_outer) + lockdep_init_map_type(&(lock)->dep_map, name, key, 0, \ + (lock)->dep_map.wait_type_inner, \ + (lock)->dep_map.wait_type_outer, \ + (lock)->dep_map.lock_type) #define lockdep_set_class_and_subclass(lock, key, sub) \ - lockdep_init_map_waits(&(lock)->dep_map, #key, key, sub,\ - (lock)->dep_map.wait_type_inner, \ - (lock)->dep_map.wait_type_outer) + lockdep_init_map_type(&(lock)->dep_map, #key, key, sub, \ + (lock)->dep_map.wait_type_inner, \ + (lock)->dep_map.wait_type_outer, \ + (lock)->dep_map.lock_type) #define lockdep_set_subclass(lock, sub) \ - lockdep_init_map_waits(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub,\ - (lock)->dep_map.wait_type_inner, \ - (lock)->dep_map.wait_type_outer) + lockdep_init_map_type(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub,\ + (lock)->dep_map.wait_type_inner, \ + (lock)->dep_map.wait_type_outer, \ + (lock)->dep_map.lock_type) #define lockdep_set_novalidate_class(lock) \ lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock) diff --git a/include/linux/lockref.h b/include/linux/lockref.h index 99f17cc8e163..c3a1f78bc884 100644 --- a/include/linux/lockref.h +++ b/include/linux/lockref.h @@ -38,7 +38,6 @@ extern void lockref_get(struct lockref *); extern int lockref_put_return(struct lockref *); extern 
int lockref_get_not_zero(struct lockref *); extern int lockref_put_not_zero(struct lockref *); -extern int lockref_get_or_lock(struct lockref *); extern int lockref_put_or_lock(struct lockref *); extern void lockref_mark_dead(struct lockref *); diff --git a/include/linux/memregion.h b/include/linux/memregion.h index e11595256cac..c04c4fd2e209 100644 --- a/include/linux/memregion.h +++ b/include/linux/memregion.h @@ -16,7 +16,7 @@ static inline int memregion_alloc(gfp_t gfp) { return -ENOMEM; } -void memregion_free(int id) +static inline void memregion_free(int id) { } #endif diff --git a/include/linux/mfd/bcm2835-pm.h b/include/linux/mfd/bcm2835-pm.h index ed37dc40e82a..f70a810c55f7 100644 --- a/include/linux/mfd/bcm2835-pm.h +++ b/include/linux/mfd/bcm2835-pm.h @@ -9,6 +9,7 @@ struct bcm2835_pm { struct device *dev; void __iomem *base; void __iomem *asb; + void __iomem *rpivid_asb; }; #endif /* BCM2835_MFD_PM_H */ diff --git a/include/linux/mm.h b/include/linux/mm.h index cf3d0d673f6b..7898e29bcfb5 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1130,23 +1130,27 @@ static inline bool is_zone_movable_page(const struct page *page) #if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_FS_DAX) DECLARE_STATIC_KEY_FALSE(devmap_managed_key); -bool __put_devmap_managed_page(struct page *page); -static inline bool put_devmap_managed_page(struct page *page) +bool __put_devmap_managed_page_refs(struct page *page, int refs); +static inline bool put_devmap_managed_page_refs(struct page *page, int refs) { if (!static_branch_unlikely(&devmap_managed_key)) return false; if (!is_zone_device_page(page)) return false; - return __put_devmap_managed_page(page); + return __put_devmap_managed_page_refs(page, refs); } - #else /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */ -static inline bool put_devmap_managed_page(struct page *page) +static inline bool put_devmap_managed_page_refs(struct page *page, int refs) { return false; } #endif /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */ +static inline bool put_devmap_managed_page(struct page *page) +{ + return put_devmap_managed_page_refs(page, 1); +} + /* 127: arbitrary random number, small enough to assemble well */ #define folio_ref_zero_or_close_to_overflow(folio) \ ((unsigned int) folio_ref_count(folio) + 127u <= 127u) diff --git a/include/linux/mnt_idmapping.h b/include/linux/mnt_idmapping.h index ee5a217de2a8..f6e5369d2928 100644 --- a/include/linux/mnt_idmapping.h +++ b/include/linux/mnt_idmapping.h @@ -13,6 +13,129 @@ struct user_namespace; */ extern struct user_namespace init_user_ns; +typedef struct { + uid_t val; +} vfsuid_t; + +typedef struct { + gid_t val; +} vfsgid_t; + +static_assert(sizeof(vfsuid_t) == sizeof(kuid_t)); +static_assert(sizeof(vfsgid_t) == sizeof(kgid_t)); +static_assert(offsetof(vfsuid_t, val) == offsetof(kuid_t, val)); +static_assert(offsetof(vfsgid_t, val) == offsetof(kgid_t, val)); + +#ifdef CONFIG_MULTIUSER +static inline uid_t __vfsuid_val(vfsuid_t uid) +{ + return uid.val; +} + +static inline gid_t __vfsgid_val(vfsgid_t gid) +{ + return gid.val; +} +#else +static inline uid_t __vfsuid_val(vfsuid_t uid) +{ + return 0; +} + +static inline gid_t __vfsgid_val(vfsgid_t gid) +{ + return 0; +} +#endif + +static inline bool vfsuid_valid(vfsuid_t uid) +{ + return __vfsuid_val(uid) != (uid_t)-1; +} + +static inline bool vfsgid_valid(vfsgid_t gid) +{ + return __vfsgid_val(gid) != (gid_t)-1; +} + +static inline bool vfsuid_eq(vfsuid_t left, vfsuid_t right) +{ + return vfsuid_valid(left) && __vfsuid_val(left) == __vfsuid_val(right); +} + 
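[Editor's note] One property of the new vfsuid_t/vfsgid_t helpers worth calling out: vfsuid_eq() above (and vfsgid_eq() just below) require the left-hand id to be valid, so two unmapped ids never compare equal. A small hedged illustration, assuming CONFIG_MULTIUSER:

	static void example_vfsuid_eq_semantics(void)
	{
		vfsuid_t mapped   = (vfsuid_t){ 1000 };
		vfsuid_t unmapped = (vfsuid_t){ (uid_t)-1 };

		WARN_ON(!vfsuid_eq(mapped, mapped));	/* equal valid ids match   */
		WARN_ON(vfsuid_eq(unmapped, unmapped));	/* invalid ids never match */
	}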
+static inline bool vfsgid_eq(vfsgid_t left, vfsgid_t right) +{ + return vfsgid_valid(left) && __vfsgid_val(left) == __vfsgid_val(right); +} + +/** + * vfsuid_eq_kuid - check whether kuid and vfsuid have the same value + * @vfsuid: the vfsuid to compare + * @kuid: the kuid to compare + * + * Check whether @vfsuid and @kuid have the same values. + * + * Return: true if @vfsuid and @kuid have the same value, false if not. + * Comparison between two invalid uids returns false. + */ +static inline bool vfsuid_eq_kuid(vfsuid_t vfsuid, kuid_t kuid) +{ + return vfsuid_valid(vfsuid) && __vfsuid_val(vfsuid) == __kuid_val(kuid); +} + +/** + * vfsgid_eq_kgid - check whether kgid and vfsgid have the same value + * @vfsgid: the vfsgid to compare + * @kgid: the kgid to compare + * + * Check whether @vfsgid and @kgid have the same values. + * + * Return: true if @vfsgid and @kgid have the same value, false if not. + * Comparison between two invalid gids returns false. + */ +static inline bool vfsgid_eq_kgid(vfsgid_t vfsgid, kgid_t kgid) +{ + return vfsgid_valid(vfsgid) && __vfsgid_val(vfsgid) == __kgid_val(kgid); +} + +/* + * vfs{g,u}ids are created from k{g,u}ids. + * We don't allow them to be created from regular {u,g}id. + */ +#define VFSUIDT_INIT(val) (vfsuid_t){ __kuid_val(val) } +#define VFSGIDT_INIT(val) (vfsgid_t){ __kgid_val(val) } + +#define INVALID_VFSUID VFSUIDT_INIT(INVALID_UID) +#define INVALID_VFSGID VFSGIDT_INIT(INVALID_GID) + +/* + * Allow a vfs{g,u}id to be used as a k{g,u}id where we want to compare + * whether the mapped value is identical to value of a k{g,u}id. + */ +#define AS_KUIDT(val) (kuid_t){ __vfsuid_val(val) } +#define AS_KGIDT(val) (kgid_t){ __vfsgid_val(val) } + +#ifdef CONFIG_MULTIUSER +/** + * vfsgid_in_group_p() - check whether a vfsuid matches the caller's groups + * @vfsgid: the mnt gid to match + * + * This function can be used to determine whether @vfsuid matches any of the + * caller's groups. + * + * Return: 1 if vfsuid matches caller's groups, 0 if not. + */ +static inline int vfsgid_in_group_p(vfsgid_t vfsgid) +{ + return in_group_p(AS_KGIDT(vfsgid)); +} +#else +static inline int vfsgid_in_group_p(vfsgid_t vfsgid) +{ + return 1; +} +#endif + /** * initial_idmapping - check whether this is the initial mapping * @ns: idmapping to check @@ -48,7 +171,7 @@ static inline bool no_idmapping(const struct user_namespace *mnt_userns, } /** - * mapped_kuid_fs - map a filesystem kuid into a mnt_userns + * make_vfsuid - map a filesystem kuid into a mnt_userns * @mnt_userns: the mount's idmapping * @fs_userns: the filesystem's idmapping * @kuid : kuid to be mapped @@ -67,25 +190,33 @@ static inline bool no_idmapping(const struct user_namespace *mnt_userns, * If @kuid has no mapping in either @mnt_userns or @fs_userns INVALID_UID is * returned. 
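[Editor's note] The comparison helpers above (vfsuid_eq_kuid(), vfsgid_in_group_p()) are meant for permission-style checks done on the mapped ids. A hedged sketch of an "owner or owning group" test built on them together with the fs.h helpers from the same series; example_owner_or_group() is an invented name:

	static bool example_owner_or_group(struct user_namespace *mnt_userns,
					   const struct inode *inode)
	{
		vfsuid_t vfsuid = i_uid_into_vfsuid(mnt_userns, inode);
		vfsgid_t vfsgid = i_gid_into_vfsgid(mnt_userns, inode);

		if (vfsuid_eq_kuid(vfsuid, current_fsuid()))
			return true;			/* caller owns the inode */
		return vfsgid_in_group_p(vfsgid);	/* or is in the owning group */
	}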
*/ -static inline kuid_t mapped_kuid_fs(struct user_namespace *mnt_userns, - struct user_namespace *fs_userns, - kuid_t kuid) + +static inline vfsuid_t make_vfsuid(struct user_namespace *mnt_userns, + struct user_namespace *fs_userns, + kuid_t kuid) { uid_t uid; if (no_idmapping(mnt_userns, fs_userns)) - return kuid; + return VFSUIDT_INIT(kuid); if (initial_idmapping(fs_userns)) uid = __kuid_val(kuid); else uid = from_kuid(fs_userns, kuid); if (uid == (uid_t)-1) - return INVALID_UID; - return make_kuid(mnt_userns, uid); + return INVALID_VFSUID; + return VFSUIDT_INIT(make_kuid(mnt_userns, uid)); +} + +static inline kuid_t mapped_kuid_fs(struct user_namespace *mnt_userns, + struct user_namespace *fs_userns, + kuid_t kuid) +{ + return AS_KUIDT(make_vfsuid(mnt_userns, fs_userns, kuid)); } /** - * mapped_kgid_fs - map a filesystem kgid into a mnt_userns + * make_vfsgid - map a filesystem kgid into a mnt_userns * @mnt_userns: the mount's idmapping * @fs_userns: the filesystem's idmapping * @kgid : kgid to be mapped @@ -104,21 +235,56 @@ static inline kuid_t mapped_kuid_fs(struct user_namespace *mnt_userns, * If @kgid has no mapping in either @mnt_userns or @fs_userns INVALID_GID is * returned. */ -static inline kgid_t mapped_kgid_fs(struct user_namespace *mnt_userns, - struct user_namespace *fs_userns, - kgid_t kgid) + +static inline vfsgid_t make_vfsgid(struct user_namespace *mnt_userns, + struct user_namespace *fs_userns, + kgid_t kgid) { gid_t gid; if (no_idmapping(mnt_userns, fs_userns)) - return kgid; + return VFSGIDT_INIT(kgid); if (initial_idmapping(fs_userns)) gid = __kgid_val(kgid); else gid = from_kgid(fs_userns, kgid); if (gid == (gid_t)-1) - return INVALID_GID; - return make_kgid(mnt_userns, gid); + return INVALID_VFSGID; + return VFSGIDT_INIT(make_kgid(mnt_userns, gid)); +} + +static inline kgid_t mapped_kgid_fs(struct user_namespace *mnt_userns, + struct user_namespace *fs_userns, + kgid_t kgid) +{ + return AS_KGIDT(make_vfsgid(mnt_userns, fs_userns, kgid)); +} + +/** + * from_vfsuid - map a vfsuid into the filesystem idmapping + * @mnt_userns: the mount's idmapping + * @fs_userns: the filesystem's idmapping + * @vfsuid : vfsuid to be mapped + * + * Map @vfsuid into the filesystem idmapping. This function has to be used in + * order to e.g. write @vfsuid to inode->i_uid. + * + * Return: @vfsuid mapped into the filesystem idmapping + */ +static inline kuid_t from_vfsuid(struct user_namespace *mnt_userns, + struct user_namespace *fs_userns, + vfsuid_t vfsuid) +{ + uid_t uid; + + if (no_idmapping(mnt_userns, fs_userns)) + return AS_KUIDT(vfsuid); + uid = from_kuid(mnt_userns, AS_KUIDT(vfsuid)); + if (uid == (uid_t)-1) + return INVALID_UID; + if (initial_idmapping(fs_userns)) + return KUIDT_INIT(uid); + return make_kuid(fs_userns, uid); } /** @@ -145,16 +311,66 @@ static inline kuid_t mapped_kuid_user(struct user_namespace *mnt_userns, struct user_namespace *fs_userns, kuid_t kuid) { - uid_t uid; + return from_vfsuid(mnt_userns, fs_userns, VFSUIDT_INIT(kuid)); +} + +/** + * vfsuid_has_fsmapping - check whether a vfsuid maps into the filesystem + * @mnt_userns: the mount's idmapping + * @fs_userns: the filesystem's idmapping + * @vfsuid: vfsuid to be mapped + * + * Check whether @vfsuid has a mapping in the filesystem idmapping. Use this + * function to check whether the filesystem idmapping has a mapping for + * @vfsuid. + * + * Return: true if @vfsuid has a mapping in the filesystem, false if not. 
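[Editor's note] vfsuid_has_fsmapping(), documented above, is the guard a chown-style path needs before committing a new owner: the requested vfsuid must be representable in the filesystem's idmapping. A hedged sketch (the function name is invented; returning -EOVERFLOW follows the usual convention for unrepresentable ids):

	static int example_check_chown(struct user_namespace *mnt_userns,
				       struct user_namespace *fs_userns,
				       const struct iattr *attr)
	{
		if ((attr->ia_valid & ATTR_UID) &&
		    !vfsuid_has_fsmapping(mnt_userns, fs_userns, attr->ia_vfsuid))
			return -EOVERFLOW;
		return 0;
	}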
+ */ +static inline bool vfsuid_has_fsmapping(struct user_namespace *mnt_userns, + struct user_namespace *fs_userns, + vfsuid_t vfsuid) +{ + return uid_valid(from_vfsuid(mnt_userns, fs_userns, vfsuid)); +} + +/** + * vfsuid_into_kuid - convert vfsuid into kuid + * @vfsuid: the vfsuid to convert + * + * This can be used when a vfsuid is committed as a kuid. + * + * Return: a kuid with the value of @vfsuid + */ +static inline kuid_t vfsuid_into_kuid(vfsuid_t vfsuid) +{ + return AS_KUIDT(vfsuid); +} + +/** + * from_vfsgid - map a vfsgid into the filesystem idmapping + * @mnt_userns: the mount's idmapping + * @fs_userns: the filesystem's idmapping + * @vfsgid : vfsgid to be mapped + * + * Map @vfsgid into the filesystem idmapping. This function has to be used in + * order to e.g. write @vfsgid to inode->i_gid. + * + * Return: @vfsgid mapped into the filesystem idmapping + */ +static inline kgid_t from_vfsgid(struct user_namespace *mnt_userns, + struct user_namespace *fs_userns, + vfsgid_t vfsgid) +{ + gid_t gid; if (no_idmapping(mnt_userns, fs_userns)) - return kuid; - uid = from_kuid(mnt_userns, kuid); - if (uid == (uid_t)-1) - return INVALID_UID; + return AS_KGIDT(vfsgid); + gid = from_kgid(mnt_userns, AS_KGIDT(vfsgid)); + if (gid == (gid_t)-1) + return INVALID_GID; if (initial_idmapping(fs_userns)) - return KUIDT_INIT(uid); - return make_kuid(fs_userns, uid); + return KGIDT_INIT(gid); + return make_kgid(fs_userns, gid); } /** @@ -181,16 +397,39 @@ static inline kgid_t mapped_kgid_user(struct user_namespace *mnt_userns, struct user_namespace *fs_userns, kgid_t kgid) { - gid_t gid; + return from_vfsgid(mnt_userns, fs_userns, VFSGIDT_INIT(kgid)); +} - if (no_idmapping(mnt_userns, fs_userns)) - return kgid; - gid = from_kgid(mnt_userns, kgid); - if (gid == (gid_t)-1) - return INVALID_GID; - if (initial_idmapping(fs_userns)) - return KGIDT_INIT(gid); - return make_kgid(fs_userns, gid); +/** + * vfsgid_has_fsmapping - check whether a vfsgid maps into the filesystem + * @mnt_userns: the mount's idmapping + * @fs_userns: the filesystem's idmapping + * @vfsgid: vfsgid to be mapped + * + * Check whether @vfsgid has a mapping in the filesystem idmapping. Use this + * function to check whether the filesystem idmapping has a mapping for + * @vfsgid. + * + * Return: true if @vfsgid has a mapping in the filesystem, false if not. + */ +static inline bool vfsgid_has_fsmapping(struct user_namespace *mnt_userns, + struct user_namespace *fs_userns, + vfsgid_t vfsgid) +{ + return gid_valid(from_vfsgid(mnt_userns, fs_userns, vfsgid)); +} + +/** + * vfsgid_into_kgid - convert vfsgid into kgid + * @vfsgid: the vfsgid to convert + * + * This can be used when a vfsgid is committed as a kgid. 
+ * + * Return: a kgid with the value of @vfsgid + */ +static inline kgid_t vfsgid_into_kgid(vfsgid_t vfsgid) +{ + return AS_KGIDT(vfsgid); } /** @@ -209,7 +448,8 @@ static inline kgid_t mapped_kgid_user(struct user_namespace *mnt_userns, static inline kuid_t mapped_fsuid(struct user_namespace *mnt_userns, struct user_namespace *fs_userns) { - return mapped_kuid_user(mnt_userns, fs_userns, current_fsuid()); + return from_vfsuid(mnt_userns, fs_userns, + VFSUIDT_INIT(current_fsuid())); } /** @@ -228,7 +468,8 @@ static inline kuid_t mapped_fsuid(struct user_namespace *mnt_userns, static inline kgid_t mapped_fsgid(struct user_namespace *mnt_userns, struct user_namespace *fs_userns) { - return mapped_kgid_user(mnt_userns, fs_userns, current_fsgid()); + return from_vfsgid(mnt_userns, fs_userns, + VFSGIDT_INIT(current_fsgid())); } #endif /* _LINUX_MNT_IDMAPPING_H */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index f615a66c89e9..2563d30736e9 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1671,7 +1671,7 @@ enum netdev_priv_flags { IFF_FAILOVER_SLAVE = 1<<28, IFF_L3MDEV_RX_HANDLER = 1<<29, IFF_LIVE_RENAME_OK = 1<<30, - IFF_TX_SKB_NO_LINEAR = 1<<31, + IFF_TX_SKB_NO_LINEAR = BIT_ULL(31), IFF_CHANGE_PROTO_DOWN = BIT_ULL(32), }; diff --git a/include/linux/netfs.h b/include/linux/netfs.h index 1773e5df8e65..1b18dfa52e48 100644 --- a/include/linux/netfs.h +++ b/include/linux/netfs.h @@ -214,7 +214,7 @@ struct netfs_request_ops { void (*issue_read)(struct netfs_io_subrequest *subreq); bool (*is_still_valid)(struct netfs_io_request *rreq); int (*check_write_begin)(struct file *file, loff_t pos, unsigned len, - struct folio *folio, void **_fsdata); + struct folio **foliop, void **_fsdata); void (*done)(struct netfs_io_request *rreq); }; diff --git a/include/linux/nvme.h b/include/linux/nvme.h index e3934003f239..07cfc922f8e4 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -906,12 +906,14 @@ struct nvme_common_command { __le32 cdw2[2]; __le64 metadata; union nvme_data_ptr dptr; + struct_group(cdws, __le32 cdw10; __le32 cdw11; __le32 cdw12; __le32 cdw13; __le32 cdw14; __le32 cdw15; + ); }; struct nvme_rw_command { diff --git a/include/linux/objtool.h b/include/linux/objtool.h index 15b940ec1eac..10bc88cc3bf6 100644 --- a/include/linux/objtool.h +++ b/include/linux/objtool.h @@ -32,11 +32,16 @@ struct unwind_hint { * * UNWIND_HINT_FUNC: Generate the unwind metadata of a callable function. * Useful for code which doesn't have an ELF function annotation. + * + * UNWIND_HINT_ENTRY: machine entry without stack, SYSCALL/SYSENTER etc. */ #define UNWIND_HINT_TYPE_CALL 0 #define UNWIND_HINT_TYPE_REGS 1 #define UNWIND_HINT_TYPE_REGS_PARTIAL 2 #define UNWIND_HINT_TYPE_FUNC 3 +#define UNWIND_HINT_TYPE_ENTRY 4 +#define UNWIND_HINT_TYPE_SAVE 5 +#define UNWIND_HINT_TYPE_RESTORE 6 #ifdef CONFIG_OBJTOOL @@ -124,7 +129,7 @@ struct unwind_hint { * the debuginfo as necessary. It will also warn if it sees any * inconsistencies. 
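Side note on the struct_group(cdws, ...) wrapper added to nvme_common_command earlier in this section: struct_group() (from <linux/stddef.h>) names a run of members so it can be copied or cleared as a single bounded object without tripping FORTIFY_SOURCE cross-member warnings, while direct member access keeps working. A standalone sketch with a hypothetical command structure:

#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/types.h>

struct example_cmd {
	u8 opcode;
	struct_group(cdws,		/* both cmd.cdw10 and cmd.cdws are valid */
		__le32 cdw10;
		__le32 cdw11;
	);
};

static void example_clear_cdws(struct example_cmd *cmd)
{
	/* Clears cdw10..cdw11 in one bounded operation. */
	memset(&cmd->cdws, 0, sizeof(cmd->cdws));
}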
*/ -.macro UNWIND_HINT sp_reg:req sp_offset=0 type:req end=0 +.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 end=0 .Lunwind_hint_ip_\@: .pushsection .discard.unwind_hints /* struct unwind_hint */ @@ -177,7 +182,7 @@ struct unwind_hint { #define ASM_REACHABLE #else #define ANNOTATE_INTRA_FUNCTION_CALL -.macro UNWIND_HINT sp_reg:req sp_offset=0 type:req end=0 +.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 end=0 .endm .macro STACK_FRAME_NON_STANDARD func:req .endm diff --git a/include/linux/of.h b/include/linux/of.h index f0a5d6b10c5a..20a4e7cb7afe 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -441,8 +441,6 @@ void *of_kexec_alloc_and_setup_fdt(const struct kimage *image, unsigned long initrd_load_addr, unsigned long initrd_len, const char *cmdline, size_t extra_fdt_size); -int ima_get_kexec_buffer(void **addr, size_t *size); -int ima_free_kexec_buffer(void); #else /* CONFIG_OF */ static inline void of_core_init(void) diff --git a/include/linux/once_lite.h b/include/linux/once_lite.h index 861e606b820f..b7bce4983638 100644 --- a/include/linux/once_lite.h +++ b/include/linux/once_lite.h @@ -9,15 +9,27 @@ */ #define DO_ONCE_LITE(func, ...) \ DO_ONCE_LITE_IF(true, func, ##__VA_ARGS__) -#define DO_ONCE_LITE_IF(condition, func, ...) \ + +#define __ONCE_LITE_IF(condition) \ ({ \ static bool __section(".data.once") __already_done; \ - bool __ret_do_once = !!(condition); \ + bool __ret_cond = !!(condition); \ + bool __ret_once = false; \ \ - if (unlikely(__ret_do_once && !__already_done)) { \ + if (unlikely(__ret_cond && !__already_done)) { \ __already_done = true; \ - func(__VA_ARGS__); \ + __ret_once = true; \ } \ + unlikely(__ret_once); \ + }) + +#define DO_ONCE_LITE_IF(condition, func, ...) \ + ({ \ + bool __ret_do_once = !!(condition); \ + \ + if (__ONCE_LITE_IF(__ret_do_once)) \ + func(__VA_ARGS__); \ + \ unlikely(__ret_do_once); \ }) diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 0178823ce8c2..7fa460ccf7fa 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -556,10 +556,13 @@ #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F3 0x1493 #define PCI_DEVICE_ID_AMD_17H_M60H_DF_F3 0x144b #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F3 0x1443 +#define PCI_DEVICE_ID_AMD_17H_MA0H_DF_F3 0x1727 #define PCI_DEVICE_ID_AMD_19H_DF_F3 0x1653 #define PCI_DEVICE_ID_AMD_19H_M10H_DF_F3 0x14b0 #define PCI_DEVICE_ID_AMD_19H_M40H_DF_F3 0x167c #define PCI_DEVICE_ID_AMD_19H_M50H_DF_F3 0x166d +#define PCI_DEVICE_ID_AMD_19H_M60H_DF_F3 0x14e3 +#define PCI_DEVICE_ID_AMD_19H_M70H_DF_F3 0x14f3 #define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703 #define PCI_DEVICE_ID_AMD_LANCE 0x2000 #define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001 diff --git a/include/linux/perf/riscv_pmu.h b/include/linux/perf/riscv_pmu.h index 46f9b6fe306e..bf66fe011fa8 100644 --- a/include/linux/perf/riscv_pmu.h +++ b/include/linux/perf/riscv_pmu.h @@ -56,9 +56,13 @@ struct riscv_pmu { struct cpu_hw_events __percpu *hw_events; struct hlist_node node; + struct notifier_block riscv_pm_nb; }; #define to_riscv_pmu(p) (container_of(p, struct riscv_pmu, pmu)) + +void riscv_pmu_start(struct perf_event *event, int flags); +void riscv_pmu_stop(struct perf_event *event, int flags); unsigned long riscv_pmu_ctr_read_csr(unsigned long csr); int riscv_pmu_event_set_period(struct perf_event *event); uint64_t riscv_pmu_ctr_get_width_mask(struct perf_event *event); diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index da759560eec5..ee8b9ecdc03b 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ 
-759,6 +759,8 @@ struct perf_event { struct pid_namespace *ns; u64 id; + atomic64_t lost_samples; + u64 (*clock)(void); perf_overflow_handler_t overflow_handler; void *overflow_handler_context; diff --git a/include/linux/phy.h b/include/linux/phy.h index 508f1149665b..b09f7d36cff2 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -572,6 +572,10 @@ struct macsec_ops; * @mdix_ctrl: User setting of crossover * @pma_extable: Cached value of PMA/PMD Extended Abilities Register * @interrupts: Flag interrupts have been enabled + * @irq_suspended: Flag indicating PHY is suspended and therefore interrupt + * handling shall be postponed until PHY has resumed + * @irq_rerun: Flag indicating interrupts occurred while PHY was suspended, + * requiring a rerun of the interrupt handler after resume * @interface: enum phy_interface_t value * @skb: Netlink message for cable diagnostics * @nest: Netlink nest used for cable diagnostics @@ -626,6 +630,8 @@ struct phy_device { /* Interrupts are enabled */ unsigned interrupts:1; + unsigned irq_suspended:1; + unsigned irq_rerun:1; enum phy_state state; diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 9e4d056967c6..0a41b2dcccad 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -88,7 +88,7 @@ extern void pm_runtime_get_suppliers(struct device *dev); extern void pm_runtime_put_suppliers(struct device *dev); extern void pm_runtime_new_link(struct device *dev); extern void pm_runtime_drop_link(struct device_link *link); -extern void pm_runtime_release_supplier(struct device_link *link, bool check_idle); +extern void pm_runtime_release_supplier(struct device_link *link); extern int devm_pm_runtime_enable(struct device *dev); @@ -314,8 +314,7 @@ static inline void pm_runtime_get_suppliers(struct device *dev) {} static inline void pm_runtime_put_suppliers(struct device *dev) {} static inline void pm_runtime_new_link(struct device *dev) {} static inline void pm_runtime_drop_link(struct device_link *link) {} -static inline void pm_runtime_release_supplier(struct device_link *link, - bool check_idle) {} +static inline void pm_runtime_release_supplier(struct device_link *link) {} #endif /* !CONFIG_PM */ diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h index 196a157456aa..77f4849e3418 100644 --- a/include/linux/pm_wakeup.h +++ b/include/linux/pm_wakeup.h @@ -109,7 +109,6 @@ extern struct wakeup_source *wakeup_sources_walk_next(struct wakeup_source *ws); extern int device_wakeup_enable(struct device *dev); extern int device_wakeup_disable(struct device *dev); extern void device_set_wakeup_capable(struct device *dev, bool capable); -extern int device_init_wakeup(struct device *dev, bool val); extern int device_set_wakeup_enable(struct device *dev, bool enable); extern void __pm_stay_awake(struct wakeup_source *ws); extern void pm_stay_awake(struct device *dev); @@ -167,13 +166,6 @@ static inline int device_set_wakeup_enable(struct device *dev, bool enable) return 0; } -static inline int device_init_wakeup(struct device *dev, bool val) -{ - device_set_wakeup_capable(dev, val); - device_set_wakeup_enable(dev, val); - return 0; -} - static inline bool device_may_wakeup(struct device *dev) { return dev->power.can_wakeup && dev->power.should_wakeup; @@ -217,4 +209,27 @@ static inline void pm_wakeup_hard_event(struct device *dev) return pm_wakeup_dev_event(dev, 0, true); } +/** + * device_init_wakeup - Device wakeup initialization. + * @dev: Device to handle. 
+ * @enable: Whether or not to enable @dev as a wakeup device. + * + * By default, most devices should leave wakeup disabled. The exceptions are + * devices that everyone expects to be wakeup sources: keyboards, power buttons, + * possibly network interfaces, etc. Also, devices that don't generate their + * own wakeup requests but merely forward requests from one bus to another + * (like PCI bridges) should have wakeup enabled by default. + */ +static inline int device_init_wakeup(struct device *dev, bool enable) +{ + if (enable) { + device_set_wakeup_capable(dev, true); + return device_wakeup_enable(dev); + } else { + device_wakeup_disable(dev); + device_set_wakeup_capable(dev, false); + return 0; + } +} + #endif /* _LINUX_PM_WAKEUP_H */ diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h index b65c877d92b8..7d1e604c1325 100644 --- a/include/linux/posix_acl.h +++ b/include/linux/posix_acl.h @@ -73,6 +73,7 @@ extern int set_posix_acl(struct user_namespace *, struct inode *, int, struct posix_acl *); struct posix_acl *get_cached_acl_rcu(struct inode *inode, int type); +struct posix_acl *posix_acl_clone(const struct posix_acl *acl, gfp_t flags); #ifdef CONFIG_FS_POSIX_ACL int posix_acl_chmod(struct user_namespace *, struct inode *, umode_t); diff --git a/include/linux/posix_acl_xattr.h b/include/linux/posix_acl_xattr.h index 1766e1de6956..b6bd3eac2bcc 100644 --- a/include/linux/posix_acl_xattr.h +++ b/include/linux/posix_acl_xattr.h @@ -33,21 +33,31 @@ posix_acl_xattr_count(size_t size) } #ifdef CONFIG_FS_POSIX_ACL -void posix_acl_fix_xattr_from_user(struct user_namespace *mnt_userns, - struct inode *inode, - void *value, size_t size); -void posix_acl_fix_xattr_to_user(struct user_namespace *mnt_userns, - struct inode *inode, - void *value, size_t size); +void posix_acl_fix_xattr_from_user(void *value, size_t size); +void posix_acl_fix_xattr_to_user(void *value, size_t size); +void posix_acl_getxattr_idmapped_mnt(struct user_namespace *mnt_userns, + const struct inode *inode, + void *value, size_t size); +void posix_acl_setxattr_idmapped_mnt(struct user_namespace *mnt_userns, + const struct inode *inode, + void *value, size_t size); #else -static inline void posix_acl_fix_xattr_from_user(struct user_namespace *mnt_userns, - struct inode *inode, - void *value, size_t size) +static inline void posix_acl_fix_xattr_from_user(void *value, size_t size) { } -static inline void posix_acl_fix_xattr_to_user(struct user_namespace *mnt_userns, - struct inode *inode, - void *value, size_t size) +static inline void posix_acl_fix_xattr_to_user(void *value, size_t size) +{ +} +static inline void +posix_acl_getxattr_idmapped_mnt(struct user_namespace *mnt_userns, + const struct inode *inode, void *value, + size_t size) +{ +} +static inline void +posix_acl_setxattr_idmapped_mnt(struct user_namespace *mnt_userns, + const struct inode *inode, void *value, + size_t size) { } #endif diff --git a/include/linux/pwm.h b/include/linux/pwm.h index 9771a0761a40..9429930c5566 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -6,9 +6,6 @@ #include <linux/mutex.h> #include <linux/of.h> -struct pwm_capture; -struct seq_file; - struct pwm_chip; /** @@ -252,6 +249,16 @@ pwm_set_relative_duty_cycle(struct pwm_state *state, unsigned int duty_cycle, } /** + * struct pwm_capture - PWM capture data + * @period: period of the PWM signal (in nanoseconds) + * @duty_cycle: duty cycle of the PWM signal (in nanoseconds) + */ +struct pwm_capture { + unsigned int period; + unsigned int duty_cycle; +}; + +/** * 
struct pwm_ops - PWM controller operations * @request: optional hook for requesting a PWM * @free: optional hook for freeing a PWM @@ -261,10 +268,6 @@ pwm_set_relative_duty_cycle(struct pwm_state *state, unsigned int duty_cycle, * called once per PWM device when the PWM chip is * registered. * @owner: helps prevent removal of modules exporting active PWMs - * @config: configure duty cycles and period length for this PWM - * @set_polarity: configure the polarity of this PWM - * @enable: enable PWM output toggling - * @disable: disable PWM output toggling */ struct pwm_ops { int (*request)(struct pwm_chip *chip, struct pwm_device *pwm); @@ -276,14 +279,6 @@ struct pwm_ops { void (*get_state)(struct pwm_chip *chip, struct pwm_device *pwm, struct pwm_state *state); struct module *owner; - - /* Only used by legacy drivers */ - int (*config)(struct pwm_chip *chip, struct pwm_device *pwm, - int duty_ns, int period_ns); - int (*set_polarity)(struct pwm_chip *chip, struct pwm_device *pwm, - enum pwm_polarity polarity); - int (*enable)(struct pwm_chip *chip, struct pwm_device *pwm); - void (*disable)(struct pwm_chip *chip, struct pwm_device *pwm); }; /** @@ -312,16 +307,6 @@ struct pwm_chip { struct pwm_device *pwms; }; -/** - * struct pwm_capture - PWM capture data - * @period: period of the PWM signal (in nanoseconds) - * @duty_cycle: duty cycle of the PWM signal (in nanoseconds) - */ -struct pwm_capture { - unsigned int period; - unsigned int duty_cycle; -}; - #if IS_ENABLED(CONFIG_PWM) /* PWM user APIs */ struct pwm_device *pwm_request(int pwm_id, const char *label); diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h index a0f6668924d3..0d8625d71733 100644 --- a/include/linux/quotaops.h +++ b/include/linux/quotaops.h @@ -20,11 +20,12 @@ static inline struct quota_info *sb_dqopt(struct super_block *sb) } /* i_mutex must being held */ -static inline bool is_quota_modification(struct inode *inode, struct iattr *ia) +static inline bool is_quota_modification(struct user_namespace *mnt_userns, + struct inode *inode, struct iattr *ia) { - return (ia->ia_valid & ATTR_SIZE) || - (ia->ia_valid & ATTR_UID && !uid_eq(ia->ia_uid, inode->i_uid)) || - (ia->ia_valid & ATTR_GID && !gid_eq(ia->ia_gid, inode->i_gid)); + return ((ia->ia_valid & ATTR_SIZE) || + i_uid_needs_update(mnt_userns, ia, inode) || + i_gid_needs_update(mnt_userns, ia, inode)); } #if defined(CONFIG_QUOTA) @@ -115,7 +116,8 @@ int dquot_set_dqblk(struct super_block *sb, struct kqid id, struct qc_dqblk *di); int __dquot_transfer(struct inode *inode, struct dquot **transfer_to); -int dquot_transfer(struct inode *inode, struct iattr *iattr); +int dquot_transfer(struct user_namespace *mnt_userns, struct inode *inode, + struct iattr *iattr); static inline struct mem_dqinfo *sb_dqinfo(struct super_block *sb, int type) { @@ -234,7 +236,8 @@ static inline void dquot_free_inode(struct inode *inode) { } -static inline int dquot_transfer(struct inode *inode, struct iattr *iattr) +static inline int dquot_transfer(struct user_namespace *mnt_userns, + struct inode *inode, struct iattr *iattr) { return 0; } diff --git a/include/linux/refcount.h b/include/linux/refcount.h index b8a6e387f8f9..a62fcca97486 100644 --- a/include/linux/refcount.h +++ b/include/linux/refcount.h @@ -361,9 +361,9 @@ static inline void refcount_dec(refcount_t *r) extern __must_check bool refcount_dec_if_one(refcount_t *r); extern __must_check bool refcount_dec_not_one(refcount_t *r); -extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock); 
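For context on the __cond_acquires() annotations these refcount_dec_and_*lock() declarations gain just below: the lock is taken only when the final reference is dropped, so only that path unlocks. A hedged sketch of the usual caller pattern, with a hypothetical object type:

#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_obj {			/* hypothetical refcounted object */
	refcount_t refs;
	struct list_head node;
};

static DEFINE_SPINLOCK(example_lock);	/* protects the list of objects */

static void example_put(struct example_obj *obj)
{
	/* Returns with example_lock held only when the count hit zero. */
	if (!refcount_dec_and_lock(&obj->refs, &example_lock))
		return;

	list_del(&obj->node);
	spin_unlock(&example_lock);
	kfree(obj);
}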
-extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock); +extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock) __cond_acquires(lock); +extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock) __cond_acquires(lock); extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock, - unsigned long *flags); + unsigned long *flags) __cond_acquires(lock); #endif /* _LINUX_REFCOUNT_H */ diff --git a/include/linux/regmap.h b/include/linux/regmap.h index 8952fa3d0d59..7cf2157134ac 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -1336,6 +1336,22 @@ static inline int regmap_field_update_bits(struct regmap_field *field, NULL, false, false); } +static inline int regmap_field_set_bits(struct regmap_field *field, + unsigned int bits) +{ + return regmap_field_update_bits_base(field, bits, bits, NULL, false, + false); +} + +static inline int regmap_field_clear_bits(struct regmap_field *field, + unsigned int bits) +{ + return regmap_field_update_bits_base(field, bits, 0, NULL, false, + false); +} + +int regmap_field_test_bits(struct regmap_field *field, unsigned int bits); + static inline int regmap_field_force_update_bits(struct regmap_field *field, unsigned int mask, unsigned int val) @@ -1424,6 +1440,8 @@ struct regmap_irq_sub_irq_map { unsigned int *offset; }; +struct regmap_irq_chip_data; + /** * struct regmap_irq_chip - Description of a generic regmap irq_chip. * @@ -1451,32 +1469,50 @@ struct regmap_irq_sub_irq_map { * main_status set. * * @status_base: Base status register address. - * @mask_base: Base mask register address. - * @mask_writeonly: Base mask register is write only. - * @unmask_base: Base unmask register address. for chips who have - * separate mask and unmask registers + * @mask_base: Base mask register address. Mask bits are set to 1 when an + * interrupt is masked, 0 when unmasked. + * @unmask_base: Base unmask register address. Unmask bits are set to 1 when + * an interrupt is unmasked and 0 when masked. * @ack_base: Base ack address. If zero then the chip is clear on read. * Using zero value is possible with @use_ack bit. * @wake_base: Base address for wake enables. If zero unsupported. - * @type_base: Base address for irq type. If zero unsupported. - * @virt_reg_base: Base addresses for extra config regs. + * @type_base: Base address for irq type. If zero unsupported. Deprecated, + * use @config_base instead. + * @virt_reg_base: Base addresses for extra config regs. Deprecated, use + * @config_base instead. + * @config_base: Base address for IRQ type config regs. If null unsupported. * @irq_reg_stride: Stride to use for chips where registers are not contiguous. * @init_ack_masked: Ack all masked interrupts once during initalization. * @mask_invert: Inverted mask register: cleared bits are masked out. + * Deprecated; prefer describing an inverted mask register as + * an unmask register. + * @mask_unmask_non_inverted: Controls mask bit inversion for chips that set + * both @mask_base and @unmask_base. If false, mask and unmask bits are + * inverted (which is deprecated behavior); if true, bits will not be + * inverted and the registers keep their normal behavior. Note that if + * you use only one of @mask_base or @unmask_base, this flag has no + * effect and is unnecessary. Any new drivers that set both @mask_base + * and @unmask_base should set this to true to avoid relying on the + * deprecated behavior. * @use_ack: Use @ack register even if it is zero. 
* @ack_invert: Inverted ack register: cleared bits for ack. * @clear_ack: Use this to set 1 and 0 or vice-versa to clear interrupts. * @wake_invert: Inverted wake register: cleared bits are wake enabled. - * @type_invert: Invert the type flags. - * @type_in_mask: Use the mask registers for controlling irq type. For - * interrupts defining type_rising/falling_mask use mask_base - * for edge configuration and never update bits in type_base. + * @type_invert: Invert the type flags. Deprecated, use config registers + * instead. + * @type_in_mask: Use the mask registers for controlling irq type. Use this if + * the hardware provides separate bits for rising/falling edge + * or low/high level interrupts and they should be combined into + * a single logical interrupt. Use &struct regmap_irq_type data + * to define the mask bit for each irq type. * @clear_on_unmask: For chips with interrupts cleared on read: read the status * registers before unmasking interrupts to clear any bits * set when they were masked. * @not_fixed_stride: Used when chip peripherals are not laid out with fixed - * stride. Must be used with sub_reg_offsets containing the - * offsets to each peripheral. + * stride. Must be used with sub_reg_offsets containing the + * offsets to each peripheral. Deprecated; the same thing + * can be accomplished with a @get_irq_reg callback, without + * the need for a @sub_reg_offsets table. * @status_invert: Inverted status register: cleared bits are active interrupts. * @runtime_pm: Hold a runtime PM lock on the device when accessing it. * @@ -1484,17 +1520,28 @@ struct regmap_irq_sub_irq_map { * @irqs: Descriptors for individual IRQs. Interrupt numbers are * assigned based on the index in the array of the interrupt. * @num_irqs: Number of descriptors. - * @num_type_reg: Number of type registers. + * @num_type_reg: Number of type registers. Deprecated, use config registers + * instead. * @num_virt_regs: Number of non-standard irq configuration registers. - * If zero unsupported. - * @type_reg_stride: Stride to use for chips where type registers are not - * contiguous. + * If zero unsupported. Deprecated, use config registers + * instead. + * @num_config_bases: Number of config base registers. + * @num_config_regs: Number of config registers for each config base register. * @handle_pre_irq: Driver specific callback to handle interrupt from device * before regmap_irq_handler process the interrupts. * @handle_post_irq: Driver specific callback to handle interrupt from device * after handling the interrupts in regmap_irq_handler(). * @set_type_virt: Driver specific callback to extend regmap_irq_set_type() - * and configure virt regs. + * and configure virt regs. Deprecated, use @set_type_config + * callback and config registers instead. + * @set_type_config: Callback used for configuring irq types. + * @get_irq_reg: Callback for mapping (base register, index) pairs to register + * addresses. The base register will be one of @status_base, + * @mask_base, etc., @main_status, or any of @config_base. + * The index will be in the range [0, num_main_regs[ for the + * main status base, [0, num_type_settings[ for any config + * register base, and [0, num_regs[ for any other base. + * If unspecified then regmap_irq_get_irq_reg_linear() is used. * @irq_drv_data: Driver specific IRQ data which is passed as parameter when * driver specific pre/post interrupt handler is called. 
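A short consumer sketch for the regmap_field_set_bits()/regmap_field_clear_bits()/regmap_field_test_bits() helpers added earlier in this regmap.h diff; the example_priv structure and its enable field are hypothetical, and regmap_field_test_bits() is assumed to follow the regmap_test_bits() convention (negative errno on error, otherwise whether all tested bits are set):

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/regmap.h>

struct example_priv {			/* hypothetical driver state */
	struct regmap_field *enable;	/* from devm_regmap_field_alloc() */
};

static int example_set_enable(struct example_priv *priv, bool on)
{
	int ret;

	ret = on ? regmap_field_set_bits(priv->enable, BIT(0))
		 : regmap_field_clear_bits(priv->enable, BIT(0));
	if (ret)
		return ret;

	/* Read back: < 0 on error, otherwise whether all tested bits are set. */
	ret = regmap_field_test_bits(priv->enable, BIT(0));
	if (ret < 0)
		return ret;

	return ret == on ? 0 : -EIO;
}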
* @@ -1517,20 +1564,21 @@ struct regmap_irq_chip { unsigned int wake_base; unsigned int type_base; unsigned int *virt_reg_base; + const unsigned int *config_base; unsigned int irq_reg_stride; - bool mask_writeonly:1; - bool init_ack_masked:1; - bool mask_invert:1; - bool use_ack:1; - bool ack_invert:1; - bool clear_ack:1; - bool wake_invert:1; - bool runtime_pm:1; - bool type_invert:1; - bool type_in_mask:1; - bool clear_on_unmask:1; - bool not_fixed_stride:1; - bool status_invert:1; + unsigned int init_ack_masked:1; + unsigned int mask_invert:1; + unsigned int mask_unmask_non_inverted:1; + unsigned int use_ack:1; + unsigned int ack_invert:1; + unsigned int clear_ack:1; + unsigned int wake_invert:1; + unsigned int runtime_pm:1; + unsigned int type_invert:1; + unsigned int type_in_mask:1; + unsigned int clear_on_unmask:1; + unsigned int not_fixed_stride:1; + unsigned int status_invert:1; int num_regs; @@ -1539,16 +1587,24 @@ struct regmap_irq_chip { int num_type_reg; int num_virt_regs; - unsigned int type_reg_stride; + int num_config_bases; + int num_config_regs; int (*handle_pre_irq)(void *irq_drv_data); int (*handle_post_irq)(void *irq_drv_data); int (*set_type_virt)(unsigned int **buf, unsigned int type, unsigned long hwirq, int reg); + int (*set_type_config)(unsigned int **buf, unsigned int type, + const struct regmap_irq *irq_data, int idx); + unsigned int (*get_irq_reg)(struct regmap_irq_chip_data *data, + unsigned int base, int index); void *irq_drv_data; }; -struct regmap_irq_chip_data; +unsigned int regmap_irq_get_irq_reg_linear(struct regmap_irq_chip_data *data, + unsigned int base, int index); +int regmap_irq_set_type_config_simple(unsigned int **buf, unsigned int type, + const struct regmap_irq *irq_data, int idx); int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, int irq_base, const struct regmap_irq_chip *chip, @@ -1769,6 +1825,27 @@ regmap_field_force_update_bits(struct regmap_field *field, return -EINVAL; } +static inline int regmap_field_set_bits(struct regmap_field *field, + unsigned int bits) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_field_clear_bits(struct regmap_field *field, + unsigned int bits) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_field_test_bits(struct regmap_field *field, + unsigned int bits) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + static inline int regmap_fields_write(struct regmap_field *field, unsigned int id, unsigned int val) { diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index bbf6590a6dec..bc6cda706d1f 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -171,10 +171,13 @@ struct regulator; /** * struct regulator_bulk_data - Data used for bulk regulator operations. * - * @supply: The name of the supply. Initialised by the user before - * using the bulk regulator APIs. - * @consumer: The regulator consumer for the supply. This will be managed - * by the bulk API. + * @supply: The name of the supply. Initialised by the user before + * using the bulk regulator APIs. + * @init_load_uA: After getting the regulator, regulator_set_load() will be + * called with this load. Initialised by the user before + * using the bulk regulator APIs. + * @consumer: The regulator consumer for the supply. This will be managed + * by the bulk API. 
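With @init_load_uA in place, and devm_regulator_bulk_get_const() declared further down in this hunk, a consumer can keep its supply table const while still having regulator_set_load() applied at get time. A hedged sketch; the supply names and load value are made up for illustration:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/regulator/consumer.h>

static const struct regulator_bulk_data example_supplies[] = {
	{ .supply = "vdd",   .init_load_uA = 20000 },	/* load applied at get time */
	{ .supply = "vddio" },
};

static int example_get_supplies(struct device *dev,
				struct regulator_bulk_data **out)
{
	return devm_regulator_bulk_get_const(dev, ARRAY_SIZE(example_supplies),
					     example_supplies, out);
}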
* * The regulator APIs provide a series of regulator_bulk_() API calls as * a convenience to consumers which require multiple supplies. This @@ -182,6 +185,7 @@ struct regulator; */ struct regulator_bulk_data { const char *supply; + int init_load_uA; struct regulator *consumer; /* private: Internal use */ @@ -240,6 +244,10 @@ int __must_check regulator_bulk_get(struct device *dev, int num_consumers, struct regulator_bulk_data *consumers); int __must_check devm_regulator_bulk_get(struct device *dev, int num_consumers, struct regulator_bulk_data *consumers); +int __must_check devm_regulator_bulk_get_const( + struct device *dev, int num_consumers, + const struct regulator_bulk_data *in_consumers, + struct regulator_bulk_data **out_consumers); int __must_check regulator_bulk_enable(int num_consumers, struct regulator_bulk_data *consumers); int regulator_bulk_disable(int num_consumers, diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index 0228caaa6741..f9a7461e72b8 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -348,6 +348,7 @@ enum regulator_type { * @ramp_delay_table: Table for mapping the regulator ramp-rate values. Values * should be given in units of V/S (uV/uS). See the * regulator_set_ramp_delay_regmap(). + * @n_ramp_values: number of elements at @ramp_delay_table. * * @enable_time: Time taken for initial enable of regulator (in uS). * @off_on_delay: guard time (in uS), before re-enabling a regulator diff --git a/include/linux/reset.h b/include/linux/reset.h index 8a21b5756c3e..514ddf003efc 100644 --- a/include/linux/reset.h +++ b/include/linux/reset.h @@ -731,7 +731,7 @@ static inline int __must_check devm_reset_control_bulk_get_optional_exclusive(struct device *dev, int num_rstcs, struct reset_control_bulk_data *rstcs) { - return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, true, false, true); + return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, false, true, true); } /** diff --git a/include/linux/rtsx_usb.h b/include/linux/rtsx_usb.h index 159729cffd8e..3247ed8e9ff0 100644 --- a/include/linux/rtsx_usb.h +++ b/include/linux/rtsx_usb.h @@ -54,8 +54,6 @@ struct rtsx_ucr { struct usb_device *pusb_dev; struct usb_interface *pusb_intf; struct usb_sg_request current_sg; - unsigned char *iobuf; - dma_addr_t iobuf_dma; struct timer_list sg_timer; struct mutex dev_mutex; diff --git a/include/linux/sched.h b/include/linux/sched.h index c46f3a63b758..88b8817b827d 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2257,7 +2257,7 @@ static inline bool owner_on_cpu(struct task_struct *owner) } /* Returns effective CPU energy utilization, as seen by the scheduler */ -unsigned long sched_cpu_util(int cpu, unsigned long max); +unsigned long sched_cpu_util(int cpu); #endif /* CONFIG_SMP */ #ifdef CONFIG_RSEQ diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h index e5af028c08b4..994c25640e15 100644 --- a/include/linux/sched/rt.h +++ b/include/linux/sched/rt.h @@ -39,20 +39,12 @@ static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *p) } extern void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task); extern void rt_mutex_adjust_pi(struct task_struct *p); -static inline bool tsk_is_pi_blocked(struct task_struct *tsk) -{ - return tsk->pi_blocked_on != NULL; -} #else static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *task) { return NULL; } # define rt_mutex_adjust_pi(p) do { } while (0) -static inline bool 
tsk_is_pi_blocked(struct task_struct *tsk) -{ - return false; -} #endif extern void normalize_rt_tasks(void); diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index 505aaf9fe477..81cab4b01edc 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -85,7 +85,7 @@ static inline void exit_thread(struct task_struct *tsk) extern __noreturn void do_group_exit(int); extern void exit_files(struct task_struct *); -extern void exit_itimers(struct signal_struct *); +extern void exit_itimers(struct task_struct *); extern pid_t kernel_clone(struct kernel_clone_args *kargs); struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node); diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h index 56cffe42abbc..816df6cc444e 100644 --- a/include/linux/sched/topology.h +++ b/include/linux/sched/topology.h @@ -81,6 +81,7 @@ struct sched_domain_shared { atomic_t ref; atomic_t nr_busy_cpus; int has_idle_cores; + int nr_idle_scan; }; struct sched_domain { diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h index 704111f63993..a193884ecf2b 100644 --- a/include/linux/scmi_protocol.h +++ b/include/linux/scmi_protocol.h @@ -60,6 +60,12 @@ struct scmi_clock_info { }; }; +enum scmi_power_scale { + SCMI_POWER_BOGOWATTS, + SCMI_POWER_MILLIWATTS, + SCMI_POWER_MICROWATTS +}; + struct scmi_handle; struct scmi_device; struct scmi_protocol_handle; @@ -135,7 +141,7 @@ struct scmi_perf_proto_ops { unsigned long *rate, unsigned long *power); bool (*fast_switch_possible)(const struct scmi_protocol_handle *ph, struct device *dev); - bool (*power_scale_mw_get)(const struct scmi_protocol_handle *ph); + enum scmi_power_scale (*power_scale_get)(const struct scmi_protocol_handle *ph); }; /** @@ -561,6 +567,116 @@ struct scmi_voltage_proto_ops { }; /** + * struct scmi_powercap_info - Describe one available Powercap domain + * + * @id: Domain ID as advertised by the platform. + * @notify_powercap_cap_change: CAP change notification support. + * @notify_powercap_measurement_change: MEASUREMENTS change notifications + * support. + * @async_powercap_cap_set: Asynchronous CAP set support. + * @powercap_cap_config: CAP configuration support. + * @powercap_monitoring: Monitoring (measurements) support. + * @powercap_pai_config: PAI configuration support. + * @powercap_scale_mw: Domain reports power data in milliwatt units. + * @powercap_scale_uw: Domain reports power data in microwatt units. + * Note that, when both @powercap_scale_mw and + * @powercap_scale_uw are set to false, the domain + * reports power data on an abstract linear scale. + * @name: name assigned to the Powercap Domain by platform. + * @min_pai: Minimum configurable PAI. + * @max_pai: Maximum configurable PAI. + * @pai_step: Step size between two consecutive PAI values. + * @min_power_cap: Minimum configurable CAP. + * @max_power_cap: Maximum configurable CAP. + * @power_cap_step: Step size between two consecutive CAP values. + * @sustainable_power: Maximum sustainable power consumption for this domain + * under normal conditions. + * @accuracy: The accuracy with which the power is measured and reported in + * integral multiples of 0.001 percent. + * @parent_id: Identifier of the containing parent power capping domain, or the + * value 0xFFFFFFFF if this powercap domain is a root domain not + * contained in any other domain. 
+ */ +struct scmi_powercap_info { + unsigned int id; + bool notify_powercap_cap_change; + bool notify_powercap_measurement_change; + bool async_powercap_cap_set; + bool powercap_cap_config; + bool powercap_monitoring; + bool powercap_pai_config; + bool powercap_scale_mw; + bool powercap_scale_uw; + bool fastchannels; + char name[SCMI_MAX_STR_SIZE]; + unsigned int min_pai; + unsigned int max_pai; + unsigned int pai_step; + unsigned int min_power_cap; + unsigned int max_power_cap; + unsigned int power_cap_step; + unsigned int sustainable_power; + unsigned int accuracy; +#define SCMI_POWERCAP_ROOT_ZONE_ID 0xFFFFFFFFUL + unsigned int parent_id; + struct scmi_fc_info *fc_info; +}; + +/** + * struct scmi_powercap_proto_ops - represents the various operations provided + * by SCMI Powercap Protocol + * + * @num_domains_get: get the count of powercap domains provided by SCMI. + * @info_get: get the information for the specified domain. + * @cap_get: get the current CAP value for the specified domain. + * @cap_set: set the CAP value for the specified domain to the provided value; + * if the domain supports setting the CAP with an asynchronous command + * this request will finally trigger an asynchronous transfer, but, if + * @ignore_dresp here is set to true, this call will anyway return + * immediately without waiting for the related delayed response. + * @pai_get: get the current PAI value for the specified domain. + * @pai_set: set the PAI value for the specified domain to the provided value. + * @measurements_get: retrieve the current average power measurements for the + * specified domain and the related PAI upon which is + * calculated. + * @measurements_threshold_set: set the desired low and high power thresholds + * to be used when registering for notification + * of type POWERCAP_MEASUREMENTS_NOTIFY with this + * powercap domain. + * Note that this must be called at least once + * before registering any callback with the usual + * @scmi_notify_ops; moreover, in case this method + * is called with measurement notifications already + * enabled it will also trigger, transparently, a + * proper update of the power thresholds configured + * in the SCMI backend server. + * @measurements_threshold_get: get the currently configured low and high power + * thresholds used when registering callbacks for + * notification POWERCAP_MEASUREMENTS_NOTIFY. 
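A hedged sketch of how an SCMI driver might use the Powercap operations declared just below together with the devm_protocol_get() accessor from scmi_handle; the domain id and cap value are placeholders, and the unit of the cap depends on the domain's reported power scale:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/scmi_protocol.h>

static int example_clamp_domain(struct scmi_device *sdev, u32 domain, u32 cap)
{
	const struct scmi_powercap_proto_ops *ops;
	const struct scmi_powercap_info *info;
	struct scmi_protocol_handle *ph;

	ops = sdev->handle->devm_protocol_get(sdev, SCMI_PROTOCOL_POWERCAP, &ph);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	info = ops->info_get(ph, domain);
	if (!info || !info->powercap_cap_config)
		return -EOPNOTSUPP;

	/* Synchronous cap set: wait for the delayed response if one is used. */
	return ops->cap_set(ph, domain, cap, false);
}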
+ */ +struct scmi_powercap_proto_ops { + int (*num_domains_get)(const struct scmi_protocol_handle *ph); + const struct scmi_powercap_info __must_check *(*info_get) + (const struct scmi_protocol_handle *ph, u32 domain_id); + int (*cap_get)(const struct scmi_protocol_handle *ph, u32 domain_id, + u32 *power_cap); + int (*cap_set)(const struct scmi_protocol_handle *ph, u32 domain_id, + u32 power_cap, bool ignore_dresp); + int (*pai_get)(const struct scmi_protocol_handle *ph, u32 domain_id, + u32 *pai); + int (*pai_set)(const struct scmi_protocol_handle *ph, u32 domain_id, + u32 pai); + int (*measurements_get)(const struct scmi_protocol_handle *ph, + u32 domain_id, u32 *average_power, u32 *pai); + int (*measurements_threshold_set)(const struct scmi_protocol_handle *ph, + u32 domain_id, u32 power_thresh_low, + u32 power_thresh_high); + int (*measurements_threshold_get)(const struct scmi_protocol_handle *ph, + u32 domain_id, u32 *power_thresh_low, + u32 *power_thresh_high); +}; + +/** * struct scmi_notify_ops - represents notifications' operations provided by * SCMI core * @devm_event_notifier_register: Managed registration of a notifier_block for @@ -624,6 +740,9 @@ struct scmi_notify_ops { * * @dev: pointer to the SCMI device * @version: pointer to the structure containing SCMI version information + * @devm_protocol_acquire: devres managed method to get hold of a protocol, + * causing its initialization and related resource + * accounting * @devm_protocol_get: devres managed method to acquire a protocol and get specific * operations and a dedicated protocol handler * @devm_protocol_put: devres managed method to release a protocol @@ -642,6 +761,8 @@ struct scmi_handle { struct device *dev; struct scmi_revision_info *version; + int __must_check (*devm_protocol_acquire)(struct scmi_device *sdev, + u8 proto); const void __must_check * (*devm_protocol_get)(struct scmi_device *sdev, u8 proto, struct scmi_protocol_handle **ph); @@ -661,6 +782,7 @@ enum scmi_std_protocol { SCMI_PROTOCOL_SENSOR = 0x15, SCMI_PROTOCOL_RESET = 0x16, SCMI_PROTOCOL_VOLTAGE = 0x17, + SCMI_PROTOCOL_POWERCAP = 0x18, }; enum scmi_system_events { @@ -762,6 +884,8 @@ enum scmi_notification_events { SCMI_EVENT_RESET_ISSUED = 0x0, SCMI_EVENT_BASE_ERROR_EVENT = 0x0, SCMI_EVENT_SYSTEM_POWER_STATE_NOTIFIER = 0x0, + SCMI_EVENT_POWERCAP_CAP_CHANGED = 0x0, + SCMI_EVENT_POWERCAP_MEASUREMENTS_CHANGED = 0x1, }; struct scmi_power_state_changed_report { @@ -781,8 +905,10 @@ struct scmi_clock_rate_notif_report { struct scmi_system_power_state_notifier_report { ktime_t timestamp; unsigned int agent_id; +#define SCMI_SYSPOWER_IS_REQUEST_GRACEFUL(flags) ((flags) & BIT(0)) unsigned int flags; unsigned int system_state; + unsigned int timeout; }; struct scmi_perf_limits_report { @@ -830,4 +956,18 @@ struct scmi_base_error_report { unsigned long long reports[]; }; +struct scmi_powercap_cap_changed_report { + ktime_t timestamp; + unsigned int agent_id; + unsigned int domain_id; + unsigned int power_cap; + unsigned int pai; +}; + +struct scmi_powercap_meas_changed_report { + ktime_t timestamp; + unsigned int agent_id; + unsigned int domain_id; + unsigned int power; +}; #endif /* _LINUX_SCMI_PROTOCOL_H */ diff --git a/include/linux/security.h b/include/linux/security.h index 7fc4e9f49f54..4d0baf30266e 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -353,7 +353,8 @@ int security_inode_readlink(struct dentry *dentry); int security_inode_follow_link(struct dentry *dentry, struct inode *inode, bool rcu); int 
security_inode_permission(struct inode *inode, int mask); -int security_inode_setattr(struct dentry *dentry, struct iattr *attr); +int security_inode_setattr(struct user_namespace *mnt_userns, + struct dentry *dentry, struct iattr *attr); int security_inode_getattr(const struct path *path); int security_inode_setxattr(struct user_namespace *mnt_userns, struct dentry *dentry, const char *name, @@ -848,8 +849,9 @@ static inline int security_inode_permission(struct inode *inode, int mask) return 0; } -static inline int security_inode_setattr(struct dentry *dentry, - struct iattr *attr) +static inline int security_inode_setattr(struct user_namespace *mnt_userns, + struct dentry *dentry, + struct iattr *attr) { return 0; } diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 657a0fc68a3f..fde258b3decd 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -390,6 +390,11 @@ static const bool earlycon_acpi_spcr_enable EARLYCON_USED_OR_UNUSED; static inline int setup_earlycon(char *buf) { return 0; } #endif +static inline bool uart_console_enabled(struct uart_port *port) +{ + return uart_console(port) && (port->cons->flags & CON_ENABLED); +} + struct uart_port *uart_get_console(struct uart_port *ports, int nr, struct console *c); int uart_parse_earlycon(char *p, unsigned char *iotype, resource_size_t *addr, diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index d3d10556f0fa..1111adefd906 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -686,10 +686,18 @@ enum { * charged to the kernel memory. */ SKBFL_PURE_ZEROCOPY = BIT(2), + + SKBFL_DONT_ORPHAN = BIT(3), + + /* page references are managed by the ubuf_info, so it's safe to + * use frags only up until ubuf_info is released + */ + SKBFL_MANAGED_FRAG_REFS = BIT(4), }; #define SKBFL_ZEROCOPY_FRAG (SKBFL_ZEROCOPY_ENABLE | SKBFL_SHARED_FRAG) -#define SKBFL_ALL_ZEROCOPY (SKBFL_ZEROCOPY_FRAG | SKBFL_PURE_ZEROCOPY) +#define SKBFL_ALL_ZEROCOPY (SKBFL_ZEROCOPY_FRAG | SKBFL_PURE_ZEROCOPY | \ + SKBFL_DONT_ORPHAN | SKBFL_MANAGED_FRAG_REFS) /* * The callback notifies userspace to release buffers when skb DMA is done in @@ -1773,13 +1781,14 @@ void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref); void msg_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *uarg, bool success); -int __zerocopy_sg_from_iter(struct sock *sk, struct sk_buff *skb, - struct iov_iter *from, size_t length); +int __zerocopy_sg_from_iter(struct msghdr *msg, struct sock *sk, + struct sk_buff *skb, struct iov_iter *from, + size_t length); static inline int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len) { - return __zerocopy_sg_from_iter(skb->sk, skb, &msg->msg_iter, len); + return __zerocopy_sg_from_iter(msg, skb->sk, skb, &msg->msg_iter, len); } int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb, @@ -1806,6 +1815,11 @@ static inline bool skb_zcopy_pure(const struct sk_buff *skb) return skb_shinfo(skb)->flags & SKBFL_PURE_ZEROCOPY; } +static inline bool skb_zcopy_managed(const struct sk_buff *skb) +{ + return skb_shinfo(skb)->flags & SKBFL_MANAGED_FRAG_REFS; +} + static inline bool skb_pure_zcopy_same(const struct sk_buff *skb1, const struct sk_buff *skb2) { @@ -1880,6 +1894,14 @@ static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy_success) } } +void __skb_zcopy_downgrade_managed(struct sk_buff *skb); + +static inline void skb_zcopy_downgrade_managed(struct sk_buff *skb) +{ + if (unlikely(skb_zcopy_managed(skb))) + 
__skb_zcopy_downgrade_managed(skb); +} + static inline void skb_mark_not_on_list(struct sk_buff *skb) { skb->next = NULL; @@ -2528,6 +2550,22 @@ static inline unsigned int skb_pagelen(const struct sk_buff *skb) return skb_headlen(skb) + __skb_pagelen(skb); } +static inline void __skb_fill_page_desc_noacc(struct skb_shared_info *shinfo, + int i, struct page *page, + int off, int size) +{ + skb_frag_t *frag = &shinfo->frags[i]; + + /* + * Propagate page pfmemalloc to the skb if we can. The problem is + * that not all callers have unique ownership of the page but rely + * on page_is_pfmemalloc doing the right thing(tm). + */ + frag->bv_page = page; + frag->bv_offset = off; + skb_frag_size_set(frag, size); +} + /** * __skb_fill_page_desc - initialise a paged fragment in an skb * @skb: buffer containing fragment to be initialised @@ -2544,17 +2582,7 @@ static inline unsigned int skb_pagelen(const struct sk_buff *skb) static inline void __skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, int off, int size) { - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - - /* - * Propagate page pfmemalloc to the skb if we can. The problem is - * that not all callers have unique ownership of the page but rely - * on page_is_pfmemalloc doing the right thing(tm). - */ - frag->bv_page = page; - frag->bv_offset = off; - skb_frag_size_set(frag, size); - + __skb_fill_page_desc_noacc(skb_shinfo(skb), i, page, off, size); page = compound_head(page); if (page_is_pfmemalloc(page)) skb->pfmemalloc = true; @@ -3182,8 +3210,7 @@ static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask) { if (likely(!skb_zcopy(skb))) return 0; - if (!skb_zcopy_is_nouarg(skb) && - skb_uarg(skb)->callback == msg_zerocopy_callback) + if (skb_shinfo(skb)->flags & SKBFL_DONT_ORPHAN) return 0; return skb_copy_ubufs(skb, gfp_mask); } @@ -3496,7 +3523,10 @@ static inline void __skb_frag_unref(skb_frag_t *frag, bool recycle) */ static inline void skb_frag_unref(struct sk_buff *skb, int f) { - __skb_frag_unref(&skb_shinfo(skb)->frags[f], skb->pp_recycle); + struct skb_shared_info *shinfo = skb_shinfo(skb); + + if (!skb_zcopy_managed(skb)) + __skb_frag_unref(&shinfo->frags[f], skb->pp_recycle); } /** diff --git a/include/linux/soc/mediatek/mtk-mutex.h b/include/linux/soc/mediatek/mtk-mutex.h index 6fe4ffbde290..a0f4f51a3b45 100644 --- a/include/linux/soc/mediatek/mtk-mutex.h +++ b/include/linux/soc/mediatek/mtk-mutex.h @@ -10,11 +10,33 @@ struct regmap; struct device; struct mtk_mutex; +enum mtk_mutex_mod_index { + /* MDP table index */ + MUTEX_MOD_IDX_MDP_RDMA0, + MUTEX_MOD_IDX_MDP_RSZ0, + MUTEX_MOD_IDX_MDP_RSZ1, + MUTEX_MOD_IDX_MDP_TDSHP0, + MUTEX_MOD_IDX_MDP_WROT0, + MUTEX_MOD_IDX_MDP_WDMA, + MUTEX_MOD_IDX_MDP_AAL0, + MUTEX_MOD_IDX_MDP_CCORR0, + + MUTEX_MOD_IDX_MAX /* ALWAYS keep at the end */ +}; + +enum mtk_mutex_sof_index { + MUTEX_SOF_IDX_SINGLE_MODE, + + MUTEX_SOF_IDX_MAX /* ALWAYS keep at the end */ +}; + struct mtk_mutex *mtk_mutex_get(struct device *dev); int mtk_mutex_prepare(struct mtk_mutex *mutex); void mtk_mutex_add_comp(struct mtk_mutex *mutex, enum mtk_ddp_comp_id id); void mtk_mutex_enable(struct mtk_mutex *mutex); +int mtk_mutex_enable_by_cmdq(struct mtk_mutex *mutex, + void *pkt); void mtk_mutex_disable(struct mtk_mutex *mutex); void mtk_mutex_remove_comp(struct mtk_mutex *mutex, enum mtk_ddp_comp_id id); @@ -22,5 +44,10 @@ void mtk_mutex_unprepare(struct mtk_mutex *mutex); void mtk_mutex_put(struct mtk_mutex *mutex); void mtk_mutex_acquire(struct mtk_mutex *mutex); void mtk_mutex_release(struct 
mtk_mutex *mutex); +int mtk_mutex_write_mod(struct mtk_mutex *mutex, + enum mtk_mutex_mod_index idx, + bool clear); +int mtk_mutex_write_sof(struct mtk_mutex *mutex, + enum mtk_mutex_sof_index idx); #endif /* MTK_MUTEX_H */ diff --git a/include/linux/socket.h b/include/linux/socket.h index 17311ad9f9af..d4523974efbd 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -14,6 +14,8 @@ struct file; struct pid; struct cred; struct socket; +struct sock; +struct sk_buff; #define __sockaddr_check_size(size) \ BUILD_BUG_ON(((size) > sizeof(struct __kernel_sockaddr_storage))) @@ -69,6 +71,9 @@ struct msghdr { unsigned int msg_flags; /* flags on received message */ __kernel_size_t msg_controllen; /* ancillary data buffer length */ struct kiocb *msg_iocb; /* ptr to iocb for async requests */ + struct ubuf_info *msg_ubuf; + int (*sg_from_iter)(struct sock *sk, struct sk_buff *skb, + struct iov_iter *from, size_t length); }; struct user_msghdr { @@ -416,10 +421,9 @@ extern int recvmsg_copy_msghdr(struct msghdr *msg, struct user_msghdr __user *umsg, unsigned flags, struct sockaddr __user **uaddr, struct iovec **iov); -extern int __copy_msghdr_from_user(struct msghdr *kmsg, - struct user_msghdr __user *umsg, - struct sockaddr __user **save_addr, - struct iovec __user **uiov, size_t *nsegs); +extern int __copy_msghdr(struct msghdr *kmsg, + struct user_msghdr *umsg, + struct sockaddr __user **save_addr); /* helpers which do the actual work for syscalls */ extern int __sys_recvfrom(int fd, void __user *ubuf, size_t size, diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index d361ba26203b..e6c73d5ff1a8 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -17,6 +17,7 @@ #include <uapi/linux/spi/spi.h> #include <linux/acpi.h> +#include <linux/u64_stats_sync.h> struct dma_chan; struct software_node; @@ -34,7 +35,8 @@ extern struct bus_type spi_bus_type; /** * struct spi_statistics - statistics for spi transfers - * @lock: lock protecting this structure + * @syncp: seqcount to protect members in this struct for per-cpu udate + * on 32-bit systems * * @messages: number of spi-messages handled * @transfers: number of spi_transfers handled @@ -59,37 +61,48 @@ extern struct bus_type spi_bus_type; * maxsize limit */ struct spi_statistics { - spinlock_t lock; /* lock for the whole structure */ + struct u64_stats_sync syncp; - unsigned long messages; - unsigned long transfers; - unsigned long errors; - unsigned long timedout; + u64_stats_t messages; + u64_stats_t transfers; + u64_stats_t errors; + u64_stats_t timedout; - unsigned long spi_sync; - unsigned long spi_sync_immediate; - unsigned long spi_async; + u64_stats_t spi_sync; + u64_stats_t spi_sync_immediate; + u64_stats_t spi_async; - unsigned long long bytes; - unsigned long long bytes_rx; - unsigned long long bytes_tx; + u64_stats_t bytes; + u64_stats_t bytes_rx; + u64_stats_t bytes_tx; #define SPI_STATISTICS_HISTO_SIZE 17 - unsigned long transfer_bytes_histo[SPI_STATISTICS_HISTO_SIZE]; + u64_stats_t transfer_bytes_histo[SPI_STATISTICS_HISTO_SIZE]; - unsigned long transfers_split_maxsize; + u64_stats_t transfers_split_maxsize; }; -#define SPI_STATISTICS_ADD_TO_FIELD(stats, field, count) \ - do { \ - unsigned long flags; \ - spin_lock_irqsave(&(stats)->lock, flags); \ - (stats)->field += count; \ - spin_unlock_irqrestore(&(stats)->lock, flags); \ +#define SPI_STATISTICS_ADD_TO_FIELD(pcpu_stats, field, count) \ + do { \ + struct spi_statistics *__lstats; \ + get_cpu(); \ + __lstats = this_cpu_ptr(pcpu_stats); \ + 
u64_stats_update_begin(&__lstats->syncp); \ + u64_stats_add(&__lstats->field, count); \ + u64_stats_update_end(&__lstats->syncp); \ + put_cpu(); \ } while (0) -#define SPI_STATISTICS_INCREMENT_FIELD(stats, field) \ - SPI_STATISTICS_ADD_TO_FIELD(stats, field, 1) +#define SPI_STATISTICS_INCREMENT_FIELD(pcpu_stats, field) \ + do { \ + struct spi_statistics *__lstats; \ + get_cpu(); \ + __lstats = this_cpu_ptr(pcpu_stats); \ + u64_stats_update_begin(&__lstats->syncp); \ + u64_stats_inc(&__lstats->field); \ + u64_stats_update_end(&__lstats->syncp); \ + put_cpu(); \ + } while (0) /** * struct spi_delay - SPI delay information @@ -149,7 +162,7 @@ extern int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer); * @cs_inactive: delay to be introduced by the controller after CS is * deasserted. If @cs_change_delay is used from @spi_transfer, then the * two delays will be added up. - * @statistics: statistics for the spi_device + * @pcpu_statistics: statistics for the spi_device * * A @spi_device is used to interchange data between an SPI slave * (usually a discrete chip) and CPU memory. @@ -163,13 +176,13 @@ extern int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer); struct spi_device { struct device dev; struct spi_controller *controller; - struct spi_controller *master; /* compatibility layer */ + struct spi_controller *master; /* Compatibility layer */ u32 max_speed_hz; u8 chip_select; u8 bits_per_word; bool rt; -#define SPI_NO_TX BIT(31) /* no transmit wire */ -#define SPI_NO_RX BIT(30) /* no receive wire */ +#define SPI_NO_TX BIT(31) /* No transmit wire */ +#define SPI_NO_RX BIT(30) /* No receive wire */ /* * All bits defined above should be covered by SPI_MODE_KERNEL_MASK. * The SPI_MODE_KERNEL_MASK has the SPI_MODE_USER_MASK counterpart, @@ -186,15 +199,15 @@ struct spi_device { void *controller_data; char modalias[SPI_NAME_SIZE]; const char *driver_override; - struct gpio_desc *cs_gpiod; /* chip select gpio desc */ - struct spi_delay word_delay; /* inter-word delay */ + struct gpio_desc *cs_gpiod; /* Chip select gpio desc */ + struct spi_delay word_delay; /* Inter-word delay */ /* CS delays */ struct spi_delay cs_setup; struct spi_delay cs_hold; struct spi_delay cs_inactive; - /* the statistics */ - struct spi_statistics statistics; + /* The statistics */ + struct spi_statistics __percpu *pcpu_statistics; /* * likely need more hooks for more protocol options affecting how @@ -215,7 +228,7 @@ static inline struct spi_device *to_spi_device(struct device *dev) return dev ? container_of(dev, struct spi_device, dev) : NULL; } -/* most drivers won't need to care about device refcounting */ +/* Most drivers won't need to care about device refcounting */ static inline struct spi_device *spi_dev_get(struct spi_device *spi) { return (spi && get_device(&spi->dev)) ? 
spi : NULL; @@ -238,7 +251,7 @@ static inline void spi_set_ctldata(struct spi_device *spi, void *state) spi->controller_state = state; } -/* device driver data */ +/* Device driver data */ static inline void spi_set_drvdata(struct spi_device *spi, void *data) { @@ -305,7 +318,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 chip_select); -/* use a define to avoid include chaining to get THIS_MODULE */ +/* Use a define to avoid include chaining to get THIS_MODULE */ #define spi_register_driver(driver) \ __spi_register_driver(THIS_MODULE, driver) @@ -370,10 +383,14 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch * @pump_messages: work struct for scheduling work to the message pump * @queue_lock: spinlock to syncronise access to message queue * @queue: message queue - * @idling: the device is entering idle state * @cur_msg: the currently in-flight message - * @cur_msg_prepared: spi_prepare_message was called for the currently - * in-flight message + * @cur_msg_completion: a completion for the current in-flight message + * @cur_msg_incomplete: Flag used internally to opportunistically skip + * the @cur_msg_completion. This flag is used to check if the driver has + * already called spi_finalize_current_message(). + * @cur_msg_need_completion: Flag used internally to opportunistically skip + * the @cur_msg_completion. This flag is used to signal the context that + * is running spi_finalize_current_message() that it needs to complete() * @cur_msg_mapped: message has been mapped for DMA * @last_cs: the last chip_select that is recorded by set_cs, -1 on non chip * selected @@ -433,7 +450,7 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch * @max_native_cs: When cs_gpiods is used, and this field is filled in, * spi_register_controller() will validate all native CS (including the * unused native CS) against this value. - * @statistics: statistics for the spi_controller + * @pcpu_statistics: statistics for the spi_controller * @dma_tx: DMA transmit channel * @dma_rx: DMA receive channel * @dummy_rx: dummy receive buffer for full-duplex devices @@ -450,6 +467,8 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch * @irq_flags: Interrupt enable state during PTP system timestamping * @fallback: fallback to pio if dma transfer return failure with * SPI_TRANS_FAIL_NO_START. + * @queue_empty: signal green light for opportunistically skipping the queue + * for spi_sync transfers. * * Each SPI controller can communicate with one or more @spi_device * children. These make a small bus, sharing MOSI, MISO and SCK signals @@ -467,7 +486,7 @@ struct spi_controller { struct list_head list; - /* other than negative (== assign one dynamically), bus_num is fully + /* Other than negative (== assign one dynamically), bus_num is fully * board-specific. usually that simplifies to being SOC-specific. * example: one SOC has three SPI controllers, numbered 0..2, * and one board's schematics might show it using SPI-2. software @@ -480,7 +499,7 @@ struct spi_controller { */ u16 num_chipselect; - /* some SPI controllers pose alignment requirements on DMAable + /* Some SPI controllers pose alignment requirements on DMAable * buffers; let protocol drivers know about these requirements. 
*/ u16 dma_alignment; @@ -491,29 +510,29 @@ struct spi_controller { /* spi_device.mode flags override flags for this controller */ u32 buswidth_override_bits; - /* bitmask of supported bits_per_word for transfers */ + /* Bitmask of supported bits_per_word for transfers */ u32 bits_per_word_mask; #define SPI_BPW_MASK(bits) BIT((bits) - 1) #define SPI_BPW_RANGE_MASK(min, max) GENMASK((max) - 1, (min) - 1) - /* limits on transfer speed */ + /* Limits on transfer speed */ u32 min_speed_hz; u32 max_speed_hz; - /* other constraints relevant to this driver */ + /* Other constraints relevant to this driver */ u16 flags; -#define SPI_CONTROLLER_HALF_DUPLEX BIT(0) /* can't do full duplex */ -#define SPI_CONTROLLER_NO_RX BIT(1) /* can't do buffer read */ -#define SPI_CONTROLLER_NO_TX BIT(2) /* can't do buffer write */ -#define SPI_CONTROLLER_MUST_RX BIT(3) /* requires rx */ -#define SPI_CONTROLLER_MUST_TX BIT(4) /* requires tx */ +#define SPI_CONTROLLER_HALF_DUPLEX BIT(0) /* Can't do full duplex */ +#define SPI_CONTROLLER_NO_RX BIT(1) /* Can't do buffer read */ +#define SPI_CONTROLLER_NO_TX BIT(2) /* Can't do buffer write */ +#define SPI_CONTROLLER_MUST_RX BIT(3) /* Requires rx */ +#define SPI_CONTROLLER_MUST_TX BIT(4) /* Requires tx */ #define SPI_MASTER_GPIO_SS BIT(5) /* GPIO CS must select slave */ - /* flag indicating if the allocation of this struct is devres-managed */ + /* Flag indicating if the allocation of this struct is devres-managed */ bool devm_allocated; - /* flag indicating this is an SPI slave controller */ + /* Flag indicating this is an SPI slave controller */ bool slave; /* @@ -529,11 +548,11 @@ struct spi_controller { /* Used to avoid adding the same CS twice */ struct mutex add_lock; - /* lock and mutex for SPI bus locking */ + /* Lock and mutex for SPI bus locking */ spinlock_t bus_lock_spinlock; struct mutex bus_lock_mutex; - /* flag indicating that the SPI bus is locked for exclusive use */ + /* Flag indicating that the SPI bus is locked for exclusive use */ bool bus_lock_flag; /* Setup mode and clock, etc (spi driver may call many times). @@ -554,7 +573,7 @@ struct spi_controller { */ int (*set_cs_timing)(struct spi_device *spi); - /* bidirectional bulk transfers + /* Bidirectional bulk transfers * * + The transfer() method may not sleep; its main role is * just to add the message to the queue. 
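The spi.h hunks above and below convert both struct spi_device and struct spi_controller from an embedded struct spi_statistics to a __percpu pointer (pcpu_statistics), and the SPI_STATISTICS_ADD_TO_FIELD()/SPI_STATISTICS_INCREMENT_FIELD() macros now update the local CPU's copy under u64_stats_update_begin()/u64_stats_update_end(). A minimal sketch of how reporting code could sum such per-CPU counters, assuming a "messages" u64_stats_t field and a hypothetical helper name (neither is taken from this patch):

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/spi/spi.h>
#include <linux/u64_stats_sync.h>

/*
 * Illustrative sketch only: sum one per-CPU statistics field across all
 * possible CPUs. The "messages" field name and this helper are assumed
 * for illustration; the real accessors live in drivers/spi/spi.c.
 */
static u64 spi_stats_sum_messages(struct spi_statistics __percpu *pcpu_stats)
{
	u64 total = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct spi_statistics *st = per_cpu_ptr(pcpu_stats, cpu);
		unsigned int start;
		u64 val;

		do {
			start = u64_stats_fetch_begin(&st->syncp);
			val = u64_stats_read(&st->messages);
		} while (u64_stats_fetch_retry(&st->syncp, start));

		total += val;
	}

	return total;
}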
@@ -576,7 +595,7 @@ struct spi_controller { int (*transfer)(struct spi_device *spi, struct spi_message *mesg); - /* called on release() to free memory provided by spi_controller */ + /* Called on release() to free memory provided by spi_controller */ void (*cleanup)(struct spi_device *spi); /* @@ -603,12 +622,13 @@ struct spi_controller { spinlock_t queue_lock; struct list_head queue; struct spi_message *cur_msg; - bool idling; + struct completion cur_msg_completion; + bool cur_msg_incomplete; + bool cur_msg_need_completion; bool busy; bool running; bool rt; bool auto_runtime_pm; - bool cur_msg_prepared; bool cur_msg_mapped; char last_cs; bool last_cs_mode_high; @@ -646,14 +666,14 @@ struct spi_controller { s8 unused_native_cs; s8 max_native_cs; - /* statistics */ - struct spi_statistics statistics; + /* Statistics */ + struct spi_statistics __percpu *pcpu_statistics; /* DMA channels for use with core dmaengine helpers */ struct dma_chan *dma_tx; struct dma_chan *dma_rx; - /* dummy data for full duplex devices */ + /* Dummy data for full duplex devices */ void *dummy_rx; void *dummy_tx; @@ -667,6 +687,9 @@ struct spi_controller { /* Interrupt enable state during PTP system timestamping */ unsigned long irq_flags; + + /* Flag for enabling opportunistic skipping of the queue in spi_sync */ + bool queue_empty; }; static inline void *spi_controller_get_devdata(struct spi_controller *ctlr) @@ -715,7 +738,7 @@ void spi_take_timestamp_post(struct spi_controller *ctlr, struct spi_transfer *xfer, size_t progress, bool irqs_off); -/* the spi driver core manages memory for the spi_controller classdev */ +/* The spi driver core manages memory for the spi_controller classdev */ extern struct spi_controller *__spi_alloc_controller(struct device *host, unsigned int size, bool slave); @@ -785,7 +808,7 @@ typedef void (*spi_res_release_t)(struct spi_controller *ctlr, struct spi_res { struct list_head entry; spi_res_release_t release; - unsigned long long data[]; /* guarantee ull alignment */ + unsigned long long data[]; /* Guarantee ull alignment */ }; /*---------------------------------------------------------------------------*/ @@ -918,7 +941,7 @@ struct spi_res { * and its transfers, ignore them until its completion callback. */ struct spi_transfer { - /* it's ok if tx_buf == rx_buf (right?) + /* It's ok if tx_buf == rx_buf (right?) * for MicroWire, one buffer must be null * buffers must work with dma_*map_single() calls, unless * spi_message.is_dma_mapped reports a pre-existing mapping @@ -975,6 +998,7 @@ struct spi_transfer { * @queue: for use by whichever driver currently owns the message * @state: for use by whichever driver currently owns the message * @resources: for resource management when the spi message is processed + * @prepared: spi_prepare_message was called for this message * * A @spi_message is used to execute an atomic sequence of data transfers, * each represented by a struct spi_transfer. The sequence is "atomic" @@ -1008,22 +1032,25 @@ struct spi_message { * tell them about such special cases. */ - /* completion is reported through a callback */ + /* Completion is reported through a callback */ void (*complete)(void *context); void *context; unsigned frame_length; unsigned actual_length; int status; - /* for optional use by whatever driver currently owns the + /* For optional use by whatever driver currently owns the * spi_message ... between calls to spi_async and then later * complete(), that's the spi_controller controller driver.
*/ struct list_head queue; void *state; - /* list of spi_res reources when the spi message is processed */ + /* List of spi_res resources when the spi message is processed */ struct list_head resources; + + /* spi_prepare_message() was called for this message */ + bool prepared; }; static inline void spi_message_init_no_memset(struct spi_message *m) @@ -1127,7 +1154,7 @@ spi_max_transfer_size(struct spi_device *spi) if (ctlr->max_transfer_size) tr_max = ctlr->max_transfer_size(spi); - /* transfer size limit must not be greater than messsage size limit */ + /* Transfer size limit must not be greater than message size limit */ return min(tr_max, msg_max); } @@ -1278,7 +1305,7 @@ spi_read(struct spi_device *spi, void *buf, size_t len) return spi_sync_transfer(spi, &t, 1); } -/* this copies txbuf and rxbuf data; for small transfers only! */ +/* This copies txbuf and rxbuf data; for small transfers only! */ extern int spi_write_then_read(struct spi_device *spi, const void *txbuf, unsigned n_tx, void *rxbuf, unsigned n_rx); @@ -1301,7 +1328,7 @@ static inline ssize_t spi_w8r8(struct spi_device *spi, u8 cmd) status = spi_write_then_read(spi, &cmd, 1, &result, 1); - /* return negative errno or unsigned value */ + /* Return negative errno or unsigned value */ return (status < 0) ? status : result; } @@ -1326,7 +1353,7 @@ static inline ssize_t spi_w8r16(struct spi_device *spi, u8 cmd) status = spi_write_then_read(spi, &cmd, 1, &result, 2); - /* return negative errno or unsigned value */ + /* Return negative errno or unsigned value */ return (status < 0) ? status : result; } @@ -1406,7 +1433,7 @@ static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd) * are active in some dynamic board configuration models. */ struct spi_board_info { - /* the device name and module name are coupled, like platform_bus; + /* The device name and module name are coupled, like platform_bus; * "modalias" is normally the driver name.
* * platform_data goes to spi_device.dev.platform_data, @@ -1419,7 +1446,7 @@ struct spi_board_info { void *controller_data; int irq; - /* slower signaling on noisy or low voltage boards */ + /* Slower signaling on noisy or low voltage boards */ u32 max_speed_hz; @@ -1448,7 +1475,7 @@ struct spi_board_info { extern int spi_register_board_info(struct spi_board_info const *info, unsigned n); #else -/* board init code may ignore whether SPI is configured or not */ +/* Board init code may ignore whether SPI is configured or not */ static inline int spi_register_board_info(struct spi_board_info const *info, unsigned n) { return 0; } diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index 29917850f079..8df475db88c0 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -260,6 +260,7 @@ struct plat_stmmacenet_data { bool has_crossts; int int_snapshot_num; int ext_snapshot_num; + bool int_snapshot_en; bool ext_snapshot_en; bool multi_msi_en; int msi_mac_vec; diff --git a/include/linux/sysfb.h b/include/linux/sysfb.h index b0dcfa26d07b..8ba8b5be5567 100644 --- a/include/linux/sysfb.h +++ b/include/linux/sysfb.h @@ -55,6 +55,18 @@ struct efifb_dmi_info { int flags; }; +#ifdef CONFIG_SYSFB + +void sysfb_disable(void); + +#else /* CONFIG_SYSFB */ + +static inline void sysfb_disable(void) +{ +} + +#endif /* CONFIG_SYSFB */ + #ifdef CONFIG_EFI extern struct efifb_dmi_info efifb_dmi_list[]; @@ -72,8 +84,8 @@ static inline void sysfb_apply_efi_quirks(struct platform_device *pd) bool sysfb_parse_mode(const struct screen_info *si, struct simplefb_platform_data *mode); -int sysfb_create_simplefb(const struct screen_info *si, - const struct simplefb_platform_data *mode); +struct platform_device *sysfb_create_simplefb(const struct screen_info *si, + const struct simplefb_platform_data *mode); #else /* CONFIG_SYSFB_SIMPLE */ @@ -83,10 +95,10 @@ static inline bool sysfb_parse_mode(const struct screen_info *si, return false; } -static inline int sysfb_create_simplefb(const struct screen_info *si, - const struct simplefb_platform_data *mode) +static inline struct platform_device *sysfb_create_simplefb(const struct screen_info *si, + const struct simplefb_platform_data *mode) { - return -EINVAL; + return ERR_PTR(-EINVAL); } #endif /* CONFIG_SYSFB_SIMPLE */ diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 365733b428d8..1386c713885d 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -40,8 +40,6 @@ enum thermal_trend { THERMAL_TREND_STABLE, /* temperature is stable */ THERMAL_TREND_RAISING, /* temperature is raising */ THERMAL_TREND_DROPPING, /* temperature is dropping */ - THERMAL_TREND_RAISE_FULL, /* apply highest cooling action */ - THERMAL_TREND_DROP_FULL, /* apply lowest cooling action */ }; /* Thermal notification reason */ @@ -80,6 +78,18 @@ struct thermal_zone_device_ops { void (*critical)(struct thermal_zone_device *); }; +/** + * struct thermal_trip - representation of a point in temperature domain + * @temperature: temperature value in miliCelsius + * @hysteresis: relative hysteresis in miliCelsius + * @type: trip point type + */ +struct thermal_trip { + int temperature; + int hysteresis; + enum thermal_trip_type type; +}; + struct thermal_cooling_device_ops { int (*get_max_state) (struct thermal_cooling_device *, unsigned long *); int (*get_cur_state) (struct thermal_cooling_device *, unsigned long *); @@ -113,7 +123,8 @@ struct thermal_cooling_device { * @trip_hyst_attrs: attributes for trip points for sysfs: trip hysteresis * @mode: current 
mode of this thermal zone * @devdata: private pointer for device private data - * @trips: number of trip points the thermal zone supports + * @trips: an array of struct thermal_trip + * @num_trips: number of trip points the thermal zone supports * @trips_disabled; bitmap for disabled trips * @passive_delay_jiffies: number of jiffies to wait between polls when * performing passive cooling. @@ -153,7 +164,8 @@ struct thermal_zone_device { struct thermal_attr *trip_hyst_attrs; enum thermal_device_mode mode; void *devdata; - int trips; + struct thermal_trip *trips; + int num_trips; unsigned long trips_disabled; /* bitmap for disabled trips */ unsigned long passive_delay_jiffies; unsigned long polling_delay_jiffies; @@ -366,8 +378,14 @@ void devm_thermal_zone_of_sensor_unregister(struct device *dev, struct thermal_zone_device *thermal_zone_device_register(const char *, int, int, void *, struct thermal_zone_device_ops *, struct thermal_zone_params *, int, int); + void thermal_zone_device_unregister(struct thermal_zone_device *); +struct thermal_zone_device * +thermal_zone_device_register_with_trips(const char *, struct thermal_trip *, int, int, + void *, struct thermal_zone_device_ops *, + struct thermal_zone_params *, int, int); + int thermal_zone_bind_cooling_device(struct thermal_zone_device *, int, struct thermal_cooling_device *, unsigned long, unsigned long, diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index 49c7c32815f1..b47c2e7ed0ee 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -257,6 +257,7 @@ void virtio_device_ready(struct virtio_device *dev) WARN_ON(status & VIRTIO_CONFIG_S_DRIVER_OK); +#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION /* * The virtio_synchronize_cbs() makes sure vring_interrupt() * will see the driver specific setup if it sees vq->broken @@ -264,6 +265,7 @@ void virtio_device_ready(struct virtio_device *dev) */ virtio_synchronize_cbs(dev); __virtio_unbreak_device(dev); +#endif /* * The transport should ensure the visibility of vq->broken * before setting DRIVER_OK. See the comments for the transport diff --git a/include/linux/wait.h b/include/linux/wait.h index 851e07da2583..58cfbf81447c 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -544,10 +544,11 @@ do { \ \ hrtimer_init_sleeper_on_stack(&__t, CLOCK_MONOTONIC, \ HRTIMER_MODE_REL); \ - if ((timeout) != KTIME_MAX) \ - hrtimer_start_range_ns(&__t.timer, timeout, \ - current->timer_slack_ns, \ - HRTIMER_MODE_REL); \ + if ((timeout) != KTIME_MAX) { \ + hrtimer_set_expires_range_ns(&__t.timer, timeout, \ + current->timer_slack_ns); \ + hrtimer_sleeper_start_expires(&__t, HRTIMER_MODE_REL); \ + } \ \ __ret = ___wait_event(wq_head, condition, state, 0, 0, \ if (!__t.task) { \ diff --git a/include/linux/writeback.h b/include/linux/writeback.h index e91bea371b18..3f045f6d6c4f 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -364,7 +364,14 @@ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty); unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh); void wb_update_bandwidth(struct bdi_writeback *wb); + +/* Invoke balance dirty pages in async mode. 
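The writeback.h hunk continuing below adds a BDP_ASYNC flag and a balance_dirty_pages_ratelimited_flags() variant intended for callers that cannot sleep ("async mode" per the comment above). A hedged sketch of a caller, where the helper name and the nowait parameter are illustrative assumptions rather than anything from this patch:

#include <linux/fs.h>
#include <linux/writeback.h>

/*
 * Illustrative sketch only: a write path that must not block passes
 * BDP_ASYNC so the dirty-page throttle can bail out rather than sleep.
 */
static int example_throttle_dirty(struct address_space *mapping, bool nowait)
{
	unsigned int bdp_flags = nowait ? BDP_ASYNC : 0;

	return balance_dirty_pages_ratelimited_flags(mapping, bdp_flags);
}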
*/ +#define BDP_ASYNC 0x0001 + void balance_dirty_pages_ratelimited(struct address_space *mapping); +int balance_dirty_pages_ratelimited_flags(struct address_space *mapping, + unsigned int flags); + bool wb_over_bg_thresh(struct bdi_writeback *wb); typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc, diff --git a/include/linux/xattr.h b/include/linux/xattr.h index 4c379d23ec6e..979a9d3e5bfb 100644 --- a/include/linux/xattr.h +++ b/include/linux/xattr.h @@ -61,7 +61,7 @@ int __vfs_setxattr_locked(struct user_namespace *, struct dentry *, const char *, const void *, size_t, int, struct inode **); int vfs_setxattr(struct user_namespace *, struct dentry *, const char *, - const void *, size_t, int); + void *, size_t, int); int __vfs_removexattr(struct user_namespace *, struct dentry *, const char *); int __vfs_removexattr_locked(struct user_namespace *, struct dentry *, const char *, struct inode **); diff --git a/include/net/addrconf.h b/include/net/addrconf.h index f7506f08e505..c04f359655b8 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -405,6 +405,9 @@ static inline bool ip6_ignore_linkdown(const struct net_device *dev) { const struct inet6_dev *idev = __in6_dev_get(dev); + if (unlikely(!idev)) + return true; + return !!idev->cnf.ignore_routes_with_linkdown; } diff --git a/include/net/amt.h b/include/net/amt.h index 0e40c3d64fcf..08fc30cf2f34 100644 --- a/include/net/amt.h +++ b/include/net/amt.h @@ -78,6 +78,15 @@ enum amt_status { #define AMT_STATUS_MAX (__AMT_STATUS_MAX - 1) +/* Gateway events only */ +enum amt_event { + AMT_EVENT_NONE, + AMT_EVENT_RECEIVE, + AMT_EVENT_SEND_DISCOVERY, + AMT_EVENT_SEND_REQUEST, + __AMT_EVENT_MAX, +}; + struct amt_header { #if defined(__LITTLE_ENDIAN_BITFIELD) u8 type:4, @@ -292,6 +301,12 @@ struct amt_group_node { struct hlist_head sources[]; }; +#define AMT_MAX_EVENTS 16 +struct amt_events { + enum amt_event event; + struct sk_buff *skb; +}; + struct amt_dev { struct net_device *dev; struct net_device *stream_dev; @@ -308,6 +323,7 @@ struct amt_dev { struct delayed_work req_wq; /* Protected by RTNL */ struct delayed_work secret_wq; + struct work_struct event_wq; /* AMT status */ enum amt_status status; /* Generated key */ @@ -345,6 +361,10 @@ struct amt_dev { /* Used only in gateway mode */ u64 mac:48, reserved:16; + /* AMT gateway side message handler queue */ + struct amt_events events[AMT_MAX_EVENTS]; + u8 event_idx; + u8 nr_events; }; #define AMT_TOS 0xc0 diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h index 3c4f550e5a8b..2f766e3437ce 100644 --- a/include/net/bluetooth/l2cap.h +++ b/include/net/bluetooth/l2cap.h @@ -847,6 +847,7 @@ enum { }; void l2cap_chan_hold(struct l2cap_chan *c); +struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c); void l2cap_chan_put(struct l2cap_chan *c); static inline void l2cap_chan_lock(struct l2cap_chan *chan) diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 6d02e12e4702..80f41446b1f0 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -8462,11 +8462,12 @@ int cfg80211_bss_color_notify(struct net_device *dev, gfp_t gfp, * cfg80211_obss_color_collision_notify - notify about bss color collision * @dev: network device * @color_bitmap: representations of the colors that the local BSS is aware of + * @gfp: allocation flags */ static inline int cfg80211_obss_color_collision_notify(struct net_device *dev, - u64 color_bitmap) + u64 color_bitmap, gfp_t gfp) { - return cfg80211_bss_color_notify(dev, 
GFP_KERNEL, + return cfg80211_bss_color_notify(dev, gfp, NL80211_CMD_OBSS_COLOR_COLLISION, 0, color_bitmap); } diff --git a/include/net/compat.h b/include/net/compat.h index 595fee069b82..84c163f40f38 100644 --- a/include/net/compat.h +++ b/include/net/compat.h @@ -46,9 +46,8 @@ struct compat_rtentry { unsigned short rt_irtt; /* Initial RTT */ }; -int __get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg, - struct sockaddr __user **save_addr, compat_uptr_t *ptr, - compat_size_t *len); +int __get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr *msg, + struct sockaddr __user **save_addr); int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *, struct sockaddr __user **, struct iovec **); int put_cmsg_compat(struct msghdr*, int, int, int, void *); diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h index 6484095a8c01..7ac313858037 100644 --- a/include/net/flow_offload.h +++ b/include/net/flow_offload.h @@ -152,6 +152,7 @@ enum flow_action_id { FLOW_ACTION_PIPE, FLOW_ACTION_VLAN_PUSH_ETH, FLOW_ACTION_VLAN_POP_ETH, + FLOW_ACTION_CONTINUE, NUM_FLOW_ACTIONS, }; diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index 85cd695e7fd1..ee88f0f1350f 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -321,7 +321,7 @@ void inet_csk_update_fastreuse(struct inet_bind_bucket *tb, struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu); -#define TCP_PINGPONG_THRESH 3 +#define TCP_PINGPONG_THRESH 1 static inline void inet_csk_enter_pingpong_mode(struct sock *sk) { @@ -338,14 +338,6 @@ static inline bool inet_csk_in_pingpong_mode(struct sock *sk) return inet_csk(sk)->icsk_ack.pingpong >= TCP_PINGPONG_THRESH; } -static inline void inet_csk_inc_pingpong_cnt(struct sock *sk) -{ - struct inet_connection_sock *icsk = inet_csk(sk); - - if (icsk->icsk_ack.pingpong < U8_MAX) - icsk->icsk_ack.pingpong++; -} - static inline bool inet_csk_has_ulp(struct sock *sk) { return inet_sk(sk)->is_icsk && !!inet_csk(sk)->icsk_ulp_ops; diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h index ebfa3df6f8dc..fd6b510d114b 100644 --- a/include/net/inet_hashtables.h +++ b/include/net/inet_hashtables.h @@ -179,7 +179,7 @@ static inline bool inet_sk_bound_dev_eq(struct net *net, int bound_dev_if, int dif, int sdif) { #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) - return inet_bound_dev_eq(!!net->ipv4.sysctl_tcp_l3mdev_accept, + return inet_bound_dev_eq(!!READ_ONCE(net->ipv4.sysctl_tcp_l3mdev_accept), bound_dev_if, dif, sdif); #else return inet_bound_dev_eq(true, bound_dev_if, dif, sdif); diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index daead5fb389a..6395f6b9a5d2 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -107,7 +107,8 @@ static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk) static inline u32 inet_request_mark(const struct sock *sk, struct sk_buff *skb) { - if (!sk->sk_mark && sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept) + if (!sk->sk_mark && + READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept)) return skb->mark; return sk->sk_mark; @@ -120,7 +121,7 @@ static inline int inet_request_bound_dev_if(const struct sock *sk, #ifdef CONFIG_NET_L3_MASTER_DEV struct net *net = sock_net(sk); - if (!bound_dev_if && net->ipv4.sysctl_tcp_l3mdev_accept) + if (!bound_dev_if && READ_ONCE(net->ipv4.sysctl_tcp_l3mdev_accept)) return l3mdev_master_ifindex_by_index(net, skb->skb_iif); #endif @@ -132,7 +133,7 @@ static inline 
int inet_sk_bound_l3mdev(const struct sock *sk) #ifdef CONFIG_NET_L3_MASTER_DEV struct net *net = sock_net(sk); - if (!net->ipv4.sysctl_tcp_l3mdev_accept) + if (!READ_ONCE(net->ipv4.sysctl_tcp_l3mdev_accept)) return l3mdev_master_ifindex_by_index(net, sk->sk_bound_dev_if); #endif @@ -374,7 +375,7 @@ static inline bool inet_get_convert_csum(struct sock *sk) static inline bool inet_can_nonlocal_bind(struct net *net, struct inet_sock *inet) { - return net->ipv4.sysctl_ip_nonlocal_bind || + return READ_ONCE(net->ipv4.sysctl_ip_nonlocal_bind) || inet->freebind || inet->transparent; } diff --git a/include/net/ip.h b/include/net/ip.h index 26fffda78cca..1c979fd1904c 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -357,7 +357,7 @@ static inline bool sysctl_dev_name_is_allowed(const char *name) static inline bool inet_port_requires_bind_service(struct net *net, unsigned short port) { - return port < net->ipv4.sysctl_ip_prot_sock; + return port < READ_ONCE(net->ipv4.sysctl_ip_prot_sock); } #else @@ -384,7 +384,7 @@ void ipfrag_init(void); void ip_static_sysctl_init(void); #define IP4_REPLY_MARK(net, mark) \ - ((net)->ipv4.sysctl_fwmark_reflect ? (mark) : 0) + (READ_ONCE((net)->ipv4.sysctl_fwmark_reflect) ? (mark) : 0) static inline bool ip_is_fragment(const struct iphdr *iph) { @@ -446,7 +446,7 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst, struct net *net = dev_net(dst->dev); unsigned int mtu; - if (net->ipv4.sysctl_ip_fwd_use_pmtu || + if (READ_ONCE(net->ipv4.sysctl_ip_fwd_use_pmtu) || ip_mtu_locked(dst) || !forwarding) { mtu = rt->rt_pmtu; diff --git a/include/net/mac80211.h b/include/net/mac80211.h index ebadb2103968..47642b020706 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -6960,10 +6960,11 @@ ieee80211_get_unsol_bcast_probe_resp_tmpl(struct ieee80211_hw *hw, * @vif: &struct ieee80211_vif pointer from the add_interface callback. * @color_bitmap: a 64 bit bitmap representing the colors that the local BSS is * aware of. 
+ * @gfp: allocation flags */ void ieeee80211_obss_color_collision_notify(struct ieee80211_vif *vif, - u64 color_bitmap); + u64 color_bitmap, gfp_t gfp); /** * ieee80211_is_tx_data - check if frame is a data frame diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 279ae0fff7ad..64cf655c818c 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -657,18 +657,22 @@ static inline void nft_set_ext_prepare(struct nft_set_ext_tmpl *tmpl) tmpl->len = sizeof(struct nft_set_ext); } -static inline void nft_set_ext_add_length(struct nft_set_ext_tmpl *tmpl, u8 id, - unsigned int len) +static inline int nft_set_ext_add_length(struct nft_set_ext_tmpl *tmpl, u8 id, + unsigned int len) { tmpl->len = ALIGN(tmpl->len, nft_set_ext_types[id].align); - BUG_ON(tmpl->len > U8_MAX); + if (tmpl->len > U8_MAX) + return -EINVAL; + tmpl->offset[id] = tmpl->len; tmpl->len += nft_set_ext_types[id].len + len; + + return 0; } -static inline void nft_set_ext_add(struct nft_set_ext_tmpl *tmpl, u8 id) +static inline int nft_set_ext_add(struct nft_set_ext_tmpl *tmpl, u8 id) { - nft_set_ext_add_length(tmpl, id, 0); + return nft_set_ext_add_length(tmpl, id, 0); } static inline void nft_set_ext_init(struct nft_set_ext *ext, @@ -1338,24 +1342,28 @@ void nft_unregister_flowtable_type(struct nf_flowtable_type *type); /** * struct nft_traceinfo - nft tracing information and state * + * @trace: other struct members are initialised + * @nf_trace: copy of skb->nf_trace before rule evaluation + * @type: event type (enum nft_trace_types) + * @skbid: hash of skb to be used as trace id + * @packet_dumped: packet headers sent in a previous traceinfo message * @pkt: pktinfo currently processed * @basechain: base chain currently processed * @chain: chain currently processed * @rule: rule that was evaluated * @verdict: verdict given by rule - * @type: event type (enum nft_trace_types) - * @packet_dumped: packet headers sent in a previous traceinfo message - * @trace: other struct members are initialised */ struct nft_traceinfo { + bool trace; + bool nf_trace; + bool packet_dumped; + enum nft_trace_types type:8; + u32 skbid; const struct nft_pktinfo *pkt; const struct nft_base_chain *basechain; const struct nft_chain *chain; const struct nft_rule_dp *rule; const struct nft_verdict *verdict; - enum nft_trace_types type; - bool packet_dumped; - bool trace; }; void nft_trace_init(struct nft_traceinfo *info, const struct nft_pktinfo *pkt, diff --git a/include/net/protocol.h b/include/net/protocol.h index f51c06ae365f..6aef8cb11cc8 100644 --- a/include/net/protocol.h +++ b/include/net/protocol.h @@ -35,8 +35,6 @@ /* This is used to register protocols. */ struct net_protocol { - int (*early_demux)(struct sk_buff *skb); - int (*early_demux_handler)(struct sk_buff *skb); int (*handler)(struct sk_buff *skb); /* This returns an error if we weren't able to handle the error. */ @@ -52,8 +50,6 @@ struct net_protocol { #if IS_ENABLED(CONFIG_IPV6) struct inet6_protocol { - void (*early_demux)(struct sk_buff *skb); - void (*early_demux_handler)(struct sk_buff *skb); int (*handler)(struct sk_buff *skb); /* This returns an error if we weren't able to handle the error. 
*/ diff --git a/include/net/raw.h b/include/net/raw.h index 8ad8df594853..c51a635671a7 100644 --- a/include/net/raw.h +++ b/include/net/raw.h @@ -75,7 +75,7 @@ static inline bool raw_sk_bound_dev_eq(struct net *net, int bound_dev_if, int dif, int sdif) { #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) - return inet_bound_dev_eq(!!net->ipv4.sysctl_raw_l3mdev_accept, + return inet_bound_dev_eq(READ_ONCE(net->ipv4.sysctl_raw_l3mdev_accept), bound_dev_if, dif, sdif); #else return inet_bound_dev_eq(true, bound_dev_if, dif, sdif); diff --git a/include/net/route.h b/include/net/route.h index 991a3985712d..bbcf2aba149f 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -373,7 +373,7 @@ static inline int ip4_dst_hoplimit(const struct dst_entry *dst) struct net *net = dev_net(dst->dev); if (hoplimit == 0) - hoplimit = net->ipv4.sysctl_ip_default_ttl; + hoplimit = READ_ONCE(net->ipv4.sysctl_ip_default_ttl); return hoplimit; } diff --git a/include/net/sock.h b/include/net/sock.h index 72ca97ccb460..7a48991cdb19 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1529,7 +1529,7 @@ void __sk_mem_reclaim(struct sock *sk, int amount); /* sysctl_mem values are in pages, we convert them in SK_MEM_QUANTUM units */ static inline long sk_prot_mem_limits(const struct sock *sk, int index) { - long val = sk->sk_prot->sysctl_mem[index]; + long val = READ_ONCE(sk->sk_prot->sysctl_mem[index]); #if PAGE_SIZE > SK_MEM_QUANTUM val <<= PAGE_SHIFT - SK_MEM_QUANTUM_SHIFT; @@ -2843,18 +2843,18 @@ static inline int sk_get_wmem0(const struct sock *sk, const struct proto *proto) { /* Does this proto have per netns sysctl_wmem ? */ if (proto->sysctl_wmem_offset) - return *(int *)((void *)sock_net(sk) + proto->sysctl_wmem_offset); + return READ_ONCE(*(int *)((void *)sock_net(sk) + proto->sysctl_wmem_offset)); - return *proto->sysctl_wmem; + return READ_ONCE(*proto->sysctl_wmem); } static inline int sk_get_rmem0(const struct sock *sk, const struct proto *proto) { /* Does this proto have per netns sysctl_rmem ? 
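A recurring pattern in the net/ hunks above is wrapping lockless reads of per-netns sysctls in READ_ONCE(), as in raw_sk_bound_dev_eq(), ip4_dst_hoplimit() and the sk_get_wmem0()/sk_get_rmem0() helpers; the matching writers are presumably annotated with WRITE_ONCE() elsewhere in the series. A hedged sketch of the reader side, using a hypothetical helper around the sysctl_ip_default_ttl field shown above:

#include <linux/compiler.h>
#include <net/net_namespace.h>

/*
 * Illustrative sketch only: annotate the lockless load of a sysctl that
 * can be rewritten concurrently; the proc handler on the writer side is
 * assumed to store the value with WRITE_ONCE() for the pairing to hold.
 */
static int example_ip_default_ttl(const struct net *net)
{
	return READ_ONCE(net->ipv4.sysctl_ip_default_ttl);
}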
*/ if (proto->sysctl_rmem_offset) - return *(int *)((void *)sock_net(sk) + proto->sysctl_rmem_offset); + return READ_ONCE(*(int *)((void *)sock_net(sk) + proto->sysctl_rmem_offset)); - return *proto->sysctl_rmem; + return READ_ONCE(*proto->sysctl_rmem); } /* Default TCP Small queue budget is ~1 ms of data (1sec >> 10) diff --git a/include/net/tcp.h b/include/net/tcp.h index 1e99f5c61f84..78a64e1b33a7 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -932,7 +932,7 @@ extern const struct inet_connection_sock_af_ops ipv6_specific; INDIRECT_CALLABLE_DECLARE(void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)); INDIRECT_CALLABLE_DECLARE(int tcp_v6_rcv(struct sk_buff *skb)); -INDIRECT_CALLABLE_DECLARE(void tcp_v6_early_demux(struct sk_buff *skb)); +void tcp_v6_early_demux(struct sk_buff *skb); #endif @@ -1403,8 +1403,8 @@ static inline void tcp_slow_start_after_idle_check(struct sock *sk) struct tcp_sock *tp = tcp_sk(sk); s32 delta; - if (!sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle || tp->packets_out || - ca_ops->cong_control) + if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) || + tp->packets_out || ca_ops->cong_control) return; delta = tcp_jiffies32 - tp->lsndtime; if (delta > inet_csk(sk)->icsk_rto) @@ -1419,7 +1419,7 @@ void tcp_select_initial_window(const struct sock *sk, int __space, static inline int tcp_win_from_space(const struct sock *sk, int space) { - int tcp_adv_win_scale = sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale; + int tcp_adv_win_scale = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale); return tcp_adv_win_scale <= 0 ? (space>>(-tcp_adv_win_scale)) : @@ -1493,21 +1493,24 @@ static inline int keepalive_intvl_when(const struct tcp_sock *tp) { struct net *net = sock_net((struct sock *)tp); - return tp->keepalive_intvl ? : net->ipv4.sysctl_tcp_keepalive_intvl; + return tp->keepalive_intvl ? : + READ_ONCE(net->ipv4.sysctl_tcp_keepalive_intvl); } static inline int keepalive_time_when(const struct tcp_sock *tp) { struct net *net = sock_net((struct sock *)tp); - return tp->keepalive_time ? : net->ipv4.sysctl_tcp_keepalive_time; + return tp->keepalive_time ? : + READ_ONCE(net->ipv4.sysctl_tcp_keepalive_time); } static inline int keepalive_probes(const struct tcp_sock *tp) { struct net *net = sock_net((struct sock *)tp); - return tp->keepalive_probes ? : net->ipv4.sysctl_tcp_keepalive_probes; + return tp->keepalive_probes ? : + READ_ONCE(net->ipv4.sysctl_tcp_keepalive_probes); } static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp) @@ -1520,7 +1523,8 @@ static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp) static inline int tcp_fin_time(const struct sock *sk) { - int fin_timeout = tcp_sk(sk)->linger2 ? : sock_net(sk)->ipv4.sysctl_tcp_fin_timeout; + int fin_timeout = tcp_sk(sk)->linger2 ? 
: + READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fin_timeout); const int rto = inet_csk(sk)->icsk_rto; if (fin_timeout < (rto << 2) - (rto >> 1)) @@ -2023,7 +2027,7 @@ void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr); static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp) { struct net *net = sock_net((struct sock *)tp); - return tp->notsent_lowat ?: net->ipv4.sysctl_tcp_notsent_lowat; + return tp->notsent_lowat ?: READ_ONCE(net->ipv4.sysctl_tcp_notsent_lowat); } bool tcp_stream_memory_free(const struct sock *sk, int wake); diff --git a/include/net/tls.h b/include/net/tls.h index 8017f1703447..8bd938f98bdd 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -704,7 +704,7 @@ int tls_sw_fallback_init(struct sock *sk, struct tls_crypto_info *crypto_info); #ifdef CONFIG_TLS_DEVICE -void tls_device_init(void); +int tls_device_init(void); void tls_device_cleanup(void); void tls_device_sk_destruct(struct sock *sk); int tls_set_device_offload(struct sock *sk, struct tls_context *ctx); @@ -724,7 +724,7 @@ static inline bool tls_is_sk_rx_device_offloaded(struct sock *sk) return tls_get_ctx(sk)->rx_conf == TLS_HW; } #else -static inline void tls_device_init(void) {} +static inline int tls_device_init(void) { return 0; } static inline void tls_device_cleanup(void) {} static inline int diff --git a/include/net/udp.h b/include/net/udp.h index b83a00330566..8dd4aa1485a6 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -167,7 +167,7 @@ static inline void udp_csum_pull_header(struct sk_buff *skb) typedef struct sock *(*udp_lookup_t)(const struct sk_buff *skb, __be16 sport, __be16 dport); -INDIRECT_CALLABLE_DECLARE(void udp_v6_early_demux(struct sk_buff *)); +void udp_v6_early_demux(struct sk_buff *skb); INDIRECT_CALLABLE_DECLARE(int udpv6_rcv(struct sk_buff *)); struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb, @@ -238,7 +238,7 @@ static inline bool udp_sk_bound_dev_eq(struct net *net, int bound_dev_if, int dif, int sdif) { #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) - return inet_bound_dev_eq(!!net->ipv4.sysctl_udp_l3mdev_accept, + return inet_bound_dev_eq(!!READ_ONCE(net->ipv4.sysctl_udp_l3mdev_accept), bound_dev_if, dif, sdif); #else return inet_bound_dev_eq(true, bound_dev_if, dif, sdif); diff --git a/include/sound/soc.h b/include/sound/soc.h index f20f5f890794..b276dcb5d4e8 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -408,8 +408,6 @@ struct snd_soc_jack_pin; struct snd_soc_jack_gpio; -typedef int (*hw_write_t)(void *,const char* ,int); - enum snd_soc_pcm_subclass { SND_SOC_PCM_CLASS_PCM = 0, SND_SOC_PCM_CLASS_BE = 1, diff --git a/include/trace/events/dlm.h b/include/trace/events/dlm.h index 32088c603244..bad21222130e 100644 --- a/include/trace/events/dlm.h +++ b/include/trace/events/dlm.h @@ -49,38 +49,52 @@ /* note: we begin tracing dlm_lock_start() only if ls and lkb are found */ TRACE_EVENT(dlm_lock_start, - TP_PROTO(struct dlm_ls *ls, struct dlm_lkb *lkb, int mode, - __u32 flags), + TP_PROTO(struct dlm_ls *ls, struct dlm_lkb *lkb, void *name, + unsigned int namelen, int mode, __u32 flags), - TP_ARGS(ls, lkb, mode, flags), + TP_ARGS(ls, lkb, name, namelen, mode, flags), TP_STRUCT__entry( __field(__u32, ls_id) __field(__u32, lkb_id) __field(int, mode) __field(__u32, flags) + __dynamic_array(unsigned char, res_name, + lkb->lkb_resource ? 
lkb->lkb_resource->res_length : namelen) ), TP_fast_assign( + struct dlm_rsb *r; + __entry->ls_id = ls->ls_global_id; __entry->lkb_id = lkb->lkb_id; __entry->mode = mode; __entry->flags = flags; + + r = lkb->lkb_resource; + if (r) + memcpy(__get_dynamic_array(res_name), r->res_name, + __get_dynamic_array_len(res_name)); + else if (name) + memcpy(__get_dynamic_array(res_name), name, + __get_dynamic_array_len(res_name)); ), - TP_printk("ls_id=%u lkb_id=%x mode=%s flags=%s", + TP_printk("ls_id=%u lkb_id=%x mode=%s flags=%s res_name=%s", __entry->ls_id, __entry->lkb_id, show_lock_mode(__entry->mode), - show_lock_flags(__entry->flags)) + show_lock_flags(__entry->flags), + __print_hex_str(__get_dynamic_array(res_name), + __get_dynamic_array_len(res_name))) ); TRACE_EVENT(dlm_lock_end, - TP_PROTO(struct dlm_ls *ls, struct dlm_lkb *lkb, int mode, __u32 flags, - int error), + TP_PROTO(struct dlm_ls *ls, struct dlm_lkb *lkb, void *name, + unsigned int namelen, int mode, __u32 flags, int error), - TP_ARGS(ls, lkb, mode, flags, error), + TP_ARGS(ls, lkb, name, namelen, mode, flags, error), TP_STRUCT__entry( __field(__u32, ls_id) @@ -88,14 +102,26 @@ TRACE_EVENT(dlm_lock_end, __field(int, mode) __field(__u32, flags) __field(int, error) + __dynamic_array(unsigned char, res_name, + lkb->lkb_resource ? lkb->lkb_resource->res_length : namelen) ), TP_fast_assign( + struct dlm_rsb *r; + __entry->ls_id = ls->ls_global_id; __entry->lkb_id = lkb->lkb_id; __entry->mode = mode; __entry->flags = flags; + r = lkb->lkb_resource; + if (r) + memcpy(__get_dynamic_array(res_name), r->res_name, + __get_dynamic_array_len(res_name)); + else if (name) + memcpy(__get_dynamic_array(res_name), name, + __get_dynamic_array_len(res_name)); + /* return value will be zeroed in those cases by dlm_lock() * we do it here again to not introduce more overhead if * trace isn't running and error reflects the return value. @@ -104,12 +130,15 @@ TRACE_EVENT(dlm_lock_end, __entry->error = 0; else __entry->error = error; + ), - TP_printk("ls_id=%u lkb_id=%x mode=%s flags=%s error=%d", + TP_printk("ls_id=%u lkb_id=%x mode=%s flags=%s error=%d res_name=%s", __entry->ls_id, __entry->lkb_id, show_lock_mode(__entry->mode), - show_lock_flags(__entry->flags), __entry->error) + show_lock_flags(__entry->flags), __entry->error, + __print_hex_str(__get_dynamic_array(res_name), + __get_dynamic_array_len(res_name))) ); @@ -123,42 +152,65 @@ TRACE_EVENT(dlm_bast, __field(__u32, ls_id) __field(__u32, lkb_id) __field(int, mode) + __dynamic_array(unsigned char, res_name, + lkb->lkb_resource ? lkb->lkb_resource->res_length : 0) ), TP_fast_assign( + struct dlm_rsb *r; + __entry->ls_id = ls->ls_global_id; __entry->lkb_id = lkb->lkb_id; __entry->mode = mode; + + r = lkb->lkb_resource; + if (r) + memcpy(__get_dynamic_array(res_name), r->res_name, + __get_dynamic_array_len(res_name)); ), - TP_printk("ls_id=%u lkb_id=%x mode=%s", __entry->ls_id, - __entry->lkb_id, show_lock_mode(__entry->mode)) + TP_printk("ls_id=%u lkb_id=%x mode=%s res_name=%s", + __entry->ls_id, __entry->lkb_id, + show_lock_mode(__entry->mode), + __print_hex_str(__get_dynamic_array(res_name), + __get_dynamic_array_len(res_name))) ); TRACE_EVENT(dlm_ast, - TP_PROTO(struct dlm_ls *ls, struct dlm_lkb *lkb, struct dlm_lksb *lksb), + TP_PROTO(struct dlm_ls *ls, struct dlm_lkb *lkb), - TP_ARGS(ls, lkb, lksb), + TP_ARGS(ls, lkb), TP_STRUCT__entry( __field(__u32, ls_id) __field(__u32, lkb_id) __field(u8, sb_flags) __field(int, sb_status) + __dynamic_array(unsigned char, res_name, + lkb->lkb_resource ? 
lkb->lkb_resource->res_length : 0) ), TP_fast_assign( + struct dlm_rsb *r; + __entry->ls_id = ls->ls_global_id; __entry->lkb_id = lkb->lkb_id; - __entry->sb_flags = lksb->sb_flags; - __entry->sb_status = lksb->sb_status; + __entry->sb_flags = lkb->lkb_lksb->sb_flags; + __entry->sb_status = lkb->lkb_lksb->sb_status; + + r = lkb->lkb_resource; + if (r) + memcpy(__get_dynamic_array(res_name), r->res_name, + __get_dynamic_array_len(res_name)); ), - TP_printk("ls_id=%u lkb_id=%x sb_flags=%s sb_status=%d", + TP_printk("ls_id=%u lkb_id=%x sb_flags=%s sb_status=%d res_name=%s", __entry->ls_id, __entry->lkb_id, - show_dlm_sb_flags(__entry->sb_flags), __entry->sb_status) + show_dlm_sb_flags(__entry->sb_flags), __entry->sb_status, + __print_hex_str(__get_dynamic_array(res_name), + __get_dynamic_array_len(res_name))) ); @@ -173,17 +225,28 @@ TRACE_EVENT(dlm_unlock_start, __field(__u32, ls_id) __field(__u32, lkb_id) __field(__u32, flags) + __dynamic_array(unsigned char, res_name, + lkb->lkb_resource ? lkb->lkb_resource->res_length : 0) ), TP_fast_assign( + struct dlm_rsb *r; + __entry->ls_id = ls->ls_global_id; __entry->lkb_id = lkb->lkb_id; __entry->flags = flags; + + r = lkb->lkb_resource; + if (r) + memcpy(__get_dynamic_array(res_name), r->res_name, + __get_dynamic_array_len(res_name)); ), - TP_printk("ls_id=%u lkb_id=%x flags=%s", + TP_printk("ls_id=%u lkb_id=%x flags=%s res_name=%s", __entry->ls_id, __entry->lkb_id, - show_lock_flags(__entry->flags)) + show_lock_flags(__entry->flags), + __print_hex_str(__get_dynamic_array(res_name), + __get_dynamic_array_len(res_name))) ); @@ -199,18 +262,29 @@ TRACE_EVENT(dlm_unlock_end, __field(__u32, lkb_id) __field(__u32, flags) __field(int, error) + __dynamic_array(unsigned char, res_name, + lkb->lkb_resource ? lkb->lkb_resource->res_length : 0) ), TP_fast_assign( + struct dlm_rsb *r; + __entry->ls_id = ls->ls_global_id; __entry->lkb_id = lkb->lkb_id; __entry->flags = flags; __entry->error = error; + + r = lkb->lkb_resource; + if (r) + memcpy(__get_dynamic_array(res_name), r->res_name, + __get_dynamic_array_len(res_name)); ), - TP_printk("ls_id=%u lkb_id=%x flags=%s error=%d", + TP_printk("ls_id=%u lkb_id=%x flags=%s error=%d res_name=%s", __entry->ls_id, __entry->lkb_id, - show_lock_flags(__entry->flags), __entry->error) + show_lock_flags(__entry->flags), __entry->error, + __print_hex_str(__get_dynamic_array(res_name), + __get_dynamic_array_len(res_name))) ); diff --git a/include/trace/events/io_uring.h b/include/trace/events/io_uring.h index aa2f951b07cd..c5b21ff0ac85 100644 --- a/include/trace/events/io_uring.h +++ b/include/trace/events/io_uring.h @@ -7,6 +7,7 @@ #include <linux/tracepoint.h> #include <uapi/linux/io_uring.h> +#include <linux/io_uring_types.h> #include <linux/io_uring.h> struct io_wq_work; @@ -97,9 +98,7 @@ TRACE_EVENT(io_uring_register, /** * io_uring_file_get - called before getting references to an SQE file * - * @ctx: pointer to a ring context structure * @req: pointer to a submitted request - * @user_data: user data associated with the request * @fd: SQE file descriptor * * Allows to trace out how often an SQE file reference is obtained, which can @@ -108,9 +107,9 @@ TRACE_EVENT(io_uring_register, */ TRACE_EVENT(io_uring_file_get, - TP_PROTO(void *ctx, void *req, unsigned long long user_data, int fd), + TP_PROTO(struct io_kiocb *req, int fd), - TP_ARGS(ctx, req, user_data, fd), + TP_ARGS(req, fd), TP_STRUCT__entry ( __field( void *, ctx ) @@ -120,9 +119,9 @@ TRACE_EVENT(io_uring_file_get, ), TP_fast_assign( - __entry->ctx = ctx; + 
__entry->ctx = req->ctx; __entry->req = req; - __entry->user_data = user_data; + __entry->user_data = req->cqe.user_data; __entry->fd = fd; ), @@ -133,22 +132,16 @@ TRACE_EVENT(io_uring_file_get, /** * io_uring_queue_async_work - called before submitting a new async work * - * @ctx: pointer to a ring context structure * @req: pointer to a submitted request - * @user_data: user data associated with the request - * @opcode: opcode of request - * @flags request flags - * @work: pointer to a submitted io_wq_work * @rw: type of workqueue, hashed or normal * * Allows to trace asynchronous work submission. */ TRACE_EVENT(io_uring_queue_async_work, - TP_PROTO(void *ctx, void * req, unsigned long long user_data, u8 opcode, - unsigned int flags, struct io_wq_work *work, int rw), + TP_PROTO(struct io_kiocb *req, int rw), - TP_ARGS(ctx, req, user_data, opcode, flags, work, rw), + TP_ARGS(req, rw), TP_STRUCT__entry ( __field( void *, ctx ) @@ -159,19 +152,19 @@ TRACE_EVENT(io_uring_queue_async_work, __field( struct io_wq_work *, work ) __field( int, rw ) - __string( op_str, io_uring_get_opcode(opcode) ) + __string( op_str, io_uring_get_opcode(req->opcode) ) ), TP_fast_assign( - __entry->ctx = ctx; + __entry->ctx = req->ctx; __entry->req = req; - __entry->user_data = user_data; - __entry->flags = flags; - __entry->opcode = opcode; - __entry->work = work; + __entry->user_data = req->cqe.user_data; + __entry->flags = req->flags; + __entry->opcode = req->opcode; + __entry->work = &req->work; __entry->rw = rw; - __assign_str(op_str, io_uring_get_opcode(opcode)); + __assign_str(op_str, io_uring_get_opcode(req->opcode)); ), TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s, flags 0x%x, %s queue, work %p", @@ -183,19 +176,16 @@ TRACE_EVENT(io_uring_queue_async_work, /** * io_uring_defer - called when an io_uring request is deferred * - * @ctx: pointer to a ring context structure * @req: pointer to a deferred request - * @user_data: user data associated with the request - * @opcode: opcode of request * * Allows to track deferred requests, to get an insight about what requests are * not started immediately. 
*/ TRACE_EVENT(io_uring_defer, - TP_PROTO(void *ctx, void *req, unsigned long long user_data, u8 opcode), + TP_PROTO(struct io_kiocb *req), - TP_ARGS(ctx, req, user_data, opcode), + TP_ARGS(req), TP_STRUCT__entry ( __field( void *, ctx ) @@ -203,16 +193,16 @@ TRACE_EVENT(io_uring_defer, __field( unsigned long long, data ) __field( u8, opcode ) - __string( op_str, io_uring_get_opcode(opcode) ) + __string( op_str, io_uring_get_opcode(req->opcode) ) ), TP_fast_assign( - __entry->ctx = ctx; + __entry->ctx = req->ctx; __entry->req = req; - __entry->data = user_data; - __entry->opcode = opcode; + __entry->data = req->cqe.user_data; + __entry->opcode = req->opcode; - __assign_str(op_str, io_uring_get_opcode(opcode)); + __assign_str(op_str, io_uring_get_opcode(req->opcode)); ), TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s", @@ -224,7 +214,6 @@ TRACE_EVENT(io_uring_defer, * io_uring_link - called before the io_uring request added into link_list of * another request * - * @ctx: pointer to a ring context structure * @req: pointer to a linked request * @target_req: pointer to a previous request, that would contain @req * @@ -233,9 +222,9 @@ TRACE_EVENT(io_uring_defer, */ TRACE_EVENT(io_uring_link, - TP_PROTO(void *ctx, void *req, void *target_req), + TP_PROTO(struct io_kiocb *req, struct io_kiocb *target_req), - TP_ARGS(ctx, req, target_req), + TP_ARGS(req, target_req), TP_STRUCT__entry ( __field( void *, ctx ) @@ -244,7 +233,7 @@ TRACE_EVENT(io_uring_link, ), TP_fast_assign( - __entry->ctx = ctx; + __entry->ctx = req->ctx; __entry->req = req; __entry->target_req = target_req; ), @@ -285,10 +274,7 @@ TRACE_EVENT(io_uring_cqring_wait, /** * io_uring_fail_link - called before failing a linked request * - * @ctx: pointer to a ring context structure * @req: request, which links were cancelled - * @user_data: user data associated with the request - * @opcode: opcode of request * @link: cancelled link * * Allows to track linked requests cancellation, to see not only that some work @@ -296,9 +282,9 @@ TRACE_EVENT(io_uring_cqring_wait, */ TRACE_EVENT(io_uring_fail_link, - TP_PROTO(void *ctx, void *req, unsigned long long user_data, u8 opcode, void *link), + TP_PROTO(struct io_kiocb *req, struct io_kiocb *link), - TP_ARGS(ctx, req, user_data, opcode, link), + TP_ARGS(req, link), TP_STRUCT__entry ( __field( void *, ctx ) @@ -307,17 +293,17 @@ TRACE_EVENT(io_uring_fail_link, __field( u8, opcode ) __field( void *, link ) - __string( op_str, io_uring_get_opcode(opcode) ) + __string( op_str, io_uring_get_opcode(req->opcode) ) ), TP_fast_assign( - __entry->ctx = ctx; + __entry->ctx = req->ctx; __entry->req = req; - __entry->user_data = user_data; - __entry->opcode = opcode; + __entry->user_data = req->cqe.user_data; + __entry->opcode = req->opcode; __entry->link = link; - __assign_str(op_str, io_uring_get_opcode(opcode)); + __assign_str(op_str, io_uring_get_opcode(req->opcode)); ), TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s, link %p", @@ -376,23 +362,17 @@ TRACE_EVENT(io_uring_complete, /** * io_uring_submit_sqe - called before submitting one SQE * - * @ctx: pointer to a ring context structure * @req: pointer to a submitted request - * @user_data: user data associated with the request - * @opcode: opcode of request - * @flags request flags * @force_nonblock: whether a context blocking or not - * @sq_thread: true if sq_thread has submitted this SQE * * Allows to track SQE submitting, to understand what was the source of it, SQ * thread or io_uring_enter call. 
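The io_uring tracepoint reworks above and below collapse the old (ctx, req, user_data, opcode, ...) argument lists into a single struct io_kiocb *req, with the ring pointer, user_data and opcode derived inside TP_fast_assign(). A hedged sketch of what a call site then looks like; the wrapper function is purely illustrative and not from this patch:

#include <linux/io_uring_types.h>
#include <trace/events/io_uring.h>

/*
 * Illustrative sketch only: with the reworked prototypes, callers pass
 * just the request plus any event-specific extras.
 */
static void example_trace_calls(struct io_kiocb *req)
{
	trace_io_uring_defer(req);
	trace_io_uring_queue_async_work(req, 0);
}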
*/ TRACE_EVENT(io_uring_submit_sqe, - TP_PROTO(void *ctx, void *req, unsigned long long user_data, u8 opcode, u32 flags, - bool force_nonblock, bool sq_thread), + TP_PROTO(struct io_kiocb *req, bool force_nonblock), - TP_ARGS(ctx, req, user_data, opcode, flags, force_nonblock, sq_thread), + TP_ARGS(req, force_nonblock), TP_STRUCT__entry ( __field( void *, ctx ) @@ -403,19 +383,19 @@ TRACE_EVENT(io_uring_submit_sqe, __field( bool, force_nonblock ) __field( bool, sq_thread ) - __string( op_str, io_uring_get_opcode(opcode) ) + __string( op_str, io_uring_get_opcode(req->opcode) ) ), TP_fast_assign( - __entry->ctx = ctx; + __entry->ctx = req->ctx; __entry->req = req; - __entry->user_data = user_data; - __entry->opcode = opcode; - __entry->flags = flags; + __entry->user_data = req->cqe.user_data; + __entry->opcode = req->opcode; + __entry->flags = req->flags; __entry->force_nonblock = force_nonblock; - __entry->sq_thread = sq_thread; + __entry->sq_thread = req->ctx->flags & IORING_SETUP_SQPOLL; - __assign_str(op_str, io_uring_get_opcode(opcode)); + __assign_str(op_str, io_uring_get_opcode(req->opcode)); ), TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, flags 0x%x, " @@ -427,10 +407,7 @@ TRACE_EVENT(io_uring_submit_sqe, /* * io_uring_poll_arm - called after arming a poll wait if successful * - * @ctx: pointer to a ring context structure * @req: pointer to the armed request - * @user_data: user data associated with the request - * @opcode: opcode of request * @mask: request poll events mask * @events: registered events of interest * @@ -439,10 +416,9 @@ TRACE_EVENT(io_uring_submit_sqe, */ TRACE_EVENT(io_uring_poll_arm, - TP_PROTO(void *ctx, void *req, u64 user_data, u8 opcode, - int mask, int events), + TP_PROTO(struct io_kiocb *req, int mask, int events), - TP_ARGS(ctx, req, user_data, opcode, mask, events), + TP_ARGS(req, mask, events), TP_STRUCT__entry ( __field( void *, ctx ) @@ -452,18 +428,18 @@ TRACE_EVENT(io_uring_poll_arm, __field( int, mask ) __field( int, events ) - __string( op_str, io_uring_get_opcode(opcode) ) + __string( op_str, io_uring_get_opcode(req->opcode) ) ), TP_fast_assign( - __entry->ctx = ctx; + __entry->ctx = req->ctx; __entry->req = req; - __entry->user_data = user_data; - __entry->opcode = opcode; + __entry->user_data = req->cqe.user_data; + __entry->opcode = req->opcode; __entry->mask = mask; __entry->events = events; - __assign_str(op_str, io_uring_get_opcode(opcode)); + __assign_str(op_str, io_uring_get_opcode(req->opcode)); ), TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, mask 0x%x, events 0x%x", @@ -475,18 +451,15 @@ TRACE_EVENT(io_uring_poll_arm, /* * io_uring_task_add - called after adding a task * - * @ctx: pointer to a ring context structure * @req: pointer to request - * @user_data: user data associated with the request - * @opcode: opcode of request * @mask: request poll events mask * */ TRACE_EVENT(io_uring_task_add, - TP_PROTO(void *ctx, void *req, unsigned long long user_data, u8 opcode, int mask), + TP_PROTO(struct io_kiocb *req, int mask), - TP_ARGS(ctx, req, user_data, opcode, mask), + TP_ARGS(req, mask), TP_STRUCT__entry ( __field( void *, ctx ) @@ -495,17 +468,17 @@ TRACE_EVENT(io_uring_task_add, __field( u8, opcode ) __field( int, mask ) - __string( op_str, io_uring_get_opcode(opcode) ) + __string( op_str, io_uring_get_opcode(req->opcode) ) ), TP_fast_assign( - __entry->ctx = ctx; + __entry->ctx = req->ctx; __entry->req = req; - __entry->user_data = user_data; - __entry->opcode = opcode; + __entry->user_data = req->cqe.user_data; 
+ __entry->opcode = req->opcode; __entry->mask = mask; - __assign_str(op_str, io_uring_get_opcode(opcode)); + __assign_str(op_str, io_uring_get_opcode(req->opcode)); ), TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, mask %x", @@ -518,7 +491,6 @@ TRACE_EVENT(io_uring_task_add, * io_uring_req_failed - called when an sqe is errored dring submission * * @sqe: pointer to the io_uring_sqe that failed - * @ctx: pointer to a ring context structure * @req: pointer to request * @error: error it failed with * @@ -526,9 +498,9 @@ TRACE_EVENT(io_uring_task_add, */ TRACE_EVENT(io_uring_req_failed, - TP_PROTO(const struct io_uring_sqe *sqe, void *ctx, void *req, int error), + TP_PROTO(const struct io_uring_sqe *sqe, struct io_kiocb *req, int error), - TP_ARGS(sqe, ctx, req, error), + TP_ARGS(sqe, req, error), TP_STRUCT__entry ( __field( void *, ctx ) @@ -552,7 +524,7 @@ TRACE_EVENT(io_uring_req_failed, ), TP_fast_assign( - __entry->ctx = ctx; + __entry->ctx = req->ctx; __entry->req = req; __entry->user_data = sqe->user_data; __entry->opcode = sqe->opcode; @@ -622,12 +594,67 @@ TRACE_EVENT(io_uring_cqe_overflow, __entry->ocqe = ocqe; ), - TP_printk("ring %p, user_data 0x%llx, res %d, flags %x, " + TP_printk("ring %p, user_data 0x%llx, res %d, cflags 0x%x, " "overflow_cqe %p", __entry->ctx, __entry->user_data, __entry->res, __entry->cflags, __entry->ocqe) ); +/* + * io_uring_task_work_run - ran task work + * + * @tctx: pointer to a io_uring_task + * @count: how many functions it ran + * @loops: how many loops it ran + * + */ +TRACE_EVENT(io_uring_task_work_run, + + TP_PROTO(void *tctx, unsigned int count, unsigned int loops), + + TP_ARGS(tctx, count, loops), + + TP_STRUCT__entry ( + __field( void *, tctx ) + __field( unsigned int, count ) + __field( unsigned int, loops ) + ), + + TP_fast_assign( + __entry->tctx = tctx; + __entry->count = count; + __entry->loops = loops; + ), + + TP_printk("tctx %p, count %u, loops %u", + __entry->tctx, __entry->count, __entry->loops) +); + +TRACE_EVENT(io_uring_short_write, + + TP_PROTO(void *ctx, u64 fpos, u64 wanted, u64 got), + + TP_ARGS(ctx, fpos, wanted, got), + + TP_STRUCT__entry( + __field(void *, ctx) + __field(u64, fpos) + __field(u64, wanted) + __field(u64, got) + ), + + TP_fast_assign( + __entry->ctx = ctx; + __entry->fpos = fpos; + __entry->wanted = wanted; + __entry->got = got; + ), + + TP_printk("ring %p, fpos %lld, wanted %lld, got %lld", + __entry->ctx, __entry->fpos, + __entry->wanted, __entry->got) +); + #endif /* _TRACE_IO_URING_H */ /* This part must be outside protection */ diff --git a/include/trace/events/iocost.h b/include/trace/events/iocost.h index e282ce02fa2d..6d1626e7a4ce 100644 --- a/include/trace/events/iocost.h +++ b/include/trace/events/iocost.h @@ -160,7 +160,7 @@ TRACE_EVENT(iocost_ioc_vrate_adj, TP_fast_assign( __assign_str(devname, ioc_name(ioc)); - __entry->old_vrate = atomic64_read(&ioc->vtime_rate);; + __entry->old_vrate = atomic64_read(&ioc->vtime_rate); __entry->new_vrate = new_vrate; __entry->busy_level = ioc->busy_level; __entry->read_missed_ppm = missed_ppm[READ]; diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h index f76668305ac5..4cb51ace600d 100644 --- a/include/trace/events/kmem.h +++ b/include/trace/events/kmem.h @@ -13,11 +13,12 @@ DECLARE_EVENT_CLASS(kmem_alloc, TP_PROTO(unsigned long call_site, const void *ptr, + struct kmem_cache *s, size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags), - TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags), + TP_ARGS(call_site, ptr, s, bytes_req, 
bytes_alloc, gfp_flags), TP_STRUCT__entry( __field( unsigned long, call_site ) @@ -25,6 +26,7 @@ DECLARE_EVENT_CLASS(kmem_alloc, __field( size_t, bytes_req ) __field( size_t, bytes_alloc ) __field( unsigned long, gfp_flags ) + __field( bool, accounted ) ), TP_fast_assign( @@ -33,42 +35,47 @@ DECLARE_EVENT_CLASS(kmem_alloc, __entry->bytes_req = bytes_req; __entry->bytes_alloc = bytes_alloc; __entry->gfp_flags = (__force unsigned long)gfp_flags; + __entry->accounted = IS_ENABLED(CONFIG_MEMCG_KMEM) ? + ((gfp_flags & __GFP_ACCOUNT) || + (s && s->flags & SLAB_ACCOUNT)) : false; ), - TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s", + TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s accounted=%s", (void *)__entry->call_site, __entry->ptr, __entry->bytes_req, __entry->bytes_alloc, - show_gfp_flags(__entry->gfp_flags)) + show_gfp_flags(__entry->gfp_flags), + __entry->accounted ? "true" : "false") ); DEFINE_EVENT(kmem_alloc, kmalloc, - TP_PROTO(unsigned long call_site, const void *ptr, + TP_PROTO(unsigned long call_site, const void *ptr, struct kmem_cache *s, size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags), - TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags) + TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags) ); DEFINE_EVENT(kmem_alloc, kmem_cache_alloc, - TP_PROTO(unsigned long call_site, const void *ptr, + TP_PROTO(unsigned long call_site, const void *ptr, struct kmem_cache *s, size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags), - TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags) + TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags) ); DECLARE_EVENT_CLASS(kmem_alloc_node, TP_PROTO(unsigned long call_site, const void *ptr, + struct kmem_cache *s, size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags, int node), - TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node), + TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node), TP_STRUCT__entry( __field( unsigned long, call_site ) @@ -77,6 +84,7 @@ DECLARE_EVENT_CLASS(kmem_alloc_node, __field( size_t, bytes_alloc ) __field( unsigned long, gfp_flags ) __field( int, node ) + __field( bool, accounted ) ), TP_fast_assign( @@ -86,33 +94,37 @@ DECLARE_EVENT_CLASS(kmem_alloc_node, __entry->bytes_alloc = bytes_alloc; __entry->gfp_flags = (__force unsigned long)gfp_flags; __entry->node = node; + __entry->accounted = IS_ENABLED(CONFIG_MEMCG_KMEM) ? + ((gfp_flags & __GFP_ACCOUNT) || + (s && s->flags & SLAB_ACCOUNT)) : false; ), - TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d", + TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d accounted=%s", (void *)__entry->call_site, __entry->ptr, __entry->bytes_req, __entry->bytes_alloc, show_gfp_flags(__entry->gfp_flags), - __entry->node) + __entry->node, + __entry->accounted ? 
"true" : "false") ); DEFINE_EVENT(kmem_alloc_node, kmalloc_node, TP_PROTO(unsigned long call_site, const void *ptr, - size_t bytes_req, size_t bytes_alloc, + struct kmem_cache *s, size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags, int node), - TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node) + TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node) ); DEFINE_EVENT(kmem_alloc_node, kmem_cache_alloc_node, TP_PROTO(unsigned long call_site, const void *ptr, - size_t bytes_req, size_t bytes_alloc, + struct kmem_cache *s, size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags, int node), - TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node) + TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node) ); TRACE_EVENT(kfree, diff --git a/include/trace/events/power.h b/include/trace/events/power.h index af5018aa9517..c708521e4ed5 100644 --- a/include/trace/events/power.h +++ b/include/trace/events/power.h @@ -500,6 +500,35 @@ DEFINE_EVENT(dev_pm_qos_request, dev_pm_qos_remove_request, TP_ARGS(name, type, new_value) ); + +TRACE_EVENT(guest_halt_poll_ns, + + TP_PROTO(bool grow, unsigned int new, unsigned int old), + + TP_ARGS(grow, new, old), + + TP_STRUCT__entry( + __field(bool, grow) + __field(unsigned int, new) + __field(unsigned int, old) + ), + + TP_fast_assign( + __entry->grow = grow; + __entry->new = new; + __entry->old = old; + ), + + TP_printk("halt_poll_ns %u (%s %u)", + __entry->new, + __entry->grow ? "grow" : "shrink", + __entry->old) +); + +#define trace_guest_halt_poll_ns_grow(new, old) \ + trace_guest_halt_poll_ns(true, new, old) +#define trace_guest_halt_poll_ns_shrink(new, old) \ + trace_guest_halt_poll_ns(false, new, old) #endif /* _TRACE_POWER_H */ /* This part must be outside protection */ diff --git a/include/trace/events/scmi.h b/include/trace/events/scmi.h index cee4b2b64ae4..65016a767b7a 100644 --- a/include/trace/events/scmi.h +++ b/include/trace/events/scmi.h @@ -7,6 +7,31 @@ #include <linux/tracepoint.h> +TRACE_EVENT(scmi_fc_call, + TP_PROTO(u8 protocol_id, u8 msg_id, u32 res_id, u32 val1, u32 val2), + TP_ARGS(protocol_id, msg_id, res_id, val1, val2), + + TP_STRUCT__entry( + __field(u8, protocol_id) + __field(u8, msg_id) + __field(u32, res_id) + __field(u32, val1) + __field(u32, val2) + ), + + TP_fast_assign( + __entry->protocol_id = protocol_id; + __entry->msg_id = msg_id; + __entry->res_id = res_id; + __entry->val1 = val1; + __entry->val2 = val2; + ), + + TP_printk("[0x%02X]:[0x%02X]:[%08X]:%u:%u", + __entry->protocol_id, __entry->msg_id, + __entry->res_id, __entry->val1, __entry->val2) +); + TRACE_EVENT(scmi_xfer_begin, TP_PROTO(int transfer_id, u8 msg_id, u8 protocol_id, u16 seq, bool poll), @@ -112,6 +137,37 @@ TRACE_EVENT(scmi_rx_done, __entry->transfer_id, __entry->msg_id, __entry->protocol_id, __entry->seq, __entry->msg_type) ); + +TRACE_EVENT(scmi_msg_dump, + TP_PROTO(u8 protocol_id, u8 msg_id, unsigned char *tag, u16 seq, + int status, void *buf, size_t len), + TP_ARGS(protocol_id, msg_id, tag, seq, status, buf, len), + + TP_STRUCT__entry( + __field(u8, protocol_id) + __field(u8, msg_id) + __array(char, tag, 5) + __field(u16, seq) + __field(int, status) + __field(size_t, len) + __dynamic_array(unsigned char, cmd, len) + ), + + TP_fast_assign( + __entry->protocol_id = protocol_id; + __entry->msg_id = msg_id; + strscpy(__entry->tag, tag, 5); + __entry->seq = seq; + __entry->status = status; + __entry->len = len; + memcpy(__get_dynamic_array(cmd), buf, __entry->len); + ), + + TP_printk("pt=%02X t=%s msg_id=%02X 
seq=%04X s=%d pyld=%s", + __entry->protocol_id, __entry->tag, __entry->msg_id, + __entry->seq, __entry->status, + __print_hex_str(__get_dynamic_array(cmd), __entry->len)) +); #endif /* _TRACE_SCMI_H */ /* This part must be outside protection */ diff --git a/include/trace/events/sock.h b/include/trace/events/sock.h index 12c315782766..777ee6cbe933 100644 --- a/include/trace/events/sock.h +++ b/include/trace/events/sock.h @@ -98,7 +98,7 @@ TRACE_EVENT(sock_exceed_buf_limit, TP_STRUCT__entry( __array(char, name, 32) - __field(long *, sysctl_mem) + __array(long, sysctl_mem, 3) __field(long, allocated) __field(int, sysctl_rmem) __field(int, rmem_alloc) @@ -110,7 +110,9 @@ TRACE_EVENT(sock_exceed_buf_limit, TP_fast_assign( strncpy(__entry->name, prot->name, 32); - __entry->sysctl_mem = prot->sysctl_mem; + __entry->sysctl_mem[0] = READ_ONCE(prot->sysctl_mem[0]); + __entry->sysctl_mem[1] = READ_ONCE(prot->sysctl_mem[1]); + __entry->sysctl_mem[2] = READ_ONCE(prot->sysctl_mem[2]); __entry->allocated = allocated; __entry->sysctl_rmem = sk_get_rmem0(sk, prot); __entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc); diff --git a/include/trace/events/thermal.h b/include/trace/events/thermal.h index 8a5f04888abd..e58bf3072f32 100644 --- a/include/trace/events/thermal.h +++ b/include/trace/events/thermal.h @@ -92,34 +92,22 @@ TRACE_EVENT(thermal_zone_trip, ); #ifdef CONFIG_CPU_THERMAL -TRACE_EVENT(thermal_power_cpu_get_power, - TP_PROTO(const struct cpumask *cpus, unsigned long freq, u32 *load, - size_t load_len, u32 dynamic_power), +TRACE_EVENT(thermal_power_cpu_get_power_simple, + TP_PROTO(int cpu, u32 power), - TP_ARGS(cpus, freq, load, load_len, dynamic_power), + TP_ARGS(cpu, power), TP_STRUCT__entry( - __bitmask(cpumask, num_possible_cpus()) - __field(unsigned long, freq ) - __dynamic_array(u32, load, load_len) - __field(size_t, load_len ) - __field(u32, dynamic_power ) + __field(int, cpu) + __field(u32, power) ), TP_fast_assign( - __assign_bitmask(cpumask, cpumask_bits(cpus), - num_possible_cpus()); - __entry->freq = freq; - memcpy(__get_dynamic_array(load), load, - load_len * sizeof(*load)); - __entry->load_len = load_len; - __entry->dynamic_power = dynamic_power; + __entry->cpu = cpu; + __entry->power = power; ), - TP_printk("cpus=%s freq=%lu load={%s} dynamic_power=%d", - __get_bitmask(cpumask), __entry->freq, - __print_array(__get_dynamic_array(load), __entry->load_len, 4), - __entry->dynamic_power) + TP_printk("cpu=%d power=%u", __entry->cpu, __entry->power) ); TRACE_EVENT(thermal_power_cpu_limit, diff --git a/include/uapi/asm-generic/fcntl.h b/include/uapi/asm-generic/fcntl.h index f13d37b60775..1ecdb911add8 100644 --- a/include/uapi/asm-generic/fcntl.h +++ b/include/uapi/asm-generic/fcntl.h @@ -192,6 +192,7 @@ struct f_owner_ex { #define F_LINUX_SPECIFIC_BASE 1024 +#ifndef HAVE_ARCH_STRUCT_FLOCK struct flock { short l_type; short l_whence; @@ -216,5 +217,6 @@ struct flock64 { __ARCH_FLOCK64_PAD #endif }; +#endif /* HAVE_ARCH_STRUCT_FLOCK */ #endif /* _ASM_GENERIC_FCNTL_H */ diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h index f1972154a594..0980678d502d 100644 --- a/include/uapi/drm/drm_fourcc.h +++ b/include/uapi/drm/drm_fourcc.h @@ -1444,11 +1444,11 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier) #define AMD_FMT_MOD_PIPE_MASK 0x7 #define AMD_FMT_MOD_SET(field, value) \ - ((uint64_t)(value) << AMD_FMT_MOD_##field##_SHIFT) + ((__u64)(value) << AMD_FMT_MOD_##field##_SHIFT) #define AMD_FMT_MOD_GET(field, value) \ (((value) >> AMD_FMT_MOD_##field##_SHIFT) & 
AMD_FMT_MOD_##field##_MASK) #define AMD_FMT_MOD_CLEAR(field) \ - (~((uint64_t)AMD_FMT_MOD_##field##_MASK << AMD_FMT_MOD_##field##_SHIFT)) + (~((__u64)AMD_FMT_MOD_##field##_MASK << AMD_FMT_MOD_##field##_SHIFT)) #if defined(__cplusplus) } diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index f4009dbdf62d..ef78e0e1a754 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -5222,22 +5222,25 @@ union bpf_attr { * Return * Nothing. Always succeeds. * - * long bpf_dynptr_read(void *dst, u32 len, struct bpf_dynptr *src, u32 offset) + * long bpf_dynptr_read(void *dst, u32 len, struct bpf_dynptr *src, u32 offset, u64 flags) * Description * Read *len* bytes from *src* into *dst*, starting from *offset* * into *src*. + * *flags* is currently unused. * Return * 0 on success, -E2BIG if *offset* + *len* exceeds the length - * of *src*'s data, -EINVAL if *src* is an invalid dynptr. + * of *src*'s data, -EINVAL if *src* is an invalid dynptr or if + * *flags* is not 0. * - * long bpf_dynptr_write(struct bpf_dynptr *dst, u32 offset, void *src, u32 len) + * long bpf_dynptr_write(struct bpf_dynptr *dst, u32 offset, void *src, u32 len, u64 flags) * Description * Write *len* bytes from *src* into *dst*, starting from *offset* * into *dst*. + * *flags* is currently unused. * Return * 0 on success, -E2BIG if *offset* + *len* exceeds the length * of *dst*'s data, -EINVAL if *dst* is an invalid dynptr or if *dst* - * is a read-only dynptr. + * is a read-only dynptr or if *flags* is not 0. * * void *bpf_dynptr_data(struct bpf_dynptr *ptr, u32 offset, u32 len) * Description diff --git a/include/uapi/linux/fanotify.h b/include/uapi/linux/fanotify.h index f1f89132d60e..d8536d77fea1 100644 --- a/include/uapi/linux/fanotify.h +++ b/include/uapi/linux/fanotify.h @@ -83,12 +83,20 @@ #define FAN_MARK_FLUSH 0x00000080 /* FAN_MARK_FILESYSTEM is 0x00000100 */ #define FAN_MARK_EVICTABLE 0x00000200 +/* This bit is mutually exclusive with FAN_MARK_IGNORED_MASK bit */ +#define FAN_MARK_IGNORE 0x00000400 /* These are NOT bitwise flags. Both bits can be used togther. */ #define FAN_MARK_INODE 0x00000000 #define FAN_MARK_MOUNT 0x00000010 #define FAN_MARK_FILESYSTEM 0x00000100 +/* + * Convenience macro - FAN_MARK_IGNORE requires FAN_MARK_IGNORED_SURV_MODIFY + * for non-inode mark types. + */ +#define FAN_MARK_IGNORE_SURV (FAN_MARK_IGNORE | FAN_MARK_IGNORED_SURV_MODIFY) + /* Deprecated - do not use this in programs and do not add new flags here! */ #define FAN_ALL_MARK_FLAGS (FAN_MARK_ADD |\ FAN_MARK_REMOVE |\ diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h index ef4257ab3026..2557eb7b0561 100644 --- a/include/uapi/linux/input.h +++ b/include/uapi/linux/input.h @@ -78,10 +78,13 @@ struct input_id { * Note that input core does not clamp reported values to the * [minimum, maximum] limits, such task is left to userspace. * - * The default resolution for main axes (ABS_X, ABS_Y, ABS_Z) - * is reported in units per millimeter (units/mm), resolution - * for rotational axes (ABS_RX, ABS_RY, ABS_RZ) is reported - * in units per radian. + * The default resolution for main axes (ABS_X, ABS_Y, ABS_Z, + * ABS_MT_POSITION_X, ABS_MT_POSITION_Y) is reported in units + * per millimeter (units/mm), resolution for rotational axes + * (ABS_RX, ABS_RY, ABS_RZ) is reported in units per radian. + * The resolution for the size axes (ABS_MT_TOUCH_MAJOR, + * ABS_MT_TOUCH_MINOR, ABS_MT_WIDTH_MAJOR, ABS_MT_WIDTH_MINOR) + * is reported in units per millimeter (units/mm). 
* When INPUT_PROP_ACCELEROMETER is set the resolution changes. * The main axes (ABS_X, ABS_Y, ABS_Z) are then reported in * units per g (units/g) and in units per degree per second diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 53e7dae92e42..1463cfecb56b 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -10,6 +10,7 @@ #include <linux/fs.h> #include <linux/types.h> +#include <linux/time_types.h> /* * IO submission data structure (Submission Queue Entry) @@ -22,7 +23,10 @@ struct io_uring_sqe { union { __u64 off; /* offset into file */ __u64 addr2; - __u32 cmd_op; + struct { + __u32 cmd_op; + __u32 __pad1; + }; }; union { __u64 addr; /* pointer to buffer or iovecs */ @@ -47,6 +51,7 @@ struct io_uring_sqe { __u32 unlink_flags; __u32 hardlink_flags; __u32 xattr_flags; + __u32 msg_ring_flags; }; __u64 user_data; /* data to be passed back at completion time */ /* pack this to avoid bogus arm OABI complaints */ @@ -61,6 +66,10 @@ struct io_uring_sqe { union { __s32 splice_fd_in; __u32 file_index; + struct { + __u16 notification_idx; + __u16 addr_len; + }; }; union { struct { @@ -137,9 +146,12 @@ enum { * IORING_SQ_TASKRUN in the sq ring flags. Not valid with COOP_TASKRUN. */ #define IORING_SETUP_TASKRUN_FLAG (1U << 9) - #define IORING_SETUP_SQE128 (1U << 10) /* SQEs are 128 byte */ #define IORING_SETUP_CQE32 (1U << 11) /* CQEs are 32 byte */ +/* + * Only one task is allowed to submit requests + */ +#define IORING_SETUP_SINGLE_ISSUER (1U << 12) enum io_uring_op { IORING_OP_NOP, @@ -162,7 +174,8 @@ enum io_uring_op { IORING_OP_FALLOCATE, IORING_OP_OPENAT, IORING_OP_CLOSE, - IORING_OP_FILES_UPDATE, + IORING_OP_RSRC_UPDATE, + IORING_OP_FILES_UPDATE = IORING_OP_RSRC_UPDATE, IORING_OP_STATX, IORING_OP_READ, IORING_OP_WRITE, @@ -189,6 +202,7 @@ enum io_uring_op { IORING_OP_GETXATTR, IORING_OP_SOCKET, IORING_OP_URING_CMD, + IORING_OP_SENDZC_NOTIF, /* this goes last, obviously */ IORING_OP_LAST, @@ -210,6 +224,7 @@ enum io_uring_op { #define IORING_TIMEOUT_ETIME_SUCCESS (1U << 5) #define IORING_TIMEOUT_CLOCK_MASK (IORING_TIMEOUT_BOOTTIME | IORING_TIMEOUT_REALTIME) #define IORING_TIMEOUT_UPDATE_MASK (IORING_TIMEOUT_UPDATE | IORING_LINK_TIMEOUT_UPDATE) + /* * sqe->splice_flags * extends splice(2) flags @@ -226,10 +241,13 @@ enum io_uring_op { * * IORING_POLL_UPDATE Update existing poll request, matching * sqe->addr as the old user_data field. + * + * IORING_POLL_LEVEL Level triggered poll. */ #define IORING_POLL_ADD_MULTI (1U << 0) #define IORING_POLL_UPDATE_EVENTS (1U << 1) #define IORING_POLL_UPDATE_USER_DATA (1U << 2) +#define IORING_POLL_ADD_LEVEL (1U << 3) /* * ASYNC_CANCEL flags. @@ -238,26 +256,66 @@ enum io_uring_op { * IORING_ASYNC_CANCEL_FD Key off 'fd' for cancelation rather than the * request 'user_data' * IORING_ASYNC_CANCEL_ANY Match any request + * IORING_ASYNC_CANCEL_FD_FIXED 'fd' passed in is a fixed descriptor */ #define IORING_ASYNC_CANCEL_ALL (1U << 0) #define IORING_ASYNC_CANCEL_FD (1U << 1) #define IORING_ASYNC_CANCEL_ANY (1U << 2) +#define IORING_ASYNC_CANCEL_FD_FIXED (1U << 3) /* - * send/sendmsg and recv/recvmsg flags (sqe->addr2) + * send/sendmsg and recv/recvmsg flags (sqe->ioprio) * * IORING_RECVSEND_POLL_FIRST If set, instead of first attempting to send * or receive and arm poll if that yields an * -EAGAIN result, arm poll upfront and skip * the initial transfer attempt. + * + * IORING_RECV_MULTISHOT Multishot recv. 
Sets IORING_CQE_F_MORE if + * the handler will continue to report + * CQEs on behalf of the same SQE. + * + * IORING_RECVSEND_FIXED_BUF Use registered buffers, the index is stored in + * the buf_index field. + * + * IORING_RECVSEND_NOTIF_FLUSH Flush a notification after a successful + * successful. Only for zerocopy sends. */ #define IORING_RECVSEND_POLL_FIRST (1U << 0) +#define IORING_RECV_MULTISHOT (1U << 1) +#define IORING_RECVSEND_FIXED_BUF (1U << 2) +#define IORING_RECVSEND_NOTIF_FLUSH (1U << 3) /* * accept flags stored in sqe->ioprio */ #define IORING_ACCEPT_MULTISHOT (1U << 0) + +/* + * IORING_OP_RSRC_UPDATE flags + */ +enum { + IORING_RSRC_UPDATE_FILES, + IORING_RSRC_UPDATE_NOTIF, +}; + +/* + * IORING_OP_MSG_RING command types, stored in sqe->addr + */ +enum { + IORING_MSG_DATA, /* pass sqe->len as 'res' and off as user_data */ + IORING_MSG_SEND_FD, /* send a registered fd to another ring */ +}; + +/* + * IORING_OP_MSG_RING flags (sqe->msg_ring_flags) + * + * IORING_MSG_RING_CQE_SKIP Don't post a CQE to the target ring. Not + * applicable for IORING_MSG_DATA, obviously. + */ +#define IORING_MSG_RING_CQE_SKIP (1U << 0) + /* * IO completion data structure (Completion Queue Entry) */ @@ -417,6 +475,16 @@ enum { IORING_REGISTER_PBUF_RING = 22, IORING_UNREGISTER_PBUF_RING = 23, + /* sync cancelation API */ + IORING_REGISTER_SYNC_CANCEL = 24, + + /* register a range of fixed file slots for automatic slot allocation */ + IORING_REGISTER_FILE_ALLOC_RANGE = 25, + + /* zerocopy notification API */ + IORING_REGISTER_NOTIFIERS = 26, + IORING_UNREGISTER_NOTIFIERS = 27, + /* this goes last */ IORING_REGISTER_LAST }; @@ -463,6 +531,19 @@ struct io_uring_rsrc_update2 { __u32 resv2; }; +struct io_uring_notification_slot { + __u64 tag; + __u64 resv[3]; +}; + +struct io_uring_notification_register { + __u32 nr_slots; + __u32 resv; + __u64 resv2; + __u64 data; + __u64 resv3; +}; + /* Skip updating fd indexes set to this value in the fd table */ #define IORING_REGISTER_FILES_SKIP (-2) @@ -480,7 +561,7 @@ struct io_uring_probe { __u8 ops_len; /* length of ops[] array below */ __u16 resv; __u32 resv2[3]; - struct io_uring_probe_op ops[0]; + struct io_uring_probe_op ops[]; }; struct io_uring_restriction { @@ -552,4 +633,32 @@ struct io_uring_getevents_arg { __u64 ts; }; +/* + * Argument for IORING_REGISTER_SYNC_CANCEL + */ +struct io_uring_sync_cancel_reg { + __u64 addr; + __s32 fd; + __u32 flags; + struct __kernel_timespec timeout; + __u64 pad[4]; +}; + +/* + * Argument for IORING_REGISTER_FILE_ALLOC_RANGE + * The range is specified as [off, off + len) + */ +struct io_uring_file_index_range { + __u32 off; + __u32 len; + __u64 resv; +}; + +struct io_uring_recvmsg_out { + __u32 namelen; + __u32 controllen; + __u32 payloadlen; + __u32 flags; +}; + #endif diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 5088bd9f1922..860f867c50c0 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -2083,7 +2083,8 @@ struct kvm_stats_header { #define KVM_STATS_UNIT_BYTES (0x1 << KVM_STATS_UNIT_SHIFT) #define KVM_STATS_UNIT_SECONDS (0x2 << KVM_STATS_UNIT_SHIFT) #define KVM_STATS_UNIT_CYCLES (0x3 << KVM_STATS_UNIT_SHIFT) -#define KVM_STATS_UNIT_MAX KVM_STATS_UNIT_CYCLES +#define KVM_STATS_UNIT_BOOLEAN (0x4 << KVM_STATS_UNIT_SHIFT) +#define KVM_STATS_UNIT_MAX KVM_STATS_UNIT_BOOLEAN #define KVM_STATS_BASE_SHIFT 8 #define KVM_STATS_BASE_MASK (0xF << KVM_STATS_BASE_SHIFT) diff --git a/include/uapi/linux/mptcp.h b/include/uapi/linux/mptcp.h index 921963589904..dfe19bf13f4c 100644 --- 
a/include/uapi/linux/mptcp.h +++ b/include/uapi/linux/mptcp.h @@ -2,16 +2,17 @@ #ifndef _UAPI_MPTCP_H #define _UAPI_MPTCP_H +#ifndef __KERNEL__ +#include <netinet/in.h> /* for sockaddr_in and sockaddr_in6 */ +#include <sys/socket.h> /* for struct sockaddr */ +#endif + #include <linux/const.h> #include <linux/types.h> #include <linux/in.h> /* for sockaddr_in */ #include <linux/in6.h> /* for sockaddr_in6 */ #include <linux/socket.h> /* for sockaddr_storage and sa_family */ -#ifndef __KERNEL__ -#include <sys/socket.h> /* for struct sockaddr */ -#endif - #define MPTCP_SUBFLOW_FLAG_MCAP_REM _BITUL(0) #define MPTCP_SUBFLOW_FLAG_MCAP_LOC _BITUL(1) #define MPTCP_SUBFLOW_FLAG_JOIN_REM _BITUL(2) diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index d37629dbad72..0474ee362151 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -301,6 +301,7 @@ enum { * { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED * { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING * { u64 id; } && PERF_FORMAT_ID + * { u64 lost; } && PERF_FORMAT_LOST * } && !PERF_FORMAT_GROUP * * { u64 nr; @@ -308,6 +309,7 @@ enum { * { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING * { u64 value; * { u64 id; } && PERF_FORMAT_ID + * { u64 lost; } && PERF_FORMAT_LOST * } cntr[nr]; * } && PERF_FORMAT_GROUP * }; @@ -317,8 +319,9 @@ enum perf_event_read_format { PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1, PERF_FORMAT_ID = 1U << 2, PERF_FORMAT_GROUP = 1U << 3, + PERF_FORMAT_LOST = 1U << 4, - PERF_FORMAT_MAX = 1U << 4, /* non-ABI */ + PERF_FORMAT_MAX = 1U << 5, /* non-ABI */ }; #define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */ diff --git a/include/uapi/linux/tty.h b/include/uapi/linux/tty.h index 9d0f06bfbac3..68aeae2addec 100644 --- a/include/uapi/linux/tty.h +++ b/include/uapi/linux/tty.h @@ -38,8 +38,9 @@ #define N_NULL 27 /* Null ldisc used for error handling */ #define N_MCTP 28 /* MCTP-over-serial */ #define N_DEVELOPMENT 29 /* Manual out-of-tree testing */ +#define N_CAN327 30 /* ELM327 based OBD-II interfaces */ /* Always the newest line discipline + 1 */ -#define NR_LDISCS 30 +#define NR_LDISCS 31 #endif /* _UAPI_LINUX_TTY_H */ diff --git a/include/video/of_display_timing.h b/include/video/of_display_timing.h index e1126a74882a..eff166fdd81b 100644 --- a/include/video/of_display_timing.h +++ b/include/video/of_display_timing.h @@ -8,6 +8,8 @@ #ifndef __LINUX_OF_DISPLAY_TIMING_H #define __LINUX_OF_DISPLAY_TIMING_H +#include <linux/errno.h> + struct device_node; struct display_timing; struct display_timings; |
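
The two io_uring tracepoints added above, io_uring_task_work_run and io_uring_short_write, carry only plain counters/values and no request pointer. A minimal kernel-side sketch of emitting them; only the trace_*() signatures come from the header, the wrapper functions and variable names are illustrative:

#include <linux/types.h>
#include <trace/events/io_uring.h>

/* Illustration only: report a completed task-work batch. */
static void note_task_work_run(void *tctx, unsigned int count, unsigned int loops)
{
	trace_io_uring_task_work_run(tctx, count, loops);
}

/* Illustration only: report a write that returned fewer bytes than asked for. */
static void note_write_result(void *ring_ctx, u64 fpos, u64 wanted, u64 got)
{
	if (got < wanted)
		trace_io_uring_short_write(ring_ctx, fpos, wanted, got);
}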
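
The kmem_alloc and kmem_alloc_node event classes now take the struct kmem_cache pointer so the new "accounted" field can be computed. The same condition, restated as a standalone helper (the helper name is an assumption; the logic is taken from the TP_fast_assign above):

#include <linux/slab.h>

/*
 * An allocation is memcg-charged only when CONFIG_MEMCG_KMEM is built in
 * and either the caller passed __GFP_ACCOUNT or the cache was created with
 * SLAB_ACCOUNT.  "s" may be NULL on plain kmalloc() paths, hence the check.
 */
static inline bool alloc_is_accounted(struct kmem_cache *s, gfp_t gfp_flags)
{
	if (!IS_ENABLED(CONFIG_MEMCG_KMEM))
		return false;
	return (gfp_flags & __GFP_ACCOUNT) || (s && (s->flags & SLAB_ACCOUNT));
}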
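
events/power.h gains guest_halt_poll_ns plus grow/shrink wrapper macros. A sketch of the intended call pattern in an adaptive halt-polling loop; the update function itself is hypothetical:

#include <trace/events/power.h>

static void set_guest_poll_ns(unsigned int *poll_ns, unsigned int new)
{
	unsigned int old = *poll_ns;

	*poll_ns = new;
	if (new > old)
		trace_guest_halt_poll_ns_grow(new, old);
	else if (new < old)
		trace_guest_halt_poll_ns_shrink(new, old);
}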
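
fanotify adds FAN_MARK_IGNORE and the FAN_MARK_IGNORE_SURV shorthand. A hedged userspace sketch of placing an ignore mark on a mount; it assumes headers and a kernel that already carry this change (older kernels reject the flag with EINVAL), and a regular event mark elsewhere would still be needed to actually receive events:

#include <fcntl.h>
#include <stdio.h>
#include <sys/fanotify.h>

int main(void)
{
	int fd = fanotify_init(FAN_CLASS_NOTIF, O_RDONLY);

	if (fd < 0) {
		perror("fanotify_init");
		return 1;
	}

	/*
	 * Ignore FAN_OPEN for the whole mount containing /tmp.  The header
	 * requires FAN_MARK_IGNORED_SURV_MODIFY for non-inode marks, which is
	 * exactly what FAN_MARK_IGNORE_SURV bundles in.
	 */
	if (fanotify_mark(fd, FAN_MARK_ADD | FAN_MARK_MOUNT | FAN_MARK_IGNORE_SURV,
			  FAN_OPEN, AT_FDCWD, "/tmp") < 0)
		perror("fanotify_mark");
	return 0;
}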
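
IORING_SETUP_SINGLE_ISSUER tells the kernel that only the creating task will ever submit requests. A raw-syscall sketch that requests the flag and falls back when the running kernel does not know it (liburing users would pass the same bit through io_uring_queue_init_params()):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/io_uring.h>

static int ring_setup(unsigned int entries, struct io_uring_params *p)
{
	return (int)syscall(__NR_io_uring_setup, entries, p);
}

int main(void)
{
	struct io_uring_params p;
	int fd;

	memset(&p, 0, sizeof(p));
	p.flags = IORING_SETUP_SINGLE_ISSUER;	/* only this task submits */

	fd = ring_setup(8, &p);
	if (fd < 0 && errno == EINVAL) {
		/* Older kernel: retry without the new flag. */
		memset(&p, 0, sizeof(p));
		fd = ring_setup(8, &p);
	}
	if (fd < 0) {
		perror("io_uring_setup");
		return 1;
	}
	close(fd);
	return 0;
}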
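
KVM's binary stats interface gains KVM_STATS_UNIT_BOOLEAN. A decoding sketch for the unit nibble of a stats descriptor's flags word; it assumes the usual KVM_STATS_UNIT_MASK and KVM_STATS_UNIT_NONE definitions from the same header:

#include <linux/kvm.h>

static const char *kvm_stat_unit_name(__u32 flags)
{
	switch (flags & KVM_STATS_UNIT_MASK) {
	case KVM_STATS_UNIT_NONE:	return "none";
	case KVM_STATS_UNIT_BYTES:	return "bytes";
	case KVM_STATS_UNIT_SECONDS:	return "seconds";
	case KVM_STATS_UNIT_CYCLES:	return "cycles";
	case KVM_STATS_UNIT_BOOLEAN:	return "boolean";	/* new in this change */
	default:			return "unknown";
	}
}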
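
PERF_FORMAT_LOST extends the read format documented above. For a non-group event opened with read_format = PERF_FORMAT_ID | PERF_FORMAT_LOST, the layout comment gives value, id, lost in that order; a matching struct for the read() buffer (the struct name is illustrative):

#include <linux/perf_event.h>
#include <linux/types.h>

struct read_format_id_lost {
	__u64 value;	/* event count */
	__u64 id;	/* present because of PERF_FORMAT_ID */
	__u64 lost;	/* present because of PERF_FORMAT_LOST */
};

/* Usage: read(event_fd, &buf, sizeof(struct read_format_id_lost)); */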