Diffstat (limited to 'include/linux')
281 files changed, 5807 insertions, 2150 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 28c3fb2bef0d..e93059f71c71 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -24,6 +24,7 @@ struct irq_domain_ops; #define _LINUX #endif #include <acpi/acpi.h> +#include <acpi/acpi_numa.h> #ifdef CONFIG_ACPI @@ -35,7 +36,6 @@ struct irq_domain_ops; #include <acpi/acpi_bus.h> #include <acpi/acpi_drivers.h> -#include <acpi/acpi_numa.h> #include <acpi/acpi_io.h> #include <asm/acpi.h> @@ -237,11 +237,6 @@ acpi_table_parse_cedt(enum acpi_cedt_type id, int acpi_parse_mcfg (struct acpi_table_header *header); void acpi_table_print_madt_entry (struct acpi_subtable_header *madt); -static inline bool acpi_gicc_is_usable(struct acpi_madt_generic_interrupt *gicc) -{ - return gicc->flags & ACPI_MADT_ENABLED; -} - #if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH) void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa); #else @@ -304,6 +299,8 @@ int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id, int acpi_unmap_cpu(int cpu); #endif /* CONFIG_ACPI_HOTPLUG_CPU */ +acpi_handle acpi_get_processor_handle(int cpu); + #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC int acpi_get_ioapic_id(acpi_handle handle, u32 gsi_base, u64 *phys_addr); #endif @@ -576,6 +573,7 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context); #define OSC_SB_CPC_FLEXIBLE_ADR_SPACE 0x00004000 #define OSC_SB_GENERIC_INITIATOR_SUPPORT 0x00020000 #define OSC_SB_NATIVE_USB4_SUPPORT 0x00040000 +#define OSC_SB_BATTERY_CHARGE_LIMITING_SUPPORT 0x00080000 #define OSC_SB_PRM_SUPPORT 0x00200000 #define OSC_SB_FFH_OPR_SUPPORT 0x00400000 @@ -761,6 +759,7 @@ static inline u64 acpi_arch_get_root_pointer(void) } #endif +int acpi_get_local_u64_address(acpi_handle handle, u64 *addr); int acpi_get_local_address(acpi_handle handle, u32 *addr); const char *acpi_get_subsystem_id(acpi_handle handle); @@ -777,8 +776,6 @@ const char *acpi_get_subsystem_id(acpi_handle handle); #define acpi_dev_uid_match(adev, uid2) (adev && false) #define acpi_dev_hid_uid_match(adev, hid2, uid2) (adev && false) -#include <acpi/acpi_numa.h> - struct fwnode_handle; static inline bool acpi_dev_found(const char *hid) @@ -1076,6 +1073,11 @@ static inline bool acpi_sleep_state_supported(u8 sleep_state) return false; } +static inline acpi_handle acpi_get_processor_handle(int cpu) +{ + return NULL; +} + #endif /* !CONFIG_ACPI */ extern void arch_post_acpi_subsys_init(void); diff --git a/include/linux/alloc_tag.h b/include/linux/alloc_tag.h index abd24016a900..8c61ccd161ba 100644 --- a/include/linux/alloc_tag.h +++ b/include/linux/alloc_tag.h @@ -122,7 +122,7 @@ static inline void alloc_tag_add_check(union codetag_ref *ref, struct alloc_tag "alloc_tag was not cleared (got tag for %s:%u)\n", ref->ct->filename, ref->ct->lineno); - WARN_ONCE(!tag, "current->alloc_tag not set"); + WARN_ONCE(!tag, "current->alloc_tag not set\n"); } static inline void alloc_tag_sub_check(union codetag_ref *ref) diff --git a/include/linux/arm_ffa.h b/include/linux/arm_ffa.h index c82d56768101..c6d18f50f671 100644 --- a/include/linux/arm_ffa.h +++ b/include/linux/arm_ffa.h @@ -212,6 +212,9 @@ bool ffa_device_is_valid(struct ffa_device *ffa_dev) { return false; } extern const struct bus_type ffa_bus_type; +/* The FF-A 1.0 partition structure lacks the uuid[4] */ +#define FFA_1_0_PARTITON_INFO_SZ (8) + /* FFA transport related */ struct ffa_partition_info { u16 id; diff --git a/include/linux/auxiliary_bus.h b/include/linux/auxiliary_bus.h index de21d9d24a95..1539bbd263d2 100644 --- 
a/include/linux/auxiliary_bus.h +++ b/include/linux/auxiliary_bus.h @@ -58,6 +58,10 @@ * in * @name: Match name found by the auxiliary device driver, * @id: unique identitier if multiple devices of the same name are exported, + * @sysfs: embedded struct which hold all sysfs related fields, + * @sysfs.irqs: irqs xarray contains irq indices which are used by the device, + * @sysfs.lock: Synchronize irq sysfs creation, + * @sysfs.irq_dir_exists: whether "irqs" directory exists, * * An auxiliary_device represents a part of its parent device's functionality. * It is given a name that, combined with the registering drivers @@ -139,6 +143,11 @@ struct auxiliary_device { struct device dev; const char *name; u32 id; + struct { + struct xarray irqs; + struct mutex lock; /* Synchronize irq sysfs creation */ + bool irq_dir_exists; + } sysfs; }; /** @@ -212,8 +221,24 @@ int auxiliary_device_init(struct auxiliary_device *auxdev); int __auxiliary_device_add(struct auxiliary_device *auxdev, const char *modname); #define auxiliary_device_add(auxdev) __auxiliary_device_add(auxdev, KBUILD_MODNAME) +#ifdef CONFIG_SYSFS +int auxiliary_device_sysfs_irq_add(struct auxiliary_device *auxdev, int irq); +void auxiliary_device_sysfs_irq_remove(struct auxiliary_device *auxdev, + int irq); +#else /* CONFIG_SYSFS */ +static inline int +auxiliary_device_sysfs_irq_add(struct auxiliary_device *auxdev, int irq) +{ + return 0; +} + +static inline void +auxiliary_device_sysfs_irq_remove(struct auxiliary_device *auxdev, int irq) {} +#endif + static inline void auxiliary_device_uninit(struct auxiliary_device *auxdev) { + mutex_destroy(&auxdev->sysfs.lock); put_device(&auxdev->dev); } diff --git a/include/linux/backlight.h b/include/linux/backlight.h index 19a1c0e22629..ea9c1bc8148e 100644 --- a/include/linux/backlight.h +++ b/include/linux/backlight.h @@ -209,15 +209,19 @@ struct backlight_properties { * attribute: /sys/class/backlight/<backlight>/bl_power * When the power property is updated update_status() is called. * - * The possible values are: (0: full on, 1 to 3: power saving - * modes; 4: full off), see FB_BLANK_XXX. + * The possible values are: (0: full on, 4: full off), see + * BACKLIGHT_POWER constants. * - * When the backlight device is enabled @power is set - * to FB_BLANK_UNBLANK. When the backlight device is disabled - * @power is set to FB_BLANK_POWERDOWN. + * When the backlight device is enabled, @power is set to + * BACKLIGHT_POWER_ON. When the backlight device is disabled, + * @power is set to BACKLIGHT_POWER_OFF. */ int power; +#define BACKLIGHT_POWER_ON (0) +#define BACKLIGHT_POWER_OFF (4) +#define BACKLIGHT_POWER_REDUCED (1) // deprecated; don't use in new code + /** * @type: The type of backlight supported. 
* @@ -346,7 +350,7 @@ static inline int backlight_enable(struct backlight_device *bd) if (!bd) return 0; - bd->props.power = FB_BLANK_UNBLANK; + bd->props.power = BACKLIGHT_POWER_ON; bd->props.state &= ~BL_CORE_FBBLANK; return backlight_update_status(bd); @@ -361,7 +365,7 @@ static inline int backlight_disable(struct backlight_device *bd) if (!bd) return 0; - bd->props.power = FB_BLANK_POWERDOWN; + bd->props.power = BACKLIGHT_POWER_OFF; bd->props.state |= BL_CORE_FBBLANK; return backlight_update_status(bd); @@ -380,7 +384,7 @@ static inline int backlight_disable(struct backlight_device *bd) */ static inline bool backlight_is_blank(const struct backlight_device *bd) { - return bd->props.power != FB_BLANK_UNBLANK || + return bd->props.power != BACKLIGHT_POWER_ON || bd->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK); } diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index 70f97f685bff..e6c00e860951 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -19,13 +19,13 @@ struct linux_binprm { #ifdef CONFIG_MMU struct vm_area_struct *vma; unsigned long vma_pages; + unsigned long argmin; /* rlimit marker for copy_strings() */ #else # define MAX_ARG_PAGES 32 struct page *page[MAX_ARG_PAGES]; #endif struct mm_struct *mm; unsigned long p; /* current top of mem */ - unsigned long argmin; /* rlimit marker for copy_strings() */ unsigned int /* Should an execfd be passed to userspace? */ have_execfd:1, diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 4d0d4b83bc74..b8196e219ac2 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -268,6 +268,7 @@ static inline dev_t disk_devt(struct gendisk *disk) return MKDEV(disk->major, disk->first_minor); } +/* blk_validate_limits() validates bsize, so drivers don't usually need to */ static inline int blk_validate_block_size(unsigned long bsize) { if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize)) @@ -318,9 +319,6 @@ typedef unsigned int __bitwise blk_features_t; /* is a zoned device */ #define BLK_FEAT_ZONED ((__force blk_features_t)(1u << 10)) -/* supports Zone Reset All */ -#define BLK_FEAT_ZONE_RESETALL ((__force blk_features_t)(1u << 11)) - /* supports PCI(e) p2p requests */ #define BLK_FEAT_PCI_P2PDMA ((__force blk_features_t)(1u << 12)) @@ -618,8 +616,6 @@ void blk_queue_flag_clear(unsigned int flag, struct request_queue *q); test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags) #define blk_queue_nonrot(q) (!((q)->limits.features & BLK_FEAT_ROTATIONAL)) #define blk_queue_io_stat(q) ((q)->limits.features & BLK_FEAT_IO_STAT) -#define blk_queue_zone_resetall(q) \ - ((q)->limits.features & BLK_FEAT_ZONE_RESETALL) #define blk_queue_dax(q) ((q)->limits.features & BLK_FEAT_DAX) #define blk_queue_pci_p2pdma(q) ((q)->limits.features & BLK_FEAT_PCI_P2PDMA) #ifdef CONFIG_BLK_RQ_ALLOC_TIME @@ -1100,6 +1096,7 @@ int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector, #define BLKDEV_ZERO_NOUNMAP (1 << 0) /* do not free blocks */ #define BLKDEV_ZERO_NOFALLBACK (1 << 1) /* don't write explicit zeroes */ +#define BLKDEV_ZERO_KILLABLE (1 << 2) /* interruptible by fatal signals */ extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, struct bio **biop, @@ -1620,8 +1617,7 @@ int sync_blockdev(struct block_device *bdev); int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend); int sync_blockdev_nowait(struct block_device *bdev); void sync_bdevs(bool wait); -void bdev_statx(struct inode *backing_inode, 
struct kstat *stat, - u32 request_mask); +void bdev_statx(struct path *, struct kstat *, u32); void printk_all_partitions(void); int __init early_lookup_bdev(const char *pathname, dev_t *dev); #else @@ -1639,7 +1635,7 @@ static inline int sync_blockdev_nowait(struct block_device *bdev) static inline void sync_bdevs(bool wait) { } -static inline void bdev_statx(struct inode *backing_inode, struct kstat *stat, +static inline void bdev_statx(struct path *path, struct kstat *stat, u32 request_mask) { } diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 5e694a308081..3b94ec161e8c 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -275,7 +275,7 @@ struct bpf_map { u32 btf_value_type_id; u32 btf_vmlinux_value_type_id; struct btf *btf; -#ifdef CONFIG_MEMCG_KMEM +#ifdef CONFIG_MEMCG struct obj_cgroup *objcg; #endif char name[BPF_OBJ_NAME_LEN]; @@ -1612,6 +1612,7 @@ struct bpf_link_ops { struct bpf_link_info *info); int (*update_map)(struct bpf_link *link, struct bpf_map *new_map, struct bpf_map *old_map); + __poll_t (*poll)(struct file *file, struct poll_table_struct *pts); }; struct bpf_tramp_link { @@ -1730,9 +1731,9 @@ struct bpf_struct_ops { int (*init_member)(const struct btf_type *t, const struct btf_member *member, void *kdata, const void *udata); - int (*reg)(void *kdata); - void (*unreg)(void *kdata); - int (*update)(void *kdata, void *old_kdata); + int (*reg)(void *kdata, struct bpf_link *link); + void (*unreg)(void *kdata, struct bpf_link *link); + int (*update)(void *kdata, void *old_kdata, struct bpf_link *link); int (*validate)(void *kdata); void *cfi_stubs; struct module *owner; @@ -2252,7 +2253,7 @@ struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id); int bpf_map_alloc_pages(const struct bpf_map *map, gfp_t gfp, int nid, unsigned long nr_pages, struct page **page_array); -#ifdef CONFIG_MEMCG_KMEM +#ifdef CONFIG_MEMCG void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags, int node); void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags); @@ -2261,6 +2262,10 @@ void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size, void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align, gfp_t flags); #else +/* + * These specialized allocators have to be macros for their allocations to be + * accounted separately (to have separate alloc_tag). 
+ */ #define bpf_map_kmalloc_node(_map, _size, _flags, _node) \ kmalloc_node(_size, _flags, _node) #define bpf_map_kzalloc(_map, _size, _flags) \ @@ -2333,6 +2338,7 @@ int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer); int bpf_link_settle(struct bpf_link_primer *primer); void bpf_link_cleanup(struct bpf_link_primer *primer); void bpf_link_inc(struct bpf_link *link); +struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link); void bpf_link_put(struct bpf_link *link); int bpf_link_new_fd(struct bpf_link *link); struct bpf_link *bpf_link_get_from_fd(u32 ufd); @@ -2492,7 +2498,7 @@ struct sk_buff; struct bpf_dtab_netdev; struct bpf_cpu_map_entry; -void __dev_flush(void); +void __dev_flush(struct list_head *flush_list); int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf, struct net_device *dev_rx); int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf, @@ -2505,7 +2511,7 @@ int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb, struct bpf_prog *xdp_prog, struct bpf_map *map, bool exclude_ingress); -void __cpu_map_flush(void); +void __cpu_map_flush(struct list_head *flush_list); int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf, struct net_device *dev_rx); int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu, @@ -2642,8 +2648,6 @@ void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data, void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr); void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr); -bool dev_check_flush(void); -bool cpu_map_check_flush(void); #else /* !CONFIG_BPF_SYSCALL */ static inline struct bpf_prog *bpf_prog_get(u32 ufd) { @@ -2704,6 +2708,11 @@ static inline void bpf_link_inc(struct bpf_link *link) { } +static inline struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link) +{ + return NULL; +} + static inline void bpf_link_put(struct bpf_link *link) { } @@ -2731,7 +2740,7 @@ static inline struct bpf_token *bpf_token_get_from_fd(u32 ufd) return ERR_PTR(-EOPNOTSUPP); } -static inline void __dev_flush(void) +static inline void __dev_flush(struct list_head *flush_list) { } @@ -2777,7 +2786,7 @@ int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb, return 0; } -static inline void __cpu_map_flush(void) +static inline void __cpu_map_flush(struct list_head *flush_list) { } @@ -2926,8 +2935,7 @@ bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr) return ret; } -void __bpf_free_used_btfs(struct bpf_prog_aux *aux, - struct btf_mod_pair *used_btfs, u32 len); +void __bpf_free_used_btfs(struct btf_mod_pair *used_btfs, u32 len); static inline struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type) @@ -3258,8 +3266,8 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, struct bpf_insn *insn_buf, struct bpf_prog *prog, u32 *target_size); -int bpf_dynptr_from_skb_rdonly(struct sk_buff *skb, u64 flags, - struct bpf_dynptr_kern *ptr); +int bpf_dynptr_from_skb_rdonly(struct __sk_buff *skb, u64 flags, + struct bpf_dynptr *ptr); #else static inline bool bpf_sock_common_is_valid_access(int off, int size, enum bpf_access_type type, @@ -3281,8 +3289,8 @@ static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, { return 0; } -static inline int bpf_dynptr_from_skb_rdonly(struct sk_buff *skb, u64 flags, - struct bpf_dynptr_kern *ptr) +static inline int bpf_dynptr_from_skb_rdonly(struct __sk_buff *skb, u64 flags, + struct bpf_dynptr *ptr) { return -EOPNOTSUPP; } diff --git a/include/linux/bpf_verifier.h 
b/include/linux/bpf_verifier.h index e4070fb02b11..6503c85b10a3 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -73,7 +73,10 @@ enum bpf_iter_state { struct bpf_reg_state { /* Ordering of fields matters. See states_equal() */ enum bpf_reg_type type; - /* Fixed part of pointer offset, pointer types only */ + /* + * Fixed part of pointer offset, pointer types only. + * Or constant delta between "linked" scalars with the same ID. + */ s32 off; union { /* valid when type == PTR_TO_PACKET */ @@ -167,6 +170,13 @@ struct bpf_reg_state { * Similarly to dynptrs, we use ID to track "belonging" of a reference * to a specific instance of bpf_iter. */ + /* + * Upper bit of ID is used to remember relationship between "linked" + * registers. Example: + * r1 = r2; both will have r1->id == r2->id == N + * r1 += 10; r1->id == N | BPF_ADD_CONST and r1->off == 10 + */ +#define BPF_ADD_CONST (1U << 31) u32 id; /* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned * from a pointer-cast helper, bpf_sk_fullsock() and @@ -846,7 +856,7 @@ static inline u32 type_flag(u32 type) /* only use after check_attach_btf_id() */ static inline enum bpf_prog_type resolve_prog_type(const struct bpf_prog *prog) { - return prog->type == BPF_PROG_TYPE_EXT ? + return (prog->type == BPF_PROG_TYPE_EXT && prog->aux->dst_prog) ? prog->aux->dst_prog->type : prog->type; } diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index 1394ba302367..028b3e00378e 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h @@ -271,12 +271,100 @@ #define BCM5482_SSD_SGMII_SLAVE_EN 0x0002 /* Slave mode enable */ #define BCM5482_SSD_SGMII_SLAVE_AD 0x0001 /* Slave auto-detection */ +/* BroadR-Reach LRE Registers. */ +#define MII_BCM54XX_LRECR 0x00 /* LRE Control Register */ +#define MII_BCM54XX_LRESR 0x01 /* LRE Status Register */ +#define MII_BCM54XX_LREPHYSID1 0x02 /* LRE PHYS ID 1 */ +#define MII_BCM54XX_LREPHYSID2 0x03 /* LRE PHYS ID 2 */ +#define MII_BCM54XX_LREANAA 0x04 /* LDS Auto-Negotiation Advertised Ability */ +#define MII_BCM54XX_LREANAC 0x05 /* LDS Auto-Negotiation Advertised Control */ +#define MII_BCM54XX_LREANPT 0x06 /* LDS Ability Next Page Transmit */ +#define MII_BCM54XX_LRELPA 0x07 /* LDS Link Partner Ability */ +#define MII_BCM54XX_LRELPNPM 0x08 /* LDS Link Partner Next Page Message */ +#define MII_BCM54XX_LRELPNPC 0x09 /* LDS Link Partner Next Page Control */ +#define MII_BCM54XX_LRELDSE 0x0a /* LDS Expansion Register */ +#define MII_BCM54XX_LREES 0x0f /* LRE Extended Status */ + +/* LRE control register. */ +#define LRECR_RESET 0x8000 /* Reset to default state */ +#define LRECR_LOOPBACK 0x4000 /* Internal Loopback */ +#define LRECR_LDSRES 0x2000 /* Restart LDS Process */ +#define LRECR_LDSEN 0x1000 /* LDS Enable */ +#define LRECR_PDOWN 0x0800 /* Enable low power state */ +#define LRECR_ISOLATE 0x0400 /* Isolate data paths from MII */ +#define LRECR_SPEED100 0x0200 /* Select 100 Mbps */ +#define LRECR_SPEED10 0x0000 /* Select 10 Mbps */ +#define LRECR_4PAIRS 0x0020 /* Select 4 Pairs */ +#define LRECR_2PAIRS 0x0010 /* Select 2 Pairs */ +#define LRECR_1PAIR 0x0000 /* Select 1 Pair */ +#define LRECR_MASTER 0x0008 /* Force Master when LDS disabled */ +#define LRECR_SLAVE 0x0000 /* Force Slave when LDS disabled */ + +/* LRE status register. 
*/ +#define LRESR_100_1PAIR 0x2000 /* Can do 100Mbps 1 Pair */ +#define LRESR_100_4PAIR 0x1000 /* Can do 100Mbps 4 Pairs */ +#define LRESR_100_2PAIR 0x0800 /* Can do 100Mbps 2 Pairs */ +#define LRESR_10_2PAIR 0x0400 /* Can do 10Mbps 2 Pairs */ +#define LRESR_10_1PAIR 0x0200 /* Can do 10Mbps 1 Pair */ +#define LRESR_ESTATEN 0x0100 /* Extended Status in R15 */ +#define LRESR_RESV 0x0080 /* Unused... */ +#define LRESR_MFPS 0x0040 /* Can suppress Management Frames Preamble */ +#define LRESR_LDSCOMPLETE 0x0020 /* LDS Auto-negotiation complete */ +#define LRESR_8023 0x0010 /* Has IEEE 802.3 Support */ +#define LRESR_LDSABILITY 0x0008 /* LDS auto-negotiation capable */ +#define LRESR_LSTATUS 0x0004 /* Link status */ +#define LRESR_JCD 0x0002 /* Jabber detected */ +#define LRESR_ERCAP 0x0001 /* Ext-reg capability */ + +/* LDS Auto-Negotiation Advertised Ability. */ +#define LREANAA_PAUSE_ASYM 0x8000 /* Can pause asymmetrically */ +#define LREANAA_PAUSE 0x4000 /* Can pause */ +#define LREANAA_100_1PAIR 0x0020 /* Can do 100Mbps 1 Pair */ +#define LREANAA_100_4PAIR 0x0010 /* Can do 100Mbps 4 Pair */ +#define LREANAA_100_2PAIR 0x0008 /* Can do 100Mbps 2 Pair */ +#define LREANAA_10_2PAIR 0x0004 /* Can do 10Mbps 2 Pair */ +#define LREANAA_10_1PAIR 0x0002 /* Can do 10Mbps 1 Pair */ + +#define LRE_ADVERTISE_FULL (LREANAA_100_1PAIR | LREANAA_100_4PAIR | \ + LREANAA_100_2PAIR | LREANAA_10_2PAIR | \ + LREANAA_10_1PAIR) + +#define LRE_ADVERTISE_ALL LRE_ADVERTISE_FULL + +/* LDS Link Partner Ability. */ +#define LRELPA_PAUSE_ASYM 0x8000 /* Supports asymmetric pause */ +#define LRELPA_PAUSE 0x4000 /* Supports pause capability */ +#define LRELPA_100_1PAIR 0x0020 /* 100Mbps 1 Pair capable */ +#define LRELPA_100_4PAIR 0x0010 /* 100Mbps 4 Pair capable */ +#define LRELPA_100_2PAIR 0x0008 /* 100Mbps 2 Pair capable */ +#define LRELPA_10_2PAIR 0x0004 /* 10Mbps 2 Pair capable */ +#define LRELPA_10_1PAIR 0x0002 /* 10Mbps 1 Pair capable */ + +/* LDS Expansion register. 
*/ +#define LDSE_DOWNGRADE 0x8000 /* Can do LDS Speed Downgrade */ +#define LDSE_MASTER 0x4000 /* Master / Slave */ +#define LDSE_PAIRS_MASK 0x3000 /* Pair Count Mask */ +#define LDSE_PAIRS_SHIFT 12 +#define LDSE_4PAIRS (2 << LDSE_PAIRS_SHIFT) /* 4 Pairs Connection */ +#define LDSE_2PAIRS (1 << LDSE_PAIRS_SHIFT) /* 2 Pairs Connection */ +#define LDSE_1PAIR (0 << LDSE_PAIRS_SHIFT) /* 1 Pair Connection */ +#define LDSE_CABLEN_MASK 0x0FFF /* Cable Length Mask */ + /* BCM54810 Registers */ #define BCM54810_EXP_BROADREACH_LRE_MISC_CTL (MII_BCM54XX_EXP_SEL_ER + 0x90) #define BCM54810_EXP_BROADREACH_LRE_MISC_CTL_EN (1 << 0) #define BCM54810_SHD_CLK_CTL 0x3 #define BCM54810_SHD_CLK_CTL_GTXCLK_EN (1 << 9) +/* BCM54811 Registers */ +#define BCM54811_EXP_BROADREACH_LRE_OVERLAY_CTL (MII_BCM54XX_EXP_SEL_ER + 0x9A) +/* Access Control Override Enable */ +#define BCM54811_EXP_BROADREACH_LRE_OVERLAY_CTL_EN BIT(15) +/* Access Control Override Value */ +#define BCM54811_EXP_BROADREACH_LRE_OVERLAY_CTL_OVERRIDE_VAL BIT(14) +/* Access Control Value */ +#define BCM54811_EXP_BROADREACH_LRE_OVERLAY_CTL_VAL BIT(13) + /* BCM54612E Registers */ #define BCM54612E_EXP_SPARE0 (MII_BCM54XX_EXP_SEL_ETC + 0x34) #define BCM54612E_LED4_CLK125OUT_EN (1 << 1) diff --git a/include/linux/btf.h b/include/linux/btf.h index 7c3e40c3295e..cffb43133c68 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -140,6 +140,7 @@ extern const struct file_operations btf_fops; const char *btf_get_name(const struct btf *btf); void btf_get(struct btf *btf); void btf_put(struct btf *btf); +const struct btf_header *btf_header(const struct btf *btf); int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr, u32 uattr_sz); struct btf *btf_get_by_fd(int fd); int btf_get_info_by_fd(const struct btf *btf, @@ -212,8 +213,10 @@ int btf_get_fd_by_id(u32 id); u32 btf_obj_id(const struct btf *btf); bool btf_is_kernel(const struct btf *btf); bool btf_is_module(const struct btf *btf); +bool btf_is_vmlinux(const struct btf *btf); struct module *btf_try_get_module(const struct btf *btf); u32 btf_nr_types(const struct btf *btf); +struct btf *btf_base_btf(const struct btf *btf); bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s, const struct btf_member *m, u32 expected_offset, u32 expected_size); @@ -339,6 +342,11 @@ static inline u8 btf_int_offset(const struct btf_type *t) return BTF_INT_OFFSET(*(u32 *)(t + 1)); } +static inline __u8 btf_int_bits(const struct btf_type *t) +{ + return BTF_INT_BITS(*(__u32 *)(t + 1)); +} + static inline bool btf_type_is_scalar(const struct btf_type *t) { return btf_type_is_int(t) || btf_type_is_enum(t); @@ -478,6 +486,11 @@ static inline struct btf_param *btf_params(const struct btf_type *t) return (struct btf_param *)(t + 1); } +static inline struct btf_decl_tag *btf_decl_tag(const struct btf_type *t) +{ + return (struct btf_decl_tag *)(t + 1); +} + static inline int btf_id_cmp_func(const void *a, const void *b) { const int *pa = a, *pb = b; @@ -515,9 +528,38 @@ static inline const struct bpf_struct_ops_desc *bpf_struct_ops_find(struct btf * } #endif +enum btf_field_iter_kind { + BTF_FIELD_ITER_IDS, + BTF_FIELD_ITER_STRS, +}; + +struct btf_field_desc { + /* once-per-type offsets */ + int t_off_cnt, t_offs[2]; + /* member struct size, or zero, if no members */ + int m_sz; + /* repeated per-member offsets */ + int m_off_cnt, m_offs[1]; +}; + +struct btf_field_iter { + struct btf_field_desc desc; + void *p; + int m_idx; + int off_idx; + int vlen; +}; + #ifdef CONFIG_BPF_SYSCALL const struct btf_type 
*btf_type_by_id(const struct btf *btf, u32 type_id); +void btf_set_base_btf(struct btf *btf, const struct btf *base_btf); +int btf_relocate(struct btf *btf, const struct btf *base_btf, __u32 **map_ids); +int btf_field_iter_init(struct btf_field_iter *it, struct btf_type *t, + enum btf_field_iter_kind iter_kind); +__u32 *btf_field_iter_next(struct btf_field_iter *it); + const char *btf_name_by_offset(const struct btf *btf, u32 offset); +const char *btf_str_by_offset(const struct btf *btf, u32 offset); struct btf *btf_parse_vmlinux(void); struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog); u32 *btf_kfunc_id_set_contains(const struct btf *btf, u32 kfunc_btf_id, @@ -531,6 +573,7 @@ s32 btf_find_dtor_kfunc(struct btf *btf, u32 btf_id); int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors, u32 add_cnt, struct module *owner); struct btf_struct_meta *btf_find_struct_meta(const struct btf *btf, u32 btf_id); +bool btf_is_projection_of(const char *pname, const char *tname); bool btf_is_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf, const struct btf_type *t, enum bpf_prog_type prog_type, int arg); @@ -543,6 +586,28 @@ static inline const struct btf_type *btf_type_by_id(const struct btf *btf, { return NULL; } + +static inline void btf_set_base_btf(struct btf *btf, const struct btf *base_btf) +{ +} + +static inline int btf_relocate(void *log, struct btf *btf, const struct btf *base_btf, + __u32 **map_ids) +{ + return -EOPNOTSUPP; +} + +static inline int btf_field_iter_init(struct btf_field_iter *it, struct btf_type *t, + enum btf_field_iter_kind iter_kind) +{ + return -EOPNOTSUPP; +} + +static inline __u32 *btf_field_iter_next(struct btf_field_iter *it) +{ + return NULL; +} + static inline const char *btf_name_by_offset(const struct btf *btf, u32 offset) { diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index e022e40b099e..14acf1bbe0ce 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -53,7 +53,7 @@ typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate); * filesystem and block layers. Nowadays the basic I/O unit * is the bio, and buffer_heads are used for extracting block * mappings (via a get_block_t call), for tracking state within - * a page (via a page_mapping) and for wrapping bio submission + * a folio (via a folio_mapping) and for wrapping bio submission * for backward compatibility reasons (e.g. submit_bh). */ struct buffer_head { diff --git a/include/linux/bvec.h b/include/linux/bvec.h index bd1e361b351c..f41c7f0ef91e 100644 --- a/include/linux/bvec.h +++ b/include/linux/bvec.h @@ -280,4 +280,18 @@ static inline void *bvec_virt(struct bio_vec *bvec) return page_address(bvec->bv_page) + bvec->bv_offset; } +/** + * bvec_phys - return the physical address for a bvec + * @bvec: bvec to return the physical address for + */ +static inline phys_addr_t bvec_phys(const struct bio_vec *bvec) +{ + /* + * Note this open codes page_to_phys because page_to_phys is defined in + * <asm/io.h>, which we don't want to pull in here. If it ever moves to + * a sensible place we should start using it. 
+ */ + return PFN_PHYS(page_to_pfn(bvec->bv_page)) + bvec->bv_offset; +} + #endif /* __LINUX_BVEC_H */ diff --git a/include/linux/cache.h b/include/linux/cache.h index 0ecb17bb6883..ca2a05682a54 100644 --- a/include/linux/cache.h +++ b/include/linux/cache.h @@ -13,6 +13,32 @@ #define SMP_CACHE_BYTES L1_CACHE_BYTES #endif +/** + * SMP_CACHE_ALIGN - align a value to the L2 cacheline size + * @x: value to align + * + * On some architectures, L2 ("SMP") CL size is bigger than L1, and sometimes, + * this needs to be accounted. + * + * Return: aligned value. + */ +#ifndef SMP_CACHE_ALIGN +#define SMP_CACHE_ALIGN(x) ALIGN(x, SMP_CACHE_BYTES) +#endif + +/* + * ``__aligned_largest`` aligns a field to the value most optimal for the + * target architecture to perform memory operations. Get the actual value + * to be able to use it anywhere else. + */ +#ifndef __LARGEST_ALIGN +#define __LARGEST_ALIGN sizeof(struct { long x; } __aligned_largest) +#endif + +#ifndef LARGEST_ALIGN +#define LARGEST_ALIGN(x) ALIGN(x, __LARGEST_ALIGN) +#endif + /* * __read_mostly is used to keep rarely changing variables out of frequently * updated cachelines. Its use should be reserved for data that is used @@ -95,6 +121,39 @@ __u8 __cacheline_group_end__##GROUP[0] #endif +/** + * __cacheline_group_begin_aligned - declare an aligned group start + * @GROUP: name of the group + * @...: optional group alignment + * + * The following block inside a struct: + * + * __cacheline_group_begin_aligned(grp); + * field a; + * field b; + * __cacheline_group_end_aligned(grp); + * + * will always be aligned to either the specified alignment or + * ``SMP_CACHE_BYTES``. + */ +#define __cacheline_group_begin_aligned(GROUP, ...) \ + __cacheline_group_begin(GROUP) \ + __aligned((__VA_ARGS__ + 0) ? : SMP_CACHE_BYTES) + +/** + * __cacheline_group_end_aligned - declare an aligned group end + * @GROUP: name of the group + * @...: optional alignment (same as was in __cacheline_group_begin_aligned()) + * + * Note that the end marker is aligned to sizeof(long) to allow more precise + * size assertion. It also declares a padding at the end to avoid next field + * falling into this cacheline. + */ +#define __cacheline_group_end_aligned(GROUP, ...) \ + __cacheline_group_end(GROUP) __aligned(sizeof(long)); \ + struct { } __cacheline_group_pad__##GROUP \ + __aligned((__VA_ARGS__ + 0) ? : SMP_CACHE_BYTES) + #ifndef CACHELINE_ASSERT_GROUP_MEMBER #define CACHELINE_ASSERT_GROUP_MEMBER(TYPE, GROUP, MEMBER) \ BUILD_BUG_ON(!(offsetof(TYPE, MEMBER) >= \ diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h index 2cb15fe4fe12..108060612bb8 100644 --- a/include/linux/cacheinfo.h +++ b/include/linux/cacheinfo.h @@ -3,7 +3,8 @@ #define _LINUX_CACHEINFO_H #include <linux/bitops.h> -#include <linux/cpumask.h> +#include <linux/cpuhplock.h> +#include <linux/cpumask_types.h> #include <linux/smp.h> struct device_node; @@ -113,23 +114,37 @@ int acpi_get_cache_info(unsigned int cpu, const struct attribute_group *cache_get_priv_group(struct cacheinfo *this_leaf); /* - * Get the id of the cache associated with @cpu at level @level. + * Get the cacheinfo structure for the cache associated with @cpu at + * level @level. * cpuhp lock must be held. 
*/ -static inline int get_cpu_cacheinfo_id(int cpu, int level) +static inline struct cacheinfo *get_cpu_cacheinfo_level(int cpu, int level) { struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu); int i; + lockdep_assert_cpus_held(); + for (i = 0; i < ci->num_leaves; i++) { if (ci->info_list[i].level == level) { if (ci->info_list[i].attributes & CACHE_ID) - return ci->info_list[i].id; - return -1; + return &ci->info_list[i]; + return NULL; } } - return -1; + return NULL; +} + +/* + * Get the id of the cache associated with @cpu at level @level. + * cpuhp lock must be held. + */ +static inline int get_cpu_cacheinfo_id(int cpu, int level) +{ + struct cacheinfo *ci = get_cpu_cacheinfo_level(cpu, level); + + return ci ? ci->id : -1; } #ifdef CONFIG_ARM64 diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index 1b92aed49363..23492213ea35 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h @@ -186,7 +186,7 @@ void close_candev(struct net_device *dev); int can_change_mtu(struct net_device *dev, int new_mtu); int can_eth_ioctl_hwts(struct net_device *netdev, struct ifreq *ifr, int cmd); int can_ethtool_op_get_ts_info_hwts(struct net_device *dev, - struct ethtool_ts_info *info); + struct kernel_ethtool_ts_info *info); int register_candev(struct net_device *dev); void unregister_candev(struct net_device *dev); diff --git a/include/linux/cc_platform.h b/include/linux/cc_platform.h index 60693a145894..caa4b4430634 100644 --- a/include/linux/cc_platform.h +++ b/include/linux/cc_platform.h @@ -82,16 +82,6 @@ enum cc_attr { CC_ATTR_GUEST_SEV_SNP, /** - * @CC_ATTR_HOTPLUG_DISABLED: Hotplug is not supported or disabled. - * - * The platform/OS is running as a guest/virtual machine does not - * support CPU hotplug feature. - * - * Examples include TDX Guest. - */ - CC_ATTR_HOTPLUG_DISABLED, - - /** * @CC_ATTR_HOST_SEV_SNP: AMD SNP enabled on the host. * * The host kernel is running with the necessary features diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index ea48c861cd36..293af7f8a694 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -119,7 +119,12 @@ enum { /* * Enable hugetlb accounting for the memory controller. */ - CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING = (1 << 19), + CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING = (1 << 19), + + /* + * Enable legacy local pids.events. 
+ */ + CGRP_ROOT_PIDS_LOCAL_EVENTS = (1 << 20), }; /* cftype->flags */ @@ -676,9 +681,7 @@ struct cftype { __poll_t (*poll)(struct kernfs_open_file *of, struct poll_table_struct *pt); -#ifdef CONFIG_DEBUG_LOCK_ALLOC struct lock_class_key lockdep_key; -#endif }; /* diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 2150ca60394b..c60ba0ab1462 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -10,7 +10,6 @@ */ #include <linux/sched.h> -#include <linux/cpumask.h> #include <linux/nodemask.h> #include <linux/rculist.h> #include <linux/cgroupstats.h> diff --git a/include/linux/cleanup.h b/include/linux/cleanup.h index c2d09bc4f976..d9e613803df1 100644 --- a/include/linux/cleanup.h +++ b/include/linux/cleanup.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __LINUX_GUARDS_H -#define __LINUX_GUARDS_H +#ifndef _LINUX_CLEANUP_H +#define _LINUX_CLEANUP_H #include <linux/compiler.h> @@ -63,17 +63,20 @@ #define __free(_name) __cleanup(__free_##_name) -#define __get_and_null_ptr(p) \ - ({ __auto_type __ptr = &(p); \ - __auto_type __val = *__ptr; \ - *__ptr = NULL; __val; }) +#define __get_and_null(p, nullvalue) \ + ({ \ + __auto_type __ptr = &(p); \ + __auto_type __val = *__ptr; \ + *__ptr = nullvalue; \ + __val; \ + }) static inline __must_check const volatile void * __must_check_fn(const volatile void *val) { return val; } #define no_free_ptr(p) \ - ((typeof(p)) __must_check_fn(__get_and_null_ptr(p))) + ((typeof(p)) __must_check_fn(__get_and_null(p, NULL))) #define return_ptr(p) return no_free_ptr(p) @@ -247,4 +250,4 @@ __DEFINE_LOCK_GUARD_0(_name, _lock) { return class_##_name##_lock_ptr(_T); } -#endif /* __LINUX_GUARDS_H */ +#endif /* _LINUX_CLEANUP_H */ diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h index 9aac31d856f3..b0df28ddd394 100644 --- a/include/linux/clockchips.h +++ b/include/linux/clockchips.h @@ -12,7 +12,7 @@ #ifdef CONFIG_GENERIC_CLOCKEVENTS # include <linux/clocksource.h> -# include <linux/cpumask.h> +# include <linux/cpumask_types.h> # include <linux/ktime.h> # include <linux/notifier.h> diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index 0ad8b550bb4b..d35b677b08fe 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -21,6 +21,7 @@ #include <asm/div64.h> #include <asm/io.h> +struct clocksource_base; struct clocksource; struct module; @@ -50,6 +51,7 @@ struct module; * multiplication * @name: Pointer to clocksource name * @list: List head for registration (internal) + * @freq_khz: Clocksource frequency in khz. * @rating: Rating value for selection (higher is better) * To avoid rating inflation the following * list should give you a guide as to how @@ -70,6 +72,8 @@ struct module; * validate the clocksource from which the snapshot was * taken. 
* @flags: Flags describing special properties + * @base: Hardware abstraction for clock on which a clocksource + * is based * @enable: Optional function to enable the clocksource * @disable: Optional function to disable the clocksource * @suspend: Optional suspend function for the clocksource @@ -107,10 +111,12 @@ struct clocksource { u64 max_cycles; const char *name; struct list_head list; + u32 freq_khz; int rating; enum clocksource_ids id; enum vdso_clock_mode vdso_clock_mode; unsigned long flags; + struct clocksource_base *base; int (*enable)(struct clocksource *cs); void (*disable)(struct clocksource *cs); @@ -306,4 +312,25 @@ static inline unsigned int clocksource_get_max_watchdog_retry(void) void clocksource_verify_percpu(struct clocksource *cs); +/** + * struct clocksource_base - hardware abstraction for clock on which a clocksource + * is based + * @id: Defaults to CSID_GENERIC. The id value is used for conversion + * functions which require that the current clocksource is based + * on a clocksource_base with a particular ID in certain snapshot + * functions to allow callers to validate the clocksource from + * which the snapshot was taken. + * @freq_khz: Nominal frequency of the base clock in kHz + * @offset: Offset between the base clock and the clocksource + * @numerator: Numerator of the clock ratio between base clock and the clocksource + * @denominator: Denominator of the clock ratio between base clock and the clocksource + */ +struct clocksource_base { + enum clocksource_ids id; + u32 freq_khz; + u64 offset; + u32 numerator; + u32 denominator; +}; + #endif /* _LINUX_CLOCKSOURCE_H */ diff --git a/include/linux/clocksource_ids.h b/include/linux/clocksource_ids.h index a4fa3436940c..2bb4d8c2f1b0 100644 --- a/include/linux/clocksource_ids.h +++ b/include/linux/clocksource_ids.h @@ -9,6 +9,7 @@ enum clocksource_ids { CSID_X86_TSC_EARLY, CSID_X86_TSC, CSID_X86_KVM_CLK, + CSID_X86_ART, CSID_MAX, }; diff --git a/include/linux/closure.h b/include/linux/closure.h index 59b8c06b11ff..2af44427107d 100644 --- a/include/linux/closure.h +++ b/include/linux/closure.h @@ -159,6 +159,7 @@ struct closure { #ifdef CONFIG_DEBUG_CLOSURES #define CLOSURE_MAGIC_DEAD 0xc054dead #define CLOSURE_MAGIC_ALIVE 0xc054a11e +#define CLOSURE_MAGIC_STACK 0xc05451cc unsigned int magic; struct list_head all; @@ -323,12 +324,18 @@ static inline void closure_init_stack(struct closure *cl) { memset(cl, 0, sizeof(struct closure)); atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER); +#ifdef CONFIG_DEBUG_CLOSURES + cl->magic = CLOSURE_MAGIC_STACK; +#endif } static inline void closure_init_stack_release(struct closure *cl) { memset(cl, 0, sizeof(struct closure)); atomic_set_release(&cl->remaining, CLOSURE_REMAINING_INITIALIZER); +#ifdef CONFIG_DEBUG_CLOSURES + cl->magic = CLOSURE_MAGIC_STACK; +#endif } /** diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index aff92b1d284f..f805adaa316e 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -64,26 +64,6 @@ __builtin_unreachable(); \ } while (0) -/* - * GCC 'asm goto' with outputs miscompiles certain code sequences: - * - * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=113921 - * - * Work around it via the same compiler barrier quirk that we used - * to use for the old 'asm goto' workaround. 
- * - * Also, always mark such 'asm goto' statements as volatile: all - * asm goto statements are supposed to be volatile as per the - * documentation, but some versions of gcc didn't actually do - * that for asms with outputs: - * - * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=98619 - */ -#ifdef CONFIG_GCC_ASM_GOTO_OUTPUT_WORKAROUND -#define asm_goto_output(x...) \ - do { asm volatile goto(x); asm (""); } while (0) -#endif - #if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP) #define __HAVE_BUILTIN_BSWAP32__ #define __HAVE_BUILTIN_BSWAP64__ diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 8c252e073bd8..2594553bb30b 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -194,16 +194,22 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, * This data_race() macro is useful for situations in which data races * should be forgiven. One example is diagnostic code that accesses * shared variables but is not a part of the core synchronization design. + * For example, if accesses to a given variable are protected by a lock, + * except for diagnostic code, then the accesses under the lock should + * be plain C-language accesses and those in the diagnostic code should + * use data_race(). This way, KCSAN will complain if buggy lockless + * accesses to that variable are introduced, even if the buggy accesses + * are protected by READ_ONCE() or WRITE_ONCE(). * * This macro *does not* affect normal code generation, but is a hint - * to tooling that data races here are to be ignored. + * to tooling that data races here are to be ignored. If the access must + * be atomic *and* KCSAN should ignore the access, use both data_race() + * and READ_ONCE(), for example, data_race(READ_ONCE(x)). */ #define data_race(expr) \ ({ \ - __unqual_scalar_typeof(({ expr; })) __v = ({ \ - __kcsan_disable_current(); \ - expr; \ - }); \ + __kcsan_disable_current(); \ + __auto_type __v = (expr); \ __kcsan_enable_current(); \ __v; \ }) diff --git a/include/linux/configfs.h b/include/linux/configfs.h index 2606711adb18..c771e9d0d0b9 100644 --- a/include/linux/configfs.h +++ b/include/linux/configfs.h @@ -216,6 +216,9 @@ struct configfs_group_operations { struct config_group *(*make_group)(struct config_group *group, const char *name); void (*disconnect_notify)(struct config_group *group, struct config_item *item); void (*drop_item)(struct config_group *group, struct config_item *item); + bool (*is_visible)(struct config_item *item, struct configfs_attribute *attr, int n); + bool (*is_bin_visible)(struct config_item *item, struct configfs_bin_attribute *attr, + int n); }; struct configfs_subsystem { diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 861c3bfc5f17..bdcec1732445 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -16,8 +16,8 @@ #include <linux/node.h> #include <linux/compiler.h> -#include <linux/cpumask.h> #include <linux/cpuhotplug.h> +#include <linux/cpuhplock.h> #include <linux/cpu_smt.h> struct device; @@ -132,38 +132,6 @@ static inline int add_cpu(unsigned int cpu) { return 0;} #endif /* CONFIG_SMP */ extern const struct bus_type cpu_subsys; -extern int lockdep_is_cpus_held(void); - -#ifdef CONFIG_HOTPLUG_CPU -extern void cpus_write_lock(void); -extern void cpus_write_unlock(void); -extern void cpus_read_lock(void); -extern void cpus_read_unlock(void); -extern int cpus_read_trylock(void); -extern void lockdep_assert_cpus_held(void); -extern void cpu_hotplug_disable(void); -extern void cpu_hotplug_enable(void); -void clear_tasks_mm_cpumask(int 
cpu); -int remove_cpu(unsigned int cpu); -int cpu_device_down(struct device *dev); -extern void smp_shutdown_nonboot_cpus(unsigned int primary_cpu); - -#else /* CONFIG_HOTPLUG_CPU */ - -static inline void cpus_write_lock(void) { } -static inline void cpus_write_unlock(void) { } -static inline void cpus_read_lock(void) { } -static inline void cpus_read_unlock(void) { } -static inline int cpus_read_trylock(void) { return true; } -static inline void lockdep_assert_cpus_held(void) { } -static inline void cpu_hotplug_disable(void) { } -static inline void cpu_hotplug_enable(void) { } -static inline int remove_cpu(unsigned int cpu) { return -EPERM; } -static inline void smp_shutdown_nonboot_cpus(unsigned int primary_cpu) { } -#endif /* !CONFIG_HOTPLUG_CPU */ - -DEFINE_LOCK_GUARD_0(cpus_read_lock, cpus_read_lock(), cpus_read_unlock()) - #ifdef CONFIG_PM_SLEEP_SMP extern int freeze_secondary_cpus(int primary); extern void thaw_secondary_cpus(void); diff --git a/include/linux/cpu_cooling.h b/include/linux/cpu_cooling.h index a3bdc8a98f2c..2c774fb3c091 100644 --- a/include/linux/cpu_cooling.h +++ b/include/linux/cpu_cooling.h @@ -15,7 +15,6 @@ #include <linux/of.h> #include <linux/thermal.h> -#include <linux/cpumask.h> struct cpufreq_policy; diff --git a/include/linux/cpu_rmap.h b/include/linux/cpu_rmap.h index cae324d10965..20b5729903d7 100644 --- a/include/linux/cpu_rmap.h +++ b/include/linux/cpu_rmap.h @@ -7,7 +7,7 @@ * Copyright 2011 Solarflare Communications Inc. */ -#include <linux/cpumask.h> +#include <linux/cpumask_types.h> #include <linux/gfp.h> #include <linux/slab.h> #include <linux/kref.h> diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 20f7e98ee8af..d4d2f4d1d7cb 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -396,7 +396,7 @@ struct cpufreq_driver { int (*online)(struct cpufreq_policy *policy); int (*offline)(struct cpufreq_policy *policy); - int (*exit)(struct cpufreq_policy *policy); + void (*exit)(struct cpufreq_policy *policy); int (*suspend)(struct cpufreq_policy *policy); int (*resume)(struct cpufreq_policy *policy); @@ -785,7 +785,7 @@ ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf); #ifdef CONFIG_CPU_FREQ int cpufreq_boost_trigger_state(int state); -int cpufreq_boost_enabled(void); +bool cpufreq_boost_enabled(void); int cpufreq_enable_boost_support(void); bool policy_has_boost_freq(struct cpufreq_policy *policy); @@ -1164,9 +1164,9 @@ static inline int cpufreq_boost_trigger_state(int state) { return 0; } -static inline int cpufreq_boost_enabled(void) +static inline bool cpufreq_boost_enabled(void) { - return 0; + return false; } static inline int cpufreq_enable_boost_support(void) diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 7a5785f405b6..89f5c34ce4df 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -27,7 +27,7 @@ * startup callbacks sequentially from CPUHP_OFFLINE + 1 to CPUHP_ONLINE * during a CPU online operation. During a CPU offline operation the * installed teardown callbacks are invoked in the reverse order from - * CPU_ONLINE - 1 down to CPUHP_OFFLINE. + * CPUHP_ONLINE - 1 down to CPUHP_OFFLINE. * * The state space has three sections: PREPARE, STARTING and ONLINE. 
* @@ -171,6 +171,7 @@ enum cpuhp_state { CPUHP_AP_ARMADA_TIMER_STARTING, CPUHP_AP_MIPS_GIC_TIMER_STARTING, CPUHP_AP_ARC_TIMER_STARTING, + CPUHP_AP_REALTEK_TIMER_STARTING, CPUHP_AP_RISCV_TIMER_STARTING, CPUHP_AP_CLINT_TIMER_STARTING, CPUHP_AP_CSKY_TIMER_STARTING, diff --git a/include/linux/cpuhplock.h b/include/linux/cpuhplock.h new file mode 100644 index 000000000000..f7aa20f62b87 --- /dev/null +++ b/include/linux/cpuhplock.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * include/linux/cpuhplock.h - CPU hotplug locking + * + * Locking functions for CPU hotplug. + */ +#ifndef _LINUX_CPUHPLOCK_H_ +#define _LINUX_CPUHPLOCK_H_ + +#include <linux/cleanup.h> +#include <linux/errno.h> + +struct device; + +extern int lockdep_is_cpus_held(void); + +#ifdef CONFIG_HOTPLUG_CPU +void cpus_write_lock(void); +void cpus_write_unlock(void); +void cpus_read_lock(void); +void cpus_read_unlock(void); +int cpus_read_trylock(void); +void lockdep_assert_cpus_held(void); +void cpu_hotplug_disable_offlining(void); +void cpu_hotplug_disable(void); +void cpu_hotplug_enable(void); +void clear_tasks_mm_cpumask(int cpu); +int remove_cpu(unsigned int cpu); +int cpu_device_down(struct device *dev); +void smp_shutdown_nonboot_cpus(unsigned int primary_cpu); + +#else /* CONFIG_HOTPLUG_CPU */ + +static inline void cpus_write_lock(void) { } +static inline void cpus_write_unlock(void) { } +static inline void cpus_read_lock(void) { } +static inline void cpus_read_unlock(void) { } +static inline int cpus_read_trylock(void) { return true; } +static inline void lockdep_assert_cpus_held(void) { } +static inline void cpu_hotplug_disable_offlining(void) { } +static inline void cpu_hotplug_disable(void) { } +static inline void cpu_hotplug_enable(void) { } +static inline int remove_cpu(unsigned int cpu) { return -EPERM; } +static inline void smp_shutdown_nonboot_cpus(unsigned int primary_cpu) { } +#endif /* !CONFIG_HOTPLUG_CPU */ + +DEFINE_LOCK_GUARD_0(cpus_read_lock, cpus_read_lock(), cpus_read_unlock()) + +#endif /* _LINUX_CPUHPLOCK_H_ */ diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 23686bed441d..099e8b32dd68 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -9,25 +9,13 @@ */ #include <linux/cleanup.h> #include <linux/kernel.h> -#include <linux/threads.h> #include <linux/bitmap.h> +#include <linux/cpumask_types.h> #include <linux/atomic.h> #include <linux/bug.h> #include <linux/gfp_types.h> #include <linux/numa.h> -/* Don't assign or return these: may not be this big! */ -typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t; - -/** - * cpumask_bits - get the bits in a cpumask - * @maskp: the struct cpumask * - * - * You should only assume nr_cpu_ids bits of this mask are valid. This is - * a macro so it's const-correct. 
- */ -#define cpumask_bits(maskp) ((maskp)->bits) - /** * cpumask_pr_args - printf args to output a cpumask * @maskp: cpumask to be printed @@ -93,6 +81,7 @@ static inline void set_nr_cpu_ids(unsigned int nr) * * cpu_possible_mask- has bit 'cpu' set iff cpu is populatable * cpu_present_mask - has bit 'cpu' set iff cpu is populated + * cpu_enabled_mask - has bit 'cpu' set iff cpu can be brought online * cpu_online_mask - has bit 'cpu' set iff cpu available to scheduler * cpu_active_mask - has bit 'cpu' set iff cpu available to migration * @@ -125,11 +114,13 @@ static inline void set_nr_cpu_ids(unsigned int nr) extern struct cpumask __cpu_possible_mask; extern struct cpumask __cpu_online_mask; +extern struct cpumask __cpu_enabled_mask; extern struct cpumask __cpu_present_mask; extern struct cpumask __cpu_active_mask; extern struct cpumask __cpu_dying_mask; #define cpu_possible_mask ((const struct cpumask *)&__cpu_possible_mask) #define cpu_online_mask ((const struct cpumask *)&__cpu_online_mask) +#define cpu_enabled_mask ((const struct cpumask *)&__cpu_enabled_mask) #define cpu_present_mask ((const struct cpumask *)&__cpu_present_mask) #define cpu_active_mask ((const struct cpumask *)&__cpu_active_mask) #define cpu_dying_mask ((const struct cpumask *)&__cpu_dying_mask) @@ -922,48 +913,7 @@ static inline unsigned int cpumask_size(void) return bitmap_size(large_cpumask_bits); } -/* - * cpumask_var_t: struct cpumask for stack usage. - * - * Oh, the wicked games we play! In order to make kernel coding a - * little more difficult, we typedef cpumask_var_t to an array or a - * pointer: doing &mask on an array is a noop, so it still works. - * - * i.e. - * cpumask_var_t tmpmask; - * if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL)) - * return -ENOMEM; - * - * ... use 'tmpmask' like a normal struct cpumask * ... - * - * free_cpumask_var(tmpmask); - * - * - * However, one notable exception is there. alloc_cpumask_var() allocates - * only nr_cpumask_bits bits (in the other hand, real cpumask_t always has - * NR_CPUS bits). Therefore you don't have to dereference cpumask_var_t. - * - * cpumask_var_t tmpmask; - * if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL)) - * return -ENOMEM; - * - * var = *tmpmask; - * - * This code makes NR_CPUS length memcopy and brings to a memory corruption. - * cpumask_copy() provide safe copy functionality. - * - * Note that there is another evil here: If you define a cpumask_var_t - * as a percpu variable then the way to obtain the address of the cpumask - * structure differently influences what this_cpu_* operation needs to be - * used. Please use this_cpu_cpumask_var_t in those cases. The direct use - * of this_cpu_ptr() or this_cpu_read() will lead to failures when the - * other type of cpumask_var_t implementation is configured. - * - * Please also note that __cpumask_var_read_mostly can be used to declare - * a cpumask_var_t variable itself (not its content) as read mostly. 
- */ #ifdef CONFIG_CPUMASK_OFFSTACK -typedef struct cpumask *cpumask_var_t; #define this_cpu_cpumask_var_ptr(x) this_cpu_read(x) #define __cpumask_var_read_mostly __read_mostly @@ -1010,7 +960,6 @@ static inline bool cpumask_available(cpumask_var_t mask) } #else -typedef struct cpumask cpumask_var_t[1]; #define this_cpu_cpumask_var_ptr(x) this_cpu_ptr(x) #define __cpumask_var_read_mostly @@ -1075,6 +1024,7 @@ extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS); #else #define for_each_possible_cpu(cpu) for_each_cpu((cpu), cpu_possible_mask) #define for_each_online_cpu(cpu) for_each_cpu((cpu), cpu_online_mask) +#define for_each_enabled_cpu(cpu) for_each_cpu((cpu), cpu_enabled_mask) #define for_each_present_cpu(cpu) for_each_cpu((cpu), cpu_present_mask) #endif @@ -1093,6 +1043,15 @@ set_cpu_possible(unsigned int cpu, bool possible) } static inline void +set_cpu_enabled(unsigned int cpu, bool can_be_onlined) +{ + if (can_be_onlined) + cpumask_set_cpu(cpu, &__cpu_enabled_mask); + else + cpumask_clear_cpu(cpu, &__cpu_enabled_mask); +} + +static inline void set_cpu_present(unsigned int cpu, bool present) { if (present) @@ -1173,6 +1132,7 @@ static __always_inline unsigned int num_online_cpus(void) return raw_atomic_read(&__num_online_cpus); } #define num_possible_cpus() cpumask_weight(cpu_possible_mask) +#define num_enabled_cpus() cpumask_weight(cpu_enabled_mask) #define num_present_cpus() cpumask_weight(cpu_present_mask) #define num_active_cpus() cpumask_weight(cpu_active_mask) @@ -1181,6 +1141,11 @@ static inline bool cpu_online(unsigned int cpu) return cpumask_test_cpu(cpu, cpu_online_mask); } +static inline bool cpu_enabled(unsigned int cpu) +{ + return cpumask_test_cpu(cpu, cpu_enabled_mask); +} + static inline bool cpu_possible(unsigned int cpu) { return cpumask_test_cpu(cpu, cpu_possible_mask); @@ -1205,6 +1170,7 @@ static inline bool cpu_dying(unsigned int cpu) #define num_online_cpus() 1U #define num_possible_cpus() 1U +#define num_enabled_cpus() 1U #define num_present_cpus() 1U #define num_active_cpus() 1U @@ -1218,6 +1184,11 @@ static inline bool cpu_possible(unsigned int cpu) return cpu == 0; } +static inline bool cpu_enabled(unsigned int cpu) +{ + return cpu == 0; +} + static inline bool cpu_present(unsigned int cpu) { return cpu == 0; diff --git a/include/linux/cpumask_types.h b/include/linux/cpumask_types.h new file mode 100644 index 000000000000..461ed1b6bcdb --- /dev/null +++ b/include/linux/cpumask_types.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_CPUMASK_TYPES_H +#define __LINUX_CPUMASK_TYPES_H + +#include <linux/bitops.h> +#include <linux/threads.h> + +/* Don't assign or return these: may not be this big! */ +typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t; + +/** + * cpumask_bits - get the bits in a cpumask + * @maskp: the struct cpumask * + * + * You should only assume nr_cpu_ids bits of this mask are valid. This is + * a macro so it's const-correct. + */ +#define cpumask_bits(maskp) ((maskp)->bits) + +/* + * cpumask_var_t: struct cpumask for stack usage. + * + * Oh, the wicked games we play! In order to make kernel coding a + * little more difficult, we typedef cpumask_var_t to an array or a + * pointer: doing &mask on an array is a noop, so it still works. + * + * i.e. + * cpumask_var_t tmpmask; + * if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL)) + * return -ENOMEM; + * + * ... use 'tmpmask' like a normal struct cpumask * ... + * + * free_cpumask_var(tmpmask); + * + * + * However, one notable exception is there. 
alloc_cpumask_var() allocates + * only nr_cpumask_bits bits (in the other hand, real cpumask_t always has + * NR_CPUS bits). Therefore you don't have to dereference cpumask_var_t. + * + * cpumask_var_t tmpmask; + * if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL)) + * return -ENOMEM; + * + * var = *tmpmask; + * + * This code makes NR_CPUS length memcopy and brings to a memory corruption. + * cpumask_copy() provide safe copy functionality. + * + * Note that there is another evil here: If you define a cpumask_var_t + * as a percpu variable then the way to obtain the address of the cpumask + * structure differently influences what this_cpu_* operation needs to be + * used. Please use this_cpu_cpumask_var_t in those cases. The direct use + * of this_cpu_ptr() or this_cpu_read() will lead to failures when the + * other type of cpumask_var_t implementation is configured. + * + * Please also note that __cpumask_var_read_mostly can be used to declare + * a cpumask_var_t variable itself (not its content) as read mostly. + */ +#ifdef CONFIG_CPUMASK_OFFSTACK +typedef struct cpumask *cpumask_var_t; +#else +typedef struct cpumask cpumask_var_t[1]; +#endif /* CONFIG_CPUMASK_OFFSTACK */ + +#endif /* __LINUX_CPUMASK_TYPES_H */ diff --git a/include/linux/crc32.h b/include/linux/crc32.h index 9e8a032c1788..87f788c0d607 100644 --- a/include/linux/crc32.h +++ b/include/linux/crc32.h @@ -9,7 +9,9 @@ #include <linux/bitrev.h> u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len); +u32 __pure crc32_le_base(u32 crc, unsigned char const *p, size_t len); u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len); +u32 __pure crc32_be_base(u32 crc, unsigned char const *p, size_t len); /** * crc32_le_combine - Combine two crc32 check values into one. For two @@ -37,6 +39,7 @@ static inline u32 crc32_le_combine(u32 crc1, u32 crc2, size_t len2) } u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len); +u32 __pure __crc32c_le_base(u32 crc, unsigned char const *p, size_t len); /** * __crc32c_le_combine - Combine two crc32c check values into one. For two diff --git a/include/linux/damon.h b/include/linux/damon.h index f7da65e1ac04..27c546bfc6d4 100644 --- a/include/linux/damon.h +++ b/include/linux/damon.h @@ -105,6 +105,8 @@ struct damon_target { * @DAMOS_NOHUGEPAGE: Call ``madvise()`` for the region with MADV_NOHUGEPAGE. * @DAMOS_LRU_PRIO: Prioritize the region on its LRU lists. * @DAMOS_LRU_DEPRIO: Deprioritize the region on its LRU lists. + * @DAMOS_MIGRATE_HOT: Migrate the regions prioritizing warmer regions. + * @DAMOS_MIGRATE_COLD: Migrate the regions prioritizing colder regions. * @DAMOS_STAT: Do nothing but count the stat. * @NR_DAMOS_ACTIONS: Total number of DAMOS actions * @@ -122,6 +124,8 @@ enum damos_action { DAMOS_NOHUGEPAGE, DAMOS_LRU_PRIO, DAMOS_LRU_DEPRIO, + DAMOS_MIGRATE_HOT, + DAMOS_MIGRATE_COLD, DAMOS_STAT, /* Do nothing but only record the stat */ NR_DAMOS_ACTIONS, }; @@ -374,6 +378,7 @@ struct damos_access_pattern { * @apply_interval_us: The time between applying the @action. * @quota: Control the aggressiveness of this scheme. * @wmarks: Watermarks for automated (in)activation of this scheme. + * @target_nid: Destination node if @action is "migrate_{hot,cold}". * @filters: Additional set of &struct damos_filter for &action. * @stat: Statistics of this scheme. * @list: List head for siblings. @@ -389,6 +394,10 @@ struct damos_access_pattern { * monitoring context are inactive, DAMON stops monitoring either, and just * repeatedly checks the watermarks. 
* + * @target_nid is used to set the migration target node for migrate_hot or + * migrate_cold actions, which means it's only meaningful when @action is either + * "migrate_hot" or "migrate_cold". + * * Before applying the &action to a memory region, &struct damon_operations * implementation could check pages of the region and skip &action to respect * &filters @@ -410,6 +419,9 @@ struct damos { /* public: */ struct damos_quota quota; struct damos_watermarks wmarks; + union { + int target_nid; + }; struct list_head filters; struct damos_stat stat; struct list_head list; @@ -726,9 +738,11 @@ struct damos *damon_new_scheme(struct damos_access_pattern *pattern, enum damos_action action, unsigned long apply_interval_us, struct damos_quota *quota, - struct damos_watermarks *wmarks); + struct damos_watermarks *wmarks, + int target_nid); void damon_add_scheme(struct damon_ctx *ctx, struct damos *s); void damon_destroy_scheme(struct damos *s); +int damos_commit_quota_goals(struct damos_quota *dst, struct damos_quota *src); struct damon_target *damon_new_target(void); void damon_add_target(struct damon_ctx *ctx, struct damon_target *t); @@ -742,6 +756,7 @@ void damon_destroy_ctx(struct damon_ctx *ctx); int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs); void damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes, ssize_t nr_schemes); +int damon_commit_ctx(struct damon_ctx *old_ctx, struct damon_ctx *new_ctx); int damon_nr_running_ctxs(void); bool damon_is_registered_ops(enum damon_ops_id id); int damon_register_ops(struct damon_operations *ops); diff --git a/include/linux/dcache.h b/include/linux/dcache.h index bf53e3894aae..bff956f7b2b9 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -71,7 +71,7 @@ extern const struct qstr dotdot_name; # define DNAME_INLINE_LEN 40 /* 192 bytes */ #else # ifdef CONFIG_SMP -# define DNAME_INLINE_LEN 40 /* 128 bytes */ +# define DNAME_INLINE_LEN 36 /* 128 bytes */ # else # define DNAME_INLINE_LEN 44 /* 128 bytes */ # endif @@ -89,13 +89,18 @@ struct dentry { struct inode *d_inode; /* Where the name belongs to - NULL is * negative */ unsigned char d_iname[DNAME_INLINE_LEN]; /* small names */ + /* --- cacheline 1 boundary (64 bytes) was 32 bytes ago --- */ /* Ref lookup also touches following */ - struct lockref d_lockref; /* per-dentry lock and refcount */ const struct dentry_operations *d_op; struct super_block *d_sb; /* The root of the dentry tree */ unsigned long d_time; /* used by d_revalidate */ void *d_fsdata; /* fs-specific data */ + /* --- cacheline 2 boundary (128 bytes) --- */ + struct lockref d_lockref; /* per-dentry lock and refcount + * keep separate from RCU lookup area if + * possible! + */ union { struct list_head d_lru; /* LRU list */ @@ -278,6 +283,8 @@ static inline unsigned d_count(const struct dentry *dentry) return dentry->d_lockref.count; } +ino_t d_parent_ino(struct dentry *dentry); + /* * helper function for dentry_operations.d_dname() members */ diff --git a/include/linux/dev_printk.h b/include/linux/dev_printk.h index ae80a303c216..ca32b5bb28eb 100644 --- a/include/linux/dev_printk.h +++ b/include/linux/dev_printk.h @@ -277,4 +277,12 @@ do { \ __printf(3, 4) int dev_err_probe(const struct device *dev, int err, const char *fmt, ...); +/* Simple helper for dev_err_probe() when ERR_PTR() is to be returned. */ +#define dev_err_ptr_probe(dev, ___err, fmt, ...) \ + ERR_PTR(dev_err_probe(dev, ___err, fmt, ##__VA_ARGS__)) + +/* Simple helper for dev_err_probe() when ERR_CAST() is to be returned. 
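For reference, a hedged sketch of the pointer-returning probe path that the dev_err_ptr_probe() helper above is meant to shorten; the clock consumer name is made up:

#include <linux/clk.h>
#include <linux/dev_printk.h>
#include <linux/err.h>

static struct clk *example_get_ref_clk(struct device *dev)
{
	struct clk *clk = devm_clk_get(dev, "ref");

	if (IS_ERR(clk))
		/* Logs (or records a deferral reason) and returns ERR_PTR(). */
		return dev_err_ptr_probe(dev, PTR_ERR(clk),
					 "failed to get ref clock\n");

	return clk;
}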
*/ +#define dev_err_cast_probe(dev, ___err_ptr, fmt, ...) \ + ERR_PTR(dev_err_probe(dev, PTR_ERR(___err_ptr), fmt, ##__VA_ARGS__)) + #endif /* _DEVICE_PRINTK_H_ */ diff --git a/include/linux/devcoredump.h b/include/linux/devcoredump.h index c8f7eb6cc191..377892604ff4 100644 --- a/include/linux/devcoredump.h +++ b/include/linux/devcoredump.h @@ -12,6 +12,9 @@ #include <linux/scatterlist.h> #include <linux/slab.h> +/* if data isn't read by userspace after 5 minutes then delete it */ +#define DEVCD_TIMEOUT (HZ * 60 * 5) + /* * _devcd_free_sgtable - free all the memory of the given scatterlist table * (i.e. both pages and scatterlist instances) @@ -50,16 +53,17 @@ static inline void _devcd_free_sgtable(struct scatterlist *table) kfree(delete_iter); } - #ifdef CONFIG_DEV_COREDUMP void dev_coredumpv(struct device *dev, void *data, size_t datalen, gfp_t gfp); -void dev_coredumpm(struct device *dev, struct module *owner, - void *data, size_t datalen, gfp_t gfp, - ssize_t (*read)(char *buffer, loff_t offset, size_t count, - void *data, size_t datalen), - void (*free)(void *data)); +void dev_coredumpm_timeout(struct device *dev, struct module *owner, + void *data, size_t datalen, gfp_t gfp, + ssize_t (*read)(char *buffer, loff_t offset, + size_t count, void *data, + size_t datalen), + void (*free)(void *data), + unsigned long timeout); void dev_coredumpsg(struct device *dev, struct scatterlist *table, size_t datalen, gfp_t gfp); @@ -73,11 +77,13 @@ static inline void dev_coredumpv(struct device *dev, void *data, } static inline void -dev_coredumpm(struct device *dev, struct module *owner, - void *data, size_t datalen, gfp_t gfp, - ssize_t (*read)(char *buffer, loff_t offset, size_t count, - void *data, size_t datalen), - void (*free)(void *data)) +dev_coredumpm_timeout(struct device *dev, struct module *owner, + void *data, size_t datalen, gfp_t gfp, + ssize_t (*read)(char *buffer, loff_t offset, + size_t count, void *data, + size_t datalen), + void (*free)(void *data), + unsigned long timeout) { free(data); } @@ -92,4 +98,29 @@ static inline void dev_coredump_put(struct device *dev) } #endif /* CONFIG_DEV_COREDUMP */ +/** + * dev_coredumpm - create device coredump with read/free methods + * @dev: the struct device for the crashed device + * @owner: the module that contains the read/free functions, use %THIS_MODULE + * @data: data cookie for the @read/@free functions + * @datalen: length of the data + * @gfp: allocation flags + * @read: function to read from the given buffer + * @free: function to free the given buffer + * + * Creates a new device coredump for the given device. If a previous one hasn't + * been read yet, the new coredump is discarded. The data lifetime is determined + * by the device coredump framework and when it is no longer needed the @free + * function will be called to free the data. 
+ */ +static inline void dev_coredumpm(struct device *dev, struct module *owner, + void *data, size_t datalen, gfp_t gfp, + ssize_t (*read)(char *buffer, loff_t offset, size_t count, + void *data, size_t datalen), + void (*free)(void *data)) +{ + dev_coredumpm_timeout(dev, owner, data, datalen, gfp, read, free, + DEVCD_TIMEOUT); +} + #endif /* __DEVCOREDUMP_H */ diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 82b2195efaca..53ca3a913d06 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -180,6 +180,11 @@ int dm_get_device(struct dm_target *ti, const char *path, blk_mode_t mode, void dm_put_device(struct dm_target *ti, struct dm_dev *d); /* + * Helper function for getting devices + */ +int dm_devt_from_path(const char *path, dev_t *dev_p); + +/* * Information about a target type */ @@ -358,22 +363,17 @@ struct dm_target { bool discards_supported:1; /* - * Set if this target requires that discards be split on - * 'max_discard_sectors' boundaries. - */ - bool max_discard_granularity:1; - - /* - * Set if this target requires that secure_erases be split on - * 'max_secure_erase_sectors' boundaries. + * Automatically set by dm-core if this target supports + * REQ_OP_ZONE_RESET_ALL. Otherwise, this operation will be emulated + * using REQ_OP_ZONE_RESET. Target drivers must not set this manually. */ - bool max_secure_erase_granularity:1; + bool zone_reset_all_supported:1; /* - * Set if this target requires that write_zeroes be split on - * 'max_write_zeroes_sectors' boundaries. + * Set if this target requires that discards be split on + * 'max_discard_sectors' boundaries. */ - bool max_write_zeroes_granularity:1; + bool max_discard_granularity:1; /* * Set if we need to limit the number of in-flight bios when swapping. @@ -397,6 +397,27 @@ struct dm_target { * bio_set_dev(). NOTE: ideally a target should _not_ need this. */ bool needs_bio_set_dev:1; + + /* + * Set if the target supports flush optimization. If all the targets in + * a table have flush_bypasses_map set, the dm core will not send + * flushes to the targets via a ->map method. It will iterate over + * dm_table->devices and send flushes to the devices directly. This + * optimization reduces the number of flushes being sent when multiple + * targets in a table use the same underlying device. + * + * This optimization may be enabled on targets that just pass the + * flushes to the underlying devices without performing any other + * actions on the flush request. Currently, dm-linear and dm-stripe + * support it. + */ + bool flush_bypasses_map:1; + + /* + * Set if the target calls bio_integrity_alloc on bios received + * in the map method. + */ + bool mempool_needs_integrity:1; }; void *dm_per_bio_data(struct bio *bio, size_t data_size); diff --git a/include/linux/dim.h b/include/linux/dim.h index f343bc9aa2ec..1b581ff25a15 100644 --- a/include/linux/dim.h +++ b/include/linux/dim.h @@ -10,6 +10,15 @@ #include <linux/types.h> #include <linux/workqueue.h> +struct net_device; + +/* Number of DIM profiles and period mode. */ +#define NET_DIM_PARAMS_NUM_PROFILES 5 +#define NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE 256 +#define NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE 128 +#define NET_DIM_DEF_PROFILE_CQE 1 +#define NET_DIM_DEF_PROFILE_EQE 1 + /* * Number of events between DIM iterations. * Causes a moderation of the algorithm run. 
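For reference, a minimal sketch of a driver handing a vmalloc'd dump to the dev_coredumpm() wrapper defined above, so it is freed after DEVCD_TIMEOUT if userspace never reads it; the driver context is hypothetical:

#include <linux/devcoredump.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/vmalloc.h>

static ssize_t example_devcd_read(char *buffer, loff_t offset, size_t count,
				  void *data, size_t datalen)
{
	return memory_read_from_buffer(buffer, count, &offset, data, datalen);
}

static void example_devcd_free(void *data)
{
	vfree(data);
}

static void example_report_crash(struct device *dev, void *dump, size_t len)
{
	/* Ownership of @dump passes to the framework; free() runs later. */
	dev_coredumpm(dev, THIS_MODULE, dump, len, GFP_KERNEL,
		      example_devcd_read, example_devcd_free);
}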
@@ -38,12 +47,45 @@ * @pkts: CQ packet counter suggestion (by DIM) * @comps: Completion counter * @cq_period_mode: CQ period count mode (from CQE/EQE) + * @rcu: for asynchronous kfree_rcu */ struct dim_cq_moder { u16 usec; u16 pkts; u16 comps; u8 cq_period_mode; + struct rcu_head rcu; +}; + +#define DIM_PROFILE_RX BIT(0) /* support rx profile modification */ +#define DIM_PROFILE_TX BIT(1) /* support tx profile modification */ + +#define DIM_COALESCE_USEC BIT(0) /* support usec field modification */ +#define DIM_COALESCE_PKTS BIT(1) /* support pkts field modification */ +#define DIM_COALESCE_COMPS BIT(2) /* support comps field modification */ + +/** + * struct dim_irq_moder - Structure for irq moderation information. + * Used to collect irq moderation related information. + * + * @profile_flags: DIM_PROFILE_* + * @coal_flags: DIM_COALESCE_* for Rx and Tx + * @dim_rx_mode: Rx DIM period count mode: CQE or EQE + * @dim_tx_mode: Tx DIM period count mode: CQE or EQE + * @rx_profile: DIM profile list for Rx + * @tx_profile: DIM profile list for Tx + * @rx_dim_work: Rx DIM worker scheduled by net_dim() + * @tx_dim_work: Tx DIM worker scheduled by net_dim() + */ +struct dim_irq_moder { + u8 profile_flags; + u8 coal_flags; + u8 dim_rx_mode; + u8 dim_tx_mode; + struct dim_cq_moder __rcu *rx_profile; + struct dim_cq_moder __rcu *tx_profile; + void (*rx_dim_work)(struct work_struct *work); + void (*tx_dim_work)(struct work_struct *work); }; /** @@ -192,6 +234,77 @@ enum dim_step_result { }; /** + * net_dim_init_irq_moder - collect information to initialize irq moderation + * @dev: target network device + * @profile_flags: Rx or Tx profile modification capability + * @coal_flags: irq moderation params flags + * @rx_mode: CQ period mode for Rx + * @tx_mode: CQ period mode for Tx + * @rx_dim_work: Rx worker called after dim decision + * @tx_dim_work: Tx worker called after dim decision + * + * Return: 0 on success or a negative error code. 
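For reference, a sketch of a driver opting into the new per-netdev DIM profiles at probe time; the worker callbacks and flag selection are assumptions for illustration, not taken from a real driver:

#include <linux/dim.h>
#include <linux/netdevice.h>

static void example_rx_dim_work(struct work_struct *work)
{
	/* Look up the queue from @work and apply the new Rx moderation. */
}

static void example_tx_dim_work(struct work_struct *work)
{
	/* Same for Tx. */
}

static int example_init_dim(struct net_device *dev)
{
	/* Let ethtool modify usec/pkts of both the Rx and Tx profiles. */
	return net_dim_init_irq_moder(dev, DIM_PROFILE_RX | DIM_PROFILE_TX,
				      DIM_COALESCE_USEC | DIM_COALESCE_PKTS,
				      DIM_CQ_PERIOD_MODE_START_FROM_EQE,
				      DIM_CQ_PERIOD_MODE_START_FROM_EQE,
				      example_rx_dim_work,
				      example_tx_dim_work);
}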
+ */ +int net_dim_init_irq_moder(struct net_device *dev, u8 profile_flags, + u8 coal_flags, u8 rx_mode, u8 tx_mode, + void (*rx_dim_work)(struct work_struct *work), + void (*tx_dim_work)(struct work_struct *work)); + +/** + * net_dim_free_irq_moder - free fields for irq moderation + * @dev: target network device + */ +void net_dim_free_irq_moder(struct net_device *dev); + +/** + * net_dim_setting - initialize DIM's cq mode and schedule worker + * @dev: target network device + * @dim: DIM context + * @is_tx: true indicates the tx direction, false indicates the rx direction + */ +void net_dim_setting(struct net_device *dev, struct dim *dim, bool is_tx); + +/** + * net_dim_work_cancel - synchronously cancel dim's worker + * @dim: DIM context + */ +void net_dim_work_cancel(struct dim *dim); + +/** + * net_dim_get_rx_irq_moder - get DIM rx results based on profile_ix + * @dev: target network device + * @dim: DIM context + * + * Return: DIM irq moderation + */ +struct dim_cq_moder +net_dim_get_rx_irq_moder(struct net_device *dev, struct dim *dim); + +/** + * net_dim_get_tx_irq_moder - get DIM tx results based on profile_ix + * @dev: target network device + * @dim: DIM context + * + * Return: DIM irq moderation + */ +struct dim_cq_moder +net_dim_get_tx_irq_moder(struct net_device *dev, struct dim *dim); + +/** + * net_dim_set_rx_mode - set DIM rx cq mode + * @dev: target network device + * @rx_mode: target rx cq mode + */ +void net_dim_set_rx_mode(struct net_device *dev, u8 rx_mode); + +/** + * net_dim_set_tx_mode - set DIM tx cq mode + * @dev: target network device + * @tx_mode: target tx cq mode + */ +void net_dim_set_tx_mode(struct net_device *dev, u8 tx_mode); + +/** * dim_on_top - check if current state is a good place to stop (top location) * @dim: DIM context * diff --git a/include/linux/dlm.h b/include/linux/dlm.h index c58c4f790c04..bacda9898f2b 100644 --- a/include/linux/dlm.h +++ b/include/linux/dlm.h @@ -35,6 +35,9 @@ struct dlm_lockspace_ops { int num_slots, int our_slot, uint32_t generation); }; +/* only relevant for kernel lockspaces, will be removed in future */ +#define DLM_LSFL_SOFTIRQ __DLM_LSFL_RESERVED0 + /* * dlm_new_lockspace * @@ -55,6 +58,11 @@ struct dlm_lockspace_ops { * used to select the directory node. Must be the same on all nodes. * DLM_LSFL_NEWEXCL * dlm_new_lockspace() should return -EEXIST if the lockspace exists. + * DLM_LSFL_SOFTIRQ + * dlm request callbacks (ast, bast) are softirq safe. Users should + * prefer this flag; it will become the default in the future. If set, + * the strongest context for the ast/bast callbacks is softirq, which + * avoids an additional context switch. * * lvblen: length of lvb in bytes. Must be multiple of 8. * dlm_new_lockspace() returns an error if this does not match @@ -121,7 +129,14 @@ int dlm_release_lockspace(dlm_lockspace_t *lockspace, int force); * call. * * AST routines should not block (at least not for long), but may make - * any locking calls they please. + * any locking calls they please. If a kernel user passes + * DLM_LSFL_SOFTIRQ to dlm_new_lockspace(), the ast and bast callbacks + * may be processed in softirq context. Some callbacks also run in the + * same context as the DLM lock request API, so users must not hold a + * lock while calling the lock request API and then attempt to acquire + * that same lock again in the callback; doing so ends in lock + * recursion. Newer implementations should use DLM_LSFL_SOFTIRQ.
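To make the flag concrete, a hedged sketch of a kernel lockspace creation that opts into the softirq-safe callback contract described above; the lockspace name and LVB length are arbitrary:

#include <linux/dlm.h>

static dlm_lockspace_t *example_ls;

static int example_new_lockspace(void)
{
	/* ast/bast handlers registered later must be softirq safe. */
	return dlm_new_lockspace("example", NULL, DLM_LSFL_SOFTIRQ, 32,
				 NULL, NULL, NULL, &example_ls);
}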
*/ int dlm_lock(dlm_lockspace_t *lockspace, diff --git a/include/linux/dma-fence-array.h b/include/linux/dma-fence-array.h index ec7f25def392..29c5650c1038 100644 --- a/include/linux/dma-fence-array.h +++ b/include/linux/dma-fence-array.h @@ -33,6 +33,7 @@ struct dma_fence_array_cb { * @num_pending: fences in the array still pending * @fences: array of the fences * @work: internal irq_work function + * @callbacks: array of callback helpers */ struct dma_fence_array { struct dma_fence base; @@ -43,6 +44,8 @@ struct dma_fence_array { struct dma_fence **fences; struct irq_work work; + + struct dma_fence_array_cb callbacks[] __counted_by(num_fences); }; /** diff --git a/include/linux/dma-fence-chain.h b/include/linux/dma-fence-chain.h index ad9e2506c2f4..68c3c1e41014 100644 --- a/include/linux/dma-fence-chain.h +++ b/include/linux/dma-fence-chain.h @@ -85,6 +85,10 @@ dma_fence_chain_contained(struct dma_fence *fence) * dma_fence_chain_alloc * * Returns a new struct dma_fence_chain object or NULL on failure. + * + * This specialized allocator has to be a macro for its allocations to be + * accounted separately (to have a separate alloc_tag). The typecast is + * intentional to enforce typesafety. */ #define dma_fence_chain_alloc() \ ((struct dma_fence_chain *)kmalloc(sizeof(struct dma_fence_chain), GFP_KERNEL)) diff --git a/include/linux/dma-heap.h b/include/linux/dma-heap.h index 0c05561cad6e..064bad725061 100644 --- a/include/linux/dma-heap.h +++ b/include/linux/dma-heap.h @@ -23,8 +23,8 @@ struct dma_heap; struct dma_heap_ops { struct dma_buf *(*allocate)(struct dma_heap *heap, unsigned long len, - unsigned long fd_flags, - unsigned long heap_flags); + u32 fd_flags, + u64 heap_flags); }; /** diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 752dbde4cec1..9fc03068cabc 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -161,6 +161,16 @@ struct dma_interleaved_template { }; /** + * struct dma_vec - DMA vector + * @addr: Bus address of the start of the vector + * @len: Length in bytes of the DMA vector + */ +struct dma_vec { + dma_addr_t addr; + size_t len; +}; + +/** * enum dma_ctrl_flags - DMA flags to augment operation preparation, * control completion, and communicate status. 
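As a usage sketch of the struct dma_vec just introduced, paired with the dmaengine_prep_peripheral_dma_vec() helper that appears below, and assuming the two buffers are already DMA-mapped:

#include <linux/dmaengine.h>

static int example_xfer_vecs(struct dma_chan *chan, dma_addr_t buf0,
			     dma_addr_t buf1, size_t len)
{
	struct dma_vec vecs[] = {
		{ .addr = buf0, .len = len },
		{ .addr = buf1, .len = len },
	};
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_peripheral_dma_vec(chan, vecs, ARRAY_SIZE(vecs),
						 DMA_MEM_TO_DEV,
						 DMA_PREP_INTERRUPT);
	if (!desc)
		return -EINVAL;	/* channel lacks the new vectored op */

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}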
* @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of @@ -910,6 +920,10 @@ struct dma_device { struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)( struct dma_chan *chan, unsigned long flags); + struct dma_async_tx_descriptor *(*device_prep_peripheral_dma_vec)( + struct dma_chan *chan, const struct dma_vec *vecs, + size_t nents, enum dma_transfer_direction direction, + unsigned long flags); struct dma_async_tx_descriptor *(*device_prep_slave_sg)( struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, enum dma_transfer_direction direction, @@ -973,6 +987,25 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single( dir, flags, NULL); } +/** + * dmaengine_prep_peripheral_dma_vec() - Prepare a DMA scatter-gather descriptor + * @chan: The channel to be used for this descriptor + * @vecs: The array of DMA vectors that should be transferred + * @nents: The number of DMA vectors in the array + * @dir: Specifies the direction of the data transfer + * @flags: DMA engine flags + */ +static inline struct dma_async_tx_descriptor *dmaengine_prep_peripheral_dma_vec( + struct dma_chan *chan, const struct dma_vec *vecs, size_t nents, + enum dma_transfer_direction dir, unsigned long flags) +{ + if (!chan || !chan->device || !chan->device->device_prep_peripheral_dma_vec) + return NULL; + + return chan->device->device_prep_peripheral_dma_vec(chan, vecs, nents, + dir, flags); +} + static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg( struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, enum dma_transfer_direction dir, unsigned long flags) diff --git a/include/linux/dsa/8021q.h b/include/linux/dsa/8021q.h index f3664ee12170..d13aabdeb4b2 100644 --- a/include/linux/dsa/8021q.h +++ b/include/linux/dsa/8021q.h @@ -8,12 +8,18 @@ #include <net/dsa.h> #include <linux/types.h> +/* VBID is limited to three bits only and zero is reserved. + * Only 7 bridges can be enumerated. + */ +#define DSA_TAG_8021Q_MAX_NUM_BRIDGES 7 + int dsa_tag_8021q_register(struct dsa_switch *ds, __be16 proto); void dsa_tag_8021q_unregister(struct dsa_switch *ds); int dsa_tag_8021q_bridge_join(struct dsa_switch *ds, int port, - struct dsa_bridge bridge); + struct dsa_bridge bridge, bool *tx_fwd_offload, + struct netlink_ext_ack *extack); void dsa_tag_8021q_bridge_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge); diff --git a/include/linux/dsa/lan9303.h b/include/linux/dsa/lan9303.h index b4f22112ba75..3ce7cbcc37a3 100644 --- a/include/linux/dsa/lan9303.h +++ b/include/linux/dsa/lan9303.h @@ -5,8 +5,8 @@ struct lan9303; struct lan9303_phy_ops { /* PHY 1 and 2 access*/ - int (*phy_read)(struct lan9303 *chip, int port, int regnum); - int (*phy_write)(struct lan9303 *chip, int port, + int (*phy_read)(struct lan9303 *chip, int addr, int regnum); + int (*phy_write)(struct lan9303 *chip, int addr, int regnum, u16 val); }; diff --git a/include/linux/eeprom_93xx46.h b/include/linux/eeprom_93xx46.h deleted file mode 100644 index 34c2175e6a1e..000000000000 --- a/include/linux/eeprom_93xx46.h +++ /dev/null @@ -1,32 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Module: eeprom_93xx46 - * platform description for 93xx46 EEPROMs. - */ -#include <linux/gpio/consumer.h> - -struct eeprom_93xx46_platform_data { - unsigned char flags; -#define EE_ADDR8 0x01 /* 8 bit addr. cfg */ -#define EE_ADDR16 0x02 /* 16 bit addr. 
cfg */ -#define EE_READONLY 0x08 /* forbid writing */ -#define EE_SIZE1K 0x10 /* 1 kb of data, that is a 93xx46 */ -#define EE_SIZE2K 0x20 /* 2 kb of data, that is a 93xx56 */ -#define EE_SIZE4K 0x40 /* 4 kb of data, that is a 93xx66 */ - - unsigned int quirks; -/* Single word read transfers only; no sequential read. */ -#define EEPROM_93XX46_QUIRK_SINGLE_WORD_READ (1 << 0) -/* Instructions such as EWEN are (addrlen + 2) in length. */ -#define EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH (1 << 1) -/* Add extra cycle after address during a read */ -#define EEPROM_93XX46_QUIRK_EXTRA_READ_CYCLE BIT(2) - - /* - * optional hooks to control additional logic - * before and after spi transfer. - */ - void (*prepare)(void *); - void (*finish)(void *); - struct gpio_desc *select; -}; diff --git a/include/linux/efi.h b/include/linux/efi.h index 418e555459da..6bf3c4fe8511 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -74,10 +74,10 @@ typedef void *efi_handle_t; */ typedef guid_t efi_guid_t __aligned(__alignof__(u32)); -#define EFI_GUID(a, b, c, d...) (efi_guid_t){ { \ +#define EFI_GUID(a, b, c, d...) ((efi_guid_t){ { \ (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \ (b) & 0xff, ((b) >> 8) & 0xff, \ - (c) & 0xff, ((c) >> 8) & 0xff, d } } + (c) & 0xff, ((c) >> 8) & 0xff, d } }) /* * Generic EFI table header @@ -385,6 +385,7 @@ void efi_native_runtime_setup(void); #define EFI_MEMORY_ATTRIBUTES_TABLE_GUID EFI_GUID(0xdcfa911d, 0x26eb, 0x469f, 0xa2, 0x20, 0x38, 0xb7, 0xdc, 0x46, 0x12, 0x20) #define EFI_CONSOLE_OUT_DEVICE_GUID EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, 0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) #define APPLE_PROPERTIES_PROTOCOL_GUID EFI_GUID(0x91bd12fe, 0xf6c3, 0x44fb, 0xa5, 0xb7, 0x51, 0x22, 0xab, 0x30, 0x3a, 0xe0) +#define APPLE_SET_OS_PROTOCOL_GUID EFI_GUID(0xc5c5da95, 0x7d5c, 0x45e6, 0xb2, 0xf1, 0x3f, 0xd5, 0x2b, 0xb1, 0x00, 0x77) #define EFI_TCG2_PROTOCOL_GUID EFI_GUID(0x607f766c, 0x7455, 0x42be, 0x93, 0x0b, 0xe4, 0xd7, 0x6d, 0xb2, 0x72, 0x0f) #define EFI_TCG2_FINAL_EVENTS_TABLE_GUID EFI_GUID(0x1e2ed096, 0x30e2, 0x4254, 0xbd, 0x89, 0x86, 0x3b, 0xbe, 0xf8, 0x23, 0x25) #define EFI_LOAD_FILE_PROTOCOL_GUID EFI_GUID(0x56ec3091, 0x954c, 0x11d2, 0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b) @@ -607,7 +608,11 @@ typedef struct { u32 num_entries; u32 desc_size; u32 flags; - efi_memory_desc_t entry[0]; + /* + * There are @num_entries following, each of size @desc_size bytes, + * including an efi_memory_desc_t header. See efi_memdesc_ptr(). + */ + efi_memory_desc_t entry[]; } efi_memory_attributes_table_t; typedef struct { @@ -783,7 +788,7 @@ extern int efi_memattr_apply_permissions(struct mm_struct *mm, efi_memattr_perm_setter fn); /* - * efi_early_memdesc_ptr - get the n-th EFI memmap descriptor + * efi_memdesc_ptr - get the n-th EFI memmap descriptor * @map: the start of efi memmap * @desc_size: the size of space for each EFI memmap descriptor * @n: the index of efi memmap descriptor @@ -801,7 +806,7 @@ extern int efi_memattr_apply_permissions(struct mm_struct *mm, * during bootup since for_each_efi_memory_desc_xxx() is available after the * kernel initializes the EFI subsystem to set up struct efi_memory_map. 
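For reference, a sketch of the open-coded iteration the accessor below supports, stepping by @desc_size rather than sizeof(); the map pointer and entry count are assumed to come from the boot stub:

#include <linux/efi.h>

static u64 example_count_conventional_pages(void *map,
					    unsigned long desc_size,
					    unsigned long nr_entries)
{
	u64 pages = 0;
	unsigned long i;

	for (i = 0; i < nr_entries; i++) {
		/* Firmware may pad descriptors, so never step by sizeof(*md). */
		efi_memory_desc_t *md = efi_memdesc_ptr(map, desc_size, i);

		if (md->type == EFI_CONVENTIONAL_MEMORY)
			pages += md->num_pages;
	}

	return pages;
}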
*/ -#define efi_early_memdesc_ptr(map, desc_size, n) \ +#define efi_memdesc_ptr(map, desc_size, n) \ (efi_memory_desc_t *)((void *)(map) + ((n) * (desc_size))) /* Iterate through an efi_memory_map */ diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 6fd9107d3cc0..303fda54ef17 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -18,6 +18,7 @@ #include <linux/if_ether.h> #include <linux/netlink.h> #include <uapi/linux/ethtool.h> +#include <uapi/linux/net_tstamp.h> struct compat_ethtool_rx_flow_spec { u32 flow_type; @@ -159,6 +160,49 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings) return index % n_rx_rings; } +/** + * struct ethtool_rxfh_context - a custom RSS context configuration + * @indir_size: Number of u32 entries in indirection table + * @key_size: Size of hash key, in bytes + * @priv_size: Size of driver private data, in bytes + * @hfunc: RSS hash function identifier. One of the %ETH_RSS_HASH_* + * @input_xfrm: Defines how the input data is transformed. Valid values are one + * of %RXH_XFRM_*. + * @indir_configured: indir has been specified (at create time or subsequently) + * @key_configured: hkey has been specified (at create time or subsequently) + */ +struct ethtool_rxfh_context { + u32 indir_size; + u32 key_size; + u16 priv_size; + u8 hfunc; + u8 input_xfrm; + u8 indir_configured:1; + u8 key_configured:1; + /* private: driver private data, indirection table, and hash key are + * stored sequentially in @data area. Use below helpers to access. + */ + u32 key_off; + u8 data[] __aligned(sizeof(void *)); +}; + +static inline void *ethtool_rxfh_context_priv(struct ethtool_rxfh_context *ctx) +{ + return ctx->data; +} + +static inline u32 *ethtool_rxfh_context_indir(struct ethtool_rxfh_context *ctx) +{ + return (u32 *)(ctx->data + ALIGN(ctx->priv_size, sizeof(u32))); +} + +static inline u8 *ethtool_rxfh_context_key(struct ethtool_rxfh_context *ctx) +{ + return &ctx->data[ctx->key_off]; +} + +void ethtool_rxfh_context_lost(struct net_device *dev, u32 context_id); + /* declare a link mode bitmap */ #define __ETHTOOL_DECLARE_LINK_MODE_MASK(name) \ DECLARE_BITMAP(name, __ETHTOOL_LINK_MODE_MASK_NBITS) @@ -284,7 +328,9 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32, #define ETHTOOL_COALESCE_TX_AGGR_MAX_BYTES BIT(24) #define ETHTOOL_COALESCE_TX_AGGR_MAX_FRAMES BIT(25) #define ETHTOOL_COALESCE_TX_AGGR_TIME_USECS BIT(26) -#define ETHTOOL_COALESCE_ALL_PARAMS GENMASK(26, 0) +#define ETHTOOL_COALESCE_RX_PROFILE BIT(27) +#define ETHTOOL_COALESCE_TX_PROFILE BIT(28) +#define ETHTOOL_COALESCE_ALL_PARAMS GENMASK(28, 0) #define ETHTOOL_COALESCE_USECS \ (ETHTOOL_COALESCE_RX_USECS | ETHTOOL_COALESCE_TX_USECS) @@ -504,17 +550,16 @@ struct ethtool_ts_stats { #define ETH_MODULE_MAX_I2C_ADDRESS 0x7f /** - * struct ethtool_module_eeprom - EEPROM dump from specified page - * @offset: Offset within the specified EEPROM page to begin read, in bytes. - * @length: Number of bytes to read. - * @page: Page number to read from. - * @bank: Page bank number to read from, if applicable by EEPROM spec. + * struct ethtool_module_eeprom - plug-in module EEPROM read / write parameters + * @offset: When @offset is 0-127, it is used as an address to the Lower Memory + * (@page must be 0). Otherwise, it is used as an address to the + * Upper Memory. + * @length: Number of bytes to read / write. + * @page: Page number. + * @bank: Bank number, if supported by EEPROM spec. * @i2c_address: I2C address of a page. Value less than 0x7f expected. 
Most * EEPROMs use 0x50 or 0x51. * @data: Pointer to buffer with EEPROM data of @length size. - * - * This can be used to manage pages during EEPROM dump in ethtool and pass - * required information to the driver. */ struct ethtool_module_eeprom { u32 offset; @@ -662,6 +707,22 @@ struct ethtool_rxfh_param { }; /** + * struct kernel_ethtool_ts_info - kernel copy of struct ethtool_ts_info + * @cmd: command number = %ETHTOOL_GET_TS_INFO + * @so_timestamping: bit mask of the sum of the supported SO_TIMESTAMPING flags + * @phc_index: device index of the associated PHC, or -1 if there is none + * @tx_types: bit mask of the supported hwtstamp_tx_types enumeration values + * @rx_filters: bit mask of the supported hwtstamp_rx_filters enumeration values + */ +struct kernel_ethtool_ts_info { + u32 cmd; + u32 so_timestamping; + int phc_index; + enum hwtstamp_tx_types tx_types; + enum hwtstamp_rx_filters rx_filters; +}; + +/** * struct ethtool_ops - optional netdev operations * @cap_link_lanes_supported: indicates if the driver supports lanes * parameter. @@ -669,6 +730,16 @@ struct ethtool_rxfh_param { * contexts. * @cap_rss_sym_xor_supported: indicates if the driver supports symmetric-xor * RSS. + * @rxfh_indir_space: max size of RSS indirection tables, if indirection table + * size as returned by @get_rxfh_indir_size may change during lifetime + * of the device. Leave as 0 if the table size is constant. + * @rxfh_key_space: same as @rxfh_indir_space, but for the key. + * @rxfh_priv_size: size of the driver private data area the core should + * allocate for an RSS context (in &struct ethtool_rxfh_context). + * @rxfh_max_context_id: maximum (exclusive) supported RSS context ID. If this + * is zero then the core may choose any (nonzero) ID, otherwise the core + * will only use IDs strictly less than this value, as the @rss_context + * argument to @create_rxfh_context and friends. * @supported_coalesce_params: supported types of interrupt coalescing. * @supported_ring_params: supported ring params. * @get_drvinfo: Report driver/device information. Modern drivers no @@ -765,6 +836,32 @@ struct ethtool_rxfh_param { * will remain unchanged. * Returns a negative error code or zero. An error code must be returned * if at least one unsupported change was requested. + * @create_rxfh_context: Create a new RSS context with the specified RX flow + * hash indirection table, hash key, and hash function. + * The &struct ethtool_rxfh_context for this context is passed in @ctx; + * note that the indir table, hkey and hfunc are not yet populated as + * of this call. The driver does not need to update these; the core + * will do so if this op succeeds. + * However, if @rxfh.indir is set to %NULL, the driver must update the + * indir table in @ctx with the (default or inherited) table actually in + * use; similarly, if @rxfh.key is %NULL, @rxfh.hfunc is + * %ETH_RSS_HASH_NO_CHANGE, or @rxfh.input_xfrm is %RXH_XFRM_NO_CHANGE, + * the driver should update the corresponding information in @ctx. + * If the driver provides this method, it must also provide + * @modify_rxfh_context and @remove_rxfh_context. + * Returns a negative error code or zero. + * @modify_rxfh_context: Reconfigure the specified RSS context. Allows setting + * the contents of the RX flow hash indirection table, hash key, and/or + * hash function associated with the given context. + * Parameters which are set to %NULL or zero will remain unchanged. 
+ * The &struct ethtool_rxfh_context for this context is passed in @ctx; + * note that it will still contain the *old* settings. The driver does + * not need to update these; the core will do so if this op succeeds. + * Returns a negative error code or zero. An error code must be returned + * if at least one unsupported change was requested. + * @remove_rxfh_context: Remove the specified RSS context. + * The &struct ethtool_rxfh_context for this context is passed in @ctx. + * Returns a negative error code or zero. * @get_channels: Get number of channels. * @set_channels: Set number of channels. Returns a negative error code or * zero. @@ -822,6 +919,8 @@ struct ethtool_rxfh_param { * @get_module_eeprom_by_page: Get a region of plug-in module EEPROM data from * specified page. Returns a negative error code or the amount of bytes * read. + * @set_module_eeprom_by_page: Write to a region of plug-in module EEPROM, + * from kernel space only. Returns a negative error code or zero. * @get_eth_phy_stats: Query some of the IEEE 802.3 PHY statistics. * @get_eth_mac_stats: Query some of the IEEE 802.3 MAC statistics. * @get_eth_ctrl_stats: Query some of the IEEE 802.3 MAC Ctrl statistics. @@ -852,6 +951,10 @@ struct ethtool_ops { u32 cap_link_lanes_supported:1; u32 cap_rss_ctx_supported:1; u32 cap_rss_sym_xor_supported:1; + u32 rxfh_indir_space; + u16 rxfh_key_space; + u16 rxfh_priv_size; + u32 rxfh_max_context_id; u32 supported_coalesce_params; u32 supported_ring_params; void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *); @@ -914,13 +1017,25 @@ struct ethtool_ops { int (*get_rxfh)(struct net_device *, struct ethtool_rxfh_param *); int (*set_rxfh)(struct net_device *, struct ethtool_rxfh_param *, struct netlink_ext_ack *extack); + int (*create_rxfh_context)(struct net_device *, + struct ethtool_rxfh_context *ctx, + const struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack); + int (*modify_rxfh_context)(struct net_device *, + struct ethtool_rxfh_context *ctx, + const struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack); + int (*remove_rxfh_context)(struct net_device *, + struct ethtool_rxfh_context *ctx, + u32 rss_context, + struct netlink_ext_ack *extack); void (*get_channels)(struct net_device *, struct ethtool_channels *); int (*set_channels)(struct net_device *, struct ethtool_channels *); int (*get_dump_flag)(struct net_device *, struct ethtool_dump *); int (*get_dump_data)(struct net_device *, struct ethtool_dump *, void *); int (*set_dump)(struct net_device *, struct ethtool_dump *); - int (*get_ts_info)(struct net_device *, struct ethtool_ts_info *); + int (*get_ts_info)(struct net_device *, struct kernel_ethtool_ts_info *); void (*get_ts_stats)(struct net_device *dev, struct ethtool_ts_stats *ts_stats); int (*get_module_info)(struct net_device *, @@ -956,6 +1071,9 @@ struct ethtool_ops { int (*get_module_eeprom_by_page)(struct net_device *dev, const struct ethtool_module_eeprom *page, struct netlink_ext_ack *extack); + int (*set_module_eeprom_by_page)(struct net_device *dev, + const struct ethtool_module_eeprom *page, + struct netlink_ext_ack *extack); void (*get_eth_phy_stats)(struct net_device *dev, struct ethtool_eth_phy_stats *phy_stats); void (*get_eth_mac_stats)(struct net_device *dev, @@ -998,6 +1116,21 @@ int ethtool_virtdev_set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *cmd, u32 *dev_speed, u8 *dev_duplex); +/** + * struct ethtool_netdev_state - per-netdevice state for ethtool features + * @rss_ctx: XArray 
of custom RSS contexts + * @rss_lock: Protects entries in @rss_ctx. May be taken from + * within RTNL. + * @wol_enabled: Wake-on-LAN is enabled + * @module_fw_flash_in_progress: Module firmware flashing is in progress. + */ +struct ethtool_netdev_state { + struct xarray rss_ctx; + struct mutex rss_lock; + unsigned wol_enabled:1; + unsigned module_fw_flash_in_progress:1; +}; + struct phy_device; struct phy_tdr_config; struct phy_plca_cfg; @@ -1063,7 +1196,8 @@ int ethtool_get_phc_vclocks(struct net_device *dev, int **vclock_index); /* Some generic methods drivers may use in their ethtool_ops */ u32 ethtool_op_get_link(struct net_device *dev); -int ethtool_op_get_ts_info(struct net_device *dev, struct ethtool_ts_info *eti); +int ethtool_op_get_ts_info(struct net_device *dev, + struct kernel_ethtool_ts_info *eti); /** * ethtool_mm_frag_size_add_to_min - Translate (standard) additional fragment @@ -1112,7 +1246,8 @@ static inline int ethtool_mm_frag_size_min_to_add(u32 val_min, u32 *val_add, * @info: buffer to hold the result * Returns zero on success, non-zero otherwise. */ -int ethtool_get_ts_info_by_layer(struct net_device *dev, struct ethtool_ts_info *info); +int ethtool_get_ts_info_by_layer(struct net_device *dev, + struct kernel_ethtool_ts_info *info); /** * ethtool_sprintf - Write formatted string to ethtool string data @@ -1155,4 +1290,24 @@ struct ethtool_forced_speed_map { void ethtool_forced_speed_maps_init(struct ethtool_forced_speed_map *maps, u32 size); + +/* C33 PSE extended state and substate. */ +struct ethtool_c33_pse_ext_state_info { + enum ethtool_c33_pse_ext_state c33_pse_ext_state; + union { + enum ethtool_c33_pse_ext_substate_error_condition error_condition; + enum ethtool_c33_pse_ext_substate_mr_pse_enable mr_pse_enable; + enum ethtool_c33_pse_ext_substate_option_detect_ted option_detect_ted; + enum ethtool_c33_pse_ext_substate_option_vport_lim option_vport_lim; + enum ethtool_c33_pse_ext_substate_ovld_detected ovld_detected; + enum ethtool_c33_pse_ext_substate_power_not_available power_not_available; + enum ethtool_c33_pse_ext_substate_short_detected short_detected; + u32 __c33_pse_ext_substate; + }; +}; + +struct ethtool_c33_pse_pw_limit_range { + u32 min; + u32 max; +}; #endif /* _LINUX_ETHTOOL_H */ diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h index bb37ad5cc954..893a1d21dc1c 100644 --- a/include/linux/exportfs.h +++ b/include/linux/exportfs.h @@ -158,6 +158,7 @@ struct fid { #define EXPORT_FH_CONNECTABLE 0x1 /* Encode file handle with parent */ #define EXPORT_FH_FID 0x2 /* File handle may be non-decodeable */ +#define EXPORT_FH_DIR_ONLY 0x4 /* Only decode file handle for a directory */ /** * struct export_operations - for nfsd to communicate with file systems @@ -305,6 +306,7 @@ static inline int exportfs_encode_fid(struct inode *inode, struct fid *fid, extern struct dentry *exportfs_decode_fh_raw(struct vfsmount *mnt, struct fid *fid, int fh_len, int fileid_type, + unsigned int flags, int (*acceptable)(void *, struct dentry *), void *context); extern struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid, diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h index 6d5edef09d45..354413950d34 100644 --- a/include/linux/fault-inject.h +++ b/include/linux/fault-inject.h @@ -91,22 +91,19 @@ static inline void fault_config_init(struct fault_config *config, struct kmem_cache; -bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order); - #ifdef CONFIG_FAIL_PAGE_ALLOC -bool __should_fail_alloc_page(gfp_t 
gfp_mask, unsigned int order); +bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order); #else -static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) +static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) { return false; } #endif /* CONFIG_FAIL_PAGE_ALLOC */ -int should_failslab(struct kmem_cache *s, gfp_t gfpflags); #ifdef CONFIG_FAILSLAB -extern bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags); +int should_failslab(struct kmem_cache *s, gfp_t gfpflags); #else -static inline bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags) +static inline int should_failslab(struct kmem_cache *s, gfp_t gfpflags) { return false; } diff --git a/include/linux/fb.h b/include/linux/fb.h index 0f0834939b6a..db7d97b10964 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -225,6 +225,7 @@ struct fb_deferred_io { struct mutex lock; /* mutex that protects the pageref list */ struct list_head pagereflist; /* list of pagerefs for touched pages */ /* callback */ + struct page *(*get_page)(struct fb_info *info, unsigned long offset); void (*deferred_io)(struct fb_info *info, struct list_head *pagelist); }; #endif diff --git a/include/linux/file.h b/include/linux/file.h index 45d0f4800abd..237931f20739 100644 --- a/include/linux/file.h +++ b/include/linux/file.h @@ -97,6 +97,26 @@ extern void put_unused_fd(unsigned int fd); DEFINE_CLASS(get_unused_fd, int, if (_T >= 0) put_unused_fd(_T), get_unused_fd_flags(flags), unsigned flags) +/* + * take_fd() will take care to set @fd to -EBADF ensuring that + * CLASS(get_unused_fd) won't call put_unused_fd(). This makes it + * easier to rely on CLASS(get_unused_fd): + * + * struct file *f; + * + * CLASS(get_unused_fd, fd)(O_CLOEXEC); + * if (fd < 0) + * return fd; + * + * f = dentry_open(&path, O_RDONLY, current_cred()); + * if (IS_ERR(f)) + * return PTR_ERR(f); + * + * fd_install(fd, f); + * return take_fd(fd); + */ +#define take_fd(fd) __get_and_null(fd, -EBADF) + extern void fd_install(unsigned int fd, struct file *file); int receive_fd(struct file *file, int __user *ufd, unsigned int o_flags); diff --git a/include/linux/filter.h b/include/linux/filter.h index 5669da513cd7..b6672ff61407 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -733,21 +733,128 @@ struct bpf_nh_params { }; }; +/* flags for bpf_redirect_info kern_flags */ +#define BPF_RI_F_RF_NO_DIRECT BIT(0) /* no napi_direct on return_frame */ +#define BPF_RI_F_RI_INIT BIT(1) +#define BPF_RI_F_CPU_MAP_INIT BIT(2) +#define BPF_RI_F_DEV_MAP_INIT BIT(3) +#define BPF_RI_F_XSK_MAP_INIT BIT(4) + struct bpf_redirect_info { u64 tgt_index; void *tgt_value; struct bpf_map *map; u32 flags; - u32 kern_flags; u32 map_id; enum bpf_map_type map_type; struct bpf_nh_params nh; + u32 kern_flags; }; -DECLARE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info); +struct bpf_net_context { + struct bpf_redirect_info ri; + struct list_head cpu_map_flush_list; + struct list_head dev_map_flush_list; + struct list_head xskmap_map_flush_list; +}; -/* flags for bpf_redirect_info kern_flags */ -#define BPF_RI_F_RF_NO_DIRECT BIT(0) /* no napi_direct on return_frame */ +static inline struct bpf_net_context *bpf_net_ctx_set(struct bpf_net_context *bpf_net_ctx) +{ + struct task_struct *tsk = current; + + if (tsk->bpf_net_context != NULL) + return NULL; + bpf_net_ctx->ri.kern_flags = 0; + + tsk->bpf_net_context = bpf_net_ctx; + return bpf_net_ctx; +} + +static inline void bpf_net_ctx_clear(struct bpf_net_context *bpf_net_ctx) +{ + if
(bpf_net_ctx) + current->bpf_net_context = NULL; +} + +static inline struct bpf_net_context *bpf_net_ctx_get(void) +{ + return current->bpf_net_context; +} + +static inline struct bpf_redirect_info *bpf_net_ctx_get_ri(void) +{ + struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get(); + + if (!(bpf_net_ctx->ri.kern_flags & BPF_RI_F_RI_INIT)) { + memset(&bpf_net_ctx->ri, 0, offsetof(struct bpf_net_context, ri.nh)); + bpf_net_ctx->ri.kern_flags |= BPF_RI_F_RI_INIT; + } + + return &bpf_net_ctx->ri; +} + +static inline struct list_head *bpf_net_ctx_get_cpu_map_flush_list(void) +{ + struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get(); + + if (!(bpf_net_ctx->ri.kern_flags & BPF_RI_F_CPU_MAP_INIT)) { + INIT_LIST_HEAD(&bpf_net_ctx->cpu_map_flush_list); + bpf_net_ctx->ri.kern_flags |= BPF_RI_F_CPU_MAP_INIT; + } + + return &bpf_net_ctx->cpu_map_flush_list; +} + +static inline struct list_head *bpf_net_ctx_get_dev_flush_list(void) +{ + struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get(); + + if (!(bpf_net_ctx->ri.kern_flags & BPF_RI_F_DEV_MAP_INIT)) { + INIT_LIST_HEAD(&bpf_net_ctx->dev_map_flush_list); + bpf_net_ctx->ri.kern_flags |= BPF_RI_F_DEV_MAP_INIT; + } + + return &bpf_net_ctx->dev_map_flush_list; +} + +static inline struct list_head *bpf_net_ctx_get_xskmap_flush_list(void) +{ + struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get(); + + if (!(bpf_net_ctx->ri.kern_flags & BPF_RI_F_XSK_MAP_INIT)) { + INIT_LIST_HEAD(&bpf_net_ctx->xskmap_map_flush_list); + bpf_net_ctx->ri.kern_flags |= BPF_RI_F_XSK_MAP_INIT; + } + + return &bpf_net_ctx->xskmap_map_flush_list; +} + +static inline void bpf_net_ctx_get_all_used_flush_lists(struct list_head **lh_map, + struct list_head **lh_dev, + struct list_head **lh_xsk) +{ + struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get(); + u32 kern_flags = bpf_net_ctx->ri.kern_flags; + struct list_head *lh; + + *lh_map = *lh_dev = *lh_xsk = NULL; + + if (!IS_ENABLED(CONFIG_BPF_SYSCALL)) + return; + + lh = &bpf_net_ctx->dev_map_flush_list; + if (kern_flags & BPF_RI_F_DEV_MAP_INIT && !list_empty(lh)) + *lh_dev = lh; + + lh = &bpf_net_ctx->cpu_map_flush_list; + if (kern_flags & BPF_RI_F_CPU_MAP_INIT && !list_empty(lh)) + *lh_map = lh; + + lh = &bpf_net_ctx->xskmap_map_flush_list; + if (IS_ENABLED(CONFIG_XDP_SOCKETS) && + kern_flags & BPF_RI_F_XSK_MAP_INIT && !list_empty(lh)) + *lh_xsk = lh; +} /* Compute the linear packet data range [data, data_end) which * will be accessed by various program types (cls_bpf, act_bpf, @@ -1018,25 +1125,23 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, const struct bpf_insn *patch, u32 len); int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt); -void bpf_clear_redirect_map(struct bpf_map *map); - static inline bool xdp_return_frame_no_direct(void) { - struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); + struct bpf_redirect_info *ri = bpf_net_ctx_get_ri(); return ri->kern_flags & BPF_RI_F_RF_NO_DIRECT; } static inline void xdp_set_return_frame_no_direct(void) { - struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); + struct bpf_redirect_info *ri = bpf_net_ctx_get_ri(); ri->kern_flags |= BPF_RI_F_RF_NO_DIRECT; } static inline void xdp_clear_return_frame_no_direct(void) { - struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); + struct bpf_redirect_info *ri = bpf_net_ctx_get_ri(); ri->kern_flags &= ~BPF_RI_F_RF_NO_DIRECT; } @@ -1129,8 +1234,7 @@ bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **ro_image, struct bpf_binary_header **rw_hdr, u8 **rw_image, 
bpf_jit_fill_hole_t bpf_fill_ill_insns); -int bpf_jit_binary_pack_finalize(struct bpf_prog *prog, - struct bpf_binary_header *ro_header, +int bpf_jit_binary_pack_finalize(struct bpf_binary_header *ro_header, struct bpf_binary_header *rw_header); void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header, struct bpf_binary_header *rw_header); @@ -1406,7 +1510,7 @@ struct bpf_sock_ops_kern { struct bpf_sysctl_kern { struct ctl_table_header *head; - struct ctl_table *table; + const struct ctl_table *table; void *cur_val; size_t cur_len; void *new_val; @@ -1592,7 +1696,7 @@ static __always_inline long __bpf_xdp_redirect_map(struct bpf_map *map, u64 inde u64 flags, const u64 flag_mask, void *lookup_elem(struct bpf_map *map, u32 key)) { - struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); + struct bpf_redirect_info *ri = bpf_net_ctx_get_ri(); const u64 action_mask = XDP_ABORTED | XDP_DROP | XDP_PASS | XDP_TX; /* Lower bits of the flags are used as return code on lookup failure */ diff --git a/include/linux/firmware/cirrus/cs_dsp.h b/include/linux/firmware/cirrus/cs_dsp.h index 82687e07a7c2..7cae703b3137 100644 --- a/include/linux/firmware/cirrus/cs_dsp.h +++ b/include/linux/firmware/cirrus/cs_dsp.h @@ -42,6 +42,16 @@ #define CS_DSP_ACKED_CTL_MIN_VALUE 0 #define CS_DSP_ACKED_CTL_MAX_VALUE 0xFFFFFF +/* + * Write sequence operation codes + */ +#define CS_DSP_WSEQ_FULL 0x00 +#define CS_DSP_WSEQ_ADDR8 0x02 +#define CS_DSP_WSEQ_L16 0x04 +#define CS_DSP_WSEQ_H16 0x05 +#define CS_DSP_WSEQ_UNLOCK 0xFD +#define CS_DSP_WSEQ_END 0xFF + /** * struct cs_dsp_region - Describes a logical memory region in DSP address space * @type: Memory region type @@ -167,7 +177,7 @@ struct cs_dsp { const struct cs_dsp_region *mem; int num_mems; - int fw_ver; + int wmfw_ver; bool booted; bool running; @@ -213,13 +223,13 @@ int cs_dsp_adsp2_init(struct cs_dsp *dsp); int cs_dsp_halo_init(struct cs_dsp *dsp); int cs_dsp_adsp1_power_up(struct cs_dsp *dsp, - const struct firmware *wmfw_firmware, char *wmfw_filename, - const struct firmware *coeff_firmware, char *coeff_filename, + const struct firmware *wmfw_firmware, const char *wmfw_filename, + const struct firmware *coeff_firmware, const char *coeff_filename, const char *fw_name); void cs_dsp_adsp1_power_down(struct cs_dsp *dsp); int cs_dsp_power_up(struct cs_dsp *dsp, - const struct firmware *wmfw_firmware, char *wmfw_filename, - const struct firmware *coeff_firmware, char *coeff_filename, + const struct firmware *wmfw_firmware, const char *wmfw_filename, + const struct firmware *coeff_firmware, const char *coeff_filename, const char *fw_name); void cs_dsp_power_down(struct cs_dsp *dsp); int cs_dsp_run(struct cs_dsp *dsp); @@ -259,6 +269,23 @@ struct cs_dsp_alg_region *cs_dsp_find_alg_region(struct cs_dsp *dsp, const char *cs_dsp_mem_region_name(unsigned int type); /** + * struct cs_dsp_wseq - Describes a write sequence + * @ctl: Write sequence cs_dsp control + * @ops: Operations contained within + */ +struct cs_dsp_wseq { + struct cs_dsp_coeff_ctl *ctl; + struct list_head ops; +}; + +int cs_dsp_wseq_init(struct cs_dsp *dsp, struct cs_dsp_wseq *wseqs, unsigned int num_wseqs); +int cs_dsp_wseq_write(struct cs_dsp *dsp, struct cs_dsp_wseq *wseq, u32 addr, u32 data, + u8 op_code, bool update); +int cs_dsp_wseq_multi_write(struct cs_dsp *dsp, struct cs_dsp_wseq *wseq, + const struct reg_sequence *reg_seq, int num_regs, + u8 op_code, bool update); + +/** * struct cs_dsp_chunk - Describes a buffer holding data formatted for the DSP * @data: Pointer to underlying 
buffer memory * @max: Pointer to end of the buffer memory diff --git a/include/linux/firmware/mediatek/mtk-adsp-ipc.h b/include/linux/firmware/mediatek/mtk-adsp-ipc.h index 5b1d16fa3f56..6e86799a7dc4 100644 --- a/include/linux/firmware/mediatek/mtk-adsp-ipc.h +++ b/include/linux/firmware/mediatek/mtk-adsp-ipc.h @@ -40,7 +40,7 @@ struct mtk_adsp_chan { struct mtk_adsp_ipc { struct mtk_adsp_chan chans[MTK_ADSP_MBOX_NUM]; struct device *dev; - struct mtk_adsp_ipc_ops *ops; + const struct mtk_adsp_ipc_ops *ops; void *private_data; }; diff --git a/include/linux/firmware/qcom/qcom_qseecom.h b/include/linux/firmware/qcom/qcom_qseecom.h index 366243ee9609..1dc5b3b50aa9 100644 --- a/include/linux/firmware/qcom/qcom_qseecom.h +++ b/include/linux/firmware/qcom/qcom_qseecom.h @@ -73,9 +73,9 @@ static inline void qseecom_dma_free(struct qseecom_client *client, size_t size, /** * qcom_qseecom_app_send() - Send to and receive data from a given QSEE app. * @client: The QSEECOM client associated with the target app. - * @req: DMA address of the request buffer sent to the app. + * @req: Request buffer sent to the app (must be TZ memory). * @req_size: Size of the request buffer. - * @rsp: DMA address of the response buffer, written to by the app. + * @rsp: Response buffer, written to by the app (must be TZ memory). * @rsp_size: Size of the response buffer. * * Sends a request to the QSEE app associated with the given client and read @@ -90,8 +90,8 @@ static inline void qseecom_dma_free(struct qseecom_client *client, size_t size, * Return: Zero on success, nonzero on failure. */ static inline int qcom_qseecom_app_send(struct qseecom_client *client, - dma_addr_t req, size_t req_size, - dma_addr_t rsp, size_t rsp_size) + void *req, size_t req_size, + void *rsp, size_t rsp_size) { return qcom_scm_qseecom_app_send(client->app_id, req, req_size, rsp, rsp_size); } diff --git a/include/linux/firmware/qcom/qcom_scm.h b/include/linux/firmware/qcom/qcom_scm.h index aaa19f93ac43..9f14976399ab 100644 --- a/include/linux/firmware/qcom/qcom_scm.h +++ b/include/linux/firmware/qcom/qcom_scm.h @@ -115,11 +115,40 @@ int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val, int qcom_scm_lmh_profile_change(u32 profile_id); bool qcom_scm_lmh_dcvsh_available(void); +/* + * Request TZ to program set of access controlled registers necessary + * irrespective of any features + */ +#define QCOM_SCM_GPU_ALWAYS_EN_REQ BIT(0) +/* + * Request TZ to program BCL id to access controlled register when BCL is + * enabled + */ +#define QCOM_SCM_GPU_BCL_EN_REQ BIT(1) +/* + * Request TZ to program set of access controlled register for CLX feature + * when enabled + */ +#define QCOM_SCM_GPU_CLX_EN_REQ BIT(2) +/* + * Request TZ to program tsense ids to access controlled registers for reading + * gpu temperature sensors + */ +#define QCOM_SCM_GPU_TSENSE_EN_REQ BIT(3) + +int qcom_scm_gpu_init_regs(u32 gpu_req); + +int qcom_scm_shm_bridge_enable(void); +int qcom_scm_shm_bridge_create(struct device *dev, u64 pfn_and_ns_perm_flags, + u64 ipfn_and_s_perm_flags, u64 size_and_flags, + u64 ns_vmids, u64 *handle); +int qcom_scm_shm_bridge_delete(struct device *dev, u64 handle); + #ifdef CONFIG_QCOM_QSEECOM int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id); -int qcom_scm_qseecom_app_send(u32 app_id, dma_addr_t req, size_t req_size, - dma_addr_t rsp, size_t rsp_size); +int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size, + void *rsp, size_t rsp_size); #else /* CONFIG_QCOM_QSEECOM */ @@ -129,8 +158,8 @@ static 
inline int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id) } static inline int qcom_scm_qseecom_app_send(u32 app_id, - dma_addr_t req, size_t req_size, - dma_addr_t rsp, size_t rsp_size) + void *req, size_t req_size, + void *rsp, size_t rsp_size) { return -EINVAL; } diff --git a/include/linux/firmware/qcom/qcom_tzmem.h b/include/linux/firmware/qcom/qcom_tzmem.h new file mode 100644 index 000000000000..b83b63a0c049 --- /dev/null +++ b/include/linux/firmware/qcom/qcom_tzmem.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2023-2024 Linaro Ltd. + */ + +#ifndef __QCOM_TZMEM_H +#define __QCOM_TZMEM_H + +#include <linux/cleanup.h> +#include <linux/gfp.h> +#include <linux/types.h> + +struct device; +struct qcom_tzmem_pool; + +/** + * enum qcom_tzmem_policy - Policy for pool growth. + */ +enum qcom_tzmem_policy { + /**< Static pool, never grow above initial size. */ + QCOM_TZMEM_POLICY_STATIC = 1, + /**< When out of memory, add increment * current size of memory. */ + QCOM_TZMEM_POLICY_MULTIPLIER, + /**< When out of memory add as much as is needed until max_size. */ + QCOM_TZMEM_POLICY_ON_DEMAND, +}; + +/** + * struct qcom_tzmem_pool_config - TZ memory pool configuration. + * @initial_size: Number of bytes to allocate for the pool during its creation. + * @policy: Pool size growth policy. + * @increment: Used with policies that allow pool growth. + * @max_size: Size above which the pool will never grow. + */ +struct qcom_tzmem_pool_config { + size_t initial_size; + enum qcom_tzmem_policy policy; + size_t increment; + size_t max_size; +}; + +struct qcom_tzmem_pool * +qcom_tzmem_pool_new(const struct qcom_tzmem_pool_config *config); +void qcom_tzmem_pool_free(struct qcom_tzmem_pool *pool); +struct qcom_tzmem_pool * +devm_qcom_tzmem_pool_new(struct device *dev, + const struct qcom_tzmem_pool_config *config); + +void *qcom_tzmem_alloc(struct qcom_tzmem_pool *pool, size_t size, gfp_t gfp); +void qcom_tzmem_free(void *ptr); + +DEFINE_FREE(qcom_tzmem, void *, if (_T) qcom_tzmem_free(_T)) + +phys_addr_t qcom_tzmem_to_phys(void *ptr); + +#endif /* __QCOM_TZMEM */ diff --git a/include/linux/firmware/xlnx-event-manager.h b/include/linux/firmware/xlnx-event-manager.h index 82e8254b0f80..645dd34155e6 100644 --- a/include/linux/firmware/xlnx-event-manager.h +++ b/include/linux/firmware/xlnx-event-manager.h @@ -1,4 +1,9 @@ /* SPDX-License-Identifier: GPL-2.0 */ +/* + * Xilinx Event Management Driver + * + * Copyright (C) 2024, Advanced Micro Devices, Inc. 
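Stepping back to the qcom_tzmem interface above, a brief consumer sketch; the pool sizes, the growth policy, and the assumption that pool creation returns an ERR_PTR() on failure are illustrative:

#include <linux/err.h>
#include <linux/firmware/qcom/qcom_tzmem.h>
#include <linux/sizes.h>

static void *example_alloc_tz_buf(struct device *dev, size_t len)
{
	struct qcom_tzmem_pool_config config = {
		.initial_size = SZ_256K,
		.policy = QCOM_TZMEM_POLICY_ON_DEMAND,
		.max_size = SZ_1M,
	};
	struct qcom_tzmem_pool *pool;

	pool = devm_qcom_tzmem_pool_new(dev, &config);
	if (IS_ERR(pool))
		return NULL;

	/* Release with qcom_tzmem_free() or the DEFINE_FREE() class above. */
	return qcom_tzmem_alloc(pool, len, GFP_KERNEL);
}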
+ */ #ifndef _FIRMWARE_XLNX_EVENT_MANAGER_H_ #define _FIRMWARE_XLNX_EVENT_MANAGER_H_ @@ -7,6 +12,11 @@ #define CB_MAX_PAYLOAD_SIZE (4U) /*In payload maximum 32bytes */ +#define EVENT_SUBSYSTEM_RESTART (4U) + +#define PM_DEV_ACPU_0_0 (0x1810c0afU) +#define PM_DEV_ACPU_0 (0x1810c003U) + /************************** Exported Function *****************************/ typedef void (*event_cb_func_t)(const u32 *payload, void *data); diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h index 1a069a56c961..d7d07afc0532 100644 --- a/include/linux/firmware/xlnx-zynqmp.h +++ b/include/linux/firmware/xlnx-zynqmp.h @@ -52,6 +52,9 @@ #define API_ID_MASK GENMASK(7, 0) #define MODULE_ID_MASK GENMASK(11, 8) +/* Firmware feature check version mask */ +#define FIRMWARE_VERSION_MASK 0xFFFFU + /* ATF only commands */ #define TF_A_PM_REGISTER_SGI 0xa04 #define PM_GET_TRUSTZONE_VERSION 0xa03 diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h index 7e0f340bf363..0d99bf11d260 100644 --- a/include/linux/fortify-string.h +++ b/include/linux/fortify-string.h @@ -601,11 +601,7 @@ __FORTIFY_INLINE bool fortify_memcpy_chk(__kernel_size_t size, /* * Warn when writing beyond destination field size. * - * We must ignore p_size_field == 0 for existing 0-element - * fake flexible arrays, until they are all converted to - * proper flexible arrays. - * - * The implementation of __builtin_*object_size() behaves + * Note the implementation of __builtin_*object_size() behaves * like sizeof() when not directly referencing a flexible * array member, which means there will be many bounds checks * that will appear at run-time, without a way for them to be @@ -613,7 +609,7 @@ __FORTIFY_INLINE bool fortify_memcpy_chk(__kernel_size_t size, * is specifically the flexible array member). * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=101832 */ - if (p_size_field != 0 && p_size_field != SIZE_MAX && + if (p_size_field != SIZE_MAX && p_size != p_size_field && p_size_field < size) return true; diff --git a/include/linux/fs.h b/include/linux/fs.h index db26b4a70c62..fd34b5755c0b 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -664,9 +664,13 @@ struct inode { }; dev_t i_rdev; loff_t i_size; - struct timespec64 __i_atime; - struct timespec64 __i_mtime; - struct timespec64 __i_ctime; /* use inode_*_ctime accessors! 
*/ + time64_t i_atime_sec; + time64_t i_mtime_sec; + time64_t i_ctime_sec; + u32 i_atime_nsec; + u32 i_mtime_nsec; + u32 i_ctime_nsec; + u32 i_generation; spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */ unsigned short i_bytes; u8 i_blkbits; @@ -723,10 +727,10 @@ struct inode { unsigned i_dir_seq; }; - __u32 i_generation; #ifdef CONFIG_FSNOTIFY __u32 i_fsnotify_mask; /* all events this inode cares about */ + /* 32-bit hole reserved for expanding i_fsnotify_mask */ struct fsnotify_mark_connector __rcu *i_fsnotify_marks; #endif @@ -1542,23 +1546,27 @@ struct timespec64 inode_set_ctime_current(struct inode *inode); static inline time64_t inode_get_atime_sec(const struct inode *inode) { - return inode->__i_atime.tv_sec; + return inode->i_atime_sec; } static inline long inode_get_atime_nsec(const struct inode *inode) { - return inode->__i_atime.tv_nsec; + return inode->i_atime_nsec; } static inline struct timespec64 inode_get_atime(const struct inode *inode) { - return inode->__i_atime; + struct timespec64 ts = { .tv_sec = inode_get_atime_sec(inode), + .tv_nsec = inode_get_atime_nsec(inode) }; + + return ts; } static inline struct timespec64 inode_set_atime_to_ts(struct inode *inode, struct timespec64 ts) { - inode->__i_atime = ts; + inode->i_atime_sec = ts.tv_sec; + inode->i_atime_nsec = ts.tv_nsec; return ts; } @@ -1567,28 +1575,32 @@ static inline struct timespec64 inode_set_atime(struct inode *inode, { struct timespec64 ts = { .tv_sec = sec, .tv_nsec = nsec }; + return inode_set_atime_to_ts(inode, ts); } static inline time64_t inode_get_mtime_sec(const struct inode *inode) { - return inode->__i_mtime.tv_sec; + return inode->i_mtime_sec; } static inline long inode_get_mtime_nsec(const struct inode *inode) { - return inode->__i_mtime.tv_nsec; + return inode->i_mtime_nsec; } static inline struct timespec64 inode_get_mtime(const struct inode *inode) { - return inode->__i_mtime; + struct timespec64 ts = { .tv_sec = inode_get_mtime_sec(inode), + .tv_nsec = inode_get_mtime_nsec(inode) }; + return ts; } static inline struct timespec64 inode_set_mtime_to_ts(struct inode *inode, struct timespec64 ts) { - inode->__i_mtime = ts; + inode->i_mtime_sec = ts.tv_sec; + inode->i_mtime_nsec = ts.tv_nsec; return ts; } @@ -1602,23 +1614,27 @@ static inline struct timespec64 inode_set_mtime(struct inode *inode, static inline time64_t inode_get_ctime_sec(const struct inode *inode) { - return inode->__i_ctime.tv_sec; + return inode->i_ctime_sec; } static inline long inode_get_ctime_nsec(const struct inode *inode) { - return inode->__i_ctime.tv_nsec; + return inode->i_ctime_nsec; } static inline struct timespec64 inode_get_ctime(const struct inode *inode) { - return inode->__i_ctime; + struct timespec64 ts = { .tv_sec = inode_get_ctime_sec(inode), + .tv_nsec = inode_get_ctime_nsec(inode) }; + + return ts; } static inline struct timespec64 inode_set_ctime_to_ts(struct inode *inode, struct timespec64 ts) { - inode->__i_ctime = ts; + inode->i_ctime_sec = ts.tv_sec; + inode->i_ctime_nsec = ts.tv_nsec; return ts; } @@ -1930,6 +1946,8 @@ void inode_init_owner(struct mnt_idmap *idmap, struct inode *inode, extern bool may_open_dev(const struct path *path); umode_t mode_strip_sgid(struct mnt_idmap *idmap, const struct inode *dir, umode_t mode); +bool in_group_or_capable(struct mnt_idmap *idmap, + const struct inode *inode, vfsgid_t vfsgid); /* * This is the "filldir" function type, used by readdir() to let @@ -2689,7 +2707,7 @@ static inline struct file *file_clone_open(struct file *file) } extern int 
filp_close(struct file *, fl_owner_t id); -extern struct filename *getname_flags(const char __user *, int, int *); +extern struct filename *getname_flags(const char __user *, int); extern struct filename *getname_uflags(const char __user *, int); extern struct filename *getname(const char __user *); extern struct filename *getname_kernel(const char *); @@ -3033,7 +3051,12 @@ extern struct inode *inode_insert5(struct inode *inode, unsigned long hashval, int (*test)(struct inode *, void *), int (*set)(struct inode *, void *), void *data); -extern struct inode * iget5_locked(struct super_block *, unsigned long, int (*test)(struct inode *, void *), int (*set)(struct inode *, void *), void *); +struct inode *iget5_locked(struct super_block *, unsigned long, + int (*test)(struct inode *, void *), + int (*set)(struct inode *, void *), void *); +struct inode *iget5_locked_rcu(struct super_block *, unsigned long, + int (*test)(struct inode *, void *), + int (*set)(struct inode *, void *), void *); extern struct inode * iget_locked(struct super_block *, unsigned long); extern struct inode *find_inode_nowait(struct super_block *, unsigned long, @@ -3358,6 +3381,10 @@ extern int generic_file_fsync(struct file *, loff_t, loff_t, int); extern int generic_check_addressable(unsigned, u64); extern void generic_set_sb_d_ops(struct super_block *sb); +extern int generic_ci_match(const struct inode *parent, + const struct qstr *name, + const struct qstr *folded_name, + const u8 *de_name, u32 de_name_len); static inline bool sb_has_encoding(const struct super_block *sb) { @@ -3450,20 +3477,6 @@ static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags, return 0; } -static inline ino_t parent_ino(struct dentry *dentry) -{ - ino_t res; - - /* - * Don't strictly need d_lock here? If the parent ino could change - * then surely we'd have a deeper race in the caller? - */ - spin_lock(&dentry->d_lock); - res = dentry->d_parent->d_inode->i_ino; - spin_unlock(&dentry->d_lock); - return res; -} - /* Transaction based IO helpers */ /* @@ -3588,7 +3601,7 @@ static inline bool dir_emit_dot(struct file *file, struct dir_context *ctx) static inline bool dir_emit_dotdot(struct file *file, struct dir_context *ctx) { return ctx->actor(ctx, "..", 2, ctx->pos, - parent_ino(file->f_path.dentry), DT_DIR); + d_parent_ino(file->f_path.dentry), DT_DIR); } static inline bool dir_emit_dots(struct file *file, struct dir_context *ctx) { @@ -3627,6 +3640,23 @@ extern int vfs_fadvise(struct file *file, loff_t offset, loff_t len, extern int generic_fadvise(struct file *file, loff_t offset, loff_t len, int advice); +static inline bool vfs_empty_path(int dfd, const char __user *path) +{ + char c; + + if (dfd < 0) + return false; + + /* We now allow NULL to be used for empty path. */ + if (!path) + return true; + + if (unlikely(get_user(c, path))) + return false; + + return !c; +} + bool generic_atomic_write_valid(struct iov_iter *iter, loff_t pos); #endif /* _LINUX_FS_H */ diff --git a/include/linux/fs_parser.h b/include/linux/fs_parser.h index d3350979115f..6cf713a7e6c6 100644 --- a/include/linux/fs_parser.h +++ b/include/linux/fs_parser.h @@ -28,7 +28,7 @@ typedef int fs_param_type(struct p_log *, */ fs_param_type fs_param_is_bool, fs_param_is_u32, fs_param_is_s32, fs_param_is_u64, fs_param_is_enum, fs_param_is_string, fs_param_is_blob, fs_param_is_blockdev, - fs_param_is_path, fs_param_is_fd; + fs_param_is_path, fs_param_is_fd, fs_param_is_uid, fs_param_is_gid; /* * Specification of the type of value a parameter wants. 
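
Together with the fs_parse_result fields and the fsparam_uid()/fsparam_gid()
macros added in the hunks below, a filesystem can accept uid=/gid= mount
options without hand-rolling the parsing and idmapping. A minimal sketch
(the foofs names are hypothetical):

	enum { Opt_uid, Opt_gid };

	static const struct fs_parameter_spec foofs_fs_parameters[] = {
		fsparam_uid("uid", Opt_uid),
		fsparam_gid("gid", Opt_gid),
		{}
	};

	static int foofs_parse_param(struct fs_context *fc,
				     struct fs_parameter *param)
	{
		struct fs_parse_result result;
		int opt;

		opt = fs_parse(fc, foofs_fs_parameters, param, &result);
		if (opt < 0)
			return opt;

		switch (opt) {
		case Opt_uid:
			/* result.uid already holds the parsed kuid_t */
			break;
		case Opt_gid:
			/* likewise, result.gid holds a kgid_t */
			break;
		}
		return 0;
	}
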
@@ -57,6 +57,8 @@ struct fs_parse_result { int int_32; /* For spec_s32/spec_enum */ unsigned int uint_32; /* For spec_u32{,_octal,_hex}/spec_enum */ u64 uint_64; /* For spec_u64 */ + kuid_t uid; + kgid_t gid; }; }; @@ -131,6 +133,8 @@ static inline bool fs_validate_description(const char *name, #define fsparam_bdev(NAME, OPT) __fsparam(fs_param_is_blockdev, NAME, OPT, 0, NULL) #define fsparam_path(NAME, OPT) __fsparam(fs_param_is_path, NAME, OPT, 0, NULL) #define fsparam_fd(NAME, OPT) __fsparam(fs_param_is_fd, NAME, OPT, 0, NULL) +#define fsparam_uid(NAME, OPT) __fsparam(fs_param_is_uid, NAME, OPT, 0, NULL) +#define fsparam_gid(NAME, OPT) __fsparam(fs_param_is_gid, NAME, OPT, 0, NULL) /* String parameter that allows empty argument */ #define fsparam_string_empty(NAME, OPT) \ diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h index bdf7f3eddf0a..4c91a019972b 100644 --- a/include/linux/fscache-cache.h +++ b/include/linux/fscache-cache.h @@ -19,6 +19,7 @@ enum fscache_cache_trace; enum fscache_cookie_trace; enum fscache_access_trace; +enum fscache_volume_trace; enum fscache_cache_state { FSCACHE_CACHE_IS_NOT_PRESENT, /* No cache is present for this name */ @@ -97,6 +98,11 @@ extern void fscache_withdraw_cookie(struct fscache_cookie *cookie); extern void fscache_io_error(struct fscache_cache *cache); +extern struct fscache_volume * +fscache_try_get_volume(struct fscache_volume *volume, + enum fscache_volume_trace where); +extern void fscache_put_volume(struct fscache_volume *volume, + enum fscache_volume_trace where); extern void fscache_end_volume_access(struct fscache_volume *volume, struct fscache_cookie *cookie, enum fscache_access_trace why); diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index 4da80e92f804..278620e063ab 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h @@ -112,7 +112,13 @@ static inline int fsnotify_file(struct file *file, __u32 mask) { const struct path *path; - if (file->f_mode & FMODE_NONOTIFY) + /* + * FMODE_NONOTIFY are fds generated by fanotify itself which should not + * generate new events. We also don't want to generate events for + * FMODE_PATH fds (involves open & close events) as they are just + * handle creation / destruction events and not "real" file events. + */ + if (file->f_mode & (FMODE_NONOTIFY | FMODE_PATH)) return 0; path = &file->f_path; diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 4dd6143db271..8be029bc50b1 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -594,12 +594,14 @@ static inline __u32 fsnotify_parent_needed_mask(__u32 mask) static inline int fsnotify_inode_watches_children(struct inode *inode) { + __u32 parent_mask = READ_ONCE(inode->i_fsnotify_mask); + /* FS_EVENT_ON_CHILD is set if the inode may care */ - if (!(inode->i_fsnotify_mask & FS_EVENT_ON_CHILD)) + if (!(parent_mask & FS_EVENT_ON_CHILD)) return 0; /* this inode might care about child events, does it care about the * specific set of events that can happen on a child? */ - return inode->i_fsnotify_mask & FS_EVENTS_POSS_ON_CHILD; + return parent_mask & FS_EVENTS_POSS_ON_CHILD; } /* @@ -613,7 +615,7 @@ static inline void fsnotify_update_flags(struct dentry *dentry) /* * Serialisation of setting PARENT_WATCHED on the dentries is provided * by d_lock. 
If inotify_inode_watched changes after we have taken - * d_lock, the following __fsnotify_update_child_dentry_flags call will + * d_lock, the following fsnotify_set_children_dentry_flags call will * find our entry, so it will spin until we complete here, and update * us with the new state. */ diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index b792274189a3..51575b76818e 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -227,6 +227,7 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops); * ftrace_enabled. * DIRECT - Used by the direct ftrace_ops helper for direct functions * (internal ftrace only, should not be used by others) + * SUBOP - Is controlled by another op in field managed. */ enum { FTRACE_OPS_FL_ENABLED = BIT(0), @@ -247,6 +248,7 @@ enum { FTRACE_OPS_FL_TRACE_ARRAY = BIT(15), FTRACE_OPS_FL_PERMANENT = BIT(16), FTRACE_OPS_FL_DIRECT = BIT(17), + FTRACE_OPS_FL_SUBOP = BIT(18), }; #ifndef CONFIG_DYNAMIC_FTRACE_WITH_ARGS @@ -334,7 +336,9 @@ struct ftrace_ops { unsigned long trampoline; unsigned long trampoline_size; struct list_head list; + struct list_head subop_list; ftrace_ops_func_t ops_func; + struct ftrace_ops *managed; #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS unsigned long direct_call; #endif @@ -509,6 +513,15 @@ static inline void stack_tracer_disable(void) { } static inline void stack_tracer_enable(void) { } #endif +enum { + FTRACE_UPDATE_CALLS = (1 << 0), + FTRACE_DISABLE_CALLS = (1 << 1), + FTRACE_UPDATE_TRACE_FUNC = (1 << 2), + FTRACE_START_FUNC_RET = (1 << 3), + FTRACE_STOP_FUNC_RET = (1 << 4), + FTRACE_MAY_SLEEP = (1 << 5), +}; + #ifdef CONFIG_DYNAMIC_FTRACE void ftrace_arch_code_modify_prepare(void); @@ -603,15 +616,6 @@ void ftrace_set_global_notrace(unsigned char *buf, int len, int reset); void ftrace_free_filter(struct ftrace_ops *ops); void ftrace_ops_set_global_filter(struct ftrace_ops *ops); -enum { - FTRACE_UPDATE_CALLS = (1 << 0), - FTRACE_DISABLE_CALLS = (1 << 1), - FTRACE_UPDATE_TRACE_FUNC = (1 << 2), - FTRACE_START_FUNC_RET = (1 << 3), - FTRACE_STOP_FUNC_RET = (1 << 4), - FTRACE_MAY_SLEEP = (1 << 5), -}; - /* * The FTRACE_UPDATE_* enum is used to pass information back * from the ftrace_update_record() and ftrace_test_record() @@ -1027,19 +1031,31 @@ struct ftrace_graph_ret { unsigned long long rettime; } __packed; +struct fgraph_ops; + /* Type of the callback handlers for tracing function graph*/ -typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */ -typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */ +typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *, + struct fgraph_ops *); /* return */ +typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *, + struct fgraph_ops *); /* entry */ -extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace); +extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace, struct fgraph_ops *gops); +bool ftrace_pids_enabled(struct ftrace_ops *ops); #ifdef CONFIG_FUNCTION_GRAPH_TRACER struct fgraph_ops { trace_func_graph_ent_t entryfunc; trace_func_graph_ret_t retfunc; + struct ftrace_ops ops; /* for the hash lists */ + void *private; + trace_func_graph_ent_t saved_func; + int idx; }; +void *fgraph_reserve_data(int idx, int size_bytes); +void *fgraph_retrieve_data(int idx, int *size_bytes); + /* * Stack of return addresses for functions * of a thread. 
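
Since entry and return handlers now receive the fgraph_ops that fired,
several graph tracers can be registered at once, each finding its own state.
A registration sketch under the new signatures (the my_* names are
hypothetical; register_ftrace_graph() is the existing entry point):

	static int my_entry(struct ftrace_graph_ent *trace,
			    struct fgraph_ops *gops)
	{
		/*
		 * Optional per-call storage shared with the return handler:
		 * fgraph_reserve_data(gops->idx, size) here pairs with
		 * fgraph_retrieve_data(gops->idx, &size) in my_return().
		 */
		return 1;	/* non-zero: also trace this function's return */
	}

	static void my_return(struct ftrace_graph_ret *trace,
			      struct fgraph_ops *gops)
	{
	}

	static struct fgraph_ops my_gops = {
		.entryfunc	= my_entry,
		.retfunc	= my_return,
	};

	/* in module init: register_ftrace_graph(&my_gops); */
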
@@ -1055,9 +1071,7 @@ struct ftrace_ret_stack { #ifdef HAVE_FUNCTION_GRAPH_FP_TEST unsigned long fp; #endif -#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR unsigned long *retp; -#endif }; /* @@ -1072,10 +1086,11 @@ function_graph_enter(unsigned long ret, unsigned long func, unsigned long frame_pointer, unsigned long *retp); struct ftrace_ret_stack * -ftrace_graph_get_ret_stack(struct task_struct *task, int idx); +ftrace_graph_get_ret_stack(struct task_struct *task, int skip); unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx, unsigned long ret, unsigned long *retp); +unsigned long *fgraph_get_task_var(struct fgraph_ops *gops); /* * Sometimes we don't want to trace a function with the function @@ -1114,6 +1129,9 @@ extern void ftrace_graph_init_task(struct task_struct *t); extern void ftrace_graph_exit_task(struct task_struct *t); extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu); +/* Used by assembly, but to quiet sparse warnings */ +extern struct ftrace_ops *function_trace_op; + static inline void pause_graph_tracing(void) { atomic_inc(¤t->tracing_graph_pause); diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 7f9691d375f0..f53f76e0b17e 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -303,6 +303,8 @@ struct page *alloc_pages_noprof(gfp_t gfp, unsigned int order); struct page *alloc_pages_mpol_noprof(gfp_t gfp, unsigned int order, struct mempolicy *mpol, pgoff_t ilx, int nid); struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order); +struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order, + struct mempolicy *mpol, pgoff_t ilx, int nid); struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order, struct vm_area_struct *vma, unsigned long addr, bool hugepage); #else @@ -319,6 +321,11 @@ static inline struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order) { return __folio_alloc_node(gfp, order, numa_node_id()); } +static inline struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order, + struct mempolicy *mpol, pgoff_t ilx, int nid) +{ + return folio_alloc_noprof(gfp, order); +} #define vma_alloc_folio_noprof(gfp, order, vma, addr, hugepage) \ folio_alloc_noprof(gfp, order) #endif @@ -326,6 +333,7 @@ static inline struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order) #define alloc_pages(...) alloc_hooks(alloc_pages_noprof(__VA_ARGS__)) #define alloc_pages_mpol(...) alloc_hooks(alloc_pages_mpol_noprof(__VA_ARGS__)) #define folio_alloc(...) alloc_hooks(folio_alloc_noprof(__VA_ARGS__)) +#define folio_alloc_mpol(...) alloc_hooks(folio_alloc_mpol_noprof(__VA_ARGS__)) #define vma_alloc_folio(...) alloc_hooks(vma_alloc_folio_noprof(__VA_ARGS__)) #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0) diff --git a/include/linux/gpio.h b/include/linux/gpio.h index 56ac7e7a2889..063f71b18a7c 100644 --- a/include/linux/gpio.h +++ b/include/linux/gpio.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * <linux/gpio.h> + * NOTE: This header *must not* be included. * * This is the LEGACY GPIO bulk include file, including legacy APIs. 
It is * used for GPIO drivers still referencing the global GPIO numberspace, @@ -16,8 +16,6 @@ struct device; -/* see Documentation/driver-api/gpio/legacy.rst */ - /* make these flag values available regardless of GPIO kconfig options */ #define GPIOF_DIR_OUT (0 << 0) #define GPIOF_DIR_IN (1 << 0) @@ -121,8 +119,6 @@ static inline int gpio_to_irq(unsigned gpio) int gpio_request_one(unsigned gpio, unsigned long flags, const char *label); -/* CONFIG_GPIOLIB: bindings for managed devices that want to request gpios */ - int devm_gpio_request(struct device *dev, unsigned gpio, const char *label); int devm_gpio_request_one(struct device *dev, unsigned gpio, unsigned long flags, const char *label); diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index 0032bb6e7d8f..2dd7cb9cc270 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -632,10 +632,6 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data, devm_gpiochip_add_data_with_key(dev, gc, data, NULL, NULL) #endif /* CONFIG_LOCKDEP */ -static inline int gpiochip_add(struct gpio_chip *gc) -{ - return gpiochip_add_data(gc, NULL); -} void gpiochip_remove(struct gpio_chip *gc); int devm_gpiochip_add_data_with_key(struct device *dev, struct gpio_chip *gc, void *data, struct lock_class_key *lock_key, @@ -791,7 +787,6 @@ struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *gc, enum gpiod_flags dflags); void gpiochip_free_own_desc(struct gpio_desc *desc); -struct gpio_desc *gpiochip_get_desc(struct gpio_chip *gc, unsigned int hwnum); struct gpio_desc * gpio_device_get_desc(struct gpio_device *gdev, unsigned int hwnum); diff --git a/include/linux/habanalabs/cpucp_if.h b/include/linux/habanalabs/cpucp_if.h index f316c8d0f3fc..7ed3fdd55dda 100644 --- a/include/linux/habanalabs/cpucp_if.h +++ b/include/linux/habanalabs/cpucp_if.h @@ -42,6 +42,12 @@ enum eq_event_id { EQ_EVENT_PWR_BRK_ENTRY, EQ_EVENT_PWR_BRK_EXIT, EQ_EVENT_HEARTBEAT, + EQ_EVENT_CPLD_RESET_REASON, + EQ_EVENT_CPLD_SHUTDOWN, + EQ_EVENT_POWER_EVT_START, + EQ_EVENT_POWER_EVT_END, + EQ_EVENT_THERMAL_EVT_START, + EQ_EVENT_THERMAL_EVT_END, }; /* @@ -391,6 +397,9 @@ struct hl_eq_entry { #define EQ_CTL_READY_SHIFT 31 #define EQ_CTL_READY_MASK 0x80000000 +#define EQ_CTL_EVENT_MODE_SHIFT 28 +#define EQ_CTL_EVENT_MODE_MASK 0x70000000 + #define EQ_CTL_EVENT_TYPE_SHIFT 16 #define EQ_CTL_EVENT_TYPE_MASK 0x0FFF0000 @@ -853,9 +862,6 @@ struct cpucp_packet { * result cannot be used to hold general purpose data. */ __le32 status_mask; - - /* random, used once number, for security packets */ - __le32 nonce; }; union { @@ -864,6 +870,9 @@ struct cpucp_packet { /* For Generic packet sub index */ __le32 pkt_subidx; + + /* random, used once number, for security packets */ + __le32 nonce; }; }; @@ -1140,6 +1149,7 @@ struct cpucp_security_info { * (0 = fully functional, 1 = lower-half is not functional, * 2 = upper-half is not functional) * @sec_info: security information + * @cpld_timestamp: CPLD programmed F/W timestamp. * @pll_map: Bit map of supported PLLs for current ASIC version. 
* @mme_binning_mask: MME binning mask, * bits [0:6] <==> dcore0 mme fma @@ -1165,7 +1175,7 @@ struct cpucp_security_info { struct cpucp_info { struct cpucp_sensor sensors[CPUCP_MAX_SENSORS]; __u8 kernel_version[VERSION_MAX_LEN]; - __le32 reserved; + __le32 reserved1; __le32 card_type; __le32 card_location; __le32 cpld_version; @@ -1187,7 +1197,7 @@ struct cpucp_info { __u8 substrate_version; __u8 eq_health_check_supported; struct cpucp_security_info sec_info; - __le32 fw_hbm_region_size; + __le32 cpld_timestamp; __u8 pll_map[PLL_MAP_LEN]; __le64 mme_binning_mask; __u8 fw_os_version[VERSION_MAX_LEN]; diff --git a/include/linux/habanalabs/hl_boot_if.h b/include/linux/habanalabs/hl_boot_if.h index 93366d5621fd..d2a9fc96424b 100644 --- a/include/linux/habanalabs/hl_boot_if.h +++ b/include/linux/habanalabs/hl_boot_if.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 * - * Copyright 2018-2020 HabanaLabs, Ltd. + * Copyright 2018-2023 HabanaLabs, Ltd. * All Rights Reserved. * */ @@ -49,7 +49,6 @@ enum cpu_boot_err { #define CPU_BOOT_ERR_FATAL_MASK \ ((1 << CPU_BOOT_ERR_DRAM_INIT_FAIL) | \ (1 << CPU_BOOT_ERR_PLL_FAIL) | \ - (1 << CPU_BOOT_ERR_DEVICE_UNUSABLE_FAIL) | \ (1 << CPU_BOOT_ERR_BINNING_FAIL) | \ (1 << CPU_BOOT_ERR_DRAM_SKIPPED) | \ (1 << CPU_BOOT_ERR_ENG_ARC_MEM_SCRUB_FAIL) | \ @@ -194,6 +193,8 @@ enum cpu_boot_dev_sts { CPU_BOOT_DEV_STS_FW_NIC_STAT_EXT_EN = 24, CPU_BOOT_DEV_STS_IS_IDLE_CHECK_EN = 25, CPU_BOOT_DEV_STS_MAP_HWMON_EN = 26, + CPU_BOOT_DEV_STS_NIC_MEM_CLEAR_EN = 27, + CPU_BOOT_DEV_STS_MMU_PGTBL_DRAM_EN = 28, CPU_BOOT_DEV_STS_ENABLED = 31, CPU_BOOT_DEV_STS_SCND_EN = 63, CPU_BOOT_DEV_STS_LAST = 64 /* we have 2 registers of 32 bits */ @@ -331,6 +332,17 @@ enum cpu_boot_dev_sts { * HWMON enum mapping to cpucp enums. * Initialized in: linux * + * CPU_BOOT_DEV_STS0_NIC_MEM_CLEAR_EN + * If set, means f/w supports nic hbm memory clear and + * tmr,txs hbm memory init. + * Initialized in: zephyr-mgmt + * + * CPU_BOOT_DEV_STS_MMU_PGTBL_DRAM_EN + * MMU page tables are located in DRAM. + * F/W initializes security settings for MMU + * page tables to reside in DRAM. + * Initialized in: zephyr-mgmt + * * CPU_BOOT_DEV_STS0_ENABLED Device status register enabled. 
* This is a main indication that the * running FW populates the device status @@ -367,6 +379,8 @@ enum cpu_boot_dev_sts { #define CPU_BOOT_DEV_STS0_FW_NIC_STAT_EXT_EN (1 << CPU_BOOT_DEV_STS_FW_NIC_STAT_EXT_EN) #define CPU_BOOT_DEV_STS0_IS_IDLE_CHECK_EN (1 << CPU_BOOT_DEV_STS_IS_IDLE_CHECK_EN) #define CPU_BOOT_DEV_STS0_MAP_HWMON_EN (1 << CPU_BOOT_DEV_STS_MAP_HWMON_EN) +#define CPU_BOOT_DEV_STS0_NIC_MEM_CLEAR_EN (1 << CPU_BOOT_DEV_STS_NIC_MEM_CLEAR_EN) +#define CPU_BOOT_DEV_STS0_MMU_PGTBL_DRAM_EN (1 << CPU_BOOT_DEV_STS_MMU_PGTBL_DRAM_EN) #define CPU_BOOT_DEV_STS0_ENABLED (1 << CPU_BOOT_DEV_STS_ENABLED) #define CPU_BOOT_DEV_STS1_ENABLED (1 << CPU_BOOT_DEV_STS_ENABLED) @@ -450,11 +464,11 @@ struct cpu_dyn_regs { __le32 gic_dma_core_irq_ctrl; __le32 gic_host_halt_irq; __le32 gic_host_ints_irq; - __le32 gic_host_soft_rst_irq; + __le32 reserved0; __le32 gic_rot_qm_irq_ctrl; - __le32 cpu_rst_status; + __le32 reserved1; __le32 eng_arc_irq_ctrl; - __le32 reserved1[20]; /* reserve for future use */ + __le32 reserved2[20]; /* reserve for future use */ }; /* TODO: remove the desc magic after the code is updated to use message */ @@ -551,8 +565,9 @@ enum lkd_fw_ascii_msg_lvls { LKD_FW_ASCII_MSG_DBG = 3, }; -#define LKD_FW_ASCII_MSG_MAX_LEN 128 -#define LKD_FW_ASCII_MSG_MAX 4 /* consider ABI when changing */ +#define LKD_FW_ASCII_MSG_MAX_LEN 128 +#define LKD_FW_ASCII_MSG_MAX 4 /* consider ABI when changing */ +#define LKD_FW_ASCII_MSG_MIN_DESC_VERSION 3 struct lkd_fw_ascii_msg { __u8 valid; diff --git a/include/linux/hid.h b/include/linux/hid.h index 8e06d89698e6..1533c9dcd3a6 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -1125,6 +1125,13 @@ int __must_check hid_hw_open(struct hid_device *hdev); void hid_hw_close(struct hid_device *hdev); void hid_hw_request(struct hid_device *hdev, struct hid_report *report, enum hid_class_request reqtype); +int __hid_hw_raw_request(struct hid_device *hdev, + unsigned char reportnum, __u8 *buf, + size_t len, enum hid_report_type rtype, + enum hid_class_request reqtype, + __u64 source, bool from_bpf); +int __hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len, __u64 source, + bool from_bpf); int hid_hw_raw_request(struct hid_device *hdev, unsigned char reportnum, __u8 *buf, size_t len, enum hid_report_type rtype, diff --git a/include/linux/hid_bpf.h b/include/linux/hid_bpf.h index eec2592dec12..d4d063cf63b5 100644 --- a/include/linux/hid_bpf.h +++ b/include/linux/hid_bpf.h @@ -4,7 +4,8 @@ #define __HID_BPF_H #include <linux/bpf.h> -#include <linux/spinlock.h> +#include <linux/mutex.h> +#include <linux/srcu.h> #include <uapi/linux/hid.h> struct hid_device; @@ -20,15 +21,9 @@ struct hid_device; * struct hid_bpf_ctx - User accessible data for all HID programs * * ``data`` is not directly accessible from the context. We need to issue - * a call to ``hid_bpf_get_data()`` in order to get a pointer to that field. + * a call to hid_bpf_get_data() in order to get a pointer to that field. * - * All of these fields are currently read-only. - * - * @index: program index in the jump table. No special meaning (a smaller index - * doesn't mean the program will be executed before another program with - * a bigger index). - * @hid: the ``struct hid_device`` representing the device itself - * @report_type: used for ``hid_bpf_device_event()`` + * @hid: the &struct hid_device representing the device itself * @allocated_size: Allocated size of data. 
 *
 * This is how much memory is available and can be requested
 * ``size`` must always be less or equal than ``allocated_size`` (it is enforced
 * once all BPF programs have been run).
 * @retval: Return value of the previous program.
+ *
+ * ``hid`` and ``allocated_size`` are read-only, ``size`` and ``retval`` are read-write.
 */
 struct hid_bpf_ctx {
-	__u32 index;
-	const struct hid_device *hid;
+	struct hid_device *hid;
 	__u32 allocated_size;
-	enum hid_report_type report_type;
 	union {
 		__s32 retval;
 		__s32 size;
 	};
 };

-/**
- * enum hid_bpf_attach_flags - flags used when attaching a HIF-BPF program
- *
- * @HID_BPF_FLAG_NONE: no specific flag is used, the kernel choses where to
- *                     insert the program
- * @HID_BPF_FLAG_INSERT_HEAD: insert the given program before any other program
- *                            currently attached to the device. This doesn't
- *                            guarantee that this program will always be first
- * @HID_BPF_FLAG_MAX: sentinel value, not to be used by the callers
- */
-enum hid_bpf_attach_flags {
-	HID_BPF_FLAG_NONE = 0,
-	HID_BPF_FLAG_INSERT_HEAD = _BITUL(0),
-	HID_BPF_FLAG_MAX,
-};
-
-/* Following functions are tracepoints that BPF programs can attach to */
-int hid_bpf_device_event(struct hid_bpf_ctx *ctx);
-int hid_bpf_rdesc_fixup(struct hid_bpf_ctx *ctx);
-
 /*
  * Below is HID internal
  */

-/* internal function to call eBPF programs, not to be used by anybody */
-int __hid_bpf_tail_call(struct hid_bpf_ctx *ctx);
-
 #define HID_BPF_MAX_PROGS_PER_DEV 64
 #define HID_BPF_FLAG_MASK (((HID_BPF_FLAG_MAX - 1) << 1) - 1)

-/* types of HID programs to attach to */
-enum hid_bpf_prog_type {
-	HID_BPF_PROG_TYPE_UNDEF = -1,
-	HID_BPF_PROG_TYPE_DEVICE_EVENT,		/* an event is emitted from the device */
-	HID_BPF_PROG_TYPE_RDESC_FIXUP,
-	HID_BPF_PROG_TYPE_MAX,
-};

 struct hid_report_enum;

-struct hid_bpf_ops {
+struct hid_ops {
 	struct hid_report *(*hid_get_report)(struct hid_report_enum *report_enum,
 					     const u8 *data);
 	int (*hid_hw_raw_request)(struct hid_device *hdev,
 				  unsigned char reportnum, __u8 *buf,
 				  size_t len, enum hid_report_type rtype,
-				  enum hid_class_request reqtype);
-	int (*hid_hw_output_report)(struct hid_device *hdev, __u8 *buf, size_t len);
+				  enum hid_class_request reqtype,
+				  u64 source, bool from_bpf);
+	int (*hid_hw_output_report)(struct hid_device *hdev, __u8 *buf, size_t len,
+				    u64 source, bool from_bpf);
 	int (*hid_input_report)(struct hid_device *hid, enum hid_report_type type,
-				u8 *data, u32 size, int interrupt);
+				u8 *data, u32 size, int interrupt, u64 source, bool from_bpf,
+				bool lock_already_taken);
 	struct module *owner;
 	const struct bus_type *bus_type;
 };

-extern struct hid_bpf_ops *hid_bpf_ops;
+extern struct hid_ops *hid_ops;

-struct hid_bpf_prog_list {
-	u16 prog_idx[HID_BPF_MAX_PROGS_PER_DEV];
-	u8 prog_cnt;
+/**
+ * struct hid_bpf_ops - A BPF struct_ops of callbacks allowing to attach HID-BPF
+ *			programs to a HID device
+ * @hid_id: the HID unique ID to attach to. This is writeable before ``load()``, and
+ *	    cannot be changed after
+ * @flags: flags used while attaching the struct_ops to the device. Currently the
+ *	   only available values are %0 and ``BPF_F_BEFORE``.
+ *	   Writeable only before ``load()``
+ */
+struct hid_bpf_ops {
+	/* hid_id needs to stay first so we can easily change it
+	 * from userspace.
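+	 * A loader typically sets it through the generated skeleton
+	 * before load, e.g. (libbpf sketch, names hypothetical):
+	 *   skel->struct_ops.my_ops->hid_id = hid_device_id;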
+	 */
+	int hid_id;
+	u32 flags;
+
+	/* private: do not show up in the docs */
+	struct list_head list;
+
+	/* public: rest should show up in the docs */
+
+	/**
+	 * @hid_device_event: called whenever an event is coming in from the device
+	 *
+	 * It has the following arguments:
+	 *
+	 * ``ctx``: The HID-BPF context as &struct hid_bpf_ctx
+	 *
+	 * Return: %0 on success and keep processing; a positive
+	 * value to change the size of the incoming buffer; a negative
+	 * error code to interrupt the processing of this event
+	 *
+	 * Context: Interrupt context.
+	 */
+	int (*hid_device_event)(struct hid_bpf_ctx *ctx, enum hid_report_type report_type,
+				u64 source);
+
+	/**
+	 * @hid_rdesc_fixup: called when the probe function parses the report descriptor
+	 * of the HID device
+	 *
+	 * It has the following arguments:
+	 *
+	 * ``ctx``: The HID-BPF context as &struct hid_bpf_ctx
+	 *
+	 * Return: %0 on success and keep processing; a positive
+	 * value to change the size of the incoming buffer; a negative
+	 * error code to interrupt the processing of this device
+	 */
+	int (*hid_rdesc_fixup)(struct hid_bpf_ctx *ctx);
+
+	/**
+	 * @hid_hw_request: called whenever a hid_hw_raw_request() call is emitted
+	 * on the HID device
+	 *
+	 * It has the following arguments:
+	 *
+	 * ``ctx``: The HID-BPF context as &struct hid_bpf_ctx
+	 *
+	 * ``reportnum``: the report number, as in hid_hw_raw_request()
+	 *
+	 * ``rtype``: the report type (``HID_INPUT_REPORT``, ``HID_FEATURE_REPORT``,
+	 * ``HID_OUTPUT_REPORT``)
+	 *
+	 * ``reqtype``: the request
+	 *
+	 * ``source``: a u64 referring to a unique but identifiable source. If %0, the
+	 * kernel itself emitted that call. For hidraw, ``source`` is set
+	 * to the associated ``struct file *``.
+	 *
+	 * Return: %0 to keep processing the request by hid-core; any other value
+	 * stops hid-core from processing that event. A positive value should be
+	 * returned with the number of bytes returned in the incoming buffer; a
+	 * negative error code interrupts the processing of this call.
+	 */
+	int (*hid_hw_request)(struct hid_bpf_ctx *ctx, unsigned char reportnum,
+			      enum hid_report_type rtype, enum hid_class_request reqtype,
+			      u64 source);
+
+	/**
+	 * @hid_hw_output_report: called whenever a hid_hw_output_report() call is emitted
+	 * on the HID device
+	 *
+	 * It has the following arguments:
+	 *
+	 * ``ctx``: The HID-BPF context as &struct hid_bpf_ctx
+	 *
+	 * ``source``: a u64 referring to a unique but identifiable source. If %0, the
+	 * kernel itself emitted that call. For hidraw, ``source`` is set
+	 * to the associated ``struct file *``.
+	 *
+	 * Return: %0 to keep processing the request by hid-core; any other value
+	 * stops hid-core from processing that event. A positive value should be
+	 * returned with the number of bytes written to the device; a negative error
+	 * code interrupts the processing of this call.
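+	 *
+	 * A BPF-side sketch of providing this callback, assuming libbpf
+	 * struct_ops conventions (program and map names hypothetical):
+	 *
+	 *	SEC("struct_ops/hid_hw_output_report")
+	 *	int BPF_PROG(my_output_report, struct hid_bpf_ctx *hctx, u64 source)
+	 *	{
+	 *		return 0;	// let hid-core process the call
+	 *	}
+	 *
+	 *	SEC(".struct_ops.link")
+	 *	struct hid_bpf_ops my_ops = {
+	 *		.hid_hw_output_report = (void *)my_output_report,
+	 *	};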
+ */ + int (*hid_hw_output_report)(struct hid_bpf_ctx *ctx, u64 source); + + + /* private: do not show up in the docs */ + struct hid_device *hdev; }; /* stored in each device */ @@ -124,34 +190,49 @@ struct hid_bpf { * to this HID device */ u32 allocated_data; - - struct hid_bpf_prog_list __rcu *progs[HID_BPF_PROG_TYPE_MAX]; /* attached BPF progs */ bool destroyed; /* prevents the assignment of any progs */ - spinlock_t progs_lock; /* protects RCU update of progs */ -}; - -/* specific HID-BPF link when a program is attached to a device */ -struct hid_bpf_link { - struct bpf_link link; - int hid_table_index; + struct hid_bpf_ops *rdesc_ops; + struct list_head prog_list; + struct mutex prog_list_lock; /* protects prog_list update */ + struct srcu_struct srcu; /* protects prog_list read-only access */ }; #ifdef CONFIG_HID_BPF u8 *dispatch_hid_bpf_device_event(struct hid_device *hid, enum hid_report_type type, u8 *data, - u32 *size, int interrupt); + u32 *size, int interrupt, u64 source, bool from_bpf); +int dispatch_hid_bpf_raw_requests(struct hid_device *hdev, + unsigned char reportnum, __u8 *buf, + u32 size, enum hid_report_type rtype, + enum hid_class_request reqtype, + u64 source, bool from_bpf); +int dispatch_hid_bpf_output_report(struct hid_device *hdev, __u8 *buf, u32 size, + u64 source, bool from_bpf); int hid_bpf_connect_device(struct hid_device *hdev); void hid_bpf_disconnect_device(struct hid_device *hdev); void hid_bpf_destroy_device(struct hid_device *hid); -void hid_bpf_device_init(struct hid_device *hid); +int hid_bpf_device_init(struct hid_device *hid); u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, u8 *rdesc, unsigned int *size); #else /* CONFIG_HID_BPF */ static inline u8 *dispatch_hid_bpf_device_event(struct hid_device *hid, enum hid_report_type type, - u8 *data, u32 *size, int interrupt) { return data; } + u8 *data, u32 *size, int interrupt, + u64 source, bool from_bpf) { return data; } +static inline int dispatch_hid_bpf_raw_requests(struct hid_device *hdev, + unsigned char reportnum, u8 *buf, + u32 size, enum hid_report_type rtype, + enum hid_class_request reqtype, + u64 source, bool from_bpf) { return 0; } +static inline int dispatch_hid_bpf_output_report(struct hid_device *hdev, __u8 *buf, u32 size, + u64 source, bool from_bpf) { return 0; } static inline int hid_bpf_connect_device(struct hid_device *hdev) { return 0; } static inline void hid_bpf_disconnect_device(struct hid_device *hdev) {} static inline void hid_bpf_destroy_device(struct hid_device *hid) {} -static inline void hid_bpf_device_init(struct hid_device *hid) {} +static inline int hid_bpf_device_init(struct hid_device *hid) { return 0; } +/* + * This specialized allocator has to be a macro for its allocations to be + * accounted separately (to have a separate alloc_tag). The typecast is + * intentional to enforce typesafety. 
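+ *
+ * With CONFIG_HID_BPF=n it therefore behaves exactly like a plain
+ * kmemdup() of the report descriptor, just with its own allocation tag.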
+ */ #define call_hid_bpf_rdesc_fixup(_hdev, _rdesc, _size) \ ((u8 *)kmemdup(_rdesc, *(_size), GFP_KERNEL)) diff --git a/include/linux/highmem-internal.h b/include/linux/highmem-internal.h index a3028e400a9c..dd100e849f5e 100644 --- a/include/linux/highmem-internal.h +++ b/include/linux/highmem-internal.h @@ -131,22 +131,17 @@ static inline void __kunmap_atomic(const void *addr) preempt_enable(); } -unsigned int __nr_free_highpages(void); -extern atomic_long_t _totalhigh_pages; +unsigned long __nr_free_highpages(void); +unsigned long __totalhigh_pages(void); -static inline unsigned int nr_free_highpages(void) +static inline unsigned long nr_free_highpages(void) { return __nr_free_highpages(); } static inline unsigned long totalhigh_pages(void) { - return (unsigned long)atomic_long_read(&_totalhigh_pages); -} - -static inline void totalhigh_pages_add(long count) -{ - atomic_long_add(count, &_totalhigh_pages); + return __totalhigh_pages(); } static inline bool is_kmap_addr(const void *x) @@ -239,8 +234,8 @@ static inline void __kunmap_atomic(const void *addr) preempt_enable(); } -static inline unsigned int nr_free_highpages(void) { return 0; } -static inline unsigned long totalhigh_pages(void) { return 0UL; } +static inline unsigned long nr_free_highpages(void) { return 0; } +static inline unsigned long totalhigh_pages(void) { return 0; } static inline bool is_kmap_addr(const void *x) { diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 00341b56d291..930a591b9b61 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -179,7 +179,7 @@ static inline void *kmap_local_folio(struct folio *folio, size_t offset); static inline void *kmap_atomic(struct page *page); /* Highmem related interfaces for management code */ -static inline unsigned int nr_free_highpages(void); +static inline unsigned long nr_free_highpages(void); static inline unsigned long totalhigh_pages(void); #ifndef ARCH_HAS_FLUSH_ANON_PAGE @@ -352,6 +352,9 @@ static inline int copy_mc_user_highpage(struct page *to, struct page *from, kunmap_local(vto); kunmap_local(vfrom); + if (ret) + memory_failure_queue(page_to_pfn(from), 0); + return ret; } @@ -368,6 +371,9 @@ static inline int copy_mc_highpage(struct page *to, struct page *from) kunmap_local(vto); kunmap_local(vfrom); + if (ret) + memory_failure_queue(page_to_pfn(from), 0); + return ret; } #else diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 2aa986a5cd1b..cff002be83eb 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -6,6 +6,7 @@ #include <linux/mm_types.h> #include <linux/fs.h> /* only for vma_is_dax() */ +#include <linux/kobject.h> vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf); int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, @@ -63,6 +64,7 @@ ssize_t single_hugepage_flag_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf, enum transparent_hugepage_flag flag); extern struct kobj_attribute shmem_enabled_attr; +extern struct kobj_attribute thpsize_shmem_enabled_attr; /* * Mask of all large folio orders supported for anonymous THP; all orders up to @@ -126,18 +128,6 @@ static inline bool hugepage_global_always(void) (1<<TRANSPARENT_HUGEPAGE_FLAG); } -static inline bool hugepage_flags_enabled(void) -{ - /* - * We cover both the anon and the file-backed case here; we must return - * true if globally enabled, even when all anon sizes are set to never. - * So we don't need to look at huge_anon_orders_inherit. 
- */ - return hugepage_global_enabled() || - huge_anon_orders_always || - huge_anon_orders_madvise; -} - static inline int highest_order(unsigned long orders) { return fls_long(orders) - 1; @@ -265,12 +255,26 @@ unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma, return __thp_vma_allowable_orders(vma, vm_flags, tva_flags, orders); } +struct thpsize { + struct kobject kobj; + struct list_head node; + int order; +}; + +#define to_thpsize(kobj) container_of(kobj, struct thpsize, kobj) + enum mthp_stat_item { MTHP_STAT_ANON_FAULT_ALLOC, MTHP_STAT_ANON_FAULT_FALLBACK, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE, MTHP_STAT_SWPOUT, MTHP_STAT_SWPOUT_FALLBACK, + MTHP_STAT_SHMEM_ALLOC, + MTHP_STAT_SHMEM_FALLBACK, + MTHP_STAT_SHMEM_FALLBACK_CHARGE, + MTHP_STAT_SPLIT, + MTHP_STAT_SPLIT_FAILED, + MTHP_STAT_SPLIT_DEFERRED, __MTHP_STAT_COUNT }; @@ -415,6 +419,11 @@ static inline bool thp_migration_supported(void) return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION); } +void split_huge_pmd_locked(struct vm_area_struct *vma, unsigned long address, + pmd_t *pmd, bool freeze, struct folio *folio); +bool unmap_huge_pmd_locked(struct vm_area_struct *vma, unsigned long addr, + pmd_t *pmdp, struct folio *folio); + #else /* CONFIG_TRANSPARENT_HUGEPAGE */ static inline bool folio_test_pmd_mappable(struct folio *folio) @@ -477,6 +486,16 @@ static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long address, bool freeze, struct folio *folio) {} static inline void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address, bool freeze, struct folio *folio) {} +static inline void split_huge_pmd_locked(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmd, + bool freeze, struct folio *folio) {} + +static inline bool unmap_huge_pmd_locked(struct vm_area_struct *vma, + unsigned long addr, pmd_t *pmdp, + struct folio *folio) +{ + return false; +} #define split_huge_pud(__vma, __pmd, __address) \ do { } while (0) @@ -550,6 +569,16 @@ static inline bool thp_migration_supported(void) { return false; } + +static inline int highest_order(unsigned long orders) +{ + return 0; +} + +static inline int next_order(unsigned long *orders, int prev) +{ + return 0; +} #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ static inline int split_folio_to_list_to_order(struct folio *folio, diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 2b3c3a404769..c9bf68c239a0 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -20,12 +20,6 @@ struct user_struct; struct mmu_gather; struct node; -#ifndef CONFIG_ARCH_HAS_HUGEPD -typedef struct { unsigned long pd; } hugepd_t; -#define is_hugepd(hugepd) (0) -#define __hugepd(x) ((hugepd_t) { (x) }) -#endif - void free_huge_folio(struct folio *folio); #ifdef CONFIG_HUGETLB_PAGE @@ -616,47 +610,35 @@ static __always_inline \ bool folio_test_hugetlb_##flname(struct folio *folio) \ { void *private = &folio->private; \ return test_bit(HPG_##flname, private); \ - } \ -static inline int HPage##uname(struct page *page) \ - { return test_bit(HPG_##flname, &(page->private)); } + } #define SETHPAGEFLAG(uname, flname) \ static __always_inline \ void folio_set_hugetlb_##flname(struct folio *folio) \ { void *private = &folio->private; \ set_bit(HPG_##flname, private); \ - } \ -static inline void SetHPage##uname(struct page *page) \ - { set_bit(HPG_##flname, &(page->private)); } + } #define CLEARHPAGEFLAG(uname, flname) \ static __always_inline \ void folio_clear_hugetlb_##flname(struct folio *folio) \ { void *private = 
&folio->private; \ clear_bit(HPG_##flname, private); \ - } \ -static inline void ClearHPage##uname(struct page *page) \ - { clear_bit(HPG_##flname, &(page->private)); } + } #else #define TESTHPAGEFLAG(uname, flname) \ static inline bool \ folio_test_hugetlb_##flname(struct folio *folio) \ - { return 0; } \ -static inline int HPage##uname(struct page *page) \ { return 0; } #define SETHPAGEFLAG(uname, flname) \ static inline void \ folio_set_hugetlb_##flname(struct folio *folio) \ - { } \ -static inline void SetHPage##uname(struct page *page) \ { } #define CLEARHPAGEFLAG(uname, flname) \ static inline void \ folio_clear_hugetlb_##flname(struct folio *folio) \ - { } \ -static inline void ClearHPage##uname(struct page *page) \ { } #endif @@ -681,6 +663,7 @@ HPAGEFLAG(RawHwpUnreliable, raw_hwp_unreliable) /* Defines one hugetlb page size */ struct hstate { struct mutex resize_lock; + struct lock_class_key resize_key; int next_nid_to_alloc; int next_nid_to_free; unsigned int order; @@ -698,11 +681,6 @@ struct hstate { unsigned int nr_huge_pages_node[MAX_NUMNODES]; unsigned int free_huge_pages_node[MAX_NUMNODES]; unsigned int surplus_huge_pages_node[MAX_NUMNODES]; -#ifdef CONFIG_CGROUP_HUGETLB - /* cgroup control files */ - struct cftype cgroup_files_dfl[8]; - struct cftype cgroup_files_legacy[10]; -#endif char name[HSTATE_NAME_LEN]; }; diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h index 136e9842120e..b424555753b1 100644 --- a/include/linux/hw_random.h +++ b/include/linux/hw_random.h @@ -13,9 +13,8 @@ #define LINUX_HWRANDOM_H_ #include <linux/completion.h> -#include <linux/types.h> -#include <linux/list.h> #include <linux/kref.h> +#include <linux/types.h> /** * struct hwrng - Hardware Random Number Generator driver diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h index edf96f249eb5..e94314760aab 100644 --- a/include/linux/hwmon.h +++ b/include/linux/hwmon.h @@ -45,6 +45,7 @@ enum hwmon_chip_attributes { hwmon_chip_power_samples, hwmon_chip_temp_samples, hwmon_chip_beep_enable, + hwmon_chip_pec, }; #define HWMON_C_TEMP_RESET_HISTORY BIT(hwmon_chip_temp_reset_history) @@ -60,6 +61,7 @@ enum hwmon_chip_attributes { #define HWMON_C_POWER_SAMPLES BIT(hwmon_chip_power_samples) #define HWMON_C_TEMP_SAMPLES BIT(hwmon_chip_temp_samples) #define HWMON_C_BEEP_ENABLE BIT(hwmon_chip_beep_enable) +#define HWMON_C_PEC BIT(hwmon_chip_pec) enum hwmon_temp_attributes { hwmon_temp_enable, diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 424acb98c7c2..e9cc14b1f9a1 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -513,10 +513,10 @@ i2c_register_board_info(int busnum, struct i2c_board_info const *info, /** * struct i2c_algorithm - represent I2C transfer method - * @master_xfer: Issue a set of i2c transactions to the given I2C adapter + * @xfer: Issue a set of i2c transactions to the given I2C adapter * defined by the msgs array, with num messages available to transfer via * the adapter specified by adap. - * @master_xfer_atomic: same as @master_xfer. Yet, only using atomic context + * @xfer_atomic: same as @xfer. Yet, only using atomic context * so e.g. PMICs can be accessed very late before shutdown. Optional. * @smbus_xfer: Issue smbus transactions to the given I2C adapter. If this * is not present, then the bus layer will try and convert the SMBus calls @@ -525,33 +525,47 @@ i2c_register_board_info(int busnum, struct i2c_board_info const *info, * so e.g. PMICs can be accessed very late before shutdown. Optional. 
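+ *
+ * A new bus driver fills in only the new callback names, for instance
+ * (sketch, foo_* names hypothetical):
+ *
+ *	static const struct i2c_algorithm foo_algo = {
+ *		.xfer		= foo_xfer,
+ *		.xfer_atomic	= foo_xfer_atomic,
+ *		.functionality	= foo_functionality,
+ *	};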
* @functionality: Return the flags that this algorithm/adapter pair supports * from the ``I2C_FUNC_*`` flags. - * @reg_slave: Register given client to I2C slave mode of this adapter - * @unreg_slave: Unregister given client from I2C slave mode of this adapter + * @reg_target: Register given client to local target mode of this adapter + * @unreg_target: Unregister given client from local target mode of this adapter + * + * @master_xfer: deprecated, use @xfer + * @master_xfer_atomic: deprecated, use @xfer_atomic + * @reg_slave: deprecated, use @reg_target + * @unreg_slave: deprecated, use @unreg_target + * * * The following structs are for those who like to implement new bus drivers: * i2c_algorithm is the interface to a class of hardware solutions which can * be addressed using the same bus algorithms - i.e. bit-banging or the PCF8584 * to name two of the most common. * - * The return codes from the ``master_xfer{_atomic}`` fields should indicate the + * The return codes from the ``xfer{_atomic}`` fields should indicate the * type of error code that occurred during the transfer, as documented in the * Kernel Documentation file Documentation/i2c/fault-codes.rst. Otherwise, the * number of messages executed should be returned. */ struct i2c_algorithm { /* - * If an adapter algorithm can't do I2C-level access, set master_xfer + * If an adapter algorithm can't do I2C-level access, set xfer * to NULL. If an adapter algorithm can do SMBus access, set * smbus_xfer. If set to NULL, the SMBus protocol is simulated * using common I2C messages. * - * master_xfer should return the number of messages successfully + * xfer should return the number of messages successfully * processed, or a negative value on error */ - int (*master_xfer)(struct i2c_adapter *adap, struct i2c_msg *msgs, - int num); - int (*master_xfer_atomic)(struct i2c_adapter *adap, + union { + int (*xfer)(struct i2c_adapter *adap, struct i2c_msg *msgs, + int num); + int (*master_xfer)(struct i2c_adapter *adap, struct i2c_msg *msgs, + int num); + }; + union { + int (*xfer_atomic)(struct i2c_adapter *adap, struct i2c_msg *msgs, int num); + int (*master_xfer_atomic)(struct i2c_adapter *adap, + struct i2c_msg *msgs, int num); + }; int (*smbus_xfer)(struct i2c_adapter *adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data *data); @@ -563,8 +577,14 @@ struct i2c_algorithm { u32 (*functionality)(struct i2c_adapter *adap); #if IS_ENABLED(CONFIG_I2C_SLAVE) - int (*reg_slave)(struct i2c_client *client); - int (*unreg_slave)(struct i2c_client *client); + union { + int (*reg_target)(struct i2c_client *client); + int (*reg_slave)(struct i2c_client *client); + }; + union { + int (*unreg_target)(struct i2c_client *client); + int (*unreg_slave)(struct i2c_client *client); + }; #endif }; diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index de2dce743ee2..30cef3b940eb 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -373,6 +373,7 @@ struct ieee80211_trigger { /** * ieee80211_has_tods - check if IEEE80211_FCTL_TODS is set * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame has to-DS set */ static inline bool ieee80211_has_tods(__le16 fc) { @@ -382,6 +383,7 @@ static inline bool ieee80211_has_tods(__le16 fc) /** * ieee80211_has_fromds - check if IEEE80211_FCTL_FROMDS is set * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame has from-DS set */ static inline bool ieee80211_has_fromds(__le16 fc) 
{ @@ -391,6 +393,7 @@ static inline bool ieee80211_has_fromds(__le16 fc) /** * ieee80211_has_a4 - check if IEEE80211_FCTL_TODS and IEEE80211_FCTL_FROMDS are set * @fc: frame control bytes in little-endian byteorder + * Return: whether or not it's a 4-address frame (from-DS and to-DS set) */ static inline bool ieee80211_has_a4(__le16 fc) { @@ -401,6 +404,7 @@ static inline bool ieee80211_has_a4(__le16 fc) /** * ieee80211_has_morefrags - check if IEEE80211_FCTL_MOREFRAGS is set * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame has more fragments (more frags bit set) */ static inline bool ieee80211_has_morefrags(__le16 fc) { @@ -410,6 +414,7 @@ static inline bool ieee80211_has_morefrags(__le16 fc) /** * ieee80211_has_retry - check if IEEE80211_FCTL_RETRY is set * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the retry flag is set */ static inline bool ieee80211_has_retry(__le16 fc) { @@ -419,6 +424,7 @@ static inline bool ieee80211_has_retry(__le16 fc) /** * ieee80211_has_pm - check if IEEE80211_FCTL_PM is set * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the power management flag is set */ static inline bool ieee80211_has_pm(__le16 fc) { @@ -428,6 +434,7 @@ static inline bool ieee80211_has_pm(__le16 fc) /** * ieee80211_has_moredata - check if IEEE80211_FCTL_MOREDATA is set * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the more data flag is set */ static inline bool ieee80211_has_moredata(__le16 fc) { @@ -437,6 +444,7 @@ static inline bool ieee80211_has_moredata(__le16 fc) /** * ieee80211_has_protected - check if IEEE80211_FCTL_PROTECTED is set * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the protected flag is set */ static inline bool ieee80211_has_protected(__le16 fc) { @@ -446,6 +454,7 @@ static inline bool ieee80211_has_protected(__le16 fc) /** * ieee80211_has_order - check if IEEE80211_FCTL_ORDER is set * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the order flag is set */ static inline bool ieee80211_has_order(__le16 fc) { @@ -455,6 +464,7 @@ static inline bool ieee80211_has_order(__le16 fc) /** * ieee80211_is_mgmt - check if type is IEEE80211_FTYPE_MGMT * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame type is management */ static inline bool ieee80211_is_mgmt(__le16 fc) { @@ -465,6 +475,7 @@ static inline bool ieee80211_is_mgmt(__le16 fc) /** * ieee80211_is_ctl - check if type is IEEE80211_FTYPE_CTL * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame type is control */ static inline bool ieee80211_is_ctl(__le16 fc) { @@ -475,6 +486,7 @@ static inline bool ieee80211_is_ctl(__le16 fc) /** * ieee80211_is_data - check if type is IEEE80211_FTYPE_DATA * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is a data frame */ static inline bool ieee80211_is_data(__le16 fc) { @@ -485,6 +497,7 @@ static inline bool ieee80211_is_data(__le16 fc) /** * ieee80211_is_ext - check if type is IEEE80211_FTYPE_EXT * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame type is extended */ static inline bool ieee80211_is_ext(__le16 fc) { @@ -496,6 +509,7 @@ static inline bool ieee80211_is_ext(__le16 fc) /** * ieee80211_is_data_qos - check if type is IEEE80211_FTYPE_DATA and IEEE80211_STYPE_QOS_DATA is set * @fc: frame control bytes in 
little-endian byteorder + * Return: whether or not the frame is a QoS data frame */ static inline bool ieee80211_is_data_qos(__le16 fc) { @@ -510,6 +524,8 @@ static inline bool ieee80211_is_data_qos(__le16 fc) /** * ieee80211_is_data_present - check if type is IEEE80211_FTYPE_DATA and has data * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is a QoS data frame that has data + * (i.e. is not null data) */ static inline bool ieee80211_is_data_present(__le16 fc) { @@ -524,6 +540,7 @@ static inline bool ieee80211_is_data_present(__le16 fc) /** * ieee80211_is_assoc_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ASSOC_REQ * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is an association request */ static inline bool ieee80211_is_assoc_req(__le16 fc) { @@ -534,6 +551,7 @@ static inline bool ieee80211_is_assoc_req(__le16 fc) /** * ieee80211_is_assoc_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ASSOC_RESP * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is an association response */ static inline bool ieee80211_is_assoc_resp(__le16 fc) { @@ -544,6 +562,7 @@ static inline bool ieee80211_is_assoc_resp(__le16 fc) /** * ieee80211_is_reassoc_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_REASSOC_REQ * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is a reassociation request */ static inline bool ieee80211_is_reassoc_req(__le16 fc) { @@ -554,6 +573,7 @@ static inline bool ieee80211_is_reassoc_req(__le16 fc) /** * ieee80211_is_reassoc_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_REASSOC_RESP * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is a reassociation response */ static inline bool ieee80211_is_reassoc_resp(__le16 fc) { @@ -564,6 +584,7 @@ static inline bool ieee80211_is_reassoc_resp(__le16 fc) /** * ieee80211_is_probe_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_PROBE_REQ * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is a probe request */ static inline bool ieee80211_is_probe_req(__le16 fc) { @@ -574,6 +595,7 @@ static inline bool ieee80211_is_probe_req(__le16 fc) /** * ieee80211_is_probe_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_PROBE_RESP * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is a probe response */ static inline bool ieee80211_is_probe_resp(__le16 fc) { @@ -584,6 +606,7 @@ static inline bool ieee80211_is_probe_resp(__le16 fc) /** * ieee80211_is_beacon - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_BEACON * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is a (regular, not S1G) beacon */ static inline bool ieee80211_is_beacon(__le16 fc) { @@ -595,6 +618,7 @@ static inline bool ieee80211_is_beacon(__le16 fc) * ieee80211_is_s1g_beacon - check if IEEE80211_FTYPE_EXT && * IEEE80211_STYPE_S1G_BEACON * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is an S1G beacon */ static inline bool ieee80211_is_s1g_beacon(__le16 fc) { @@ -604,30 +628,21 @@ static inline bool ieee80211_is_s1g_beacon(__le16 fc) } /** - * ieee80211_next_tbtt_present - check if IEEE80211_FTYPE_EXT && - * IEEE80211_STYPE_S1G_BEACON && IEEE80211_S1G_BCN_NEXT_TBTT - * @fc: frame control bytes in little-endian byteorder - */ -static inline bool ieee80211_next_tbtt_present(__le16 fc) -{ - 
return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == - cpu_to_le16(IEEE80211_FTYPE_EXT | IEEE80211_STYPE_S1G_BEACON) && - fc & cpu_to_le16(IEEE80211_S1G_BCN_NEXT_TBTT); -} - -/** - * ieee80211_is_s1g_short_beacon - check if next tbtt present bit is set. Only - * true for S1G beacons when they're short. + * ieee80211_is_s1g_short_beacon - check if frame is an S1G short beacon * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is an S1G short beacon, + * i.e. it is an S1G beacon with 'next TBTT' flag set */ static inline bool ieee80211_is_s1g_short_beacon(__le16 fc) { - return ieee80211_is_s1g_beacon(fc) && ieee80211_next_tbtt_present(fc); + return ieee80211_is_s1g_beacon(fc) && + (fc & cpu_to_le16(IEEE80211_S1G_BCN_NEXT_TBTT)); } /** * ieee80211_is_atim - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ATIM * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is an ATIM frame */ static inline bool ieee80211_is_atim(__le16 fc) { @@ -638,6 +653,7 @@ static inline bool ieee80211_is_atim(__le16 fc) /** * ieee80211_is_disassoc - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_DISASSOC * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is a disassociation frame */ static inline bool ieee80211_is_disassoc(__le16 fc) { @@ -648,6 +664,7 @@ static inline bool ieee80211_is_disassoc(__le16 fc) /** * ieee80211_is_auth - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_AUTH * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is an authentication frame */ static inline bool ieee80211_is_auth(__le16 fc) { @@ -658,6 +675,7 @@ static inline bool ieee80211_is_auth(__le16 fc) /** * ieee80211_is_deauth - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_DEAUTH * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is a deauthentication frame */ static inline bool ieee80211_is_deauth(__le16 fc) { @@ -668,6 +686,7 @@ static inline bool ieee80211_is_deauth(__le16 fc) /** * ieee80211_is_action - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ACTION * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is an action frame */ static inline bool ieee80211_is_action(__le16 fc) { @@ -678,6 +697,7 @@ static inline bool ieee80211_is_action(__le16 fc) /** * ieee80211_is_back_req - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_BACK_REQ * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is a block-ACK request frame */ static inline bool ieee80211_is_back_req(__le16 fc) { @@ -688,6 +708,7 @@ static inline bool ieee80211_is_back_req(__le16 fc) /** * ieee80211_is_back - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_BACK * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is a block-ACK frame */ static inline bool ieee80211_is_back(__le16 fc) { @@ -698,6 +719,7 @@ static inline bool ieee80211_is_back(__le16 fc) /** * ieee80211_is_pspoll - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_PSPOLL * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is a PS-poll frame */ static inline bool ieee80211_is_pspoll(__le16 fc) { @@ -708,6 +730,7 @@ static inline bool ieee80211_is_pspoll(__le16 fc) /** * ieee80211_is_rts - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_RTS * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is an RTS 
frame */ static inline bool ieee80211_is_rts(__le16 fc) { @@ -718,6 +741,7 @@ static inline bool ieee80211_is_rts(__le16 fc) /** * ieee80211_is_cts - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CTS * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is a CTS frame */ static inline bool ieee80211_is_cts(__le16 fc) { @@ -728,6 +752,7 @@ static inline bool ieee80211_is_cts(__le16 fc) /** * ieee80211_is_ack - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_ACK * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is an ACK frame */ static inline bool ieee80211_is_ack(__le16 fc) { @@ -738,6 +763,7 @@ static inline bool ieee80211_is_ack(__le16 fc) /** * ieee80211_is_cfend - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CFEND * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is a CF-end frame */ static inline bool ieee80211_is_cfend(__le16 fc) { @@ -748,6 +774,7 @@ static inline bool ieee80211_is_cfend(__le16 fc) /** * ieee80211_is_cfendack - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CFENDACK * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is a CF-end-ack frame */ static inline bool ieee80211_is_cfendack(__le16 fc) { @@ -758,6 +785,7 @@ static inline bool ieee80211_is_cfendack(__le16 fc) /** * ieee80211_is_nullfunc - check if frame is a regular (non-QoS) nullfunc frame * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is a nullfunc frame */ static inline bool ieee80211_is_nullfunc(__le16 fc) { @@ -768,6 +796,7 @@ static inline bool ieee80211_is_nullfunc(__le16 fc) /** * ieee80211_is_qos_nullfunc - check if frame is a QoS nullfunc frame * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is a QoS nullfunc frame */ static inline bool ieee80211_is_qos_nullfunc(__le16 fc) { @@ -778,6 +807,7 @@ static inline bool ieee80211_is_qos_nullfunc(__le16 fc) /** * ieee80211_is_trigger - check if frame is trigger frame * @fc: frame control field in little-endian byteorder + * Return: whether or not the frame is a trigger frame */ static inline bool ieee80211_is_trigger(__le16 fc) { @@ -788,6 +818,7 @@ static inline bool ieee80211_is_trigger(__le16 fc) /** * ieee80211_is_any_nullfunc - check if frame is regular or QoS nullfunc frame * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is a nullfunc or QoS nullfunc frame */ static inline bool ieee80211_is_any_nullfunc(__le16 fc) { @@ -797,6 +828,8 @@ static inline bool ieee80211_is_any_nullfunc(__le16 fc) /** * ieee80211_is_first_frag - check if IEEE80211_SCTL_FRAG is not set * @seq_ctrl: frame sequence control bytes in little-endian byteorder + * Return: whether or not the frame is the first fragment (also true if + * it's not fragmented at all) */ static inline bool ieee80211_is_first_frag(__le16 seq_ctrl) { @@ -806,6 +839,7 @@ static inline bool ieee80211_is_first_frag(__le16 seq_ctrl) /** * ieee80211_is_frag - check if a frame is a fragment * @hdr: 802.11 header of the frame + * Return: whether or not the frame is a fragment */ static inline bool ieee80211_is_frag(struct ieee80211_hdr *hdr) { @@ -1101,7 +1135,7 @@ enum ieee80211_vht_opmode_bits { }; /** - * enum ieee80211_s1g_chanwidth + * enum ieee80211_s1g_chanwidth - S1G channel widths * These are defined in IEEE802.11-2016ah Table 10-20 * as BSS Channel Width * @@ -2359,6 +2393,8 @@ struct ieee80211_eht_operation_info { 
* @max_vht_nss: current maximum NSS as advertised by the STA in * operating mode notification, can be 0 in which case the * capability data will be used to derive this (from MCS support) + * Return: The maximum NSS that can be used for the given bandwidth/MCS + * combination * * Due to the VHT Extended NSS Bandwidth Support, the maximum NSS can * vary for a given BW/MCS. This function parses the data. @@ -2370,44 +2406,6 @@ int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap, int mcs, bool ext_nss_bw_capable, unsigned int max_vht_nss); -/** - * enum ieee80211_ap_reg_power - regulatory power for a Access Point - * - * @IEEE80211_REG_UNSET_AP: Access Point has no regulatory power mode - * @IEEE80211_REG_LPI_AP: Indoor Access Point - * @IEEE80211_REG_SP_AP: Standard power Access Point - * @IEEE80211_REG_VLP_AP: Very low power Access Point - * @IEEE80211_REG_AP_POWER_AFTER_LAST: internal - * @IEEE80211_REG_AP_POWER_MAX: maximum value - */ -enum ieee80211_ap_reg_power { - IEEE80211_REG_UNSET_AP, - IEEE80211_REG_LPI_AP, - IEEE80211_REG_SP_AP, - IEEE80211_REG_VLP_AP, - IEEE80211_REG_AP_POWER_AFTER_LAST, - IEEE80211_REG_AP_POWER_MAX = - IEEE80211_REG_AP_POWER_AFTER_LAST - 1, -}; - -/** - * enum ieee80211_client_reg_power - regulatory power for a client - * - * @IEEE80211_REG_UNSET_CLIENT: Client has no regulatory power mode - * @IEEE80211_REG_DEFAULT_CLIENT: Default Client - * @IEEE80211_REG_SUBORDINATE_CLIENT: Subordinate Client - * @IEEE80211_REG_CLIENT_POWER_AFTER_LAST: internal - * @IEEE80211_REG_CLIENT_POWER_MAX: maximum value - */ -enum ieee80211_client_reg_power { - IEEE80211_REG_UNSET_CLIENT, - IEEE80211_REG_DEFAULT_CLIENT, - IEEE80211_REG_SUBORDINATE_CLIENT, - IEEE80211_REG_CLIENT_POWER_AFTER_LAST, - IEEE80211_REG_CLIENT_POWER_MAX = - IEEE80211_REG_CLIENT_POWER_AFTER_LAST - 1, -}; - /* 802.11ax HE MAC capabilities */ #define IEEE80211_HE_MAC_CAP0_HTC_HE 0x01 #define IEEE80211_HE_MAC_CAP0_TWT_REQ 0x02 @@ -2789,22 +2787,6 @@ struct ieee80211_he_6ghz_oper { u8 minrate; } __packed; -/* - * In "9.4.2.161 Transmit Power Envelope element" of "IEEE Std 802.11ax-2021", - * it show four types in "Table 9-275a-Maximum Transmit Power Interpretation - * subfield encoding", and two category for each type in "Table E-12-Regulatory - * Info subfield encoding in the United States". - * So it it totally max 8 Transmit Power Envelope element. - */ -#define IEEE80211_TPE_MAX_IE_COUNT 8 -/* - * In "Table 9-277—Meaning of Maximum Transmit Power Count subfield" - * of "IEEE Std 802.11ax™-2021", the max power level is 8. - */ -#define IEEE80211_MAX_NUM_PWR_LEVEL 8 - -#define IEEE80211_TPE_MAX_POWER_COUNT 8 - /* transmit power interpretation type of transmit power envelope element */ enum ieee80211_tx_power_intrpt_type { IEEE80211_TPE_LOCAL_EIRP, @@ -2813,24 +2795,107 @@ enum ieee80211_tx_power_intrpt_type { IEEE80211_TPE_REG_CLIENT_EIRP_PSD, }; +/* category type of transmit power envelope element */ +enum ieee80211_tx_power_category_6ghz { + IEEE80211_TPE_CAT_6GHZ_DEFAULT = 0, + IEEE80211_TPE_CAT_6GHZ_SUBORDINATE = 1, +}; + +/* + * For IEEE80211_TPE_LOCAL_EIRP / IEEE80211_TPE_REG_CLIENT_EIRP, + * setting to 63.5 dBm means no constraint. + */ +#define IEEE80211_TPE_MAX_TX_PWR_NO_CONSTRAINT 127 + +/* + * For IEEE80211_TPE_LOCAL_EIRP_PSD / IEEE80211_TPE_REG_CLIENT_EIRP_PSD, + * setting to 127 indicates no PSD limit for the 20 MHz channel.
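For illustration only, not part of this patch: the Maximum Transmit Power Count subfield determines how many power octets follow the info byte, mirroring the rules enforced by ieee80211_valid_tpe_element() just below. A hypothetical decode helper (tpe_num_pwr_values is an invented name; assumes <linux/bitfield.h> and <linux/errno.h>):

static int tpe_num_pwr_values(u8 info)
{
	u8 count = u8_get_bits(info, IEEE80211_TX_PWR_ENV_INFO_COUNT);
	u8 interpret = u8_get_bits(info, IEEE80211_TX_PWR_ENV_INFO_INTERPRET);

	switch (interpret) {
	case IEEE80211_TPE_LOCAL_EIRP:
	case IEEE80211_TPE_REG_CLIENT_EIRP:
		/* count == 0 encodes one value for 20 MHz, up to 4 values */
		return count > 3 ? -EINVAL : count + 1;
	case IEEE80211_TPE_LOCAL_EIRP_PSD:
	case IEEE80211_TPE_REG_CLIENT_EIRP_PSD:
		/* count == 0 encodes one aggregate value, else 2^(count - 1) */
		return count > 4 ? -EINVAL : (count ? 1 << (count - 1) : 1);
	default:
		return -EINVAL;
	}
}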
+ */ +#define IEEE80211_TPE_PSD_NO_LIMIT 127 + /** * struct ieee80211_tx_pwr_env - Transmit Power Envelope - * @tx_power_info: Transmit Power Information field - * @tx_power: Maximum Transmit Power field + * @info: Transmit Power Information field + * @variable: Maximum Transmit Power field * * This structure represents the payload of the "Transmit Power * Envelope element" as described in IEEE Std 802.11ax-2021 section * 9.4.2.161 */ struct ieee80211_tx_pwr_env { - u8 tx_power_info; - s8 tx_power[IEEE80211_TPE_MAX_POWER_COUNT]; + u8 info; + u8 variable[]; } __packed; #define IEEE80211_TX_PWR_ENV_INFO_COUNT 0x7 #define IEEE80211_TX_PWR_ENV_INFO_INTERPRET 0x38 #define IEEE80211_TX_PWR_ENV_INFO_CATEGORY 0xC0 +#define IEEE80211_TX_PWR_ENV_EXT_COUNT 0xF + +static inline bool ieee80211_valid_tpe_element(const u8 *data, u8 len) +{ + const struct ieee80211_tx_pwr_env *env = (const void *)data; + u8 count, interpret, category; + u8 needed = sizeof(*env); + u8 N; /* also called N in the spec */ + + if (len < needed) + return false; + + count = u8_get_bits(env->info, IEEE80211_TX_PWR_ENV_INFO_COUNT); + interpret = u8_get_bits(env->info, IEEE80211_TX_PWR_ENV_INFO_INTERPRET); + category = u8_get_bits(env->info, IEEE80211_TX_PWR_ENV_INFO_CATEGORY); + + switch (category) { + case IEEE80211_TPE_CAT_6GHZ_DEFAULT: + case IEEE80211_TPE_CAT_6GHZ_SUBORDINATE: + break; + default: + return false; + } + + switch (interpret) { + case IEEE80211_TPE_LOCAL_EIRP: + case IEEE80211_TPE_REG_CLIENT_EIRP: + if (count > 3) + return false; + + /* count == 0 encodes 1 value for 20 MHz, etc. */ + needed += count + 1; + + if (len < needed) + return false; + + /* there can be extension fields not accounted for in 'count' */ + + return true; + case IEEE80211_TPE_LOCAL_EIRP_PSD: + case IEEE80211_TPE_REG_CLIENT_EIRP_PSD: + if (count > 4) + return false; + + N = count ? 1 << (count - 1) : 1; + needed += N; + + if (len < needed) + return false; + + if (len > needed) { + u8 K = u8_get_bits(env->variable[N], + IEEE80211_TX_PWR_ENV_EXT_COUNT); + + needed += 1 + K; + if (len < needed) + return false; + } + + return true; + } + + return false; +} + /* * ieee80211_he_oper_size - calculate 802.11ax HE Operations IE size * @he_oper_ie: byte data of the He Operations IE, stating from the byte @@ -4145,7 +4210,7 @@ enum ieee80211_idle_options { }; /** - * struct ieee80211_bss_max_idle_period_ie + * struct ieee80211_bss_max_idle_period_ie - BSS max idle period element struct * * This structure refers to "BSS Max idle period element" * @@ -4180,7 +4245,7 @@ enum ieee80211_sa_query_action { }; /** - * struct ieee80211_bssid_index + * struct ieee80211_bssid_index - multiple BSSID index element structure * * This structure refers to "Multiple BSSID-index element" * @@ -4195,7 +4260,8 @@ struct ieee80211_bssid_index { }; /** - * struct ieee80211_multiple_bssid_configuration + * struct ieee80211_multiple_bssid_configuration - multiple BSSID configuration + * element structure * * This structure refers to "Multiple BSSID Configuration element" * @@ -4326,6 +4392,7 @@ struct ieee80211_he_6ghz_capa { /** * ieee80211_get_qos_ctl - get pointer to qos control bytes * @hdr: the frame + * Return: a pointer to the QoS control field in the frame header * * The qos ctrl bytes come after the frame_control, duration, seq_num * and 3 or 4 addresses of length ETH_ALEN. 
Checks frame_control to choose @@ -4348,6 +4415,7 @@ static inline u8 *ieee80211_get_qos_ctl(struct ieee80211_hdr *hdr) /** * ieee80211_get_tid - get qos TID * @hdr: the frame + * Return: the TID from the QoS control field */ static inline u8 ieee80211_get_tid(struct ieee80211_hdr *hdr) { @@ -4359,6 +4427,7 @@ static inline u8 ieee80211_get_tid(struct ieee80211_hdr *hdr) /** * ieee80211_get_SA - get pointer to SA * @hdr: the frame + * Return: a pointer to the source address (SA) * * Given an 802.11 frame, this function returns the offset * to the source address (SA). It does not verify that the @@ -4378,6 +4447,7 @@ static inline u8 *ieee80211_get_SA(struct ieee80211_hdr *hdr) /** * ieee80211_get_DA - get pointer to DA * @hdr: the frame + * Return: a pointer to the destination address (DA) * * Given an 802.11 frame, this function returns the offset * to the destination address (DA). It does not verify that @@ -4396,6 +4466,7 @@ static inline u8 *ieee80211_get_DA(struct ieee80211_hdr *hdr) /** * ieee80211_is_bufferable_mmpdu - check if frame is bufferable MMPDU * @skb: the skb to check, starting with the 802.11 header + * Return: whether or not the MMPDU is bufferable */ static inline bool ieee80211_is_bufferable_mmpdu(struct sk_buff *skb) { @@ -4434,6 +4505,7 @@ static inline bool ieee80211_is_bufferable_mmpdu(struct sk_buff *skb) /** * _ieee80211_is_robust_mgmt_frame - check if frame is a robust management frame * @hdr: the frame (buffer must include at least the first octet of payload) + * Return: whether or not the frame is a robust management frame */ static inline bool _ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr) { @@ -4470,6 +4542,7 @@ static inline bool _ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr) /** * ieee80211_is_robust_mgmt_frame - check if skb contains a robust mgmt frame * @skb: the skb containing the frame, length will be checked + * Return: whether or not the frame is a robust management frame */ static inline bool ieee80211_is_robust_mgmt_frame(struct sk_buff *skb) { @@ -4482,6 +4555,7 @@ static inline bool ieee80211_is_robust_mgmt_frame(struct sk_buff *skb) * ieee80211_is_public_action - check if frame is a public action frame * @hdr: the frame * @len: length of the frame + * Return: whether or not the frame is a public action frame */ static inline bool ieee80211_is_public_action(struct ieee80211_hdr *hdr, size_t len) @@ -4527,8 +4601,9 @@ ieee80211_is_protected_dual_of_public_action(struct sk_buff *skb) /** * _ieee80211_is_group_privacy_action - check if frame is a group addressed - * privacy action frame + * privacy action frame * @hdr: the frame + * Return: whether or not the frame is a group addressed privacy action frame */ static inline bool _ieee80211_is_group_privacy_action(struct ieee80211_hdr *hdr) { @@ -4544,8 +4619,9 @@ static inline bool _ieee80211_is_group_privacy_action(struct ieee80211_hdr *hdr) /** * ieee80211_is_group_privacy_action - check if frame is a group addressed - * privacy action frame + * privacy action frame * @skb: the skb containing the frame, length will be checked + * Return: whether or not the frame is a group addressed privacy action frame */ static inline bool ieee80211_is_group_privacy_action(struct sk_buff *skb) { @@ -4557,6 +4633,7 @@ static inline bool ieee80211_is_group_privacy_action(struct sk_buff *skb) /** * ieee80211_tu_to_usec - convert time units (TU) to microseconds * @tu: the TUs + * Return: the time value converted to microseconds */ static inline unsigned long ieee80211_tu_to_usec(unsigned long 
tu) { @@ -4568,6 +4645,7 @@ static inline unsigned long ieee80211_tu_to_usec(unsigned long tu) * @tim: the TIM IE * @tim_len: length of the TIM IE * @aid: the AID to look for + * Return: whether or not traffic is indicated in the TIM for the given AID */ static inline bool ieee80211_check_tim(const struct ieee80211_tim_ie *tim, u8 tim_len, u16 aid) @@ -4594,8 +4672,10 @@ static inline bool ieee80211_check_tim(const struct ieee80211_tim_ie *tim, } /** - * ieee80211_get_tdls_action - get tdls packet action (or -1, if not tdls packet) + * ieee80211_get_tdls_action - get TDLS action code * @skb: the skb containing the frame, length will not be checked + * Return: the TDLS action code, or -1 if it's not an encapsulated TDLS action + * frame * * This function assumes the frame is a data frame, and that the network header * is in the correct place. @@ -4635,6 +4715,7 @@ static inline int ieee80211_get_tdls_action(struct sk_buff *skb) /** * ieee80211_action_contains_tpc - checks if the frame contains TPC element * @skb: the skb containing the frame, length will be checked + * Return: %true if the frame contains a TPC element, %false otherwise * * This function checks if it's either TPC report action frame or Link * Measurement report action frame as defined in IEEE Std. 802.11-2012 8.5.2.5 @@ -4679,6 +4760,11 @@ static inline bool ieee80211_action_contains_tpc(struct sk_buff *skb) return true; } +/** + * ieee80211_is_timing_measurement - check if frame is timing measurement response + * @skb: the SKB to check + * Return: whether or not the frame is a valid timing measurement response + */ static inline bool ieee80211_is_timing_measurement(struct sk_buff *skb) { struct ieee80211_mgmt *mgmt = (void *)skb->data; @@ -4698,6 +4784,11 @@ static inline bool ieee80211_is_timing_measurement(struct sk_buff *skb) return false; } +/** + * ieee80211_is_ftm - check if frame is FTM response + * @skb: the SKB to check + * Return: whether or not the frame is a valid FTM response action frame + */ static inline bool ieee80211_is_ftm(struct sk_buff *skb) { struct ieee80211_mgmt *mgmt = (void *)skb->data; @@ -4752,6 +4843,7 @@ struct element { * @element: element pointer after for_each_element() or friends * @data: same data pointer as passed to for_each_element() or friends * @datalen: same data length as passed to for_each_element() or friends + * Return: %true if all elements were iterated, %false otherwise; see notes * * This function returns %true if all the data was parsed or considered * while walking the elements. Only use this if your for_each_element() @@ -4955,6 +5047,7 @@ struct ieee80211_mle_tdls_common_info { * ieee80211_mle_common_size - check multi-link element common size * @data: multi-link element, must already be checked for size using * ieee80211_mle_size_ok() + * Return: the size of the multi-link element's "common" subfield */ static inline u8 ieee80211_mle_common_size(const u8 *data) { @@ -4987,11 +5080,10 @@ static inline u8 ieee80211_mle_common_size(const u8 *data) /** * ieee80211_mle_get_link_id - returns the link ID * @data: the basic multi link element + * Return: the link ID, or -1 if not present * * The element is assumed to be of the correct type (BASIC) and big enough, * this must be checked using ieee80211_mle_type_ok(). 
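For illustration, not part of this patch: the multi-link element getters here are only safe after validation, so a caller would be expected to look roughly like this (example_parse_mle is an invented name):

static int example_parse_mle(const u8 *data, size_t len)
{
	if (!ieee80211_mle_type_ok(data, IEEE80211_ML_CONTROL_TYPE_BASIC, len))
		return -EINVAL;

	/* safe now; returns the link ID, or -1 if the field is absent */
	return ieee80211_mle_get_link_id(data);
}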
- * - * If the BSS link ID can't be found, -1 will be returned */ static inline int ieee80211_mle_get_link_id(const u8 *data) { @@ -5011,12 +5103,10 @@ static inline int ieee80211_mle_get_link_id(const u8 *data) /** * ieee80211_mle_get_bss_param_ch_cnt - returns the BSS parameter change count * @data: pointer to the basic multi link element + * Return: the BSS Parameter Change Count field value, or -1 if not present * * The element is assumed to be of the correct type (BASIC) and big enough, * this must be checked using ieee80211_mle_type_ok(). - * - * If the BSS parameter change count value can't be found (the presence bit - * for it is clear), -1 will be returned. */ static inline int ieee80211_mle_get_bss_param_ch_cnt(const u8 *data) @@ -5039,13 +5129,13 @@ ieee80211_mle_get_bss_param_ch_cnt(const u8 *data) /** * ieee80211_mle_get_eml_med_sync_delay - returns the medium sync delay - * @data: pointer to the multi link EHT IE + * @data: pointer to the multi-link element + * Return: the medium synchronization delay field value from the multi-link + * element, or the default value (%IEEE80211_MED_SYNC_DELAY_DEFAULT) + * if not present * * The element is assumed to be of the correct type (BASIC) and big enough, * this must be checked using ieee80211_mle_type_ok(). - * - * If the medium synchronization is not present, then the default value is - * returned. */ static inline u16 ieee80211_mle_get_eml_med_sync_delay(const u8 *data) { @@ -5069,12 +5159,12 @@ static inline u16 ieee80211_mle_get_eml_med_sync_delay(const u8 *data) /** * ieee80211_mle_get_eml_cap - returns the EML capability - * @data: pointer to the multi link EHT IE + * @data: pointer to the multi-link element + * Return: the EML capability field value from the multi-link element, + * or 0 if not present * * The element is assumed to be of the correct type (BASIC) and big enough, * this must be checked using ieee80211_mle_type_ok(). - * - * If the EML capability is not present, 0 will be returned. */ static inline u16 ieee80211_mle_get_eml_cap(const u8 *data) { @@ -5100,13 +5190,12 @@ static inline u16 ieee80211_mle_get_eml_cap(const u8 *data) /** * ieee80211_mle_get_mld_capa_op - returns the MLD capabilities and operations. - * @data: pointer to the multi link EHT IE + * @data: pointer to the multi-link element + * Return: the MLD capabilities and operations field value from the multi-link + * element, or 0 if not present * * The element is assumed to be of the correct type (BASIC) and big enough, * this must be checked using ieee80211_mle_type_ok(). - * - * If the MLD capabilities and operations field is not present, 0 will be - * returned. */ static inline u16 ieee80211_mle_get_mld_capa_op(const u8 *data) { @@ -5137,12 +5226,11 @@ static inline u16 ieee80211_mle_get_mld_capa_op(const u8 *data) /** * ieee80211_mle_get_mld_id - returns the MLD ID - * @data: pointer to the multi link element + * @data: pointer to the multi-link element + * Return: The MLD ID in the given multi-link element, or 0 if not present * * The element is assumed to be of the correct type (BASIC) and big enough, * this must be checked using ieee80211_mle_type_ok(). - * - * If the MLD ID is not present, 0 will be returned. 
*/ static inline u8 ieee80211_mle_get_mld_id(const u8 *data) { @@ -5177,6 +5265,7 @@ static inline u8 ieee80211_mle_get_mld_id(const u8 *data) * ieee80211_mle_size_ok - validate multi-link element size * @data: pointer to the element data * @len: length of the containing element + * Return: whether or not the multi-link element size is OK */ static inline bool ieee80211_mle_size_ok(const u8 *data, size_t len) { @@ -5246,6 +5335,7 @@ static inline bool ieee80211_mle_size_ok(const u8 *data, size_t len) * @data: pointer to the element data * @type: expected type of the element * @len: length of the containing element + * Return: whether or not the multi-link element type matches and size is OK */ static inline bool ieee80211_mle_type_ok(const u8 *data, u8 type, size_t len) { @@ -5289,6 +5379,7 @@ struct ieee80211_mle_per_sta_profile { * profile size * @data: pointer to the sub element data * @len: length of the containing sub element + * Return: %true if the STA profile is large enough, %false otherwise */ static inline bool ieee80211_mle_basic_sta_prof_size_ok(const u8 *data, size_t len) @@ -5373,6 +5464,7 @@ ieee80211_mle_basic_sta_prof_bss_param_ch_cnt(const struct ieee80211_mle_per_sta * element sta profile size. * @data: pointer to the sub element data * @len: length of the containing sub element + * Return: %true if the STA profile is large enough, %false otherwise */ static inline bool ieee80211_mle_reconf_sta_prof_size_ok(const u8 *data, size_t len) diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h index 383614ebd760..f8c1d2505940 100644 --- a/include/linux/iio/adc/ad_sigma_delta.h +++ b/include/linux/iio/adc/ad_sigma_delta.h @@ -37,6 +37,10 @@ struct iio_dev; * @append_status: Will be called to enable status append at the end of the sample, may be NULL. * @set_mode: Will be called to select the current mode, may be NULL. * @disable_all: Will be called to disable all channels, may be NULL. + * @disable_one: Will be called to disable a single channel after + * ad_sigma_delta_single_conversion(), may be NULL. + * Usage of this callback expects iio_chan_spec.address to contain + * the value required for the driver to identify the channel. * @postprocess_sample: Is called for each sampled data word, can be used to * modify or drop the sample data; it may be NULL.
* @has_registers: true if the device has writable and readable registers, false @@ -55,6 +59,7 @@ struct ad_sigma_delta_info { int (*append_status)(struct ad_sigma_delta *, bool append); int (*set_mode)(struct ad_sigma_delta *, enum ad_sigma_delta_mode mode); int (*disable_all)(struct ad_sigma_delta *); + int (*disable_one)(struct ad_sigma_delta *, unsigned int chan); int (*postprocess_sample)(struct ad_sigma_delta *, unsigned int raw_sample); bool has_registers; unsigned int addr_shift; @@ -140,6 +145,15 @@ static inline int ad_sigma_delta_disable_all(struct ad_sigma_delta *sd) return 0; } +static inline int ad_sigma_delta_disable_one(struct ad_sigma_delta *sd, + unsigned int chan) +{ + if (sd->info->disable_one) + return sd->info->disable_one(sd, chan); + + return 0; +} + static inline int ad_sigma_delta_set_mode(struct ad_sigma_delta *sd, unsigned int mode) { diff --git a/include/linux/iio/buffer-dma.h b/include/linux/iio/buffer-dma.h index 6e27e47077d5..5eb66a399002 100644 --- a/include/linux/iio/buffer-dma.h +++ b/include/linux/iio/buffer-dma.h @@ -7,6 +7,7 @@ #ifndef __INDUSTRIALIO_DMA_BUFFER_H__ #define __INDUSTRIALIO_DMA_BUFFER_H__ +#include <linux/atomic.h> #include <linux/list.h> #include <linux/kref.h> #include <linux/spinlock.h> @@ -16,6 +17,9 @@ struct iio_dma_buffer_queue; struct iio_dma_buffer_ops; struct device; +struct dma_buf_attachment; +struct dma_fence; +struct sg_table; /** * enum iio_block_state - State of a struct iio_dma_buffer_block @@ -41,6 +45,10 @@ enum iio_block_state { * @queue: Parent DMA buffer queue * @kref: kref used to manage the lifetime of block * @state: Current state of the block + * @cyclic: True if this is a cyclic buffer + * @fileio: True if this buffer is used for fileio mode + * @sg_table: DMA table for the transfer when transferring a DMABUF + * @fence: DMA fence to be signaled when a DMABUF transfer is complete */ struct iio_dma_buffer_block { /* May only be accessed by the owner of the block */ @@ -63,6 +71,12 @@ struct iio_dma_buffer_block { * queue->list_lock if the block is not owned by the core. 
*/ enum iio_block_state state; + + bool cyclic; + bool fileio; + + struct sg_table *sg_table; + struct dma_fence *fence; }; /** @@ -72,6 +86,7 @@ struct iio_dma_buffer_block { * @pos: Read offset in the active block * @block_size: Size of each block * @next_dequeue: index of next block that will be dequeued + * @enabled: Whether the buffer is operating in fileio mode */ struct iio_dma_buffer_queue_fileio { struct iio_dma_buffer_block *blocks[2]; @@ -80,6 +95,7 @@ struct iio_dma_buffer_queue_fileio { size_t block_size; unsigned int next_dequeue; + bool enabled; }; /** @@ -95,6 +111,7 @@ struct iio_dma_buffer_queue_fileio { * the DMA controller * @incoming: List of buffers on the incoming queue * @active: Whether the buffer is currently active + * @num_dmabufs: Total number of DMABUFs attached to this queue * @fileio: FileIO state */ struct iio_dma_buffer_queue { @@ -107,6 +124,7 @@ struct iio_dma_buffer_queue { struct list_head incoming; bool active; + atomic_t num_dmabufs; struct iio_dma_buffer_queue_fileio fileio; }; @@ -144,4 +162,17 @@ int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue, void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue); void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue); +struct iio_dma_buffer_block * +iio_dma_buffer_attach_dmabuf(struct iio_buffer *buffer, + struct dma_buf_attachment *attach); +void iio_dma_buffer_detach_dmabuf(struct iio_buffer *buffer, + struct iio_dma_buffer_block *block); +int iio_dma_buffer_enqueue_dmabuf(struct iio_buffer *buffer, + struct iio_dma_buffer_block *block, + struct dma_fence *fence, + struct sg_table *sgt, + size_t size, bool cyclic); +void iio_dma_buffer_lock_queue(struct iio_buffer *buffer); +void iio_dma_buffer_unlock_queue(struct iio_buffer *buffer); + #endif diff --git a/include/linux/iio/buffer_impl.h b/include/linux/iio/buffer_impl.h index 89c3fd7c29ca..e72552e026f3 100644 --- a/include/linux/iio/buffer_impl.h +++ b/include/linux/iio/buffer_impl.h @@ -9,8 +9,12 @@ #include <uapi/linux/iio/buffer.h> #include <linux/iio/buffer.h> +struct dma_buf_attachment; +struct dma_fence; struct iio_dev; +struct iio_dma_buffer_block; struct iio_buffer; +struct sg_table; /** * INDIO_BUFFER_FLAG_FIXED_WATERMARK - Watermark level of the buffer can not be @@ -39,6 +43,16 @@ struct iio_buffer; * device stops sampling. Calls are balanced with @enable. * @release: called when the last reference to the buffer is dropped, * should free all resources allocated by the buffer. + * @attach_dmabuf: called from userspace via ioctl to attach one external + * DMABUF. + * @detach_dmabuf: called from userspace via ioctl to detach one previously + * attached DMABUF. + * @enqueue_dmabuf: called from userspace via ioctl to queue this DMABUF + * object to this buffer. Requires a valid DMABUF fd that + * was previously attached to this buffer. + * @lock_queue: called when the core needs to lock the buffer queue; + * it is used when enqueueing DMABUF objects.
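For illustration, not part of this patch: a DMA-based buffer implementation would presumably route these callbacks straight to the iio_dma_buffer_*() helpers declared above, along the following lines (example_dma_buffer_ops is an invented name; the pre-existing callbacks are elided):

static const struct iio_buffer_access_funcs example_dma_buffer_ops = {
	/* ... read/write/enable/disable callbacks elided ... */
	.attach_dmabuf = iio_dma_buffer_attach_dmabuf,
	.detach_dmabuf = iio_dma_buffer_detach_dmabuf,
	.enqueue_dmabuf = iio_dma_buffer_enqueue_dmabuf,
	.lock_queue = iio_dma_buffer_lock_queue,
	.unlock_queue = iio_dma_buffer_unlock_queue,
	.modes = INDIO_BUFFER_HARDWARE,
};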
+ * @unlock_queue: used to unlock a previously locked buffer queue * @modes: Supported operating modes by this buffer type * @flags: A bitmask combination of INDIO_BUFFER_FLAG_* * @@ -68,6 +82,17 @@ struct iio_buffer_access_funcs { void (*release)(struct iio_buffer *buffer); + struct iio_dma_buffer_block * (*attach_dmabuf)(struct iio_buffer *buffer, + struct dma_buf_attachment *attach); + void (*detach_dmabuf)(struct iio_buffer *buffer, + struct iio_dma_buffer_block *block); + int (*enqueue_dmabuf)(struct iio_buffer *buffer, + struct iio_dma_buffer_block *block, + struct dma_fence *fence, struct sg_table *sgt, + size_t size, bool cyclic); + void (*lock_queue)(struct iio_buffer *buffer); + void (*unlock_queue)(struct iio_buffer *buffer); + unsigned int modes; unsigned int flags; }; @@ -136,6 +161,12 @@ struct iio_buffer { /* @ref: Reference count of the buffer. */ struct kref ref; + + /* @dmabufs: List of DMABUF attachments */ + struct list_head dmabufs; /* P: dmabufs_mutex */ + + /* @dmabufs_mutex: Protects dmabufs */ + struct mutex dmabufs_mutex; }; /** @@ -159,6 +190,8 @@ void iio_buffer_init(struct iio_buffer *buffer); struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer); void iio_buffer_put(struct iio_buffer *buffer); +void iio_buffer_signal_dmabuf_done(struct dma_fence *fence, int ret); + #else /* CONFIG_IIO_BUFFER */ static inline void iio_buffer_get(struct iio_buffer *buffer) {} diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h index e9910b41d48e..333d1d8ccb37 100644 --- a/include/linux/iio/consumer.h +++ b/include/linux/iio/consumer.h @@ -441,4 +441,14 @@ ssize_t iio_read_channel_ext_info(struct iio_channel *chan, ssize_t iio_write_channel_ext_info(struct iio_channel *chan, const char *attr, const char *buf, size_t len); +/** + * iio_read_channel_label() - read label for a given channel + * @chan: The channel being queried. + * @buf: Where to store the attribute value. Assumed to hold + * at least PAGE_SIZE bytes. + * + * Returns the number of bytes written to buf, or an error code. + */ +ssize_t iio_read_channel_label(struct iio_channel *chan, char *buf); + #endif diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index 55e2b22086a1..894309294182 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -174,6 +174,27 @@ struct iio_event_spec { }; /** + * struct iio_scan_type - specification for channel data format in buffer + * @sign: 's' or 'u' to specify signed or unsigned + * @realbits: Number of valid bits of data + * @storagebits: Realbits + padding + * @shift: Shift right by this before masking out realbits. + * @repeat: Number of times real/storage bits repeats. When the + * repeat element is more than 1, then the type element in + * sysfs will show a repeat value. Otherwise, the number + * of repetitions is omitted. + * @endianness: little or big endian + */ +struct iio_scan_type { + char sign; + u8 realbits; + u8 storagebits; + u8 shift; + u8 repeat; + enum iio_endian endianness; +}; + +/** * struct iio_chan_spec - specification of a single channel * @type: What type of measurement is the channel making. * @channel: What number do we wish to assign the channel. @@ -183,18 +204,13 @@ struct iio_event_spec { * @address: Driver specific identifier. * @scan_index: Monotonic index to give ordering in scans when read * from a buffer. 
- * @scan_type: struct describing the scan type - * @scan_type.sign: 's' or 'u' to specify signed or unsigned - * @scan_type.realbits: Number of valid bits of data - * @scan_type.storagebits: Realbits + padding - * @scan_type.shift: Shift right by this before masking out - * realbits. - * @scan_type.repeat: Number of times real/storage bits repeats. - * When the repeat element is more than 1, then - * the type element in sysfs will show a repeat - * value. Otherwise, the number of repetitions - * is omitted. - * @scan_type.endianness: little or big endian + * @scan_type: struct describing the scan type - mutually exclusive + * with ext_scan_type. + * @ext_scan_type: Used in rare cases where there is more than one scan + * format for a channel. When this is used, the flag + * has_ext_scan_type must be set and the driver must + * implement get_current_scan_type in struct iio_info. + * @num_ext_scan_type: Number of elements in ext_scan_type. * @info_mask_separate: What information is to be exported that is specific to * this channel. * @info_mask_separate_available: What availability information is to be @@ -238,6 +254,7 @@ struct iio_event_spec { * attributes but not for event codes. * @output: Channel is output. * @differential: Channel is differential. + * @has_ext_scan_type: True if ext_scan_type is used instead of scan_type. */ struct iio_chan_spec { enum iio_chan_type type; @@ -245,14 +262,13 @@ struct iio_chan_spec { int channel2; unsigned long address; int scan_index; - struct { - char sign; - u8 realbits; - u8 storagebits; - u8 shift; - u8 repeat; - enum iio_endian endianness; - } scan_type; + union { + struct iio_scan_type scan_type; + struct { + const struct iio_scan_type *ext_scan_type; + unsigned int num_ext_scan_type; + }; + }; long info_mask_separate; long info_mask_separate_available; long info_mask_shared_by_type; @@ -270,6 +286,7 @@ struct iio_chan_spec { unsigned indexed:1; unsigned output:1; unsigned differential:1; + unsigned has_ext_scan_type:1; }; @@ -432,6 +449,9 @@ struct iio_trigger; /* forward declaration */ * for better event identification. * @validate_trigger: function to validate the trigger when the * current trigger gets changed. + * @get_current_scan_type: must be implemented by drivers that use ext_scan_type + * in the channel spec to return the index of the currently + * active ext_scan type for a channel. * @update_scan_mode: function to configure device and scan buffer when * channels have changed * @debugfs_reg_access: function to read or write register value of device @@ -516,6 +536,8 @@ struct iio_info { int (*validate_trigger)(struct iio_dev *indio_dev, struct iio_trigger *trig); + int (*get_current_scan_type)(const struct iio_dev *indio_dev, + const struct iio_chan_spec *chan); int (*update_scan_mode)(struct iio_dev *indio_dev, const unsigned long *scan_mask); int (*debugfs_reg_access)(struct iio_dev *indio_dev, @@ -801,6 +823,38 @@ static inline bool iio_read_acpi_mount_matrix(struct device *dev, } #endif +/** + * iio_get_current_scan_type - Get the current scan type for a channel + * @indio_dev: the IIO device to get the scan type for + * @chan: the channel to get the scan type for + * + * Most devices only have one scan type per channel and can just access it + * directly without calling this function. Core IIO code and drivers that + * implement ext_scan_type in the channel spec should use this function to + * get the current scan type for a channel. + * + * Returns: the current scan type for the channel or error. 
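A hypothetical driver fragment, not part of this patch, showing how the new union and callback are meant to fit together (all example_* names are invented):

struct example_state {
	bool high_res;
};

static const struct iio_scan_type example_scan_types[] = {
	{ .sign = 's', .realbits = 16, .storagebits = 16, .endianness = IIO_BE },
	{ .sign = 's', .realbits = 24, .storagebits = 32, .endianness = IIO_BE },
};

static const struct iio_chan_spec example_channel = {
	.type = IIO_VOLTAGE,
	.scan_index = 0,
	.has_ext_scan_type = 1,
	.ext_scan_type = example_scan_types,
	.num_ext_scan_type = ARRAY_SIZE(example_scan_types),
};

/* returns the index of the currently active entry in ext_scan_type */
static int example_get_current_scan_type(const struct iio_dev *indio_dev,
					 const struct iio_chan_spec *chan)
{
	struct example_state *st = iio_priv(indio_dev);

	return st->high_res ? 1 : 0;
}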
+ */ +static inline const struct iio_scan_type +*iio_get_current_scan_type(const struct iio_dev *indio_dev, + const struct iio_chan_spec *chan) +{ + int ret; + + if (chan->has_ext_scan_type) { + ret = indio_dev->info->get_current_scan_type(indio_dev, chan); + if (ret < 0) + return ERR_PTR(ret); + + if (ret >= chan->num_ext_scan_type) + return ERR_PTR(-EINVAL); + + return &chan->ext_scan_type[ret]; + } + + return &chan->scan_type; +} + ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals); int iio_str_to_fixpoint(const char *str, int fract_mult, int *integer, diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h index 8898966bc0f0..e6a75356567a 100644 --- a/include/linux/iio/imu/adis.h +++ b/include/linux/iio/imu/adis.h @@ -9,6 +9,7 @@ #ifndef __IIO_ADIS_H__ #define __IIO_ADIS_H__ +#include <linux/cleanup.h> #include <linux/spi/spi.h> #include <linux/interrupt.h> #include <linux/iio/iio.h> @@ -21,6 +22,7 @@ #define ADIS_REG_PAGE_ID 0x00 struct adis; +struct iio_dev_attr; /** * struct adis_timeouts - ADIS chip variant timeouts @@ -84,6 +86,7 @@ struct adis_data { bool unmasked_drdy; bool has_paging; + bool has_fifo; unsigned int burst_reg_cmd; unsigned int burst_len; @@ -148,13 +151,8 @@ int __adis_reset(struct adis *adis); */ static inline int adis_reset(struct adis *adis) { - int ret; - - mutex_lock(&adis->state_lock); - ret = __adis_reset(adis); - mutex_unlock(&adis->state_lock); - - return ret; + guard(mutex)(&adis->state_lock); + return __adis_reset(adis); } int __adis_write_reg(struct adis *adis, unsigned int reg, @@ -246,13 +244,8 @@ static inline int __adis_read_reg_32(struct adis *adis, unsigned int reg, static inline int adis_write_reg(struct adis *adis, unsigned int reg, unsigned int val, unsigned int size) { - int ret; - - mutex_lock(&adis->state_lock); - ret = __adis_write_reg(adis, reg, val, size); - mutex_unlock(&adis->state_lock); - - return ret; + guard(mutex)(&adis->state_lock); + return __adis_write_reg(adis, reg, val, size); } /** @@ -265,13 +258,8 @@ static inline int adis_write_reg(struct adis *adis, unsigned int reg, static int adis_read_reg(struct adis *adis, unsigned int reg, unsigned int *val, unsigned int size) { - int ret; - - mutex_lock(&adis->state_lock); - ret = __adis_read_reg(adis, reg, val, size); - mutex_unlock(&adis->state_lock); - - return ret; + guard(mutex)(&adis->state_lock); + return __adis_read_reg(adis, reg, val, size); } /** @@ -363,12 +351,8 @@ int __adis_update_bits_base(struct adis *adis, unsigned int reg, const u32 mask, static inline int adis_update_bits_base(struct adis *adis, unsigned int reg, const u32 mask, const u32 val, u8 size) { - int ret; - - mutex_lock(&adis->state_lock); - ret = __adis_update_bits_base(adis, reg, mask, val, size); - mutex_unlock(&adis->state_lock); - return ret; + guard(mutex)(&adis->state_lock); + return __adis_update_bits_base(adis, reg, mask, val, size); } /** @@ -409,35 +393,19 @@ int __adis_enable_irq(struct adis *adis, bool enable); static inline int adis_enable_irq(struct adis *adis, bool enable) { - int ret; - - mutex_lock(&adis->state_lock); - ret = __adis_enable_irq(adis, enable); - mutex_unlock(&adis->state_lock); - - return ret; + guard(mutex)(&adis->state_lock); + return __adis_enable_irq(adis, enable); } static inline int adis_check_status(struct adis *adis) { - int ret; - - mutex_lock(&adis->state_lock); - ret = __adis_check_status(adis); - mutex_unlock(&adis->state_lock); - - return ret; -} - -static inline void adis_dev_lock(struct adis *adis) -{ - 
mutex_lock(&adis->state_lock); + guard(mutex)(&adis->state_lock); + return __adis_check_status(adis); } -static inline void adis_dev_unlock(struct adis *adis) -{ - mutex_unlock(&adis->state_lock); -} +#define adis_dev_auto_lock(adis) guard(mutex)(&(adis)->state_lock) +#define adis_dev_auto_scoped_lock(adis) \ + scoped_guard(mutex, &(adis)->state_lock) int adis_single_conversion(struct iio_dev *indio_dev, const struct iio_chan_spec *chan, @@ -515,11 +483,19 @@ int adis_single_conversion(struct iio_dev *indio_dev, #define ADIS_ROT_CHAN(mod, addr, si, info_sep, info_all, bits) \ ADIS_MOD_CHAN(IIO_ROT, mod, addr, si, info_sep, info_all, bits) +#define devm_adis_setup_buffer_and_trigger(adis, indio_dev, trigger_handler) \ + devm_adis_setup_buffer_and_trigger_with_attrs((adis), (indio_dev), \ + (trigger_handler), NULL, \ + NULL) + #ifdef CONFIG_IIO_ADIS_LIB_BUFFER int -devm_adis_setup_buffer_and_trigger(struct adis *adis, struct iio_dev *indio_dev, - irq_handler_t trigger_handler); +devm_adis_setup_buffer_and_trigger_with_attrs(struct adis *adis, + struct iio_dev *indio_dev, + irq_handler_t trigger_handler, + const struct iio_buffer_setup_ops *ops, + const struct iio_dev_attr **buffer_attrs); int devm_adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev); @@ -529,8 +505,11 @@ int adis_update_scan_mode(struct iio_dev *indio_dev, #else /* CONFIG_IIO_BUFFER */ static inline int -devm_adis_setup_buffer_and_trigger(struct adis *adis, struct iio_dev *indio_dev, - irq_handler_t trigger_handler) +devm_adis_setup_buffer_and_trigger_with_attrs(struct adis *adis, + struct iio_dev *indio_dev, + irq_handler_t trigger_handler, + const struct iio_buffer_setup_ops *ops, + const struct iio_dev_attr **buffer_attrs) { return 0; } diff --git a/include/linux/init.h b/include/linux/init.h index 58cef4c2e59a..ee1309473bc6 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -84,11 +84,15 @@ #define __exit __section(".exit.text") __exitused __cold notrace -/* Used for MEMORY_HOTPLUG */ -#define __meminit __section(".meminit.text") __cold notrace \ - __latent_entropy -#define __meminitdata __section(".meminit.data") -#define __meminitconst __section(".meminit.rodata") +#ifdef CONFIG_MEMORY_HOTPLUG +#define __meminit +#define __meminitdata +#define __meminitconst +#else +#define __meminit __init +#define __meminitdata __initdata +#define __meminitconst __initconst +#endif /* For assembly routines */ #define __HEAD .section ".head.text","ax" @@ -99,10 +103,6 @@ #define __INITRODATA .section ".init.rodata","a",%progbits #define __FINITDATA .previous -#define __MEMINIT .section ".meminit.text", "ax" -#define __MEMINITDATA .section ".meminit.data", "aw" -#define __MEMINITRODATA .section ".meminit.rodata", "a" - /* silence warnings when references are OK */ #define __REF .section ".ref.text", "ax" #define __REFDATA .section ".ref.data", "aw" diff --git a/include/linux/input.h b/include/linux/input.h index c22ac465254b..89a0be6ee0e2 100644 --- a/include/linux/input.h +++ b/include/linux/input.h @@ -275,7 +275,8 @@ struct input_handle; * it may not sleep * @events: event sequence handler. This method is being called by * input core with interrupts disabled and dev->event_lock - * spinlock held and so it may not sleep + * spinlock held and so it may not sleep. The method must return + * the number of events passed to it. * @filter: similar to @event; separates normal event handlers from * "filters".
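For illustration, not part of this patch: under the new contract an events() handler reports how many of the passed events it handled, e.g. (example_events is an invented name):

static unsigned int example_events(struct input_handle *handle,
				   struct input_value *vals,
				   unsigned int count)
{
	const struct input_value *v;

	for (v = vals; v != vals + count; v++)
		pr_debug("event type %u code %u value %d\n",
			 v->type, v->code, v->value);

	return count;	/* all events were handled */
}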
* @match: called after comparing device's id with handler's id_table @@ -312,8 +313,8 @@ struct input_handler { void *private; void (*event)(struct input_handle *handle, unsigned int type, unsigned int code, int value); - void (*events)(struct input_handle *handle, - const struct input_value *vals, unsigned int count); + unsigned int (*events)(struct input_handle *handle, + struct input_value *vals, unsigned int count); bool (*filter)(struct input_handle *handle, unsigned int type, unsigned int code, int value); bool (*match)(struct input_handler *handler, struct input_dev *dev); int (*connect)(struct input_handler *handler, struct input_dev *dev, const struct input_device_id *id); diff --git a/include/linux/intel_tcc.h b/include/linux/intel_tcc.h index 8ff8eabb4a98..fa788817acfc 100644 --- a/include/linux/intel_tcc.h +++ b/include/linux/intel_tcc.h @@ -14,5 +14,6 @@ int intel_tcc_get_tjmax(int cpu); int intel_tcc_get_offset(int cpu); int intel_tcc_set_offset(int cpu, int offset); int intel_tcc_get_temp(int cpu, int *temp, bool pkg); +u32 intel_tcc_get_offset_mask(void); #endif /* __INTEL_TCC_H__ */ diff --git a/include/linux/intel_tpmi.h b/include/linux/intel_tpmi.h index 1e880cb0f454..ff480b47ae64 100644 --- a/include/linux/intel_tpmi.h +++ b/include/linux/intel_tpmi.h @@ -21,6 +21,7 @@ enum intel_tpmi_id { TPMI_ID_PEM = 1, /* Power and Perf excursion Monitor */ TPMI_ID_UNCORE = 2, /* Uncore Frequency Scaling */ TPMI_ID_SST = 5, /* Speed Select Technology */ + TPMI_ID_PLR = 0xc, /* Performance Limit Reasons */ TPMI_CONTROL_ID = 0x80, /* Special ID for getting feature status */ TPMI_INFO_ID = 0x81, /* Special ID for PCI BDF and Package ID information */ }; @@ -53,4 +54,5 @@ struct resource *tpmi_get_resource_at_index(struct auxiliary_device *auxdev, int int tpmi_get_resource_count(struct auxiliary_device *auxdev); int tpmi_get_feature_status(struct auxiliary_device *auxdev, int feature_id, bool *read_blocked, bool *write_blocked); +struct dentry *tpmi_get_debugfs_dir(struct auxiliary_device *auxdev); #endif diff --git a/include/linux/interconnect-clk.h b/include/linux/interconnect-clk.h index 0cd80112bea5..9bcee3e9c56c 100644 --- a/include/linux/interconnect-clk.h +++ b/include/linux/interconnect-clk.h @@ -11,12 +11,16 @@ struct device; struct icc_clk_data { struct clk *clk; const char *name; + unsigned int master_id; + unsigned int slave_id; }; struct icc_provider *icc_clk_register(struct device *dev, unsigned int first_id, unsigned int num_clocks, const struct icc_clk_data *data); +int devm_icc_clk_register(struct device *dev, unsigned int first_id, + unsigned int num_clocks, const struct icc_clk_data *data); void icc_clk_unregister(struct icc_provider *provider); #endif diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 5c9bdd3ffccc..bea39a0292eb 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -5,13 +5,14 @@ #include <linux/kernel.h> #include <linux/bitops.h> -#include <linux/cpumask.h> +#include <linux/cleanup.h> #include <linux/irqreturn.h> #include <linux/irqnr.h> #include <linux/hardirq.h> #include <linux/irqflags.h> #include <linux/hrtimer.h> #include <linux/kref.h> +#include <linux/cpumask_types.h> #include <linux/workqueue.h> #include <linux/jump_label.h> @@ -235,6 +236,9 @@ extern void enable_percpu_irq(unsigned int irq, unsigned int type); extern bool irq_percpu_is_enabled(unsigned int irq); extern void irq_wake_thread(unsigned int irq, void *dev_id); +DEFINE_LOCK_GUARD_1(disable_irq, int, + disable_irq(*_T->lock), 
enable_irq(*_T->lock)) + extern void disable_nmi_nosync(unsigned int irq); extern void disable_percpu_nmi(unsigned int irq); extern void enable_nmi(unsigned int irq); diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h index 86cf1f7ae389..f9a81761bfce 100644 --- a/include/linux/io-pgtable.h +++ b/include/linux/io-pgtable.h @@ -85,6 +85,8 @@ struct io_pgtable_cfg { * * IO_PGTABLE_QUIRK_ARM_OUTER_WBWA: Override the outer-cacheability * attributes set in the TCR for a non-coherent page-table walker. + * + * IO_PGTABLE_QUIRK_ARM_HD: Enables dirty tracking in stage 1 pagetable. */ #define IO_PGTABLE_QUIRK_ARM_NS BIT(0) #define IO_PGTABLE_QUIRK_NO_PERMS BIT(1) @@ -92,6 +94,7 @@ struct io_pgtable_cfg { #define IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT BIT(4) #define IO_PGTABLE_QUIRK_ARM_TTBR1 BIT(5) #define IO_PGTABLE_QUIRK_ARM_OUTER_WBWA BIT(6) + #define IO_PGTABLE_QUIRK_ARM_HD BIT(7) unsigned long quirks; unsigned long pgsize_bitmap; unsigned int ias; diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h index 7abdc0927124..3bb6198d1523 100644 --- a/include/linux/io_uring_types.h +++ b/include/linux/io_uring_types.h @@ -50,7 +50,7 @@ struct io_wq_work_list { struct io_wq_work { struct io_wq_work_node list; - unsigned flags; + atomic_t flags; /* place it here instead of io_kiocb as it fills padding and saves 4B */ int cancel_seq; }; @@ -210,14 +210,6 @@ struct io_submit_state { struct blk_plug plug; }; -struct io_ev_fd { - struct eventfd_ctx *cq_ev_fd; - unsigned int eventfd_async: 1; - struct rcu_head rcu; - atomic_t refs; - atomic_t ops; -}; - struct io_alloc_cache { void **entries; unsigned int nr_cached; @@ -372,7 +364,6 @@ struct io_ring_ctx { struct io_restriction restrictions; /* slow path rsrc auxilary data, used by update/register */ - struct io_mapped_ubuf *dummy_ubuf; struct io_rsrc_data *file_data; struct io_rsrc_data *buf_data; @@ -405,6 +396,9 @@ struct io_ring_ctx { struct callback_head poll_wq_task_work; struct list_head defer_list; + struct io_alloc_cache msg_cache; + spinlock_t msg_lock; + #ifdef CONFIG_NET_RX_BUSY_POLL struct list_head napi_list; /* track busy poll napi_id */ spinlock_t napi_lock; /* napi_list lock */ diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 17b3f36ad843..4d47f2c33311 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -124,12 +124,16 @@ struct iopf_fault { struct iopf_group { struct iopf_fault last_fault; struct list_head faults; + size_t fault_count; /* list node for iommu_fault_param::faults */ struct list_head pending_node; struct work_struct work; - struct iommu_domain *domain; + struct iommu_attach_handle *attach_handle; /* The device's fault data parameter. */ struct iommu_fault_param *fault_param; + /* Used by handler provider to hook the group on its own lists. 
*/ + struct list_head node; + u32 cookie; }; /** @@ -317,6 +321,9 @@ enum iommu_dev_features { #define IOMMU_PASID_INVALID (-1U) typedef unsigned int ioasid_t; +/* Read but do not clear any dirty bits */ +#define IOMMU_DIRTY_NO_CLEAR (1 << 0) + #ifdef CONFIG_IOMMU_API /** @@ -353,9 +360,6 @@ struct iommu_dirty_bitmap { struct iommu_iotlb_gather *gather; }; -/* Read but do not clear any dirty bits */ -#define IOMMU_DIRTY_NO_CLEAR (1 << 0) - /** * struct iommu_dirty_ops - domain specific dirty tracking operations * @set_dirty_tracking: Enable or Disable dirty tracking on the iommu domain @@ -547,6 +551,10 @@ static inline int __iommu_copy_struct_from_user_array( * @default_domain: If not NULL this will always be set as the default domain. * This should be an IDENTITY/BLOCKED/PLATFORM domain. * Do not use in new drivers. + * @user_pasid_table: IOMMU driver supports user-managed PASID table. There is + * no user domain for each PASID and the I/O page faults are + * forwarded through the user domain attached to the device + * RID. */ struct iommu_ops { bool (*capable)(struct device *dev, enum iommu_cap); @@ -590,6 +598,7 @@ struct iommu_ops { struct iommu_domain *blocked_domain; struct iommu_domain *release_domain; struct iommu_domain *default_domain; + u8 user_pasid_table:1; }; /** @@ -780,6 +789,7 @@ extern bool iommu_present(const struct bus_type *bus); extern bool device_iommu_capable(struct device *dev, enum iommu_cap cap); extern bool iommu_group_has_isolated_msi(struct iommu_group *group); extern struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus); +struct iommu_domain *iommu_paging_domain_alloc(struct device *dev); extern void iommu_domain_free(struct iommu_domain *domain); extern int iommu_attach_device(struct iommu_domain *domain, struct device *dev); @@ -968,7 +978,6 @@ extern struct iommu_group *generic_single_device_group(struct device *dev); /** * struct iommu_fwspec - per-device IOMMU instance data - * @ops: ops for this device's IOMMU * @iommu_fwnode: firmware handle for this device's IOMMU * @flags: IOMMU_FWSPEC_* flags * @num_ids: number of associated device IDs @@ -979,7 +988,6 @@ extern struct iommu_group *generic_single_device_group(struct device *dev); * consumers. */ struct iommu_fwspec { - const struct iommu_ops *ops; struct fwnode_handle *iommu_fwnode; u32 flags; unsigned int num_ids; @@ -989,27 +997,33 @@ struct iommu_fwspec { /* ATS is supported */ #define IOMMU_FWSPEC_PCI_RC_ATS (1 << 0) +/* + * An iommu attach handle represents a relationship between an iommu domain + * and a PASID or RID of a device. It is allocated and managed by the component + * that manages the domain and is stored in the iommu group during the time the + * domain is attached. 
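For illustration, not part of this patch: a fault-handler provider would embed the handle in its own state and recover it via container_of(), roughly as follows (the example_* names are invented):

struct example_fault_client {
	struct iommu_attach_handle handle;	/* passed at attach time */
	struct list_head pending;		/* provider-private bookkeeping */
};

static void example_handle_fault(struct iopf_group *group)
{
	struct example_fault_client *client =
		container_of(group->attach_handle,
			     struct example_fault_client, handle);

	/* hook the group on the provider's own list via iopf_group::node */
	list_add_tail(&group->node, &client->pending);
}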
+ */ +struct iommu_attach_handle { + struct iommu_domain *domain; +}; + /** * struct iommu_sva - handle to a device-mm bond */ struct iommu_sva { + struct iommu_attach_handle handle; struct device *dev; - struct iommu_domain *domain; - struct list_head handle_item; refcount_t users; }; struct iommu_mm_data { u32 pasid; struct list_head sva_domains; - struct list_head sva_handles; }; -int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode, - const struct iommu_ops *ops); +int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode); void iommu_fwspec_free(struct device *dev); int iommu_fwspec_add_ids(struct device *dev, const u32 *ids, int num_ids); -const struct iommu_ops *iommu_ops_from_fwnode(const struct fwnode_handle *fwnode); static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev) { @@ -1052,12 +1066,10 @@ int iommu_device_claim_dma_owner(struct device *dev, void *owner); void iommu_device_release_dma_owner(struct device *dev); int iommu_attach_device_pasid(struct iommu_domain *domain, - struct device *dev, ioasid_t pasid); + struct device *dev, ioasid_t pasid, + struct iommu_attach_handle *handle); void iommu_detach_device_pasid(struct iommu_domain *domain, struct device *dev, ioasid_t pasid); -struct iommu_domain * -iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid, - unsigned int type); ioasid_t iommu_alloc_global_pasid(struct device *dev); void iommu_free_global_pasid(ioasid_t pasid); #else /* CONFIG_IOMMU_API */ @@ -1086,6 +1098,11 @@ static inline struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus return NULL; } +static inline struct iommu_domain *iommu_paging_domain_alloc(struct device *dev) +{ + return ERR_PTR(-ENODEV); +} + static inline void iommu_domain_free(struct iommu_domain *domain) { } @@ -1315,8 +1332,7 @@ static inline void iommu_device_unlink(struct device *dev, struct device *link) } static inline int iommu_fwspec_init(struct device *dev, - struct fwnode_handle *iommu_fwnode, - const struct iommu_ops *ops) + struct fwnode_handle *iommu_fwnode) { return -ENODEV; } @@ -1331,12 +1347,6 @@ static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids, return -ENODEV; } -static inline -const struct iommu_ops *iommu_ops_from_fwnode(const struct fwnode_handle *fwnode) -{ - return NULL; -} - static inline int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat) { @@ -1388,7 +1398,8 @@ static inline int iommu_device_claim_dma_owner(struct device *dev, void *owner) } static inline int iommu_attach_device_pasid(struct iommu_domain *domain, - struct device *dev, ioasid_t pasid) + struct device *dev, ioasid_t pasid, + struct iommu_attach_handle *handle) { return -ENODEV; } @@ -1398,13 +1409,6 @@ static inline void iommu_detach_device_pasid(struct iommu_domain *domain, { } -static inline struct iommu_domain * -iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid, - unsigned int type) -{ - return NULL; -} - static inline ioasid_t iommu_alloc_global_pasid(struct device *dev) { return IOMMU_PASID_INVALID; @@ -1527,8 +1531,6 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm); void iommu_sva_unbind_device(struct iommu_sva *handle); u32 iommu_sva_get_pasid(struct iommu_sva *handle); -struct iommu_domain *iommu_sva_domain_alloc(struct device *dev, - struct mm_struct *mm); #else static inline struct iommu_sva * iommu_sva_bind_device(struct device *dev, struct mm_struct *mm) @@ -1553,12 +1555,6 @@ static inline u32 
mm_get_enqcmd_pasid(struct mm_struct *mm) } static inline void mm_pasid_drop(struct mm_struct *mm) {} - -static inline struct iommu_domain * -iommu_sva_domain_alloc(struct device *dev, struct mm_struct *mm) -{ - return NULL; -} #endif /* CONFIG_IOMMU_SVA */ #ifdef CONFIG_IOMMU_IOPF diff --git a/include/linux/ioport.h b/include/linux/ioport.h index db7fe25f3370..6e9fb667a1c5 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -188,6 +188,42 @@ enum { #define DEFINE_RES_DMA(_dma) \ DEFINE_RES_DMA_NAMED((_dma), NULL) +/** + * typedef resource_alignf - Resource alignment callback + * @data: Private data used by the callback + * @res: Resource candidate range (an empty resource space) + * @size: The minimum size of the empty space + * @align: Alignment from the constraints + * + * Callback allows calculating resource placement and alignment beyond min, + * max, and align fields in the struct resource_constraint. + * + * Return: Start address for the resource. + */ +typedef resource_size_t (*resource_alignf)(void *data, + const struct resource *res, + resource_size_t size, + resource_size_t align); + +/** + * struct resource_constraint - constraints to be met while searching empty + * resource space + * @min: The minimum address for the memory range + * @max: The maximum address for the memory range + * @align: Alignment for the start address of the empty space + * @alignf: Additional alignment constraints callback + * @alignf_data: Data provided for @alignf callback + * + * Contains the range and alignment constraints that have to be met during + * find_resource_space(). @alignf can be NULL indicating no alignment beyond + * @align is necessary. + */ +struct resource_constraint { + resource_size_t min, max, align; + resource_alignf alignf; + void *alignf_data; +}; + /* PC/ISA/whatever - the normal PC address spaces: IO and memory */ extern struct resource ioport_resource; extern struct resource iomem_resource; @@ -207,10 +243,7 @@ extern void arch_remove_reservations(struct resource *avail); extern int allocate_resource(struct resource *root, struct resource *new, resource_size_t size, resource_size_t min, resource_size_t max, resource_size_t align, - resource_size_t (*alignf)(void *, - const struct resource *, - resource_size_t, - resource_size_t), + resource_alignf alignf, void *alignf_data); struct resource *lookup_resource(struct resource *root, resource_size_t start); int adjust_resource(struct resource *res, resource_size_t start, @@ -264,6 +297,9 @@ static inline bool resource_union(const struct resource *r1, const struct resour return true; } +int find_resource_space(struct resource *root, struct resource *new, + resource_size_t size, struct resource_constraint *constraint); + /* Convenience shorthand with allocation */ #define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name), 0) #define request_muxed_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name), IORESOURCE_MUXED) diff --git a/include/linux/irq_sim.h b/include/linux/irq_sim.h index ab831e5ae748..89b4d8ff274b 100644 --- a/include/linux/irq_sim.h +++ b/include/linux/irq_sim.h @@ -16,11 +16,28 @@ * requested like normal irqs and enqueued from process context. 
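For illustration, not part of this patch: the *_sim_full() variants declared just below take a callback table, which a creator might fill in like this (the example_* names are invented; the data pointer is the one passed at creation time):

static int example_sim_irq_requested(struct irq_domain *domain,
				     irq_hw_number_t hwirq, void *data)
{
	pr_debug("simulated hwirq %lu requested\n", hwirq);
	return 0;	/* an error here presumably fails the request */
}

static void example_sim_irq_released(struct irq_domain *domain,
				     irq_hw_number_t hwirq, void *data)
{
	pr_debug("simulated hwirq %lu released\n", hwirq);
}

static const struct irq_sim_ops example_sim_ops = {
	.irq_sim_irq_requested = example_sim_irq_requested,
	.irq_sim_irq_released = example_sim_irq_released,
};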
*/ +struct irq_sim_ops { + int (*irq_sim_irq_requested)(struct irq_domain *domain, + irq_hw_number_t hwirq, void *data); + void (*irq_sim_irq_released)(struct irq_domain *domain, + irq_hw_number_t hwirq, void *data); +}; + struct irq_domain *irq_domain_create_sim(struct fwnode_handle *fwnode, unsigned int num_irqs); struct irq_domain *devm_irq_domain_create_sim(struct device *dev, struct fwnode_handle *fwnode, unsigned int num_irqs); +struct irq_domain *irq_domain_create_sim_full(struct fwnode_handle *fwnode, + unsigned int num_irqs, + const struct irq_sim_ops *ops, + void *data); +struct irq_domain * +devm_irq_domain_create_sim_full(struct device *dev, + struct fwnode_handle *fwnode, + unsigned int num_irqs, + const struct irq_sim_ops *ops, + void *data); void irq_domain_remove_sim(struct irq_domain *domain); #endif /* _LINUX_IRQ_SIM_H */ diff --git a/include/linux/irqchip/arm-gic-common.h b/include/linux/irqchip/arm-gic-common.h index 1177f3a1aed5..fc0246cc05ac 100644 --- a/include/linux/irqchip/arm-gic-common.h +++ b/include/linux/irqchip/arm-gic-common.h @@ -10,10 +10,6 @@ #include <linux/irqchip/arm-vgic-info.h> #define GICD_INT_DEF_PRI 0xa0 -#define GICD_INT_DEF_PRI_X4 ((GICD_INT_DEF_PRI << 24) |\ - (GICD_INT_DEF_PRI << 16) |\ - (GICD_INT_DEF_PRI << 8) |\ - GICD_INT_DEF_PRI) struct irq_domain; struct fwnode_handle; diff --git a/include/linux/irqchip/arm-gic-v3-prio.h b/include/linux/irqchip/arm-gic-v3-prio.h new file mode 100644 index 000000000000..44157c9abb78 --- /dev/null +++ b/include/linux/irqchip/arm-gic-v3-prio.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef __LINUX_IRQCHIP_ARM_GIC_V3_PRIO_H +#define __LINUX_IRQCHIP_ARM_GIC_V3_PRIO_H + +/* + * GIC priorities from the view of the PMR/RPR. + * + * These values are chosen to be valid in either the absolute priority space or + * the NS view of the priority space. The value programmed into the distributor + * and ITS will be chosen at boot time such that these values appear in the + * PMR/RPR. + * + * GICV3_PRIO_UNMASKED is the PMR view of the priority to use to permit both + * IRQs and pseudo-NMIs. + * + * GICV3_PRIO_IRQ is the PMR view of the priority of regular interrupts. This + * can be written to the PMR to mask regular IRQs. + * + * GICV3_PRIO_NMI is the PMR view of the priority of pseudo-NMIs. This can be + * written to the PMR to mask pseudo-NMIs. + * + * On arm64 some code sections either automatically switch back to PSR.I or + * explicitly require to not use priority masking. If bit GICV3_PRIO_PSR_I_SET + * is included in the priority mask, it indicates that PSR.I should be set and + * interrupt disabling temporarily does not rely on IRQ priorities. 
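The new *_sim_full() constructors above let a simulated domain observe request/release of its lines through struct irq_sim_ops. A sketch with hypothetical callbacks:

```c
/* Illustrative consumer of irq_domain_create_sim_full(). */
static int my_sim_requested(struct irq_domain *domain,
			    irq_hw_number_t hwirq, void *data)
{
	pr_debug("sim hwirq %lu requested\n", hwirq);
	return 0;		/* a negative return fails the request */
}

static void my_sim_released(struct irq_domain *domain,
			    irq_hw_number_t hwirq, void *data)
{
	pr_debug("sim hwirq %lu released\n", hwirq);
}

static const struct irq_sim_ops my_sim_ops = {
	.irq_sim_irq_requested	= my_sim_requested,
	.irq_sim_irq_released	= my_sim_released,
};
```

A probe path would then call devm_irq_domain_create_sim_full(dev, fwnode, num_irqs, &my_sim_ops, priv) in place of the plain variant.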
+ */ +#define GICV3_PRIO_UNMASKED 0xe0 +#define GICV3_PRIO_IRQ 0xc0 +#define GICV3_PRIO_NMI 0x80 + +#define GICV3_PRIO_PSR_I_SET (1 << 4) + +#ifndef __ASSEMBLER__ + +#define __gicv3_prio_to_ns(p) (0xff & ((p) << 1)) +#define __gicv3_ns_to_prio(ns) (0x80 | ((ns) >> 1)) + +#define __gicv3_prio_valid_ns(p) \ + (__gicv3_ns_to_prio(__gicv3_prio_to_ns(p)) == (p)) + +static_assert(__gicv3_prio_valid_ns(GICV3_PRIO_NMI)); +static_assert(__gicv3_prio_valid_ns(GICV3_PRIO_IRQ)); + +static_assert(GICV3_PRIO_NMI < GICV3_PRIO_IRQ); +static_assert(GICV3_PRIO_IRQ < GICV3_PRIO_UNMASKED); + +static_assert(GICV3_PRIO_IRQ < (GICV3_PRIO_IRQ | GICV3_PRIO_PSR_I_SET)); + +#endif /* __ASSEMBLER__ */ + +#endif /* __LINUX_IRQCHIP_ARM_GIC_V3_PRIO_H */ diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index 728691365464..70c0948f978e 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -638,7 +638,7 @@ struct fwnode_handle; int __init its_lpi_memreserve_init(void); int its_cpu_init(void); int its_init(struct fwnode_handle *handle, struct rdists *rdists, - struct irq_domain *domain); + struct irq_domain *domain, u8 irq_prio); int mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent); static inline bool gic_enable_sre(void) diff --git a/include/linux/irqchip/irq-partition-percpu.h b/include/linux/irqchip/irq-partition-percpu.h index 2f6ae7551748..b35ee22c278f 100644 --- a/include/linux/irqchip/irq-partition-percpu.h +++ b/include/linux/irqchip/irq-partition-percpu.h @@ -8,7 +8,7 @@ #define __LINUX_IRQCHIP_IRQ_PARTITION_PERCPU_H #include <linux/fwnode.h> -#include <linux/cpumask.h> +#include <linux/cpumask_types.h> #include <linux/irqdomain.h> struct partition_affinity { diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index ab04c1c27fae..5157d92b6f23 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -1086,6 +1086,13 @@ struct journal_s int j_revoke_records_per_block; /** + * @j_transaction_overhead_buffers: + * + * Number of blocks each transaction needs for its own bookkeeping + */ + int j_transaction_overhead_buffers; + + /** * @j_commit_interval: * * What is the maximum transaction lifetime before we begin a commit? @@ -1588,6 +1595,11 @@ void jbd2_journal_put_journal_head(struct journal_head *jh); */ extern struct kmem_cache *jbd2_handle_cache; +/* + * This specialized allocator has to be a macro for its allocations to be + * accounted separately (to have a separate alloc_tag). The typecast is + * intentional to enforce typesafety. + */ #define jbd2_alloc_handle(_gfp_flags) \ ((handle_t *)kmem_cache_zalloc(jbd2_handle_cache, _gfp_flags)) @@ -1602,6 +1614,11 @@ static inline void jbd2_free_handle(handle_t *handle) */ extern struct kmem_cache *jbd2_inode_cache; +/* + * This specialized allocator has to be a macro for its allocations to be + * accounted separately (to have a separate alloc_tag). The typecast is + * intentional to enforce typesafety.
+ */ #define jbd2_alloc_inode(_gfp_flags) \ + ((struct jbd2_inode *)kmem_cache_alloc(jbd2_inode_cache, _gfp_flags)) @@ -1660,11 +1677,6 @@ int jbd2_wait_inode_data(journal_t *journal, struct jbd2_inode *jinode); int jbd2_fc_wait_bufs(journal_t *journal, int num_blks); int jbd2_fc_release_bufs(journal_t *journal); -static inline int jbd2_journal_get_max_txn_bufs(journal_t *journal) -{ - return (journal->j_total_len - journal->j_fc_wbufsize) / 4; -} - /* * is_journal_abort * diff --git a/include/linux/jhash.h b/include/linux/jhash.h index ab7f8c152b89..fa26a2dd3b52 100644 --- a/include/linux/jhash.h +++ b/include/linux/jhash.h @@ -31,7 +31,7 @@ /* Mask the hash value, i.e (value & jhash_mask(n)) instead of (value % n) */ #define jhash_mask(n) (jhash_size(n)-1) -/* __jhash_mix -- mix 3 32-bit values reversibly. */ +/* __jhash_mix - mix 3 32-bit values reversibly. */ #define __jhash_mix(a, b, c) \ { \ a -= c; a ^= rol32(c, 4); c += b; \ @@ -60,7 +60,7 @@ /* jhash - hash an arbitrary key * @k: sequence of bytes as key * @length: the length of the key - * @initval: the previous hash, or an arbitray value + * @initval: the previous hash, or an arbitrary value * * The generic version, hashes an arbitrary sequence of bytes. * No alignment or length assumptions are made about the input key. @@ -110,7 +110,7 @@ static inline u32 jhash(const void *key, u32 length, u32 initval) /* jhash2 - hash an array of u32's * @k: the key which must be an array of u32's * @length: the number of u32's in the key - * @initval: the previous hash, or an arbitray value + * @initval: the previous hash, or an arbitrary value * * Returns the hash value of the key. */ diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index 9c042c6384bb..b97ce2df376f 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h @@ -5,7 +5,6 @@ #include <linux/smp.h> #include <linux/threads.h> #include <linux/percpu.h> -#include <linux/cpumask.h> #include <linux/interrupt.h> #include <linux/sched.h> #include <linux/vtime.h> diff --git a/include/linux/kmsan.h b/include/linux/kmsan.h index e0c23a32cdf0..2b1432cc16d5 100644 --- a/include/linux/kmsan.h +++ b/include/linux/kmsan.h @@ -230,6 +230,67 @@ void kmsan_handle_urb(const struct urb *urb, bool is_out); */ void kmsan_unpoison_entry_regs(const struct pt_regs *regs); +/** + * kmsan_get_metadata() - Return a pointer to KMSAN shadow or origins. + * @addr: kernel address. + * @is_origin: whether to return origins or shadow. + * + * Return NULL if metadata cannot be found. + */ +void *kmsan_get_metadata(void *addr, bool is_origin); + +/** + * kmsan_enable_current(): Enable KMSAN for the current task. + * + * Each kmsan_enable_current() call must be preceded by a + * kmsan_disable_current() call. These call pairs may be nested. + */ +void kmsan_enable_current(void); + +/** + * kmsan_disable_current(): Disable KMSAN for the current task. + * + * Each kmsan_disable_current() call must be followed by a + * kmsan_enable_current() call. These call pairs may be nested. + */ +void kmsan_disable_current(void); + +/** + * memset_no_sanitize_memory(): Fill memory without KMSAN instrumentation. + * @s: address of kernel memory to fill. + * @c: constant byte to fill the memory with. + * @n: number of bytes to fill. + * + * This is like memset(), but without KMSAN instrumentation.
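Because the new per-task counter makes the disable/enable pairs nestable (see the kmsan_types.h hunk below, where bool allow_reporting becomes unsigned int depth), a helper can suppress KMSAN around a noisy region without caring what its callers do. A small sketch, not from the patch:

```c
/* Illustrative: copy without triggering KMSAN reports. */
static void my_copy_quietly(void *dst, const void *src, size_t n)
{
	kmsan_disable_current();	/* pairs may nest freely */
	memcpy(dst, src, n);
	kmsan_enable_current();
}
```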
+ */ +static inline void *memset_no_sanitize_memory(void *s, int c, size_t n) +{ + return __memset(s, c, n); +} + +extern bool kmsan_enabled; +extern int panic_on_kmsan; + +/* + * KMSAN performs a lot of consistency checks that are currently enabled by + * default. BUG_ON is normally discouraged in the kernel, unless used for + * debugging, but KMSAN itself is a debugging tool, so it makes little sense to + * recover if something goes wrong. + */ +#define KMSAN_WARN_ON(cond) \ + ({ \ + const bool __cond = WARN_ON(cond); \ + if (unlikely(__cond)) { \ + WRITE_ONCE(kmsan_enabled, false); \ + if (panic_on_kmsan) { \ + /* Can't call panic() here because */ \ + /* of uaccess checks. */ \ + BUG(); \ + } \ + } \ + __cond; \ + }) + #else static inline void kmsan_init_shadow(void) @@ -329,6 +390,21 @@ static inline void kmsan_unpoison_entry_regs(const struct pt_regs *regs) { } +static inline void kmsan_enable_current(void) +{ +} + +static inline void kmsan_disable_current(void) +{ +} + +static inline void *memset_no_sanitize_memory(void *s, int c, size_t n) +{ + return memset(s, c, n); +} + +#define KMSAN_WARN_ON WARN_ON + #endif #endif /* _LINUX_KMSAN_H */ diff --git a/include/linux/kmsan_types.h b/include/linux/kmsan_types.h index 929287981afe..dfc59918b3c0 100644 --- a/include/linux/kmsan_types.h +++ b/include/linux/kmsan_types.h @@ -31,7 +31,7 @@ struct kmsan_context_state { struct kmsan_ctx { struct kmsan_context_state cstate; int kmsan_in_runtime; - bool allow_reporting; + unsigned int depth; }; #endif /* _LINUX_KMSAN_TYPES_H */ diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 692c01e41a18..689e8be873a7 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -378,8 +378,10 @@ struct kvm_vcpu { bool dy_eligible; } spin_loop; #endif + bool wants_to_run; bool preempted; bool ready; + bool scheduled_out; struct kvm_vcpu_arch arch; struct kvm_vcpu_stat stat; char stats_id[KVM_STATS_NAME_SIZE]; @@ -1494,8 +1496,6 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg); int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu); -void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu); - void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu); void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu); int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id); @@ -1955,8 +1955,6 @@ struct _kvm_stats_desc { HALT_POLL_HIST_COUNT), \ STATS_DESC_IBOOLEAN(VCPU_GENERIC, blocking) -extern struct dentry *kvm_debugfs_dir; - ssize_t kvm_stats_read(char *id, const struct kvm_stats_header *header, const struct _kvm_stats_desc *desc, void *stats, size_t size_stats, @@ -2096,6 +2094,7 @@ int kvm_set_irq_routing(struct kvm *kvm, const struct kvm_irq_routing_entry *entries, unsigned nr, unsigned flags); +int kvm_init_irq_routing(struct kvm *kvm); int kvm_set_routing_entry(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e, const struct kvm_irq_routing_entry *ue); @@ -2105,6 +2104,11 @@ void kvm_free_irq_routing(struct kvm *kvm); static inline void kvm_free_irq_routing(struct kvm *kvm) {} +static inline int kvm_init_irq_routing(struct kvm *kvm) +{ + return 0; +} + #endif int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi); @@ -2441,4 +2445,45 @@ static inline int kvm_gmem_get_pfn(struct kvm *kvm, } #endif /* CONFIG_KVM_PRIVATE_MEM */ +#ifdef CONFIG_HAVE_KVM_GMEM_PREPARE +int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_order); +bool kvm_arch_gmem_prepare_needed(struct kvm *kvm); +#endif + +/** + * 
kvm_gmem_populate() - Populate/prepare a GPA range with guest data + * + * @kvm: KVM instance + * @gfn: starting GFN to be populated + * @src: userspace-provided buffer containing data to copy into GFN range + * (passed to @post_populate, and incremented on each iteration + * if not NULL) + * @npages: number of pages to copy from userspace-buffer + * @post_populate: callback to issue for each gmem page that backs the GPA + * range + * @opaque: opaque data to pass to @post_populate callback + * + * This is primarily intended for cases where a gmem-backed GPA range needs + * to be initialized with userspace-provided data prior to being mapped into + * the guest as a private page. This should be called with the slots->lock + * held so that caller-enforced invariants regarding the expected memory + * attributes of the GPA range do not race with KVM_SET_MEMORY_ATTRIBUTES. + * + * Returns the number of pages that were populated. + */ +typedef int (*kvm_gmem_populate_cb)(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, + void __user *src, int order, void *opaque); + +long kvm_gmem_populate(struct kvm *kvm, gfn_t gfn, void __user *src, long npages, + kvm_gmem_populate_cb post_populate, void *opaque); + +#ifdef CONFIG_HAVE_KVM_GMEM_INVALIDATE +void kvm_arch_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end); +#endif + +#ifdef CONFIG_KVM_GENERIC_PRE_FAULT_MEMORY +long kvm_arch_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu, + struct kvm_pre_fault_memory *range); +#endif + #endif diff --git a/include/linux/leds.h b/include/linux/leds.h index 6300313c46b7..6885603f211b 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h @@ -107,6 +107,8 @@ struct led_classdev { #define LED_BRIGHT_HW_CHANGED BIT(21) #define LED_RETAIN_AT_SHUTDOWN BIT(22) #define LED_INIT_DEFAULT_TRIGGER BIT(23) +#define LED_REJECT_NAME_CONFLICT BIT(24) +#define LED_MULTI_COLOR BIT(25) /* set_brightness_work / blink_timer flags, atomic, private. */ unsigned long work_flags; @@ -374,6 +376,25 @@ void led_set_brightness(struct led_classdev *led_cdev, unsigned int brightness); int led_set_brightness_sync(struct led_classdev *led_cdev, unsigned int value); /** + * led_mc_set_brightness - set mc LED color intensity values and brightness + * @led_cdev: the LED to set + * @intensity_value: array of per color intensity values to set + * @num_colors: amount of entries in intensity_value array + * @brightness: the brightness to set the LED to + * + * Set a multi-color LED's per color intensity values and brightness. + * If necessary, this cancels the software blink timer. This function is + * guaranteed not to sleep. + * + * Calling this function on a non multi-color led_classdev or with the wrong + * num_colors value is an error. In this case an error will be logged once + * and the call will do nothing. + */ +void led_mc_set_brightness(struct led_classdev *led_cdev, + unsigned int *intensity_value, unsigned int num_colors, + unsigned int brightness); + +/** * led_update_brightness - update LED brightness * @led_cdev: the LED to query * @@ -428,6 +449,16 @@ int led_compose_name(struct device *dev, struct led_init_data *init_data, char *led_classdev_name); /** + * led_get_color_name - get string representation of color ID + * @color_id: The LED_COLOR_ID_* constant + * + * Get the string name of a LED_COLOR_ID_* constant. + * + * Returns: A string constant or NULL on an invalid ID. 
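led_mc_set_brightness() above applies per-channel intensities and overall brightness in one call. A sketch for a three-channel device (the color values are illustrative):

```c
/* Illustrative: drive an RGB multi-color classdev to orange. */
static void my_set_orange(struct led_classdev *led_cdev)
{
	/* Entry count must equal the classdev's number of colors. */
	unsigned int intensity[] = { 255, 165, 0 };

	led_mc_set_brightness(led_cdev, intensity, ARRAY_SIZE(intensity),
			      LED_FULL);
}
```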
+ */ +const char *led_get_color_name(u8 color_id); + +/** * led_sysfs_is_disabled - check if LED sysfs interface is disabled * @led_cdev: the LED to query * @@ -490,6 +521,9 @@ void led_trigger_register_simple(const char *name, struct led_trigger **trigger); void led_trigger_unregister_simple(struct led_trigger *trigger); void led_trigger_event(struct led_trigger *trigger, enum led_brightness event); +void led_mc_trigger_event(struct led_trigger *trig, + unsigned int *intensity_value, unsigned int num_colors, + enum led_brightness brightness); void led_trigger_blink(struct led_trigger *trigger, unsigned long delay_on, unsigned long delay_off); void led_trigger_blink_oneshot(struct led_trigger *trigger, @@ -532,6 +566,9 @@ static inline void led_trigger_register_simple(const char *name, static inline void led_trigger_unregister_simple(struct led_trigger *trigger) {} static inline void led_trigger_event(struct led_trigger *trigger, enum led_brightness event) {} +static inline void led_mc_trigger_event(struct led_trigger *trig, + unsigned int *intensity_value, unsigned int num_colors, + enum led_brightness brightness) {} static inline void led_trigger_blink(struct led_trigger *trigger, unsigned long delay_on, unsigned long delay_off) {} diff --git a/include/linux/libata.h b/include/linux/libata.h index 7d3bd7c9664a..17394098bee9 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -814,7 +814,6 @@ struct ata_port { /* Flags that change dynamically, protected by ap->lock */ unsigned int pflags; /* ATA_PFLAG_xxx */ unsigned int print_id; /* user visible unique port ID */ - unsigned int local_port_no; /* host local port num */ unsigned int port_no; /* 0 based port no. inside the host */ #ifdef CONFIG_ATA_SFF @@ -1069,7 +1068,7 @@ extern int sata_std_hardreset(struct ata_link *link, unsigned int *class, unsigned long deadline); extern void ata_std_postreset(struct ata_link *link, unsigned int *classes); -extern struct ata_host *ata_host_alloc(struct device *dev, int max_ports); +extern struct ata_host *ata_host_alloc(struct device *dev, int n_ports); extern struct ata_host *ata_host_alloc_pinfo(struct device *dev, const struct ata_port_info * const * ppi, int n_ports); extern void ata_host_get(struct ata_host *host); @@ -1082,7 +1081,6 @@ extern int ata_host_activate(struct ata_host *host, int irq, const struct scsi_host_template *sht); extern void ata_host_detach(struct ata_host *host); extern void ata_host_init(struct ata_host *, struct device *, struct ata_port_operations *); -extern int ata_scsi_detect(struct scsi_host_template *sht); extern int ata_scsi_ioctl(struct scsi_device *dev, unsigned int cmd, void __user *arg); #ifdef CONFIG_COMPAT @@ -1246,12 +1244,11 @@ extern int sata_link_debounce(struct ata_link *link, extern int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy, bool spm_wakeup); extern int ata_slave_link_init(struct ata_port *ap); -extern struct ata_port *ata_sas_port_alloc(struct ata_host *, - struct ata_port_info *, struct Scsi_Host *); extern void ata_port_probe(struct ata_port *ap); +extern struct ata_port *ata_port_alloc(struct ata_host *host); extern void ata_port_free(struct ata_port *ap); -extern int ata_sas_tport_add(struct device *parent, struct ata_port *ap); -extern void ata_sas_tport_delete(struct ata_port *ap); +extern int ata_tport_add(struct device *parent, struct ata_port *ap); +extern void ata_tport_delete(struct ata_port *ap); int ata_sas_device_configure(struct scsi_device *sdev, struct queue_limits *lim, struct ata_port 
*ap); extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap); diff --git a/include/linux/linux_logo.h b/include/linux/linux_logo.h index d4d5b93efe84..e37699b7e839 100644 --- a/include/linux/linux_logo.h +++ b/include/linux/linux_logo.h @@ -10,9 +10,6 @@ * Copyright (C) 2001 Greg Banks <gnb@alphalink.com.au> * Copyright (C) 2001 Jan-Benedict Glaw <jbglaw@lug-owl.de> * Copyright (C) 2003 Geert Uytterhoeven <geert@linux-m68k.org> - * - * Serial_console ascii image can be any size, - * but should contain %s to display the version */ #include <linux/init.h> diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h index 792b67ceb631..5099a8ccd5f4 100644 --- a/include/linux/list_lru.h +++ b/include/linux/list_lru.h @@ -50,7 +50,7 @@ struct list_lru_node { struct list_lru { struct list_lru_node *node; -#ifdef CONFIG_MEMCG_KMEM +#ifdef CONFIG_MEMCG struct list_head list; int shrinker_id; bool memcg_aware; diff --git a/include/linux/local_lock.h b/include/linux/local_lock.h index e55010fa7329..091dc0b6bdfb 100644 --- a/include/linux/local_lock.h +++ b/include/linux/local_lock.h @@ -51,4 +51,25 @@ #define local_unlock_irqrestore(lock, flags) \ __local_unlock_irqrestore(lock, flags) +DEFINE_GUARD(local_lock, local_lock_t __percpu*, + local_lock(_T), + local_unlock(_T)) +DEFINE_GUARD(local_lock_irq, local_lock_t __percpu*, + local_lock_irq(_T), + local_unlock_irq(_T)) +DEFINE_LOCK_GUARD_1(local_lock_irqsave, local_lock_t __percpu, + local_lock_irqsave(_T->lock, _T->flags), + local_unlock_irqrestore(_T->lock, _T->flags), + unsigned long flags) + +#define local_lock_nested_bh(_lock) \ + __local_lock_nested_bh(_lock) + +#define local_unlock_nested_bh(_lock) \ + __local_unlock_nested_bh(_lock) + +DEFINE_GUARD(local_lock_nested_bh, local_lock_t __percpu*, + local_lock_nested_bh(_T), + local_unlock_nested_bh(_T)) + #endif diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h index 975e33b793a7..8dd71fbbb6d2 100644 --- a/include/linux/local_lock_internal.h +++ b/include/linux/local_lock_internal.h @@ -62,6 +62,17 @@ do { \ local_lock_debug_init(lock); \ } while (0) +#define __spinlock_nested_bh_init(lock) \ +do { \ + static struct lock_class_key __key; \ + \ + debug_check_no_locks_freed((void *)lock, sizeof(*lock));\ + lockdep_init_map_type(&(lock)->dep_map, #lock, &__key, \ + 0, LD_WAIT_CONFIG, LD_WAIT_INV, \ + LD_LOCK_NORMAL); \ + local_lock_debug_init(lock); \ +} while (0) + #define __local_lock(lock) \ do { \ preempt_disable(); \ @@ -98,6 +109,15 @@ do { \ local_irq_restore(flags); \ } while (0) +#define __local_lock_nested_bh(lock) \ + do { \ + lockdep_assert_in_softirq(); \ + local_lock_acquire(this_cpu_ptr(lock)); \ + } while (0) + +#define __local_unlock_nested_bh(lock) \ + local_lock_release(this_cpu_ptr(lock)) + #else /* !CONFIG_PREEMPT_RT */ /* @@ -138,4 +158,15 @@ typedef spinlock_t local_lock_t; #define __local_unlock_irqrestore(lock, flags) __local_unlock(lock) +#define __local_lock_nested_bh(lock) \ +do { \ + lockdep_assert_in_softirq_func(); \ + spin_lock(this_cpu_ptr(lock)); \ +} while (0) + +#define __local_unlock_nested_bh(lock) \ +do { \ + spin_unlock(this_cpu_ptr((lock))); \ +} while (0) + #endif /* CONFIG_PREEMPT_RT */ diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 08b0d1d9d78b..217f7abf2cbf 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -178,9 +178,27 @@ static inline void lockdep_init_map(struct lockdep_map *lock, const char *name, (lock)->dep_map.wait_type_outer, \ 
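The DEFINE_GUARD/DEFINE_LOCK_GUARD_1 wrappers added to local_lock.h above make percpu locking scope-based. A minimal sketch, with an illustrative percpu structure:

```c
/* Illustrative percpu data protected through the new guards. */
struct my_stats {
	local_lock_t	lock;
	u64		events;
};
static DEFINE_PER_CPU(struct my_stats, my_stats) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

static void my_count_event(void)
{
	guard(local_lock)(&my_stats.lock);	/* dropped at scope exit */
	this_cpu_inc(my_stats.events);
}
```

guard(local_lock_irq), guard(local_lock_nested_bh) and the local_lock_irqsave lock guard follow the same pattern for the other variants.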
(lock)->dep_map.lock_type) +/** + * lockdep_set_novalidate_class: disable checking of lock ordering on a given + * lock + * @lock: Lock to mark + * + * Lockdep will still record that this lock has been taken, and print held + * instances when dumping locks + */ #define lockdep_set_novalidate_class(lock) \ lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock) +/** + * lockdep_set_notrack_class: disable lockdep tracking of a given lock entirely + * @lock: Lock to mark + * + * Bigger hammer than lockdep_set_novalidate_class: so far just for bcachefs, + * which takes more locks than lockdep is able to track (48). + */ +#define lockdep_set_notrack_class(lock) \ + lockdep_set_class_and_name(lock, &__lockdep_no_track__, #lock) + /* * Compare locking classes */ @@ -338,6 +356,7 @@ static inline void lockdep_set_selftest_task(struct task_struct *task) #define lockdep_set_subclass(lock, sub) do { } while (0) #define lockdep_set_novalidate_class(lock) do { } while (0) +#define lockdep_set_notrack_class(lock) do { } while (0) /* * We don't define lockdep_match_class() and lockdep_match_key() for !LOCKDEP @@ -600,6 +619,8 @@ do { \ (!in_softirq() || in_irq() || in_nmi())); \ } while (0) +extern void lockdep_assert_in_softirq_func(void); + #else # define might_lock(lock) do { } while (0) # define might_lock_read(lock) do { } while (0) @@ -613,6 +634,7 @@ do { \ # define lockdep_assert_preemption_enabled() do { } while (0) # define lockdep_assert_preemption_disabled() do { } while (0) # define lockdep_assert_in_softirq() do { } while (0) +# define lockdep_assert_in_softirq_func() do { } while (0) #endif #ifdef CONFIG_PROVE_RAW_LOCK_NESTING diff --git a/include/linux/lockdep_types.h b/include/linux/lockdep_types.h index 70d30d40ea4a..9f361d3ab9d9 100644 --- a/include/linux/lockdep_types.h +++ b/include/linux/lockdep_types.h @@ -80,6 +80,7 @@ struct lock_class_key { }; extern struct lock_class_key __lockdep_no_validate__; +extern struct lock_class_key __lockdep_no_track__; struct lock_trace; diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h index 44488b1ab9a9..855db460e08b 100644 --- a/include/linux/lsm_hook_defs.h +++ b/include/linux/lsm_hook_defs.h @@ -144,6 +144,7 @@ LSM_HOOK(int, 0, inode_setattr, struct mnt_idmap *idmap, struct dentry *dentry, LSM_HOOK(void, LSM_RET_VOID, inode_post_setattr, struct mnt_idmap *idmap, struct dentry *dentry, int ia_valid) LSM_HOOK(int, 0, inode_getattr, const struct path *path) +LSM_HOOK(int, 0, inode_xattr_skipcap, const char *name) LSM_HOOK(int, 0, inode_setxattr, struct mnt_idmap *idmap, struct dentry *dentry, const char *name, const void *value, size_t size, int flags) diff --git a/include/linux/math.h b/include/linux/math.h index dd4152711de7..f5f18dc3616b 100644 --- a/include/linux/math.h +++ b/include/linux/math.h @@ -112,6 +112,8 @@ struct type##_fract { \ __##type numerator; \ __##type denominator; \ }; +__STRUCT_FRACT(s8) +__STRUCT_FRACT(u8) __STRUCT_FRACT(s16) __STRUCT_FRACT(u16) __STRUCT_FRACT(s32) diff --git a/include/linux/math64.h b/include/linux/math64.h index d34def7f9a8c..6aaccc1626ab 100644 --- a/include/linux/math64.h +++ b/include/linux/math64.h @@ -298,6 +298,19 @@ u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div); ({ u64 _tmp = (d); div64_u64((ll) + _tmp - 1, _tmp); }) /** + * DIV_U64_ROUND_UP - unsigned 64bit divide with 32bit divisor rounded up + * @ll: unsigned 64bit dividend + * @d: unsigned 32bit divisor + * + * Divide unsigned 64bit dividend by unsigned 32bit divisor + * and round up. 
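lockdep_set_notrack_class() above is the escape hatch for code whose lock usage exceeds what lockdep can track per task. A sketch of how a subsystem might apply it (the structure is illustrative):

```c
struct my_fs {
	spinlock_t node_lock;
};

static void my_init_locks(struct my_fs *c)
{
	spin_lock_init(&c->node_lock);
	/* Take this lock out of lockdep tracking entirely. */
	lockdep_set_notrack_class(&c->node_lock);
}
```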
+ * + * Return: dividend / divisor rounded up + */ +#define DIV_U64_ROUND_UP(ll, d) \ + ({ u32 _tmp = (d); div_u64((ll) + _tmp - 1, _tmp); }) + +/** * DIV64_U64_ROUND_CLOSEST - unsigned 64bit divide with 64bit divisor rounded to nearest integer * @dividend: unsigned 64bit dividend * @divisor: unsigned 64bit divisor @@ -342,4 +355,19 @@ u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div); div_s64((__x - (__d / 2)), __d); \ } \ ) + +/** + * roundup_u64 - Round up a 64bit value to the next specified 32bit multiple + * @x: the value to round up + * @y: 32bit multiple to round up to + * + * Rounds @x to the next multiple of @y. For 32bit @x values, see roundup and + * the faster round_up() for powers of 2. + * + * Return: rounded up value. + */ +static inline u64 roundup_u64(u64 x, u32 y) +{ + return DIV_U64_ROUND_UP(x, y) * y; +} #endif /* _LINUX_MATH64_H */ diff --git a/include/linux/memblock.h b/include/linux/memblock.h index e2082240586d..fc4d75c6cec3 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -299,25 +299,6 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn, void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone, unsigned long *out_spfn, unsigned long *out_epfn); -/** - * for_each_free_mem_pfn_range_in_zone - iterate through zone specific free - * memblock areas - * @i: u64 used as loop variable - * @zone: zone in which all of the memory blocks reside - * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL - * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL - * - * Walks over free (memory && !reserved) areas of memblock in a specific - * zone. Available once memblock and an empty zone is initialized. The main - * assumption is that the zone start, end, and pgdat have been associated. - * This way we can use the zone to determine NUMA node, and if a given part - * of the memblock is valid for the zone. - */ -#define for_each_free_mem_pfn_range_in_zone(i, zone, p_start, p_end) \ - for (i = 0, \ - __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end); \ - i != U64_MAX; \ - __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end)) /** * for_each_free_mem_pfn_range_in_zone_from - iterate through zone specific @@ -335,8 +316,6 @@ void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone, for (; i != U64_MAX; \ __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end)) -int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask); - #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ /** @@ -565,7 +544,7 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblo } /** - * for_each_mem_region - itereate over memory regions + * for_each_mem_region - iterate over memory regions * @region: loop variable */ #define for_each_mem_region(region) \ diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 030d34e9d117..7e2eb091049a 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -69,18 +69,6 @@ struct mem_cgroup_id { refcount_t ref; }; -/* - * Per memcg event counter is incremented at every pagein/pageout. With THP, - * it will be incremented by the number of pages. This counter is used - * to trigger some periodic events. This is straightforward and better - * than using jiffies etc. to handle periodic memcg event.
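As a worked example of the two math64 helpers added above: with bytes = 8193 and a 4096-byte unit, DIV_U64_ROUND_UP(bytes, 4096) yields 3, and roundup_u64(bytes, 4096) yields 12288. In code:

```c
/* Illustrative: pad a 64-bit byte count to whole 4 KiB blocks. */
static u64 my_padded_len(u64 bytes)
{
	return roundup_u64(bytes, 4096);	/* e.g. 8193 -> 12288 */
}
```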
- */ -enum mem_cgroup_events_target { - MEM_CGROUP_TARGET_THRESH, - MEM_CGROUP_TARGET_SOFTLIMIT, - MEM_CGROUP_NTARGETS, -}; - struct memcg_vmstats_percpu; struct memcg_vmstats; struct lruvec_stats_percpu; @@ -96,23 +84,33 @@ struct mem_cgroup_reclaim_iter { * per-node information in memory controller. */ struct mem_cgroup_per_node { - struct lruvec lruvec; + /* Keep the read-only fields at the start */ + struct mem_cgroup *memcg; /* Back pointer, we cannot */ + /* use container_of */ struct lruvec_stats_percpu __percpu *lruvec_stats_percpu; struct lruvec_stats *lruvec_stats; - - unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS]; - - struct mem_cgroup_reclaim_iter iter; - struct shrinker_info __rcu *shrinker_info; +#ifdef CONFIG_MEMCG_V1 + /* + * Memcg-v1 only stuff in middle as buffer between read mostly fields + * and update often fields to avoid false sharing. If v1 stuff is + * not present, an explicit padding is needed. + */ + struct rb_node tree_node; /* RB tree node */ unsigned long usage_in_excess;/* Set to the value by which */ /* the soft limit is exceeded*/ bool on_tree; - struct mem_cgroup *memcg; /* Back pointer, we cannot */ - /* use container_of */ +#else + CACHELINE_PADDING(_pad1_); +#endif + + /* Fields which get updated often at the end. */ + struct lruvec lruvec; + unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS]; + struct mem_cgroup_reclaim_iter iter; }; struct mem_cgroup_threshold { @@ -194,14 +192,10 @@ struct mem_cgroup { struct page_counter memsw; /* v1 only */ }; - /* Legacy consumer-oriented counters */ - struct page_counter kmem; /* v1 only */ - struct page_counter tcpmem; /* v1 only */ - /* Range enforcement for interrupt charges */ struct work_struct high_work; -#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP) +#ifdef CONFIG_ZSWAP unsigned long zswap_max; /* @@ -211,8 +205,6 @@ struct mem_cgroup { bool zswap_writeback; #endif - unsigned long soft_limit; - /* vmpressure notifications */ struct vmpressure vmpressure; @@ -221,13 +213,7 @@ struct mem_cgroup { */ bool oom_group; - /* protected by memcg_oom_lock */ - bool oom_lock; - int under_oom; - - int swappiness; - /* OOM-Killer disable */ - int oom_kill_disable; + int swappiness; /* memory.events and memory.events.local */ struct cgroup_file events_file; @@ -236,29 +222,6 @@ struct mem_cgroup { /* handle for "memory.swap.events" */ struct cgroup_file swap_events_file; - /* protect arrays of thresholds */ - struct mutex thresholds_lock; - - /* thresholds for memory usage. RCU-protected */ - struct mem_cgroup_thresholds thresholds; - - /* thresholds for mem+swap usage. RCU-protected */ - struct mem_cgroup_thresholds memsw_thresholds; - - /* For oom notifier event fd */ - struct list_head oom_notify; - - /* - * Should we move charges of a task when a task is moved into this - * mem_cgroup ? And what type of charges should we move ? 
- */ - unsigned long move_charge_at_immigrate; - /* taken only while moving_account > 0 */ - spinlock_t move_lock; - unsigned long move_lock_flags; - - CACHELINE_PADDING(_pad1_); - /* memory.stat */ struct memcg_vmstats *vmstats; @@ -273,11 +236,6 @@ struct mem_cgroup { */ unsigned long socket_pressure; - /* Legacy tcp memory accounting */ - bool tcpmem_active; - int tcpmem_pressure; - -#ifdef CONFIG_MEMCG_KMEM int kmemcg_id; /* * memcg->objcg is wiped out as a part of the objcg repaprenting @@ -288,15 +246,6 @@ struct mem_cgroup { struct obj_cgroup *orig_objcg; /* list of inherited objcgs, protected by objcg_lock */ struct list_head objcg_list; -#endif - - CACHELINE_PADDING(_pad2_); - - /* - * set > 0 if pages under this cgroup are moving to other cgroup. - */ - atomic_t moving_account; - struct task_struct *move_lock_task; struct memcg_vmstats_percpu __percpu *vmstats_percpu; @@ -306,10 +255,6 @@ struct mem_cgroup { struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT]; #endif - /* List of events which userspace want to receive */ - struct list_head event_list; - spinlock_t event_list_lock; - #ifdef CONFIG_TRANSPARENT_HUGEPAGE struct deferred_split deferred_split_queue; #endif @@ -319,6 +264,56 @@ struct mem_cgroup { struct lru_gen_mm_list mm_list; #endif +#ifdef CONFIG_MEMCG_V1 + /* Legacy consumer-oriented counters */ + struct page_counter kmem; /* v1 only */ + struct page_counter tcpmem; /* v1 only */ + + unsigned long soft_limit; + + /* protected by memcg_oom_lock */ + bool oom_lock; + int under_oom; + + /* OOM-Killer disable */ + int oom_kill_disable; + + /* protect arrays of thresholds */ + struct mutex thresholds_lock; + + /* thresholds for memory usage. RCU-protected */ + struct mem_cgroup_thresholds thresholds; + + /* thresholds for mem+swap usage. RCU-protected */ + struct mem_cgroup_thresholds memsw_thresholds; + + /* For oom notifier event fd */ + struct list_head oom_notify; + + /* + * Should we move charges of a task when a task is moved into this + * mem_cgroup ? And what type of charges should we move ? + */ + unsigned long move_charge_at_immigrate; + /* taken only while moving_account > 0 */ + spinlock_t move_lock; + unsigned long move_lock_flags; + + /* Legacy tcp memory accounting */ + bool tcpmem_active; + int tcpmem_pressure; + + /* + * set > 0 if pages under this cgroup are moving to other cgroup. + */ + atomic_t moving_account; + struct task_struct *move_lock_task; + + /* List of events which userspace want to receive */ + struct list_head event_list; + spinlock_t event_list_lock; +#endif /* CONFIG_MEMCG_V1 */ + struct mem_cgroup_per_node *nodeinfo[]; }; @@ -443,11 +438,6 @@ static inline struct mem_cgroup *folio_memcg(struct folio *folio) return __folio_memcg(folio); } -static inline struct mem_cgroup *page_memcg(struct page *page) -{ - return folio_memcg(page_folio(page)); -} - /** * folio_memcg_rcu - Locklessly get the memory cgroup associated with a folio. * @folio: Pointer to the folio. @@ -540,7 +530,6 @@ retry: return memcg; } -#ifdef CONFIG_MEMCG_KMEM /* * folio_memcg_kmem - Check if the folio has the memcg_kmem flag set. * @folio: Pointer to the folio. 
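With page_memcg() removed from the header, the mod_memcg_page_state() hunk above shows the replacement idiom; any remaining caller can open-code the old helper the same way:

```c
/* Equivalent of the removed page_memcg() helper. */
static inline struct mem_cgroup *my_page_memcg(struct page *page)
{
	return folio_memcg(page_folio(page));
}
```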
@@ -556,15 +545,6 @@ static inline bool folio_memcg_kmem(struct folio *folio) return folio->memcg_data & MEMCG_DATA_KMEM; } - -#else -static inline bool folio_memcg_kmem(struct folio *folio) -{ - return false; -} - -#endif - static inline bool PageMemcgKmem(struct page *page) { return folio_memcg_kmem(page_folio(page)); @@ -949,51 +929,13 @@ void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg); -static inline void mem_cgroup_enter_user_fault(void) -{ - WARN_ON(current->in_user_fault); - current->in_user_fault = 1; -} - -static inline void mem_cgroup_exit_user_fault(void) -{ - WARN_ON(!current->in_user_fault); - current->in_user_fault = 0; -} - -static inline bool task_in_memcg_oom(struct task_struct *p) -{ - return p->memcg_in_oom; -} - -bool mem_cgroup_oom_synchronize(bool wait); struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim, struct mem_cgroup *oom_domain); void mem_cgroup_print_oom_group(struct mem_cgroup *memcg); -void folio_memcg_lock(struct folio *folio); -void folio_memcg_unlock(struct folio *folio); - void __mod_memcg_state(struct mem_cgroup *memcg, enum memcg_stat_item idx, int val); -/* try to stablize folio_memcg() for all the pages in a memcg */ -static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg) -{ - rcu_read_lock(); - - if (mem_cgroup_disabled() || !atomic_read(&memcg->moving_account)) - return true; - - rcu_read_unlock(); - return false; -} - -static inline void mem_cgroup_unlock_pages(void) -{ - rcu_read_unlock(); -} - /* idx can be of type enum memcg_stat_item or node_stat_item */ static inline void mod_memcg_state(struct mem_cgroup *memcg, enum memcg_stat_item idx, int val) @@ -1014,7 +956,7 @@ static inline void mod_memcg_page_state(struct page *page, return; rcu_read_lock(); - memcg = page_memcg(page); + memcg = folio_memcg(page_folio(page)); if (memcg) mod_memcg_state(memcg, idx, val); rcu_read_unlock(); @@ -1120,10 +1062,6 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm, void split_page_memcg(struct page *head, int old_order, int new_order); -unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, - gfp_t gfp_mask, - unsigned long *total_scanned); - #else /* CONFIG_MEMCG */ #define MEM_CGROUP_ID_SHIFT 0 @@ -1133,11 +1071,6 @@ static inline struct mem_cgroup *folio_memcg(struct folio *folio) return NULL; } -static inline struct mem_cgroup *page_memcg(struct page *page) -{ - return NULL; -} - static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio) { WARN_ON_ONCE(!rcu_read_lock_held()); @@ -1439,48 +1372,10 @@ mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg) { } -static inline void folio_memcg_lock(struct folio *folio) -{ -} - -static inline void folio_memcg_unlock(struct folio *folio) -{ -} - -static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg) -{ - /* to match folio_memcg_rcu() */ - rcu_read_lock(); - return true; -} - -static inline void mem_cgroup_unlock_pages(void) -{ - rcu_read_unlock(); -} - static inline void mem_cgroup_handle_over_high(gfp_t gfp_mask) { } -static inline void mem_cgroup_enter_user_fault(void) -{ -} - -static inline void mem_cgroup_exit_user_fault(void) -{ -} - -static inline bool task_in_memcg_oom(struct task_struct *p) -{ - return false; -} - -static inline bool mem_cgroup_oom_synchronize(bool wait) -{ - return false; -} - static inline struct mem_cgroup *mem_cgroup_get_oom_group( struct task_struct *victim, struct mem_cgroup *oom_domain) { @@ -1574,14 +1469,6 @@ 
void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx) static inline void split_page_memcg(struct page *head, int old_order, int new_order) { } - -static inline -unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, - gfp_t gfp_mask, - unsigned long *total_scanned) -{ - return 0; -} #endif /* CONFIG_MEMCG */ /* @@ -1589,7 +1476,7 @@ unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, * if MEMCG_DATA_OBJEXTS is set. */ struct slabobj_ext { -#ifdef CONFIG_MEMCG_KMEM +#ifdef CONFIG_MEMCG struct obj_cgroup *objcg; #endif #ifdef CONFIG_MEM_ALLOC_PROFILING @@ -1636,7 +1523,7 @@ static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec, spin_unlock_irqrestore(&lruvec->lru_lock, flags); } -/* Test requires a stable page->memcg binding, see page_memcg() */ +/* Test requires a stable folio->memcg binding, see folio_memcg() */ static inline bool folio_matches_lruvec(struct folio *folio, struct lruvec *lruvec) { @@ -1734,8 +1621,10 @@ void mem_cgroup_sk_alloc(struct sock *sk); void mem_cgroup_sk_free(struct sock *sk); static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg) { +#ifdef CONFIG_MEMCG_V1 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) return !!memcg->tcpmem_pressure; +#endif /* CONFIG_MEMCG_V1 */ do { if (time_before(jiffies, READ_ONCE(memcg->socket_pressure))) return true; @@ -1762,7 +1651,7 @@ static inline void set_shrinker_bit(struct mem_cgroup *memcg, } #endif -#ifdef CONFIG_MEMCG_KMEM +#ifdef CONFIG_MEMCG bool mem_cgroup_kmem_disabled(void); int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order); void __memcg_kmem_uncharge_page(struct page *page, int order); @@ -1905,9 +1794,9 @@ static inline void count_objcg_event(struct obj_cgroup *objcg, { } -#endif /* CONFIG_MEMCG_KMEM */ +#endif /* CONFIG_MEMCG */ -#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP) +#if defined(CONFIG_MEMCG) && defined(CONFIG_ZSWAP) bool obj_cgroup_may_zswap(struct obj_cgroup *objcg); void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size); void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size); @@ -1932,4 +1821,100 @@ static inline bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg) } #endif + +/* Cgroup v1-related declarations */ + +#ifdef CONFIG_MEMCG_V1 +unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order, + gfp_t gfp_mask, + unsigned long *total_scanned); + +bool mem_cgroup_oom_synchronize(bool wait); + +static inline bool task_in_memcg_oom(struct task_struct *p) +{ + return p->memcg_in_oom; +} + +void folio_memcg_lock(struct folio *folio); +void folio_memcg_unlock(struct folio *folio); + +/* try to stabilize folio_memcg() for all the pages in a memcg */ +static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg) +{ + rcu_read_lock(); + + if (mem_cgroup_disabled() || !atomic_read(&memcg->moving_account)) + return true; + + rcu_read_unlock(); + return false; +} + +static inline void mem_cgroup_unlock_pages(void) +{ + rcu_read_unlock(); +} + +static inline void mem_cgroup_enter_user_fault(void) +{ + WARN_ON(current->in_user_fault); + current->in_user_fault = 1; +} + +static inline void mem_cgroup_exit_user_fault(void) +{ + WARN_ON(!current->in_user_fault); + current->in_user_fault = 0; +} + +#else /* CONFIG_MEMCG_V1 */ +static inline +unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order, + gfp_t gfp_mask, + unsigned long *total_scanned) +{ + return 0; +} + +static inline void folio_memcg_lock(struct folio *folio) +{ +}
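Because the !CONFIG_MEMCG_V1 branch supplies inert stubs for the OOM and user-fault helpers, callers keep a single unconditional code path. A sketch of the calling pattern (not from the patch):

```c
/* Illustrative caller: identical with or without CONFIG_MEMCG_V1. */
static void my_handle_fault(void)
{
	mem_cgroup_enter_user_fault();
	/* ... run the fault handler ... */
	mem_cgroup_exit_user_fault();

	if (task_in_memcg_oom(current))
		mem_cgroup_oom_synchronize(true);
}
```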
+ +static inline void folio_memcg_unlock(struct folio *folio) +{ +} + +static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg) +{ + /* to match folio_memcg_rcu() */ + rcu_read_lock(); + return true; +} + +static inline void mem_cgroup_unlock_pages(void) +{ + rcu_read_unlock(); +} + +static inline bool task_in_memcg_oom(struct task_struct *p) +{ + return false; +} + +static inline bool mem_cgroup_oom_synchronize(bool wait) +{ + return false; +} + +static inline void mem_cgroup_enter_user_fault(void) +{ +} + +static inline void mem_cgroup_exit_user_fault(void) +{ +} + +#endif /* CONFIG_MEMCG_V1 */ + #endif /* _LINUX_MEMCONTROL_H */ diff --git a/include/linux/memfd.h b/include/linux/memfd.h index e7abf6fa4c52..3f2cf339ceaf 100644 --- a/include/linux/memfd.h +++ b/include/linux/memfd.h @@ -6,11 +6,16 @@ #ifdef CONFIG_MEMFD_CREATE extern long memfd_fcntl(struct file *file, unsigned int cmd, unsigned int arg); +struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx); #else static inline long memfd_fcntl(struct file *f, unsigned int c, unsigned int a) { return -EINVAL; } +static inline struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx) +{ + return ERR_PTR(-EINVAL); +} #endif #endif /* __LINUX_MEMFD_H */ diff --git a/include/linux/memory-tiers.h b/include/linux/memory-tiers.h index 0d70788558f4..0dc0cf2863e2 100644 --- a/include/linux/memory-tiers.h +++ b/include/linux/memory-tiers.h @@ -38,6 +38,7 @@ struct access_coordinate; #ifdef CONFIG_NUMA extern bool numa_demotion_enabled; extern struct memory_dev_type *default_dram_type; +extern nodemask_t default_dram_nodes; struct memory_dev_type *alloc_memory_type(int adistance); void put_memory_type(struct memory_dev_type *memtype); void init_node_memory_type(int node, struct memory_dev_type *default_type); @@ -76,6 +77,7 @@ static inline bool node_is_toptier(int node) #define numa_demotion_enabled false #define default_dram_type NULL +#define default_dram_nodes NODE_MASK_NONE /* * CONFIG_NUMA implementation returns non NULL error. 
*/ diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 7a9ff464608d..ebe876930e78 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -175,8 +175,8 @@ extern int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages, extern void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages); extern int online_pages(unsigned long pfn, unsigned long nr_pages, struct zone *zone, struct memory_group *group); -extern void __offline_isolated_pages(unsigned long start_pfn, - unsigned long end_pfn); +extern unsigned long __offline_isolated_pages(unsigned long start_pfn, + unsigned long end_pfn); typedef void (*online_page_callback_t)(struct page *page, unsigned int order); diff --git a/include/linux/mfd/88pm886.h b/include/linux/mfd/88pm886.h new file mode 100644 index 000000000000..133aa302e492 --- /dev/null +++ b/include/linux/mfd/88pm886.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef __MFD_88PM886_H +#define __MFD_88PM886_H + +#include <linux/i2c.h> +#include <linux/regmap.h> + +#define PM886_A1_CHIP_ID 0xa1 + +#define PM886_IRQ_ONKEY 0 + +#define PM886_PAGE_OFFSET_REGULATORS 1 + +#define PM886_REG_ID 0x00 + +#define PM886_REG_STATUS1 0x01 +#define PM886_ONKEY_STS1 BIT(0) + +#define PM886_REG_INT_STATUS1 0x05 + +#define PM886_REG_INT_ENA_1 0x0a +#define PM886_INT_ENA1_ONKEY BIT(0) + +#define PM886_REG_MISC_CONFIG1 0x14 +#define PM886_SW_PDOWN BIT(5) + +#define PM886_REG_MISC_CONFIG2 0x15 +#define PM886_INT_INV BIT(0) +#define PM886_INT_CLEAR BIT(1) +#define PM886_INT_RC 0x00 +#define PM886_INT_WC BIT(1) +#define PM886_INT_MASK_MODE BIT(2) + +#define PM886_REG_RTC_SPARE6 0xef + +#define PM886_REG_BUCK_EN 0x08 +#define PM886_REG_LDO_EN1 0x09 +#define PM886_REG_LDO_EN2 0x0a +#define PM886_REG_LDO1_VOUT 0x20 +#define PM886_REG_LDO2_VOUT 0x26 +#define PM886_REG_LDO3_VOUT 0x2c +#define PM886_REG_LDO4_VOUT 0x32 +#define PM886_REG_LDO5_VOUT 0x38 +#define PM886_REG_LDO6_VOUT 0x3e +#define PM886_REG_LDO7_VOUT 0x44 +#define PM886_REG_LDO8_VOUT 0x4a +#define PM886_REG_LDO9_VOUT 0x50 +#define PM886_REG_LDO10_VOUT 0x56 +#define PM886_REG_LDO11_VOUT 0x5c +#define PM886_REG_LDO12_VOUT 0x62 +#define PM886_REG_LDO13_VOUT 0x68 +#define PM886_REG_LDO14_VOUT 0x6e +#define PM886_REG_LDO15_VOUT 0x74 +#define PM886_REG_LDO16_VOUT 0x7a +#define PM886_REG_BUCK1_VOUT 0xa5 +#define PM886_REG_BUCK2_VOUT 0xb3 +#define PM886_REG_BUCK3_VOUT 0xc1 +#define PM886_REG_BUCK4_VOUT 0xcf +#define PM886_REG_BUCK5_VOUT 0xdd + +#define PM886_LDO_VSEL_MASK 0x0f +#define PM886_BUCK_VSEL_MASK 0x7f + +struct pm886_chip { + struct i2c_client *client; + unsigned int chip_id; + struct regmap *regmap; +}; +#endif /* __MFD_88PM886_H */ diff --git a/include/linux/mfd/atmel-hlcdc.h b/include/linux/mfd/atmel-hlcdc.h index a186119a49b5..80d675a03b39 100644 --- a/include/linux/mfd/atmel-hlcdc.h +++ b/include/linux/mfd/atmel-hlcdc.h @@ -22,6 +22,8 @@ #define ATMEL_HLCDC_DITHER BIT(6) #define ATMEL_HLCDC_DISPDLY BIT(7) #define ATMEL_HLCDC_MODE_MASK GENMASK(9, 8) +#define ATMEL_XLCDC_MODE_MASK GENMASK(10, 8) +#define ATMEL_XLCDC_DPI BIT(11) #define ATMEL_HLCDC_PP BIT(10) #define ATMEL_HLCDC_VSPSU BIT(12) #define ATMEL_HLCDC_VSPHO BIT(13) @@ -34,6 +36,12 @@ #define ATMEL_HLCDC_IDR 0x30 #define ATMEL_HLCDC_IMR 0x34 #define ATMEL_HLCDC_ISR 0x38 +#define ATMEL_XLCDC_ATTRE 0x3c + +#define ATMEL_XLCDC_BASE_UPDATE BIT(0) +#define ATMEL_XLCDC_OVR1_UPDATE BIT(1) +#define ATMEL_XLCDC_OVR3_UPDATE BIT(2) +#define ATMEL_XLCDC_HEO_UPDATE BIT(3) #define 
ATMEL_HLCDC_CLKPOL BIT(0) #define ATMEL_HLCDC_CLKSEL BIT(2) @@ -48,6 +56,8 @@ #define ATMEL_HLCDC_DISP BIT(2) #define ATMEL_HLCDC_PWM BIT(3) #define ATMEL_HLCDC_SIP BIT(4) +#define ATMEL_XLCDC_SD BIT(5) +#define ATMEL_XLCDC_CM BIT(6) #define ATMEL_HLCDC_SOF BIT(0) #define ATMEL_HLCDC_SYNCDIS BIT(1) diff --git a/include/linux/mfd/cs40l50.h b/include/linux/mfd/cs40l50.h new file mode 100644 index 000000000000..e5dc49860944 --- /dev/null +++ b/include/linux/mfd/cs40l50.h @@ -0,0 +1,137 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * CS40L50 Advanced Haptic Driver with waveform memory, + * integrated DSP, and closed-loop algorithms + * + * Copyright 2024 Cirrus Logic, Inc. + * + * Author: James Ogletree <james.ogletree@cirrus.com> + */ + +#ifndef __MFD_CS40L50_H__ +#define __MFD_CS40L50_H__ + +#include <linux/firmware/cirrus/cs_dsp.h> +#include <linux/gpio/consumer.h> +#include <linux/pm.h> +#include <linux/regmap.h> + +/* Power Supply Configuration */ +#define CS40L50_BLOCK_ENABLES2 0x201C +#define CS40L50_ERR_RLS 0x2034 +#define CS40L50_BST_LPMODE_SEL 0x3810 +#define CS40L50_DCM_LOW_POWER 0x1 +#define CS40L50_OVERTEMP_WARN 0x4000010 + +/* Interrupts */ +#define CS40L50_IRQ1_INT_1 0xE010 +#define CS40L50_IRQ1_BASE CS40L50_IRQ1_INT_1 +#define CS40L50_IRQ1_INT_2 0xE014 +#define CS40L50_IRQ1_INT_8 0xE02C +#define CS40L50_IRQ1_INT_9 0xE030 +#define CS40L50_IRQ1_INT_10 0xE034 +#define CS40L50_IRQ1_INT_18 0xE054 +#define CS40L50_IRQ1_MASK_1 0xE090 +#define CS40L50_IRQ1_MASK_2 0xE094 +#define CS40L50_IRQ1_MASK_20 0xE0DC +#define CS40L50_IRQ1_INT_1_OFFSET (CS40L50_IRQ1_INT_1 - CS40L50_IRQ1_BASE) +#define CS40L50_IRQ1_INT_2_OFFSET (CS40L50_IRQ1_INT_2 - CS40L50_IRQ1_BASE) +#define CS40L50_IRQ1_INT_8_OFFSET (CS40L50_IRQ1_INT_8 - CS40L50_IRQ1_BASE) +#define CS40L50_IRQ1_INT_9_OFFSET (CS40L50_IRQ1_INT_9 - CS40L50_IRQ1_BASE) +#define CS40L50_IRQ1_INT_10_OFFSET (CS40L50_IRQ1_INT_10 - CS40L50_IRQ1_BASE) +#define CS40L50_IRQ1_INT_18_OFFSET (CS40L50_IRQ1_INT_18 - CS40L50_IRQ1_BASE) +#define CS40L50_IRQ_MASK_2_OVERRIDE 0xFFDF7FFF +#define CS40L50_IRQ_MASK_20_OVERRIDE 0x15C01000 +#define CS40L50_AMP_SHORT_MASK BIT(31) +#define CS40L50_DSP_QUEUE_MASK BIT(21) +#define CS40L50_TEMP_ERR_MASK BIT(31) +#define CS40L50_BST_UVP_MASK BIT(6) +#define CS40L50_BST_SHORT_MASK BIT(7) +#define CS40L50_BST_ILIMIT_MASK BIT(18) +#define CS40L50_UVLO_VDDBATT_MASK BIT(16) +#define CS40L50_GLOBAL_ERROR_MASK BIT(15) + +enum cs40l50_irq_list { + CS40L50_DSP_QUEUE_IRQ, + CS40L50_GLOBAL_ERROR_IRQ, + CS40L50_UVLO_VDDBATT_IRQ, + CS40L50_BST_ILIMIT_IRQ, + CS40L50_BST_SHORT_IRQ, + CS40L50_BST_UVP_IRQ, + CS40L50_TEMP_ERR_IRQ, + CS40L50_AMP_SHORT_IRQ, +}; + +/* DSP */ +#define CS40L50_XMEM_PACKED_0 0x2000000 +#define CS40L50_XMEM_UNPACKED24_0 0x2800000 +#define CS40L50_SYS_INFO_ID 0x25E0000 +#define CS40L50_DSP_QUEUE_WT 0x28042C8 +#define CS40L50_DSP_QUEUE_RD 0x28042CC +#define CS40L50_NUM_WAVES 0x2805C18 +#define CS40L50_CORE_BASE 0x2B80000 +#define CS40L50_YMEM_PACKED_0 0x2C00000 +#define CS40L50_YMEM_UNPACKED24_0 0x3400000 +#define CS40L50_PMEM_0 0x3800000 +#define CS40L50_DSP_POLL_US 1000 +#define CS40L50_DSP_TIMEOUT_COUNT 100 +#define CS40L50_RESET_PULSE_US 2200 +#define CS40L50_CP_READY_US 3100 +#define CS40L50_AUTOSUSPEND_MS 2000 +#define CS40L50_PM_ALGO 0x9F206 +#define CS40L50_GLOBAL_ERR_RLS_SET BIT(11) +#define CS40L50_GLOBAL_ERR_RLS_CLEAR 0 + +enum cs40l50_wseqs { + CS40L50_PWR_ON, + CS40L50_STANDBY, + CS40L50_ACTIVE, + CS40L50_NUM_WSEQS, +}; + +/* DSP Queue */ +#define CS40L50_DSP_QUEUE_BASE 0x11004 +#define CS40L50_DSP_QUEUE_END 
0x1101C +#define CS40L50_DSP_QUEUE 0x11020 +#define CS40L50_PREVENT_HIBER 0x2000003 +#define CS40L50_ALLOW_HIBER 0x2000004 +#define CS40L50_SHUTDOWN 0x2000005 +#define CS40L50_SYSTEM_RESET 0x2000007 +#define CS40L50_START_I2S 0x3000002 +#define CS40L50_OWT_PUSH 0x3000008 +#define CS40L50_STOP_PLAYBACK 0x5000000 +#define CS40L50_OWT_DELETE 0xD000000 + +/* Firmware files */ +#define CS40L50_FW "cs40l50.wmfw" +#define CS40L50_WT "cs40l50.bin" + +/* Device */ +#define CS40L50_DEVID 0x0 +#define CS40L50_REVID 0x4 +#define CS40L50_DEVID_A 0x40A50 +#define CS40L50_REVID_B0 0xB0 + +struct cs40l50 { + struct device *dev; + struct regmap *regmap; + struct mutex lock; + struct cs_dsp dsp; + struct gpio_desc *reset_gpio; + struct regmap_irq_chip_data *irq_data; + const struct firmware *fw; + const struct firmware *bin; + struct cs_dsp_wseq wseqs[CS40L50_NUM_WSEQS]; + int irq; + u32 devid; + u32 revid; +}; + +int cs40l50_dsp_write(struct device *dev, struct regmap *regmap, u32 val); +int cs40l50_probe(struct cs40l50 *cs40l50); +int cs40l50_remove(struct cs40l50 *cs40l50); + +extern const struct regmap_config cs40l50_regmap; +extern const struct dev_pm_ops cs40l50_pm_ops; + +#endif /* __MFD_CS40L50_H__ */ diff --git a/include/linux/mfd/idt8a340_reg.h b/include/linux/mfd/idt8a340_reg.h index 0c706085c205..53a222605526 100644 --- a/include/linux/mfd/idt8a340_reg.h +++ b/include/linux/mfd/idt8a340_reg.h @@ -61,7 +61,7 @@ #define HW_Q8_CTRL_SPARE (0xa7d4) #define HW_Q11_CTRL_SPARE (0xa7ec) -/** +/* * Select FOD5 as sync_trigger for Q8 divider. * Transition from logic zero to one * sets trigger to sync Q8 divider. @@ -70,7 +70,7 @@ */ #define Q9_TO_Q8_SYNC_TRIG BIT(1) -/** +/* * Enable FOD5 as driver for clock and sync for Q8 divider. * Enable fanout buffer for FOD5. * @@ -78,7 +78,7 @@ */ #define Q9_TO_Q8_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK (BIT(0) | BIT(2)) -/** +/* * Select FOD6 as sync_trigger for Q11 divider. * Transition from logic zero to one * sets trigger to sync Q11 divider. @@ -87,7 +87,7 @@ */ #define Q10_TO_Q11_SYNC_TRIG BIT(1) -/** +/* * Enable FOD6 as driver for clock and sync for Q11 divider. * Enable fanout buffer for FOD6. 
* diff --git a/include/linux/mfd/lm3533.h b/include/linux/mfd/lm3533.h index 77092f6363ad..69059a7a2ce5 100644 --- a/include/linux/mfd/lm3533.h +++ b/include/linux/mfd/lm3533.h @@ -16,6 +16,7 @@ DEVICE_ATTR(_name, S_IRUGO | S_IWUSR , show_##_name, store_##_name) struct device; +struct gpio_desc; struct regmap; struct lm3533 { @@ -23,7 +24,7 @@ struct lm3533 { struct regmap *regmap; - int gpio_hwen; + struct gpio_desc *hwen; int irq; unsigned have_als:1; @@ -69,8 +70,6 @@ enum lm3533_boost_ovp { }; struct lm3533_platform_data { - int gpio_hwen; - enum lm3533_boost_ovp boost_ovp; enum lm3533_boost_freq boost_freq; diff --git a/include/linux/mfd/rohm-bd96801.h b/include/linux/mfd/rohm-bd96801.h new file mode 100644 index 000000000000..e2d9e10b6364 --- /dev/null +++ b/include/linux/mfd/rohm-bd96801.h @@ -0,0 +1,215 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* Copyright (C) 2024 ROHM Semiconductors */ + +#ifndef __MFD_BD96801_H__ +#define __MFD_BD96801_H__ + +#define BD96801_REG_SSCG_CTRL 0x09 +#define BD96801_REG_SHD_INTB 0x20 +#define BD96801_LDO5_VOL_LVL_REG 0x2c +#define BD96801_LDO6_VOL_LVL_REG 0x2d +#define BD96801_LDO7_VOL_LVL_REG 0x2e +#define BD96801_REG_BUCK_OVP 0x30 +#define BD96801_REG_BUCK_OVD 0x35 +#define BD96801_REG_LDO_OVP 0x31 +#define BD96801_REG_LDO_OVD 0x36 +#define BD96801_REG_BOOT_OVERTIME 0x3a +#define BD96801_REG_WD_TMO 0x40 +#define BD96801_REG_WD_CONF 0x41 +#define BD96801_REG_WD_FEED 0x42 +#define BD96801_REG_WD_FAILCOUNT 0x43 +#define BD96801_REG_WD_ASK 0x46 +#define BD96801_REG_WD_STATUS 0x4a +#define BD96801_REG_PMIC_STATE 0x4f +#define BD96801_REG_EXT_STATE 0x50 + +#define BD96801_STATE_STBY 0x09 + +#define BD96801_LOCK_REG 0x04 +#define BD96801_UNLOCK 0x9d +#define BD96801_LOCK 0x00 + +/* IRQ register area */ +#define BD96801_REG_INT_MAIN 0x51 + +/* + * The BD96801 has two physical IRQ lines, INTB and ERRB. + * + * The 'main status register' is located at 0x51. + * The ERRB status registers are located at 0x52 ... 0x5B + * INTB status registers are at range 0x5c ... 
0x63 + */ +#define BD96801_REG_INT_SYS_ERRB1 0x52 +#define BD96801_REG_INT_SYS_INTB 0x5c +#define BD96801_REG_INT_LDO7_INTB 0x63 + +/* MASK registers */ +#define BD96801_REG_MASK_SYS_INTB 0x73 +#define BD96801_REG_MASK_SYS_ERRB 0x69 + +#define BD96801_MAX_REGISTER 0x7a + +#define BD96801_OTP_ERR_MASK BIT(0) +#define BD96801_DBIST_ERR_MASK BIT(1) +#define BD96801_EEP_ERR_MASK BIT(2) +#define BD96801_ABIST_ERR_MASK BIT(3) +#define BD96801_PRSTB_ERR_MASK BIT(4) +#define BD96801_DRMOS1_ERR_MASK BIT(5) +#define BD96801_DRMOS2_ERR_MASK BIT(6) +#define BD96801_SLAVE_ERR_MASK BIT(7) +#define BD96801_VREF_ERR_MASK BIT(0) +#define BD96801_TSD_ERR_MASK BIT(1) +#define BD96801_UVLO_ERR_MASK BIT(2) +#define BD96801_OVLO_ERR_MASK BIT(3) +#define BD96801_OSC_ERR_MASK BIT(4) +#define BD96801_PON_ERR_MASK BIT(5) +#define BD96801_POFF_ERR_MASK BIT(6) +#define BD96801_CMD_SHDN_ERR_MASK BIT(7) +#define BD96801_INT_PRSTB_WDT_ERR_MASK BIT(0) +#define BD96801_INT_CHIP_IF_ERR_MASK BIT(3) +#define BD96801_INT_SHDN_ERR_MASK BIT(7) +#define BD96801_OUT_PVIN_ERR_MASK BIT(0) +#define BD96801_OUT_OVP_ERR_MASK BIT(1) +#define BD96801_OUT_UVP_ERR_MASK BIT(2) +#define BD96801_OUT_SHDN_ERR_MASK BIT(7) + +/* ERRB IRQs */ +enum { + /* Reg 0x52, 0x53, 0x54 - ERRB system IRQs */ + BD96801_OTP_ERR_STAT, + BD96801_DBIST_ERR_STAT, + BD96801_EEP_ERR_STAT, + BD96801_ABIST_ERR_STAT, + BD96801_PRSTB_ERR_STAT, + BD96801_DRMOS1_ERR_STAT, + BD96801_DRMOS2_ERR_STAT, + BD96801_SLAVE_ERR_STAT, + BD96801_VREF_ERR_STAT, + BD96801_TSD_ERR_STAT, + BD96801_UVLO_ERR_STAT, + BD96801_OVLO_ERR_STAT, + BD96801_OSC_ERR_STAT, + BD96801_PON_ERR_STAT, + BD96801_POFF_ERR_STAT, + BD96801_CMD_SHDN_ERR_STAT, + BD96801_INT_PRSTB_WDT_ERR, + BD96801_INT_CHIP_IF_ERR, + BD96801_INT_SHDN_ERR_STAT, + + /* Reg 0x55 BUCK1 ERR IRQs */ + BD96801_BUCK1_PVIN_ERR_STAT, + BD96801_BUCK1_OVP_ERR_STAT, + BD96801_BUCK1_UVP_ERR_STAT, + BD96801_BUCK1_SHDN_ERR_STAT, + + /* Reg 0x56 BUCK2 ERR IRQs */ + BD96801_BUCK2_PVIN_ERR_STAT, + BD96801_BUCK2_OVP_ERR_STAT, + BD96801_BUCK2_UVP_ERR_STAT, + BD96801_BUCK2_SHDN_ERR_STAT, + + /* Reg 0x57 BUCK3 ERR IRQs */ + BD96801_BUCK3_PVIN_ERR_STAT, + BD96801_BUCK3_OVP_ERR_STAT, + BD96801_BUCK3_UVP_ERR_STAT, + BD96801_BUCK3_SHDN_ERR_STAT, + + /* Reg 0x58 BUCK4 ERR IRQs */ + BD96801_BUCK4_PVIN_ERR_STAT, + BD96801_BUCK4_OVP_ERR_STAT, + BD96801_BUCK4_UVP_ERR_STAT, + BD96801_BUCK4_SHDN_ERR_STAT, + + /* Reg 0x59 LDO5 ERR IRQs */ + BD96801_LDO5_PVIN_ERR_STAT, + BD96801_LDO5_OVP_ERR_STAT, + BD96801_LDO5_UVP_ERR_STAT, + BD96801_LDO5_SHDN_ERR_STAT, + + /* Reg 0x5a LDO6 ERR IRQs */ + BD96801_LDO6_PVIN_ERR_STAT, + BD96801_LDO6_OVP_ERR_STAT, + BD96801_LDO6_UVP_ERR_STAT, + BD96801_LDO6_SHDN_ERR_STAT, + + /* Reg 0x5b LDO7 ERR IRQs */ + BD96801_LDO7_PVIN_ERR_STAT, + BD96801_LDO7_OVP_ERR_STAT, + BD96801_LDO7_UVP_ERR_STAT, + BD96801_LDO7_SHDN_ERR_STAT, +}; + +/* INTB IRQs */ +enum { + /* Reg 0x5c (System INTB) */ + BD96801_TW_STAT, + BD96801_WDT_ERR_STAT, + BD96801_I2C_ERR_STAT, + BD96801_CHIP_IF_ERR_STAT, + + /* Reg 0x5d (BUCK1 INTB) */ + BD96801_BUCK1_OCPH_STAT, + BD96801_BUCK1_OCPL_STAT, + BD96801_BUCK1_OCPN_STAT, + BD96801_BUCK1_OVD_STAT, + BD96801_BUCK1_UVD_STAT, + BD96801_BUCK1_TW_CH_STAT, + + /* Reg 0x5e (BUCK2 INTB) */ + BD96801_BUCK2_OCPH_STAT, + BD96801_BUCK2_OCPL_STAT, + BD96801_BUCK2_OCPN_STAT, + BD96801_BUCK2_OVD_STAT, + BD96801_BUCK2_UVD_STAT, + BD96801_BUCK2_TW_CH_STAT, + + /* Reg 0x5f (BUCK3 INTB)*/ + BD96801_BUCK3_OCPH_STAT, + BD96801_BUCK3_OCPL_STAT, + BD96801_BUCK3_OCPN_STAT, + BD96801_BUCK3_OVD_STAT, + BD96801_BUCK3_UVD_STAT, + 
diff --git a/include/linux/mfd/rohm-generic.h b/include/linux/mfd/rohm-generic.h
index 4eeb22876bad..e7d4e6afe388 100644
--- a/include/linux/mfd/rohm-generic.h
+++ b/include/linux/mfd/rohm-generic.h
@@ -16,6 +16,7 @@ enum rohm_chip_type {
 ROHM_CHIP_TYPE_BD71828,
 ROHM_CHIP_TYPE_BD71837,
 ROHM_CHIP_TYPE_BD71847,
+ ROHM_CHIP_TYPE_BD96801,
 ROHM_CHIP_TYPE_AMOUNT
};
diff --git a/include/linux/mfd/stm32-timers.h b/include/linux/mfd/stm32-timers.h
index 9eb17481b07f..f09ba598c97a 100644
--- a/include/linux/mfd/stm32-timers.h
+++ b/include/linux/mfd/stm32-timers.h
@@ -12,97 +12,106 @@
 #include <linux/dma-mapping.h>
 #include <linux/regmap.h>
-#define TIM_CR1 0x00 /* Control Register 1 */
-#define TIM_CR2 0x04 /* Control Register 2 */
-#define TIM_SMCR 0x08 /* Slave mode control reg */
-#define TIM_DIER 0x0C /* DMA/interrupt register */
-#define TIM_SR 0x10 /* Status register */
-#define TIM_EGR 0x14 /* Event Generation Reg */
-#define TIM_CCMR1 0x18 /* Capt/Comp 1 Mode Reg */
-#define TIM_CCMR2 0x1C /* Capt/Comp 2 Mode Reg */
-#define TIM_CCER 0x20 /* Capt/Comp Enable Reg */
-#define TIM_CNT 0x24 /* Counter */
-#define TIM_PSC 0x28 /* Prescaler */
-#define TIM_ARR 0x2c /* Auto-Reload Register */
-#define TIM_CCR1 0x34 /* Capt/Comp Register 1 */
-#define TIM_CCR2 0x38 /* Capt/Comp Register 2 */
-#define TIM_CCR3 0x3C /* Capt/Comp Register 3 */
-#define TIM_CCR4 0x40 /* Capt/Comp Register 4 */
-#define TIM_BDTR 0x44 /* Break and Dead-Time Reg */
-#define TIM_DCR 0x48 /* DMA control register */
-#define TIM_DMAR 0x4C /* DMA register for transfer */
-#define TIM_TISEL 0x68 /* Input Selection */
+#define TIM_CR1 0x00 /* Control Register 1 */
+#define TIM_CR2 0x04 /* Control Register 2 */
+#define TIM_SMCR 0x08 /* Slave mode control reg */
+#define TIM_DIER 0x0C /* DMA/interrupt register */
+#define TIM_SR 0x10 /* Status register */
+#define TIM_EGR 0x14 /* Event Generation Reg */
+#define TIM_CCMR1 0x18 /* Capt/Comp 1 Mode Reg */
+#define TIM_CCMR2 0x1C /* Capt/Comp 2 Mode Reg */
+#define TIM_CCER 0x20 /* Capt/Comp Enable Reg */
+#define TIM_CNT 0x24 /* Counter */
+#define TIM_PSC 0x28 /* Prescaler */
+#define TIM_ARR 0x2c /* Auto-Reload Register */
+#define TIM_CCRx(x) (0x34 + 4 * ((x) - 1)) /* Capt/Comp Register x (x ∈ {1, ..
4}) */ +#define TIM_CCR1 TIM_CCRx(1) /* Capt/Comp Register 1 */ +#define TIM_CCR2 TIM_CCRx(2) /* Capt/Comp Register 2 */ +#define TIM_CCR3 TIM_CCRx(3) /* Capt/Comp Register 3 */ +#define TIM_CCR4 TIM_CCRx(4) /* Capt/Comp Register 4 */ +#define TIM_BDTR 0x44 /* Break and Dead-Time Reg */ +#define TIM_DCR 0x48 /* DMA control register */ +#define TIM_DMAR 0x4C /* DMA register for transfer */ +#define TIM_TISEL 0x68 /* Input Selection */ -#define TIM_CR1_CEN BIT(0) /* Counter Enable */ -#define TIM_CR1_DIR BIT(4) /* Counter Direction */ -#define TIM_CR1_ARPE BIT(7) /* Auto-reload Preload Ena */ -#define TIM_CR2_MMS (BIT(4) | BIT(5) | BIT(6)) /* Master mode selection */ -#define TIM_CR2_MMS2 GENMASK(23, 20) /* Master mode selection 2 */ -#define TIM_SMCR_SMS (BIT(0) | BIT(1) | BIT(2)) /* Slave mode selection */ -#define TIM_SMCR_TS (BIT(4) | BIT(5) | BIT(6)) /* Trigger selection */ -#define TIM_DIER_UIE BIT(0) /* Update interrupt */ -#define TIM_DIER_CC1IE BIT(1) /* CC1 Interrupt Enable */ -#define TIM_DIER_CC2IE BIT(2) /* CC2 Interrupt Enable */ -#define TIM_DIER_CC3IE BIT(3) /* CC3 Interrupt Enable */ -#define TIM_DIER_CC4IE BIT(4) /* CC4 Interrupt Enable */ -#define TIM_DIER_CC_IE(x) BIT((x) + 1) /* CC1, CC2, CC3, CC4 interrupt enable */ -#define TIM_DIER_UDE BIT(8) /* Update DMA request Enable */ -#define TIM_DIER_CC1DE BIT(9) /* CC1 DMA request Enable */ -#define TIM_DIER_CC2DE BIT(10) /* CC2 DMA request Enable */ -#define TIM_DIER_CC3DE BIT(11) /* CC3 DMA request Enable */ -#define TIM_DIER_CC4DE BIT(12) /* CC4 DMA request Enable */ -#define TIM_DIER_COMDE BIT(13) /* COM DMA request Enable */ -#define TIM_DIER_TDE BIT(14) /* Trigger DMA request Enable */ -#define TIM_SR_UIF BIT(0) /* Update interrupt flag */ -#define TIM_SR_CC_IF(x) BIT((x) + 1) /* CC1, CC2, CC3, CC4 interrupt flag */ -#define TIM_EGR_UG BIT(0) /* Update Generation */ -#define TIM_CCMR_PE BIT(3) /* Channel Preload Enable */ -#define TIM_CCMR_M1 (BIT(6) | BIT(5)) /* Channel PWM Mode 1 */ -#define TIM_CCMR_CC1S (BIT(0) | BIT(1)) /* Capture/compare 1 sel */ -#define TIM_CCMR_IC1PSC GENMASK(3, 2) /* Input capture 1 prescaler */ -#define TIM_CCMR_CC2S (BIT(8) | BIT(9)) /* Capture/compare 2 sel */ -#define TIM_CCMR_IC2PSC GENMASK(11, 10) /* Input capture 2 prescaler */ -#define TIM_CCMR_CC1S_TI1 BIT(0) /* IC1/IC3 selects TI1/TI3 */ -#define TIM_CCMR_CC1S_TI2 BIT(1) /* IC1/IC3 selects TI2/TI4 */ -#define TIM_CCMR_CC2S_TI2 BIT(8) /* IC2/IC4 selects TI2/TI4 */ -#define TIM_CCMR_CC2S_TI1 BIT(9) /* IC2/IC4 selects TI1/TI3 */ -#define TIM_CCMR_CC3S (BIT(0) | BIT(1)) /* Capture/compare 3 sel */ -#define TIM_CCMR_CC4S (BIT(8) | BIT(9)) /* Capture/compare 4 sel */ -#define TIM_CCMR_CC3S_TI3 BIT(0) /* IC3 selects TI3 */ -#define TIM_CCMR_CC4S_TI4 BIT(8) /* IC4 selects TI4 */ -#define TIM_CCER_CC1E BIT(0) /* Capt/Comp 1 out Ena */ -#define TIM_CCER_CC1P BIT(1) /* Capt/Comp 1 Polarity */ -#define TIM_CCER_CC1NE BIT(2) /* Capt/Comp 1N out Ena */ -#define TIM_CCER_CC1NP BIT(3) /* Capt/Comp 1N Polarity */ -#define TIM_CCER_CC2E BIT(4) /* Capt/Comp 2 out Ena */ -#define TIM_CCER_CC2P BIT(5) /* Capt/Comp 2 Polarity */ -#define TIM_CCER_CC2NP BIT(7) /* Capt/Comp 2N Polarity */ -#define TIM_CCER_CC3E BIT(8) /* Capt/Comp 3 out Ena */ -#define TIM_CCER_CC3P BIT(9) /* Capt/Comp 3 Polarity */ -#define TIM_CCER_CC3NP BIT(11) /* Capt/Comp 3N Polarity */ -#define TIM_CCER_CC4E BIT(12) /* Capt/Comp 4 out Ena */ -#define TIM_CCER_CC4P BIT(13) /* Capt/Comp 4 Polarity */ -#define TIM_CCER_CC4NP BIT(15) /* Capt/Comp 4N Polarity */ -#define TIM_CCER_CCXE 
(BIT(0) | BIT(4) | BIT(8) | BIT(12))
-#define TIM_BDTR_BKE(x) BIT(12 + (x) * 12) /* Break input enable */
-#define TIM_BDTR_BKP(x) BIT(13 + (x) * 12) /* Break input polarity */
-#define TIM_BDTR_AOE BIT(14) /* Automatic Output Enable */
-#define TIM_BDTR_MOE BIT(15) /* Main Output Enable */
-#define TIM_BDTR_BKF(x) (0xf << (16 + (x) * 4))
-#define TIM_DCR_DBA GENMASK(4, 0) /* DMA base addr */
-#define TIM_DCR_DBL GENMASK(12, 8) /* DMA burst len */
+#define TIM_CR1_CEN BIT(0) /* Counter Enable */
+#define TIM_CR1_DIR BIT(4) /* Counter Direction */
+#define TIM_CR1_ARPE BIT(7) /* Auto-reload Preload Ena */
+#define TIM_CR2_MMS (BIT(4) | BIT(5) | BIT(6)) /* Master mode selection */
+#define TIM_CR2_MMS2 GENMASK(23, 20) /* Master mode selection 2 */
+#define TIM_SMCR_SMS (BIT(0) | BIT(1) | BIT(2)) /* Slave mode selection */
+#define TIM_SMCR_TS (BIT(4) | BIT(5) | BIT(6)) /* Trigger selection */
+#define TIM_DIER_UIE BIT(0) /* Update interrupt */
+#define TIM_DIER_CCxIE(x) BIT(1 + ((x) - 1)) /* CCx Interrupt Enable (x ∈ {1, .. 4}) */
+#define TIM_DIER_CC1IE TIM_DIER_CCxIE(1) /* CC1 Interrupt Enable */
+#define TIM_DIER_CC2IE TIM_DIER_CCxIE(2) /* CC2 Interrupt Enable */
+#define TIM_DIER_CC3IE TIM_DIER_CCxIE(3) /* CC3 Interrupt Enable */
+#define TIM_DIER_CC4IE TIM_DIER_CCxIE(4) /* CC4 Interrupt Enable */
+#define TIM_DIER_UDE BIT(8) /* Update DMA request Enable */
+#define TIM_DIER_CCxDE(x) BIT(9 + ((x) - 1)) /* CCx DMA request Enable (x ∈ {1, .. 4}) */
+#define TIM_DIER_CC1DE TIM_DIER_CCxDE(1) /* CC1 DMA request Enable */
+#define TIM_DIER_CC2DE TIM_DIER_CCxDE(2) /* CC2 DMA request Enable */
+#define TIM_DIER_CC3DE TIM_DIER_CCxDE(3) /* CC3 DMA request Enable */
+#define TIM_DIER_CC4DE TIM_DIER_CCxDE(4) /* CC4 DMA request Enable */
+#define TIM_DIER_COMDE BIT(13) /* COM DMA request Enable */
+#define TIM_DIER_TDE BIT(14) /* Trigger DMA request Enable */
+#define TIM_SR_UIF BIT(0) /* Update interrupt flag */
+#define TIM_SR_CC_IF(x) BIT((x) + 1) /* CC1, CC2, CC3, CC4 interrupt flag */
+#define TIM_EGR_UG BIT(0) /* Update Generation */
+#define TIM_CCMR_PE BIT(3) /* Channel Preload Enable */
+#define TIM_CCMR_M1 (BIT(6) | BIT(5)) /* Channel PWM Mode 1 */
+#define TIM_CCMR_CC1S (BIT(0) | BIT(1)) /* Capture/compare 1 sel */
+#define TIM_CCMR_IC1PSC GENMASK(3, 2) /* Input capture 1 prescaler */
+#define TIM_CCMR_CC2S (BIT(8) | BIT(9)) /* Capture/compare 2 sel */
+#define TIM_CCMR_IC2PSC GENMASK(11, 10) /* Input capture 2 prescaler */
+#define TIM_CCMR_CC1S_TI1 BIT(0) /* IC1/IC3 selects TI1/TI3 */
+#define TIM_CCMR_CC1S_TI2 BIT(1) /* IC1/IC3 selects TI2/TI4 */
+#define TIM_CCMR_CC2S_TI2 BIT(8) /* IC2/IC4 selects TI2/TI4 */
+#define TIM_CCMR_CC2S_TI1 BIT(9) /* IC2/IC4 selects TI1/TI3 */
+#define TIM_CCMR_CC3S (BIT(0) | BIT(1)) /* Capture/compare 3 sel */
+#define TIM_CCMR_CC4S (BIT(8) | BIT(9)) /* Capture/compare 4 sel */
+#define TIM_CCMR_CC3S_TI3 BIT(0) /* IC3 selects TI3 */
+#define TIM_CCMR_CC4S_TI4 BIT(8) /* IC4 selects TI4 */
+#define TIM_CCER_CCxE(x) BIT(0 + 4 * ((x) - 1)) /* Capt/Comp x out Ena (x ∈ {1, .. 4}) */
+#define TIM_CCER_CCxP(x) BIT(1 + 4 * ((x) - 1)) /* Capt/Comp x Polarity (x ∈ {1, .. 4}) */
+#define TIM_CCER_CCxNE(x) BIT(2 + 4 * ((x) - 1)) /* Capt/Comp xN out Ena (x ∈ {1, .. 4}) */
+#define TIM_CCER_CCxNP(x) BIT(3 + 4 * ((x) - 1)) /* Capt/Comp xN Polarity (x ∈ {1, .. 4}) */
+#define TIM_CCER_CC1E TIM_CCER_CCxE(1) /* Capt/Comp 1 out Ena */
+#define TIM_CCER_CC1P TIM_CCER_CCxP(1) /* Capt/Comp 1 Polarity */
+#define TIM_CCER_CC1NE TIM_CCER_CCxNE(1) /* Capt/Comp 1N out Ena */
+#define TIM_CCER_CC1NP TIM_CCER_CCxNP(1) /* Capt/Comp 1N Polarity */
+#define TIM_CCER_CC2E TIM_CCER_CCxE(2) /* Capt/Comp 2 out Ena */
+#define TIM_CCER_CC2P TIM_CCER_CCxP(2) /* Capt/Comp 2 Polarity */
+#define TIM_CCER_CC2NE TIM_CCER_CCxNE(2) /* Capt/Comp 2N out Ena */
+#define TIM_CCER_CC2NP TIM_CCER_CCxNP(2) /* Capt/Comp 2N Polarity */
+#define TIM_CCER_CC3E TIM_CCER_CCxE(3) /* Capt/Comp 3 out Ena */
+#define TIM_CCER_CC3P TIM_CCER_CCxP(3) /* Capt/Comp 3 Polarity */
+#define TIM_CCER_CC3NE TIM_CCER_CCxNE(3) /* Capt/Comp 3N out Ena */
+#define TIM_CCER_CC3NP TIM_CCER_CCxNP(3) /* Capt/Comp 3N Polarity */
+#define TIM_CCER_CC4E TIM_CCER_CCxE(4) /* Capt/Comp 4 out Ena */
+#define TIM_CCER_CC4P TIM_CCER_CCxP(4) /* Capt/Comp 4 Polarity */
+#define TIM_CCER_CC4NE TIM_CCER_CCxNE(4) /* Capt/Comp 4N out Ena */
+#define TIM_CCER_CC4NP TIM_CCER_CCxNP(4) /* Capt/Comp 4N Polarity */
+#define TIM_CCER_CCXE (BIT(0) | BIT(4) | BIT(8) | BIT(12))
+#define TIM_BDTR_BKE(x) BIT(12 + (x) * 12) /* Break input enable */
+#define TIM_BDTR_BKP(x) BIT(13 + (x) * 12) /* Break input polarity */
+#define TIM_BDTR_AOE BIT(14) /* Automatic Output Enable */
+#define TIM_BDTR_MOE BIT(15) /* Main Output Enable */
+#define TIM_BDTR_BKF(x) (0xf << (16 + (x) * 4))
+#define TIM_DCR_DBA GENMASK(4, 0) /* DMA base addr */
+#define TIM_DCR_DBL GENMASK(12, 8) /* DMA burst len */
-#define MAX_TIM_PSC 0xFFFF
-#define MAX_TIM_ICPSC 0x3
-#define TIM_CR2_MMS_SHIFT 4
-#define TIM_CR2_MMS2_SHIFT 20
+#define MAX_TIM_PSC 0xFFFF
+#define MAX_TIM_ICPSC 0x3
+#define TIM_CR2_MMS_SHIFT 4
+#define TIM_CR2_MMS2_SHIFT 20
 #define TIM_SMCR_SMS_SLAVE_MODE_DISABLED 0 /* counts on internal clock when CEN=1 */
 #define TIM_SMCR_SMS_ENCODER_MODE_1 1 /* counts TI1FP1 edges, depending on TI2FP2 level */
 #define TIM_SMCR_SMS_ENCODER_MODE_2 2 /* counts TI2FP2 edges, depending on TI1FP1 level */
 #define TIM_SMCR_SMS_ENCODER_MODE_3 3 /* counts on both TI1FP1 and TI2FP2 edges */
-#define TIM_SMCR_TS_SHIFT 4
-#define TIM_BDTR_BKF_MASK 0xF
-#define TIM_BDTR_BKF_SHIFT(x) (16 + (x) * 4)
+#define TIM_SMCR_TS_SHIFT 4
+#define TIM_BDTR_BKF_MASK 0xF
+#define TIM_BDTR_BKF_SHIFT(x) (16 + (x) * 4)
 enum stm32_timers_dmas {
 STM32_TIMERS_DMA_CH1,
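The stm32-timers rework above replaces fixed per-channel definitions with parameterized macros (TIM_CCRx(x), TIM_DIER_CCxIE(x), TIM_DIER_CCxDE(x), TIM_CCER_CCxE(x) and friends, x ∈ {1, .. 4}) and rebuilds the legacy names on top of them, so TIM_CCRx(1) still expands to 0x34 and TIM_CCER_CCxE(ch) still lands on bits 0/4/8/12. Channel-generic code can therefore compute offsets and bit positions instead of switching on the channel number. A minimal sketch of the pattern, assuming the timer regmap that the stm32 MFD core already provides; stm32_timer_enable_cc() is an illustrative name:

#include <linux/mfd/stm32-timers.h>
#include <linux/regmap.h>

/*
 * Program the compare value for capture/compare channel 'ch' (1..4),
 * unmask its interrupt and enable its output, with no per-channel
 * switch statement.
 */
static int stm32_timer_enable_cc(struct regmap *regmap, unsigned int ch, u32 cmp)
{
	int ret;

	ret = regmap_write(regmap, TIM_CCRx(ch), cmp);
	if (ret)
		return ret;

	ret = regmap_set_bits(regmap, TIM_DIER, TIM_DIER_CCxIE(ch));
	if (ret)
		return ret;

	return regmap_set_bits(regmap, TIM_CCER, TIM_CCER_CCxE(ch));
}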
diff --git a/include/linux/mfd/syscon.h b/include/linux/mfd/syscon.h
index c315903f6dab..aad9c6b50463 100644
--- a/include/linux/mfd/syscon.h
+++ b/include/linux/mfd/syscon.h
@@ -28,6 +28,8 @@ struct regmap *syscon_regmap_lookup_by_phandle_args(struct device_node *np,
 unsigned int *out_args);
 struct regmap *syscon_regmap_lookup_by_phandle_optional(struct device_node *np,
 const char *property);
+int of_syscon_register_regmap(struct device_node *np,
+ struct regmap *regmap);
 #else
 static inline struct regmap *device_node_to_regmap(struct device_node *np)
 {
@@ -67,6 +69,12 @@ static inline struct regmap *syscon_regmap_lookup_by_phandle_optional(
 return NULL;
 }
+static inline int of_syscon_register_regmap(struct device_node *np,
+ struct regmap *regmap)
+{
+ return -EOPNOTSUPP;
+}
+
 #endif
 #endif /* __LINUX_MFD_SYSCON_H__ */
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
deleted file mode 100644
index eace8ea6cda0..000000000000
--- a/include/linux/mfd/tmio.h
+++ /dev/null
@@ -1,133 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef MFD_TMIO_H
-#define MFD_TMIO_H
-
-#include <linux/device.h>
-#include
<linux/fb.h> -#include <linux/io.h> -#include <linux/jiffies.h> -#include <linux/mmc/card.h> -#include <linux/platform_device.h> -#include <linux/pm_runtime.h> - -#define tmio_ioread8(addr) readb(addr) -#define tmio_ioread16(addr) readw(addr) -#define tmio_ioread16_rep(r, b, l) readsw(r, b, l) -#define tmio_ioread32(addr) \ - (((u32)readw((addr))) | (((u32)readw((addr) + 2)) << 16)) - -#define tmio_iowrite8(val, addr) writeb((val), (addr)) -#define tmio_iowrite16(val, addr) writew((val), (addr)) -#define tmio_iowrite16_rep(r, b, l) writesw(r, b, l) -#define tmio_iowrite32(val, addr) \ - do { \ - writew((val), (addr)); \ - writew((val) >> 16, (addr) + 2); \ - } while (0) - -#define sd_config_write8(base, shift, reg, val) \ - tmio_iowrite8((val), (base) + ((reg) << (shift))) -#define sd_config_write16(base, shift, reg, val) \ - tmio_iowrite16((val), (base) + ((reg) << (shift))) -#define sd_config_write32(base, shift, reg, val) \ - do { \ - tmio_iowrite16((val), (base) + ((reg) << (shift))); \ - tmio_iowrite16((val) >> 16, (base) + ((reg + 2) << (shift))); \ - } while (0) - -/* tmio MMC platform flags */ -/* - * Some controllers can support a 2-byte block size when the bus width - * is configured in 4-bit mode. - */ -#define TMIO_MMC_BLKSZ_2BYTES BIT(1) -/* - * Some controllers can support SDIO IRQ signalling. - */ -#define TMIO_MMC_SDIO_IRQ BIT(2) - -/* Some features are only available or tested on R-Car Gen2 or later */ -#define TMIO_MMC_MIN_RCAR2 BIT(3) - -/* - * Some controllers require waiting for the SD bus to become - * idle before writing to some registers. - */ -#define TMIO_MMC_HAS_IDLE_WAIT BIT(4) - -/* - * Use the busy timeout feature. Probably all TMIO versions support it. Yet, - * we don't have documentation for old variants, so we enable only known good - * variants with this flag. Can be removed once all variants are known good. 
- */ -#define TMIO_MMC_USE_BUSY_TIMEOUT BIT(5) - -/* - * Some controllers have CMD12 automatically - * issue/non-issue register - */ -#define TMIO_MMC_HAVE_CMD12_CTRL BIT(7) - -/* Controller has some SDIO status bits which must be 1 */ -#define TMIO_MMC_SDIO_STATUS_SETBITS BIT(8) - -/* - * Some controllers have a 32-bit wide data port register - */ -#define TMIO_MMC_32BIT_DATA_PORT BIT(9) - -/* - * Some controllers allows to set SDx actual clock - */ -#define TMIO_MMC_CLK_ACTUAL BIT(10) - -/* Some controllers have a CBSY bit */ -#define TMIO_MMC_HAVE_CBSY BIT(11) - -struct dma_chan; - -/* - * data for the MMC controller - */ -struct tmio_mmc_data { - void *chan_priv_tx; - void *chan_priv_rx; - unsigned int hclk; - unsigned long capabilities; - unsigned long capabilities2; - unsigned long flags; - u32 ocr_mask; /* available voltages */ - dma_addr_t dma_rx_offset; - unsigned int max_blk_count; - unsigned short max_segs; - void (*set_pwr)(struct platform_device *host, int state); - void (*set_clk_div)(struct platform_device *host, int state); -}; - -/* - * data for the NAND controller - */ -struct tmio_nand_data { - struct nand_bbt_descr *badblock_pattern; - struct mtd_partition *partition; - unsigned int num_partitions; - const char *const *part_parsers; -}; - -#define FBIO_TMIO_ACC_WRITE 0x7C639300 -#define FBIO_TMIO_ACC_SYNC 0x7C639301 - -struct tmio_fb_data { - int (*lcd_set_power)(struct platform_device *fb_dev, - bool on); - int (*lcd_mode)(struct platform_device *fb_dev, - const struct fb_videomode *mode); - int num_modes; - struct fb_videomode *modes; - - /* in mm: size of screen */ - int height; - int width; -}; - -#endif diff --git a/include/linux/mfd/tps65912.h b/include/linux/mfd/tps65912.h index 860ec0a16c96..e5373c302722 100644 --- a/include/linux/mfd/tps65912.h +++ b/include/linux/mfd/tps65912.h @@ -314,6 +314,5 @@ struct tps65912 { extern const struct regmap_config tps65912_regmap_config; int tps65912_device_init(struct tps65912 *tps); -void tps65912_device_exit(struct tps65912 *tps); #endif /* __LINUX_MFD_TPS65912_H */ diff --git a/include/linux/mhi.h b/include/linux/mhi.h index b573f15762f8..fabd6ed8d258 100644 --- a/include/linux/mhi.h +++ b/include/linux/mhi.h @@ -290,6 +290,7 @@ struct mhi_controller_config { /** * struct mhi_controller - Master MHI controller structure + * @name: Device name of the MHI controller * @cntrl_dev: Pointer to the struct device of physical bus acting as the MHI * controller (required) * @mhi_dev: MHI device instance for the controller @@ -367,6 +368,7 @@ struct mhi_controller_config { * they can be populated depending on the usecase. 
*/ struct mhi_controller { + const char *name; struct device *cntrl_dev; struct mhi_device *mhi_dev; struct dentry *debugfs_dentry; diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 2ce13e8a309b..644be30b69c8 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -63,8 +63,6 @@ extern const char *migrate_reason_names[MR_TYPES]; #ifdef CONFIG_MIGRATION void putback_movable_pages(struct list_head *l); -int migrate_folio_extra(struct address_space *mapping, struct folio *dst, - struct folio *src, enum migrate_mode mode, int extra_count); int migrate_folio(struct address_space *mapping, struct folio *dst, struct folio *src, enum migrate_mode mode); int migrate_pages(struct list_head *l, new_folio_t new, free_folio_t free, @@ -78,7 +76,6 @@ int migrate_huge_page_move_mapping(struct address_space *mapping, void migration_entry_wait_on_locked(swp_entry_t entry, spinlock_t *ptl) __releases(ptl); void folio_migrate_flags(struct folio *newfolio, struct folio *folio); -void folio_migrate_copy(struct folio *newfolio, struct folio *folio); int folio_migrate_mapping(struct address_space *mapping, struct folio *newfolio, struct folio *folio, int extra_count); @@ -142,9 +139,16 @@ const struct movable_operations *page_movable_ops(struct page *page) } #ifdef CONFIG_NUMA_BALANCING +int migrate_misplaced_folio_prepare(struct folio *folio, + struct vm_area_struct *vma, int node); int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma, int node); #else +static inline int migrate_misplaced_folio_prepare(struct folio *folio, + struct vm_area_struct *vma, int node) +{ + return -EAGAIN; /* can't migrate now */ +} static inline int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma, int node) { diff --git a/include/linux/migrate_mode.h b/include/linux/migrate_mode.h index f37cc03f9369..265c4328b36a 100644 --- a/include/linux/migrate_mode.h +++ b/include/linux/migrate_mode.h @@ -7,16 +7,11 @@ * on most operations but not ->writepage as the potential stall time * is too significant * MIGRATE_SYNC will block when migrating pages - * MIGRATE_SYNC_NO_COPY will block when migrating pages but will not copy pages - * with the CPU. Instead, page copy happens outside the migratepage() - * callback and is likely using a DMA engine. See migrate_vma() and HMM - * (mm/hmm.c) for users of this mode. */ enum migrate_mode { MIGRATE_ASYNC, MIGRATE_SYNC_LIGHT, MIGRATE_SYNC, - MIGRATE_SYNC_NO_COPY, }; enum migrate_reason { @@ -29,6 +24,7 @@ enum migrate_reason { MR_CONTIG_RANGE, MR_LONGTERM_PIN, MR_DEMOTION, + MR_DAMON, MR_TYPES }; diff --git a/include/linux/mii_timestamper.h b/include/linux/mii_timestamper.h index 26b04f73f214..995db62570f9 100644 --- a/include/linux/mii_timestamper.h +++ b/include/linux/mii_timestamper.h @@ -59,7 +59,7 @@ struct mii_timestamper { struct phy_device *phydev); int (*ts_info)(struct mii_timestamper *mii_ts, - struct ethtool_ts_info *ts_info); + struct kernel_ethtool_ts_info *ts_info); struct device *device; }; diff --git a/include/linux/min_heap.h b/include/linux/min_heap.h index d52daf45861b..43a7b9dcf15e 100644 --- a/include/linux/min_heap.h +++ b/include/linux/min_heap.h @@ -7,46 +7,89 @@ #include <linux/types.h> /** - * struct min_heap - Data structure to hold a min-heap. - * @data: Start of array holding the heap elements. + * Data structure to hold a min-heap. * @nr: Number of elements currently in the heap. * @size: Maximum number of elements that can be held in current storage. 
+ * @data: Pointer to the start of array holding the heap elements. + * @preallocated: Start of the static preallocated array holding the heap elements. */ -struct min_heap { - void *data; - int nr; - int size; -}; +#define MIN_HEAP_PREALLOCATED(_type, _name, _nr) \ +struct _name { \ + int nr; \ + int size; \ + _type *data; \ + _type preallocated[_nr]; \ +} + +#define DEFINE_MIN_HEAP(_type, _name) MIN_HEAP_PREALLOCATED(_type, _name, 0) + +typedef DEFINE_MIN_HEAP(char, min_heap_char) min_heap_char; + +#define __minheap_cast(_heap) (typeof((_heap)->data[0]) *) +#define __minheap_obj_size(_heap) sizeof((_heap)->data[0]) /** * struct min_heap_callbacks - Data/functions to customise the min_heap. - * @elem_size: The nr of each element in bytes. * @less: Partial order function for this heap. * @swp: Swap elements function. */ struct min_heap_callbacks { - int elem_size; - bool (*less)(const void *lhs, const void *rhs); - void (*swp)(void *lhs, void *rhs); + bool (*less)(const void *lhs, const void *rhs, void *args); + void (*swp)(void *lhs, void *rhs, void *args); }; +/* Initialize a min-heap. */ +static __always_inline +void __min_heap_init(min_heap_char *heap, void *data, int size) +{ + heap->nr = 0; + heap->size = size; + if (data) + heap->data = data; + else + heap->data = heap->preallocated; +} + +#define min_heap_init(_heap, _data, _size) \ + __min_heap_init((min_heap_char *)_heap, _data, _size) + +/* Get the minimum element from the heap. */ +static __always_inline +void *__min_heap_peek(struct min_heap_char *heap) +{ + return heap->nr ? heap->data : NULL; +} + +#define min_heap_peek(_heap) \ + (__minheap_cast(_heap) __min_heap_peek((min_heap_char *)_heap)) + +/* Check if the heap is full. */ +static __always_inline +bool __min_heap_full(min_heap_char *heap) +{ + return heap->nr == heap->size; +} + +#define min_heap_full(_heap) \ + __min_heap_full((min_heap_char *)_heap) + /* Sift the element at pos down the heap. */ static __always_inline -void min_heapify(struct min_heap *heap, int pos, - const struct min_heap_callbacks *func) +void __min_heap_sift_down(min_heap_char *heap, int pos, size_t elem_size, + const struct min_heap_callbacks *func, void *args) { void *left, *right; void *data = heap->data; - void *root = data + pos * func->elem_size; + void *root = data + pos * elem_size; int i = pos, j; /* Find the sift-down path all the way to the leaves. */ for (;;) { if (i * 2 + 2 >= heap->nr) break; - left = data + (i * 2 + 1) * func->elem_size; - right = data + (i * 2 + 2) * func->elem_size; - i = func->less(left, right) ? i * 2 + 1 : i * 2 + 2; + left = data + (i * 2 + 1) * elem_size; + right = data + (i * 2 + 2) * elem_size; + i = func->less(left, right, args) ? i * 2 + 1 : i * 2 + 2; } /* Special case for the last leaf with no sibling. */ @@ -54,83 +97,140 @@ void min_heapify(struct min_heap *heap, int pos, i = i * 2 + 1; /* Backtrack to the correct location. */ - while (i != pos && func->less(root, data + i * func->elem_size)) + while (i != pos && func->less(root, data + i * elem_size, args)) i = (i - 1) / 2; /* Shift the element into its correct place. */ j = i; while (i != pos) { i = (i - 1) / 2; - func->swp(data + i * func->elem_size, data + j * func->elem_size); + func->swp(data + i * elem_size, data + j * elem_size, args); + } +} + +#define min_heap_sift_down(_heap, _pos, _func, _args) \ + __min_heap_sift_down((min_heap_char *)_heap, _pos, __minheap_obj_size(_heap), _func, _args) + +/* Sift up ith element from the heap, O(log2(nr)). 
*/
+static __always_inline
+void __min_heap_sift_up(min_heap_char *heap, size_t elem_size, size_t idx,
+ const struct min_heap_callbacks *func, void *args)
+{
+ void *data = heap->data;
+ size_t parent;
+
+ while (idx) {
+ parent = (idx - 1) / 2;
+ if (func->less(data + parent * elem_size, data + idx * elem_size, args))
+ break;
+ func->swp(data + parent * elem_size, data + idx * elem_size, args);
+ idx = parent;
+ }
+}
+
+#define min_heap_sift_up(_heap, _idx, _func, _args) \
+ __min_heap_sift_up((min_heap_char *)_heap, __minheap_obj_size(_heap), _idx, _func, _args)
+
 /* Floyd's approach to heapification that is O(nr). */
 static __always_inline
-void min_heapify_all(struct min_heap *heap,
- const struct min_heap_callbacks *func)
+void __min_heapify_all(min_heap_char *heap, size_t elem_size,
+ const struct min_heap_callbacks *func, void *args)
 {
 int i;
 for (i = heap->nr / 2 - 1; i >= 0; i--)
- min_heapify(heap, i, func);
+ __min_heap_sift_down(heap, i, elem_size, func, args);
 }
+#define min_heapify_all(_heap, _func, _args) \
+ __min_heapify_all((min_heap_char *)_heap, __minheap_obj_size(_heap), _func, _args)
+
 /* Remove minimum element from the heap, O(log2(nr)). */
 static __always_inline
-void min_heap_pop(struct min_heap *heap,
- const struct min_heap_callbacks *func)
+bool __min_heap_pop(min_heap_char *heap, size_t elem_size,
+ const struct min_heap_callbacks *func, void *args)
 {
 void *data = heap->data;
 if (WARN_ONCE(heap->nr <= 0, "Popping an empty heap"))
- return;
+ return false;
 /* Place last element at the root (position 0) and then sift down. */
 heap->nr--;
- memcpy(data, data + (heap->nr * func->elem_size), func->elem_size);
- min_heapify(heap, 0, func);
+ memcpy(data, data + (heap->nr * elem_size), elem_size);
+ __min_heap_sift_down(heap, 0, elem_size, func, args);
+
+ return true;
 }
+#define min_heap_pop(_heap, _func, _args) \
+ __min_heap_pop((min_heap_char *)_heap, __minheap_obj_size(_heap), _func, _args)
+
 /*
 * Remove the minimum element and then push the given element. The
 * implementation performs 1 sift (O(log2(nr))) and is therefore more
 * efficient than a pop followed by a push that does 2.
 */
 static __always_inline
-void min_heap_pop_push(struct min_heap *heap,
- const void *element,
- const struct min_heap_callbacks *func)
+void __min_heap_pop_push(min_heap_char *heap,
+ const void *element, size_t elem_size,
+ const struct min_heap_callbacks *func,
+ void *args)
 {
- memcpy(heap->data, element, func->elem_size);
- min_heapify(heap, 0, func);
+ memcpy(heap->data, element, elem_size);
+ __min_heap_sift_down(heap, 0, elem_size, func, args);
 }
+#define min_heap_pop_push(_heap, _element, _func, _args) \
+ __min_heap_pop_push((min_heap_char *)_heap, _element, __minheap_obj_size(_heap), _func, _args)
+
 /* Push an element on to the heap, O(log2(nr)). */
 static __always_inline
-void min_heap_push(struct min_heap *heap, const void *element,
- const struct min_heap_callbacks *func)
+bool __min_heap_push(min_heap_char *heap, const void *element, size_t elem_size,
+ const struct min_heap_callbacks *func, void *args)
 {
 void *data = heap->data;
- void *child, *parent;
 int pos;
 if (WARN_ONCE(heap->nr >= heap->size, "Pushing on a full heap"))
- return;
+ return false;
 /* Place at the end of data. */
 pos = heap->nr;
- memcpy(data + (pos * func->elem_size), element, func->elem_size);
+ memcpy(data + (pos * elem_size), element, elem_size);
 heap->nr++;
 /* Sift child at pos up. */
- for (; pos > 0; pos = (pos - 1) / 2) {
- child = data + (pos * func->elem_size);
- parent = data + ((pos - 1) / 2) * func->elem_size;
- if (func->less(parent, child))
- break;
- func->swp(parent, child);
- }
+ __min_heap_sift_up(heap, elem_size, pos, func, args);
+
+ return true;
 }
+#define min_heap_push(_heap, _element, _func, _args) \
+ __min_heap_push((min_heap_char *)_heap, _element, __minheap_obj_size(_heap), _func, _args)
+
+/* Remove ith element from the heap, O(log2(nr)). */
+static __always_inline
+bool __min_heap_del(min_heap_char *heap, size_t elem_size, size_t idx,
+ const struct min_heap_callbacks *func, void *args)
+{
+ void *data = heap->data;
+
+ if (WARN_ONCE(heap->nr <= 0, "Popping an empty heap"))
+ return false;
+
+ /* Place last element at the root (position 0) and then sift down. */
+ heap->nr--;
+ if (idx == heap->nr)
+ return true;
+ func->swp(data + (idx * elem_size), data + (heap->nr * elem_size), args);
+ __min_heap_sift_up(heap, elem_size, idx, func, args);
+ __min_heap_sift_down(heap, idx, elem_size, func, args);
+
+ return true;
+}
+
+#define min_heap_del(_heap, _idx, _func, _args) \
+ __min_heap_del((min_heap_char *)_heap, __minheap_obj_size(_heap), _idx, _func, _args)
+
 #endif /* _LINUX_MIN_HEAP_H */
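The min_heap rewrite above turns the old void-pointer struct min_heap into type-checked containers generated by DEFINE_MIN_HEAP()/MIN_HEAP_PREALLOCATED(), derives the element size from the container type instead of carrying it in struct min_heap_callbacks, threads an opaque args cookie through the less/swp callbacks, and adds peek/full/del/sift primitives while push/pop now report success. A minimal usage sketch against those definitions; min_heap_int, int_less(), int_swap() and drain_sorted() are illustrative names, not part of the header:

#include <linux/min_heap.h>
#include <linux/printk.h>

DEFINE_MIN_HEAP(int, min_heap_int);

/* The callbacks now take an opaque 'args' cookie instead of an element size. */
static bool int_less(const void *lhs, const void *rhs, void *args)
{
	return *(const int *)lhs < *(const int *)rhs;
}

static void int_swap(void *lhs, void *rhs, void *args)
{
	int tmp = *(int *)lhs;

	*(int *)lhs = *(int *)rhs;
	*(int *)rhs = tmp;
}

static const struct min_heap_callbacks int_cbs = {
	.less = int_less,
	.swp = int_swap,
};

/* Heapify an existing array in place, then drain it in ascending order. */
static void drain_sorted(int *vals, int n)
{
	struct min_heap_int heap;

	min_heap_init(&heap, vals, n);	/* borrow 'vals' as backing storage */
	heap.nr = n;			/* the elements are already in place */
	min_heapify_all(&heap, &int_cbs, NULL);

	while (heap.nr) {
		pr_info("%d\n", *min_heap_peek(&heap));
		min_heap_pop(&heap, &int_cbs, NULL);
	}
}

Code that wants the storage embedded rather than borrowed can instead declare a type with MIN_HEAP_PREALLOCATED(int, my_heap, N) and pass NULL as the data argument to min_heap_init(), which then falls back to the preallocated array, as __min_heap_init() above shows.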
diff --git a/include/linux/misc/keba.h b/include/linux/misc/keba.h
new file mode 100644
index 000000000000..323b31a847c5
--- /dev/null
+++ b/include/linux/misc/keba.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2024, KEBA Industrial Automation Gmbh */
+
+#ifndef _LINUX_MISC_KEBA_H
+#define _LINUX_MISC_KEBA_H
+
+#include <linux/auxiliary_bus.h>
+
+struct i2c_board_info;
+
+/**
+ * struct keba_i2c_auxdev - KEBA I2C auxiliary device
+ * @auxdev: auxiliary device object
+ * @io: address range of I2C controller IO memory
+ * @info_size: number of I2C devices to be probed
+ * @info: I2C devices to be probed
+ */
+struct keba_i2c_auxdev {
+ struct auxiliary_device auxdev;
+ struct resource io;
+ int info_size;
+ struct i2c_board_info *info;
+};
+
+#endif /* _LINUX_MISC_KEBA_H */
diff --git a/include/linux/misc_cgroup.h b/include/linux/misc_cgroup.h
index e799b1f8d05b..49eef10c8e59 100644
--- a/include/linux/misc_cgroup.h
+++ b/include/linux/misc_cgroup.h
@@ -9,15 +9,16 @@
 #define _MISC_CGROUP_H_
 /**
- * Types of misc cgroup entries supported by the host.
+ * enum misc_res_type - Types of misc cgroup entries supported by the host.
 */
 enum misc_res_type {
 #ifdef CONFIG_KVM_AMD_SEV
- /* AMD SEV ASIDs resource */
+ /** @MISC_CG_RES_SEV: AMD SEV ASIDs resource */
 MISC_CG_RES_SEV,
- /* AMD SEV-ES ASIDs resource */
+ /** @MISC_CG_RES_SEV_ES: AMD SEV-ES ASIDs resource */
 MISC_CG_RES_SEV_ES,
 #endif
+ /** @MISC_CG_RES_TYPES: count of enum misc_res_type constants */
 MISC_CG_RES_TYPES
};
@@ -30,13 +31,16 @@ struct misc_cg;
 /**
 * struct misc_res: Per cgroup per misc type resource
 * @max: Maximum limit on the resource.
+ * @watermark: Historical maximum usage of the resource.
 * @usage: Current usage of the resource.
 * @events: Number of times, the resource limit exceeded.
*/ struct misc_res { u64 max; + atomic64_t watermark; atomic64_t usage; atomic64_t events; + atomic64_t events_local; }; /** @@ -50,6 +54,8 @@ struct misc_cg { /* misc.events */ struct cgroup_file events_file; + /* misc.events.local */ + struct cgroup_file events_local_file; struct misc_res res[MISC_CG_RES_TYPES]; }; diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index d7bb31d9a446..ba875a619b97 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -294,6 +294,7 @@ enum { #define MLX5_UMR_FLEX_ALIGNMENT 0x40 #define MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT (MLX5_UMR_FLEX_ALIGNMENT / sizeof(struct mlx5_mtt)) #define MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT (MLX5_UMR_FLEX_ALIGNMENT / sizeof(struct mlx5_klm)) +#define MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT (MLX5_UMR_FLEX_ALIGNMENT / sizeof(struct mlx5_ksm)) #define MLX5_USER_INDEX_LEN (MLX5_FLD_SZ_BYTES(qpc, user_index) * 8) @@ -1466,6 +1467,7 @@ enum { MLX5_PER_TRAFFIC_CLASS_CONGESTION_GROUP = 0x13, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP = 0x16, MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20, + MLX5_INFINIBAND_EXTENDED_PORT_COUNTERS_GROUP = 0x21, }; enum { diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 779cfdf2e9d6..a96438ded15f 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -766,6 +766,12 @@ struct mlx5_hca_cap { u32 max[MLX5_UN_SZ_DW(hca_cap_union)]; }; +enum mlx5_wc_state { + MLX5_WC_STATE_UNINITIALIZED, + MLX5_WC_STATE_UNSUPPORTED, + MLX5_WC_STATE_SUPPORTED, +}; + struct mlx5_core_dev { struct device *device; enum mlx5_coredev_type coredev_type; @@ -824,6 +830,9 @@ struct mlx5_core_dev { #endif u64 num_ipsec_offloads; struct mlx5_sd *sd; + enum mlx5_wc_state wc_state; + /* sync write combining state */ + struct mutex wc_state_lock; }; struct mlx5_db { @@ -908,6 +917,7 @@ struct mlx5_hca_vport_context { u16 qkey_violation_counter; u16 pkey_violation_counter; bool grh_required; + u8 num_plane; }; #define STRUCT_FIELD(header, field) \ @@ -1375,4 +1385,6 @@ static inline bool mlx5_is_macsec_roce_supported(struct mlx5_core_dev *mdev) enum { MLX5_OCTWORD = 16, }; + +bool mlx5_wc_support_get(struct mlx5_core_dev *mdev); #endif /* MLX5_DRIVER_H */ diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 5df52e15f7d6..cab228cf51c6 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -793,7 +793,7 @@ struct mlx5_ifc_ads_bits { u8 reserved_at_2[0xe]; u8 pkey_index[0x10]; - u8 reserved_at_20[0x8]; + u8 plane_index[0x8]; u8 grh[0x1]; u8 mlid[0x7]; u8 rlid[0x10]; @@ -1093,7 +1093,8 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits { u8 tunnel_stateless_ip_over_ip_tx[0x1]; u8 reserved_at_2e[0x2]; u8 max_vxlan_udp_ports[0x8]; - u8 reserved_at_38[0x6]; + u8 swp_csum_l4_partial[0x1]; + u8 reserved_at_39[0x5]; u8 max_geneve_opt_len[0x1]; u8 tunnel_stateless_geneve_rx[0x1]; @@ -1526,8 +1527,7 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 ts_cqe_to_dest_cqn[0x1]; u8 reserved_at_b3[0x6]; u8 go_back_n[0x1]; - u8 shampo[0x1]; - u8 reserved_at_bb[0x5]; + u8 reserved_at_ba[0x6]; u8 max_sgl_for_optimized_performance[0x8]; u8 log_max_cq_sz[0x8]; @@ -1744,7 +1744,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_280[0x10]; u8 max_wqe_sz_sq[0x10]; - u8 reserved_at_2a0[0x10]; + u8 reserved_at_2a0[0xb]; + u8 shampo[0x1]; + u8 reserved_at_2ac[0x4]; u8 max_wqe_sz_rq[0x10]; u8 max_flow_counter_31_16[0x10]; @@ -1990,9 +1992,12 @@ struct mlx5_ifc_cmd_hca_cap_2_bits { u8 reserved_at_c0[0x8]; u8 migration_multi_load[0x1]; u8 
migration_tracking_state[0x1]; - u8 reserved_at_ca[0x6]; + u8 multiplane_qp_ud[0x1]; + u8 reserved_at_cb[0x5]; u8 migration_in_chunks[0x1]; - u8 reserved_at_d1[0xf]; + u8 reserved_at_d1[0x1]; + u8 sf_eq_usage[0x1]; + u8 reserved_at_d3[0xd]; u8 cross_vhca_object_to_object_supported[0x20]; @@ -2017,7 +2022,8 @@ struct mlx5_ifc_cmd_hca_cap_2_bits { u8 reserved_at_250[0x10]; u8 reserved_at_260[0x120]; - u8 reserved_at_380[0x10]; + u8 reserved_at_380[0xb]; + u8 min_mkey_log_entity_size_fixed_buffer[0x5]; u8 ec_vf_vport_base[0x10]; u8 reserved_at_3a0[0x10]; @@ -2029,7 +2035,15 @@ struct mlx5_ifc_cmd_hca_cap_2_bits { u8 pcc_ifa2[0x1]; u8 reserved_at_3f1[0xf]; - u8 reserved_at_400[0x400]; + u8 reserved_at_400[0x1]; + u8 min_mkey_log_entity_size_fixed_buffer_valid[0x1]; + u8 reserved_at_402[0x1e]; + + u8 reserved_at_420[0x20]; + + u8 reserved_at_440[0x8]; + u8 max_num_eqs_24b[0x18]; + u8 reserved_at_460[0x3a0]; }; enum mlx5_ifc_flow_destination_type { @@ -2650,6 +2664,46 @@ struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits { u8 port_xmit_wait[0x20]; }; +struct mlx5_ifc_ib_ext_port_cntrs_grp_data_layout_bits { + u8 reserved_at_0[0x300]; + + u8 port_xmit_data_high[0x20]; + + u8 port_xmit_data_low[0x20]; + + u8 port_rcv_data_high[0x20]; + + u8 port_rcv_data_low[0x20]; + + u8 port_xmit_pkts_high[0x20]; + + u8 port_xmit_pkts_low[0x20]; + + u8 port_rcv_pkts_high[0x20]; + + u8 port_rcv_pkts_low[0x20]; + + u8 reserved_at_400[0x80]; + + u8 port_unicast_xmit_pkts_high[0x20]; + + u8 port_unicast_xmit_pkts_low[0x20]; + + u8 port_multicast_xmit_pkts_high[0x20]; + + u8 port_multicast_xmit_pkts_low[0x20]; + + u8 port_unicast_rcv_pkts_high[0x20]; + + u8 port_unicast_rcv_pkts_low[0x20]; + + u8 port_multicast_rcv_pkts_high[0x20]; + + u8 port_multicast_rcv_pkts_low[0x20]; + + u8 reserved_at_580[0x240]; +}; + struct mlx5_ifc_eth_per_tc_prio_grp_data_layout_bits { u8 transmit_queue_high[0x20]; @@ -3908,7 +3962,7 @@ enum { }; enum { - ELEMENT_TYPE_CAP_MASK_TASR = 1 << 0, + ELEMENT_TYPE_CAP_MASK_TSAR = 1 << 0, ELEMENT_TYPE_CAP_MASK_VPORT = 1 << 1, ELEMENT_TYPE_CAP_MASK_VPORT_TC = 1 << 2, ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC = 1 << 3, @@ -4172,7 +4226,8 @@ struct mlx5_ifc_hca_vport_context_bits { u8 has_smi[0x1]; u8 has_raw[0x1]; u8 grh_required[0x1]; - u8 reserved_at_104[0xc]; + u8 reserved_at_104[0x4]; + u8 num_port_plane[0x8]; u8 port_physical_state[0x4]; u8 vport_state_policy[0x4]; u8 port_state[0x4]; @@ -4541,6 +4596,7 @@ union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits { struct mlx5_ifc_eth_per_tc_prio_grp_data_layout_bits eth_per_tc_prio_grp_data_layout; struct mlx5_ifc_eth_per_tc_congest_prio_grp_data_layout_bits eth_per_tc_congest_prio_grp_data_layout; struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits ib_port_cntrs_grp_data_layout; + struct mlx5_ifc_ib_ext_port_cntrs_grp_data_layout_bits ib_ext_port_cntrs_grp_data_layout; struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs; struct mlx5_ifc_phys_layer_statistical_cntrs_bits phys_layer_statistical_cntrs; u8 reserved_at_0[0x7c0]; @@ -5629,7 +5685,11 @@ struct mlx5_ifc_query_q_counter_out_bits { u8 local_ack_timeout_err[0x20]; - u8 reserved_at_320[0xa0]; + u8 reserved_at_320[0x60]; + + u8 req_rnr_retries_exceeded[0x20]; + + u8 reserved_at_3a0[0x20]; u8 resp_local_length_error[0x20]; @@ -7688,7 +7748,7 @@ struct mlx5_ifc_mad_ifc_in_bits { u8 op_mod[0x10]; u8 remote_lid[0x10]; - u8 reserved_at_50[0x8]; + u8 plane_index[0x8]; u8 port[0x8]; u8 reserved_at_60[0x20]; @@ -9617,7 +9677,9 @@ struct mlx5_ifc_ptys_reg_bits { u8 an_disable_cap[0x1]; u8 reserved_at_3[0x5]; 
u8 local_port[0x8]; - u8 reserved_at_10[0xd]; + u8 reserved_at_10[0x8]; + u8 plane_ind[0x4]; + u8 reserved_at_1c[0x1]; u8 proto_mask[0x3]; u8 an_status[0x4]; @@ -9843,8 +9905,10 @@ struct mlx5_ifc_ppcnt_reg_bits { u8 grp[0x6]; u8 clr[0x1]; - u8 reserved_at_21[0x1c]; - u8 prio_tc[0x3]; + u8 reserved_at_21[0x13]; + u8 plane_ind[0x4]; + u8 reserved_at_38[0x3]; + u8 prio_tc[0x5]; union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set; }; diff --git a/include/linux/mlx5/mlx5_ifc_vdpa.h b/include/linux/mlx5/mlx5_ifc_vdpa.h index 40371c916cf9..58dfa2ee7c83 100644 --- a/include/linux/mlx5/mlx5_ifc_vdpa.h +++ b/include/linux/mlx5/mlx5_ifc_vdpa.h @@ -148,7 +148,9 @@ enum { MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_ADDRS = (u64)1 << 6, MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_AVAIL_IDX = (u64)1 << 7, MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_USED_IDX = (u64)1 << 8, + MLX5_VIRTQ_MODIFY_MASK_QUEUE_VIRTIO_VERSION = (u64)1 << 10, MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY = (u64)1 << 11, + MLX5_VIRTQ_MODIFY_MASK_QUEUE_FEATURES = (u64)1 << 12, MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY = (u64)1 << 14, }; diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h index 26092c78a985..e68d42b8ce65 100644 --- a/include/linux/mlx5/port.h +++ b/include/linux/mlx5/port.h @@ -155,10 +155,11 @@ struct mlx5_port_eth_proto { int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps); int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys, - int ptys_size, int proto_mask, u8 local_port); + int ptys_size, int proto_mask, + u8 local_port, u8 plane_index); int mlx5_query_ib_port_oper(struct mlx5_core_dev *dev, u16 *link_width_oper, - u16 *proto_oper, u8 local_port); + u16 *proto_oper, u8 local_port, u8 plane_index); void mlx5_toggle_port_link(struct mlx5_core_dev *dev); int mlx5_set_port_admin_status(struct mlx5_core_dev *dev, enum mlx5_port_status status); diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index f0e55bf3ec8b..ad1ce650146c 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h @@ -576,9 +576,12 @@ static inline const char *mlx5_qp_state_str(int state) static inline int mlx5_get_qp_default_ts(struct mlx5_core_dev *dev) { - return !MLX5_CAP_ROCE(dev, qp_ts_format) ? - MLX5_TIMESTAMP_FORMAT_FREE_RUNNING : - MLX5_TIMESTAMP_FORMAT_DEFAULT; + u8 supported_ts_cap = mlx5_get_roce_state(dev) ? + MLX5_CAP_ROCE(dev, qp_ts_format) : + MLX5_CAP_GEN(dev, sq_ts_format); + + return supported_ts_cap ? MLX5_TIMESTAMP_FORMAT_DEFAULT : + MLX5_TIMESTAMP_FORMAT_FREE_RUNNING; } #endif /* MLX5_QP_H */ diff --git a/include/linux/mm.h b/include/linux/mm.h index eb7c96d24ac0..7d044e737dba 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1110,7 +1110,7 @@ static inline unsigned int compound_order(struct page *page) * * Return: The order of the folio. */ -static inline unsigned int folio_order(struct folio *folio) +static inline unsigned int folio_order(const struct folio *folio) { if (!folio_test_large(folio)) return 0; @@ -1202,8 +1202,7 @@ static inline int is_vmalloc_or_module_addr(const void *x) /* * How many times the entire folio is mapped as a single unit (eg by a * PMD or PUD entry). This is probably not what you want, except for - * debugging purposes - it does not include PTE-mapped sub-pages; look - * at folio_mapcount() or page_mapcount() instead. + * debugging purposes or implementation of other core folio_*() primitives. 
*/ static inline int folio_entire_mapcount(const struct folio *folio) { @@ -1211,40 +1210,6 @@ static inline int folio_entire_mapcount(const struct folio *folio) return atomic_read(&folio->_entire_mapcount) + 1; } -/* - * The atomic page->_mapcount, starts from -1: so that transitions - * both from it and to it can be tracked, using atomic_inc_and_test - * and atomic_add_negative(-1). - */ -static inline void page_mapcount_reset(struct page *page) -{ - atomic_set(&(page)->_mapcount, -1); -} - -/** - * page_mapcount() - Number of times this precise page is mapped. - * @page: The page. - * - * The number of times this page is mapped. If this page is part of - * a large folio, it includes the number of times this page is mapped - * as part of that folio. - * - * Will report 0 for pages which cannot be mapped into userspace, eg - * slab, page tables and similar. - */ -static inline int page_mapcount(struct page *page) -{ - int mapcount = atomic_read(&page->_mapcount) + 1; - - /* Handle page_has_type() pages */ - if (mapcount < PAGE_MAPCOUNT_RESERVE + 1) - mapcount = 0; - if (unlikely(PageCompound(page))) - mapcount += folio_entire_mapcount(page_folio(page)); - - return mapcount; -} - static inline int folio_large_mapcount(const struct folio *folio) { VM_WARN_ON_FOLIO(!folio_test_large(folio), folio); @@ -1326,6 +1291,7 @@ void put_pages_list(struct list_head *pages); void split_page(struct page *page, unsigned int order); void folio_copy(struct folio *dst, struct folio *src); +int folio_mc_copy(struct folio *dst, struct folio *src); unsigned long nr_free_buffer_pages(void); @@ -1612,17 +1578,19 @@ static inline void put_page(struct page *page) * issue. * * Locking: the lockless algorithm described in folio_try_get_rcu() - * provides safe operation for get_user_pages(), page_mkclean() and + * provides safe operation for get_user_pages(), folio_mkclean() and * other calls that race to set up page table entries. */ #define GUP_PIN_COUNTING_BIAS (1U << 10) void unpin_user_page(struct page *page); +void unpin_folio(struct folio *folio); void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages, bool make_dirty); void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages, bool make_dirty); void unpin_user_pages(struct page **pages, unsigned long npages); +void unpin_folios(struct folio **folios, unsigned long nfolios); static inline bool is_cow_mapping(vm_flags_t flags) { @@ -1953,8 +1921,8 @@ static inline struct folio *pfn_folio(unsigned long pfn) * * For more information, please see Documentation/core-api/pin_user_pages.rst. * - * Return: True, if it is likely that the page has been "dma-pinned". - * False, if the page is definitely not dma-pinned. + * Return: True, if it is likely that the folio has been "dma-pinned". + * False, if the folio is definitely not dma-pinned. */ static inline bool folio_maybe_dma_pinned(struct folio *folio) { @@ -1973,11 +1941,6 @@ static inline bool folio_maybe_dma_pinned(struct folio *folio) GUP_PIN_COUNTING_BIAS; } -static inline bool page_maybe_dma_pinned(struct page *page) -{ - return folio_maybe_dma_pinned(page_folio(page)); -} - /* * This should most likely only be called during fork() to see whether we * should break the cow immediately for an anon page on the src mm. @@ -2150,7 +2113,7 @@ static inline struct folio *folio_next(struct folio *folio) * it from being split. It is not necessary for the folio to be locked. * Return: The base-2 logarithm of the size of this folio. 
*/ -static inline unsigned int folio_shift(struct folio *folio) +static inline unsigned int folio_shift(const struct folio *folio) { return PAGE_SHIFT + folio_order(folio); } @@ -2163,7 +2126,7 @@ static inline unsigned int folio_shift(struct folio *folio) * it from being split. It is not necessary for the folio to be locked. * Return: The number of bytes in this folio. */ -static inline size_t folio_size(struct folio *folio) +static inline size_t folio_size(const struct folio *folio) { return PAGE_SIZE << folio_order(folio); } @@ -2295,19 +2258,6 @@ static inline void *folio_address(const struct folio *folio) return page_address(&folio->page); } -extern pgoff_t __page_file_index(struct page *page); - -/* - * Return the pagecache index of the passed page. Regular pagecache pages - * use ->index whereas swapcache pages use swp_offset(->private) - */ -static inline pgoff_t page_index(struct page *page) -{ - if (unlikely(PageSwapCache(page))) - return __page_file_index(page); - return page->index; -} - /* * Return true only if the page has been allocated with * ALLOC_NO_WATERMARKS and the low watermark was not @@ -2550,6 +2500,9 @@ long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, struct page **pages, unsigned int gup_flags); long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages, struct page **pages, unsigned int gup_flags); +long memfd_pin_folios(struct file *memfd, loff_t start, loff_t end, + struct folio **folios, unsigned int max_folios, + pgoff_t *offset); int get_user_pages_fast(unsigned long start, int nr_pages, unsigned int gup_flags, struct page **pages); @@ -4038,7 +3991,6 @@ extern int __get_huge_page_for_hwpoison(unsigned long pfn, int flags, bool *migratable_cleared); void num_poisoned_pages_inc(unsigned long pfn); void num_poisoned_pages_sub(unsigned long pfn, long i); -struct task_struct *task_early_kill(struct task_struct *tsk, int force_early); #else static inline void memory_failure_queue(unsigned long pfn, int flags) { @@ -4059,12 +4011,6 @@ static inline void num_poisoned_pages_sub(unsigned long pfn, long i) } #endif -#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_KSM) -void add_to_kill_ksm(struct task_struct *tsk, struct page *p, - struct vm_area_struct *vma, struct list_head *to_kill, - unsigned long ksm_addr); -#endif - #if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_MEMORY_HOTPLUG) extern void memblk_nr_poison_inc(unsigned long pfn); extern void memblk_nr_poison_sub(unsigned long pfn, long i); @@ -4105,10 +4051,10 @@ enum mf_result { enum mf_action_page_type { MF_MSG_KERNEL, MF_MSG_KERNEL_HIGH_ORDER, - MF_MSG_SLAB, MF_MSG_DIFFERENT_COMPOUND, MF_MSG_HUGE, MF_MSG_FREE_HUGE, + MF_MSG_GET_HWPOISON, MF_MSG_UNMAP_FAILED, MF_MSG_DIRTY_SWAPCACHE, MF_MSG_CLEAN_SWAPCACHE, @@ -4122,13 +4068,12 @@ enum mf_action_page_type { MF_MSG_BUDDY, MF_MSG_DAX, MF_MSG_UNSPLIT_THP, + MF_MSG_ALREADY_POISONED, MF_MSG_UNKNOWN, }; #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS) -extern void clear_huge_page(struct page *page, - unsigned long addr_hint, - unsigned int pages_per_huge_page); +void folio_zero_user(struct folio *folio, unsigned long addr_hint); int copy_user_large_folio(struct folio *dst, struct folio *src, unsigned long addr_hint, struct vm_area_struct *vma); @@ -4261,4 +4206,6 @@ static inline bool pfn_is_unaccepted_memory(unsigned long pfn) void vma_pgtable_walk_begin(struct vm_area_struct *vma); void vma_pgtable_walk_end(struct vm_area_struct *vma); +int reserve_mem_find_by_name(const char *name, phys_addr_t 
*start, phys_addr_t *size); + #endif /* _LINUX_MM_H */ diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index af3a0256fa93..485424979254 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -46,9 +46,7 @@ struct mem_cgroup; * which is guaranteed to be aligned. If you use the same storage as * page->mapping, you must restore it to NULL before freeing the page. * - * If your page will not be mapped to userspace, you can also use the four - * bytes in the mapcount union, but you must call page_mapcount_reset() - * before freeing it. + * The mapcount field must not be used for own purposes. * * If you want to use the refcount field, it must be used in such a way * that other CPUs temporarily incrementing and then decrementing the @@ -152,25 +150,40 @@ struct page { union { /* This union is 4 bytes in size. */ /* - * If the page can be mapped to userspace, encodes the number - * of times this page is referenced by a page table. + * For head pages of typed folios, the value stored here + * allows for determining what this page is used for. The + * tail pages of typed folios will not store a type + * (page_type == _mapcount == -1). + * + * See page-flags.h for a list of page types which are currently + * stored here. + * + * Owners of typed folios may reuse the lower 16 bit of the + * head page page_type field after setting the page type, + * but must reset these 16 bit to -1 before clearing the + * page type. */ - atomic_t _mapcount; + unsigned int page_type; /* - * If the page is neither PageSlab nor mappable to userspace, - * the value stored here may help determine what this page - * is used for. See page-flags.h for a list of page types - * which are currently stored here. + * For pages that are part of non-typed folios for which mappings + * are tracked via the RMAP, encodes the number of times this page + * is directly referenced by a page table. + * + * Note that the mapcount is always initialized to -1, so that + * transitions both from it and to it can be tracked, using + * atomic_inc_and_test() and atomic_add_negative(-1). */ - unsigned int page_type; + atomic_t _mapcount; }; /* Usage count. *DO NOT USE DIRECTLY*. See page_ref.h */ atomic_t _refcount; -#ifdef CONFIG_SLAB_OBJ_EXT +#ifdef CONFIG_MEMCG unsigned long memcg_data; +#elif defined(CONFIG_SLAB_OBJ_EXT) + unsigned long _unused_slab_obj_exts; #endif /* @@ -298,6 +311,7 @@ typedef struct { * @_hugetlb_cgroup_rsvd: Do not use directly, use accessor in hugetlb_cgroup.h. * @_hugetlb_hwpoison: Do not use directly, call raw_hwp_list_head(). * @_deferred_list: Folios to be split under memory pressure. + * @_unused_slab_obj_exts: Placeholder to match obj_exts in struct slab. * * A folio is a physically, virtually and logically contiguous set * of bytes. 
It is a power-of-two in size, and it is aligned to that @@ -332,8 +346,10 @@ struct folio { }; atomic_t _mapcount; atomic_t _refcount; -#ifdef CONFIG_SLAB_OBJ_EXT +#ifdef CONFIG_MEMCG unsigned long memcg_data; +#elif defined(CONFIG_SLAB_OBJ_EXT) + unsigned long _unused_slab_obj_exts; #endif #if defined(WANT_PAGE_VIRTUAL) void *virtual; diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 586a8f0104d7..41458892bc8a 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -220,6 +220,8 @@ enum node_stat_item { PGDEMOTE_KSWAPD, PGDEMOTE_DIRECT, PGDEMOTE_KHUGEPAGED, + NR_MEMMAP, /* page metadata allocated through buddy allocator */ + NR_MEMMAP_BOOT, /* page metadata allocated through boot allocator */ NR_VM_NODE_STAT_ITEMS }; @@ -1979,8 +1981,9 @@ static inline int subsection_map_index(unsigned long pfn) static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn) { int idx = subsection_map_index(pfn); + struct mem_section_usage *usage = READ_ONCE(ms->usage); - return test_bit(idx, READ_ONCE(ms->usage)->subsection_map); + return usage ? test_bit(idx, usage->subsection_map) : 0; } #else static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn) diff --git a/include/linux/module.h b/include/linux/module.h index 330ffb59efe5..4213d8993cd8 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -509,7 +509,9 @@ struct module { #endif #ifdef CONFIG_DEBUG_INFO_BTF_MODULES unsigned int btf_data_size; + unsigned int btf_base_data_size; void *btf_data; + void *btf_base_data; #endif #ifdef CONFIG_JUMP_LABEL struct jump_entry *jump_entries; diff --git a/include/linux/msi.h b/include/linux/msi.h index dc27cf3903d5..26588da88bdd 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -19,7 +19,7 @@ */ #include <linux/irqdomain_defs.h> -#include <linux/cpumask.h> +#include <linux/cpumask_types.h> #include <linux/msi_api.h> #include <linux/xarray.h> #include <linux/mutex.h> diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h index 947410faf9e2..35ca19ae21ae 100644 --- a/include/linux/mtd/cfi.h +++ b/include/linux/mtd/cfi.h @@ -308,32 +308,32 @@ static inline uint8_t cfi_read_query(struct map_info *map, uint32_t addr) { map_word val = map_read(map, addr); - if (map_bankwidth_is_1(map)) { + if (map_bankwidth_is_1(map)) return val.x[0]; - } else if (map_bankwidth_is_2(map)) { + if (map_bankwidth_is_2(map)) return cfi16_to_cpu(map, val.x[0]); - } else { - /* No point in a 64-bit byteswap since that would just be - swapping the responses from different chips, and we are - only interested in one chip (a representative sample) */ - return cfi32_to_cpu(map, val.x[0]); - } + /* + * No point in a 64-bit byteswap since that would just be + * swapping the responses from different chips, and we are + * only interested in one chip (a representative sample) + */ + return cfi32_to_cpu(map, val.x[0]); } static inline uint16_t cfi_read_query16(struct map_info *map, uint32_t addr) { map_word val = map_read(map, addr); - if (map_bankwidth_is_1(map)) { + if (map_bankwidth_is_1(map)) return val.x[0] & 0xff; - } else if (map_bankwidth_is_2(map)) { + if (map_bankwidth_is_2(map)) return cfi16_to_cpu(map, val.x[0]); - } else { - /* No point in a 64-bit byteswap since that would just be - swapping the responses from different chips, and we are - only interested in one chip (a representative sample) */ - return cfi32_to_cpu(map, val.x[0]); - } + /* + * No point in a 64-bit byteswap since that would just be + * swapping the responses from 
different chips, and we are + * only interested in one chip (a representative sample) + */ + return cfi32_to_cpu(map, val.x[0]); } void cfi_udelay(int us); diff --git a/include/linux/namei.h b/include/linux/namei.h index 967aa9ea9f96..8ec8fed3bce8 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -50,13 +50,7 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT}; extern int path_pts(struct path *path); -extern int user_path_at_empty(int, const char __user *, unsigned, struct path *, int *empty); - -static inline int user_path_at(int dfd, const char __user *name, unsigned flags, - struct path *path) -{ - return user_path_at_empty(dfd, name, flags, path, NULL); -} +extern int user_path_at(int, const char __user *, unsigned, struct path *); struct dentry *lookup_one_qstr_excl(const struct qstr *name, struct dentry *base, diff --git a/include/linux/net_tstamp.h b/include/linux/net_tstamp.h index eb01c37e71e0..662074b08c94 100644 --- a/include/linux/net_tstamp.h +++ b/include/linux/net_tstamp.h @@ -5,7 +5,16 @@ #include <uapi/linux/net_tstamp.h> +#define SOF_TIMESTAMPING_SOFTWARE_MASK (SOF_TIMESTAMPING_RX_SOFTWARE | \ + SOF_TIMESTAMPING_TX_SOFTWARE | \ + SOF_TIMESTAMPING_SOFTWARE) + +#define SOF_TIMESTAMPING_HARDWARE_MASK (SOF_TIMESTAMPING_RX_HARDWARE | \ + SOF_TIMESTAMPING_TX_HARDWARE | \ + SOF_TIMESTAMPING_RAW_HARDWARE) + enum hwtstamp_source { + HWTSTAMP_SOURCE_UNSPEC, HWTSTAMP_SOURCE_NETDEV, HWTSTAMP_SOURCE_PHYLIB, }; diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index d20c6c99eb88..607009150b5f 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -43,6 +43,7 @@ #include <linux/netdev_features.h> #include <linux/neighbour.h> +#include <linux/netdevice_xmit.h> #include <uapi/linux/netdevice.h> #include <uapi/linux/if_bonding.h> #include <uapi/linux/pkt_cls.h> @@ -79,6 +80,7 @@ struct xdp_buff; struct xdp_frame; struct xdp_metadata_ops; struct xdp_md; +struct ethtool_netdev_state; typedef u32 xdp_features_t; @@ -1817,7 +1819,8 @@ enum netdev_reg_state { * @priv_flags: Like 'flags' but invisible to userspace, * see if.h for the definitions * @gflags: Global flags ( kept as legacy ) - * @padded: How much padding added by alloc_netdev() + * @priv_len: Size of the ->priv flexible array + * @priv: Flexible array containing private data * @operstate: RFC2863 operstate * @link_mode: Mapping policy to operstate * @if_port: Selectable AUI, TP, ... @@ -1985,8 +1988,6 @@ enum netdev_reg_state { * switch driver and used to set the phys state of the * switch port. 
* - * @wol_enabled: Wake-on-LAN is enabled - * * @threaded: napi threaded mode is enabled * * @net_notifier_list: List of per-net netdev notifier block @@ -1998,6 +1999,7 @@ enum netdev_reg_state { * @udp_tunnel_nic_info: static structure describing the UDP tunnel * offload capabilities of the device * @udp_tunnel_nic: UDP tunnel offload state + * @ethtool: ethtool related state * @xdp_state: stores info on attached XDP BPF programs * * @nested_level: Used as a parameter of spin_lock_nested() of @@ -2198,10 +2200,10 @@ struct net_device { unsigned short neigh_priv_len; unsigned short dev_id; unsigned short dev_port; - unsigned short padded; + int irq; + u32 priv_len; spinlock_t addr_list_lock; - int irq; struct netdev_hw_addr_list uc; struct netdev_hw_addr_list mc; @@ -2372,7 +2374,6 @@ struct net_device { struct lock_class_key *qdisc_tx_busylock; bool proto_down; bool threaded; - unsigned wol_enabled:1; struct list_head net_notifier_list; @@ -2383,6 +2384,8 @@ struct net_device { const struct udp_tunnel_nic_info *udp_tunnel_nic_info; struct udp_tunnel_nic *udp_tunnel_nic; + struct ethtool_netdev_state *ethtool; + /* protected by rtnl_lock */ struct bpf_xdp_entity xdp_state[__MAX_XDP_MODE]; @@ -2401,7 +2404,13 @@ struct net_device { /** @page_pools: page pools created for this netdevice */ struct hlist_head page_pools; #endif -}; + + /** @irq_moder: dim parameters used if IS_ENABLED(CONFIG_DIMLIB). */ + struct dim_irq_moder *irq_moder; + + u8 priv[] ____cacheline_aligned + __counted_by(priv_len); +} ____cacheline_aligned; #define to_net_dev(d) container_of(d, struct net_device, dev) /* @@ -2591,7 +2600,7 @@ void dev_net_set(struct net_device *dev, struct net *net) */ static inline void *netdev_priv(const struct net_device *dev) { - return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN); + return (void *)dev->priv; } /* Set the sysfs physical device reference for the network logical device @@ -2731,12 +2740,12 @@ struct pcpu_sw_netstats { } __aligned(4 * sizeof(u64)); struct pcpu_dstats { - u64 rx_packets; - u64 rx_bytes; - u64 rx_drops; - u64 tx_packets; - u64 tx_bytes; - u64 tx_drops; + u64_stats_t rx_packets; + u64_stats_t rx_bytes; + u64_stats_t rx_drops; + u64_stats_t tx_packets; + u64_stats_t tx_bytes; + u64_stats_t tx_drops; struct u64_stats_sync syncp; } __aligned(8 * sizeof(u64)); @@ -3021,7 +3030,8 @@ int call_netdevice_notifiers_info(unsigned long val, #define net_device_entry(lh) list_entry(lh, struct net_device, dev_list) #define for_each_netdev_dump(net, d, ifindex) \ - xa_for_each_start(&(net)->dev_by_index, (ifindex), (d), (ifindex)) + for (; (d = xa_find(&(net)->dev_by_index, &ifindex, \ + ULONG_MAX, XA_PRESENT)); ifindex++) static inline struct net_device *next_net_device(struct net_device *dev) { @@ -3121,7 +3131,6 @@ static inline void unregister_netdevice(struct net_device *dev) int netdev_refcnt_read(const struct net_device *dev); void free_netdev(struct net_device *dev); -void netdev_freemem(struct net_device *dev); void init_dummy_netdev(struct net_device *dev); struct net_device *netdev_get_xmit_slave(struct net_device *dev, @@ -3200,6 +3209,7 @@ static inline bool dev_has_header(const struct net_device *dev) struct softnet_data { struct list_head poll_list; struct sk_buff_head process_queue; + local_lock_t process_queue_bh_lock; /* stats */ unsigned int processed; @@ -3222,13 +3232,7 @@ struct softnet_data { struct sk_buff_head xfrm_backlog; #endif /* written and read only by owning cpu: */ - struct { - u16 recursion; - u8 more; -#ifdef 
CONFIG_NET_EGRESS - u8 skip_txqueue; -#endif - } xmit; + struct netdev_xmit xmit; #ifdef CONFIG_RPS /* input_queue_head should be written by cpu owning this struct, * and only read by other cpus. Worth using a cache line. @@ -3256,10 +3260,18 @@ struct softnet_data { DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); +#ifndef CONFIG_PREEMPT_RT static inline int dev_recursion_level(void) { return this_cpu_read(softnet_data.xmit.recursion); } +#else +static inline int dev_recursion_level(void) +{ + return current->net_xmit.recursion; +} + +#endif void __netif_schedule(struct Qdisc *q); void netif_schedule_queue(struct netdev_queue *txq); @@ -3903,9 +3915,6 @@ int generic_hwtstamp_get_lower(struct net_device *dev, int generic_hwtstamp_set_lower(struct net_device *dev, struct kernel_hwtstamp_config *kernel_cfg, struct netlink_ext_ack *extack); -int dev_set_hwtstamp_phylib(struct net_device *dev, - struct kernel_hwtstamp_config *cfg, - struct netlink_ext_ack *extack); int dev_ethtool(struct net *net, struct ifreq *ifr, void __user *userdata); unsigned int dev_get_flags(const struct net_device *); int __dev_change_flags(struct net_device *dev, unsigned int flags, @@ -4874,18 +4883,35 @@ static inline ktime_t netdev_get_tstamp(struct net_device *dev, return hwtstamps->hwtstamp; } -static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops, - struct sk_buff *skb, struct net_device *dev, - bool more) +#ifndef CONFIG_PREEMPT_RT +static inline void netdev_xmit_set_more(bool more) { __this_cpu_write(softnet_data.xmit.more, more); - return ops->ndo_start_xmit(skb, dev); } static inline bool netdev_xmit_more(void) { return __this_cpu_read(softnet_data.xmit.more); } +#else +static inline void netdev_xmit_set_more(bool more) +{ + current->net_xmit.more = more; +} + +static inline bool netdev_xmit_more(void) +{ + return current->net_xmit.more; +} +#endif + +static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops, + struct sk_buff *skb, struct net_device *dev, + bool more) +{ + netdev_xmit_set_more(more); + return ops->ndo_start_xmit(skb, dev); +} static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev, struct netdev_queue *txq, bool more) diff --git a/include/linux/netdevice_xmit.h b/include/linux/netdevice_xmit.h new file mode 100644 index 000000000000..38325e070296 --- /dev/null +++ b/include/linux/netdevice_xmit.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef _LINUX_NETDEVICE_XMIT_H +#define _LINUX_NETDEVICE_XMIT_H + +struct netdev_xmit { + u16 recursion; + u8 more; +#ifdef CONFIG_NET_EGRESS + u8 skip_txqueue; +#endif +}; + +#endif diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 5df7340d4dab..b332c2048c75 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -47,7 +47,6 @@ struct netlink_kernel_cfg { unsigned int groups; unsigned int flags; void (*input)(struct sk_buff *skb); - struct mutex *cb_mutex; int (*bind)(struct net *net, int group); void (*unbind)(struct net *net, int group); void (*release) (struct sock *sk, unsigned long *groups); diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 0d896ce296ce..f9df88091c6d 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -367,6 +367,8 @@ enum open_delegation_type4 { NFS4_OPEN_DELEGATE_READ = 1, NFS4_OPEN_DELEGATE_WRITE = 2, NFS4_OPEN_DELEGATE_NONE_EXT = 3, /* 4.1 */ + NFS4_OPEN_DELEGATE_READ_ATTRS_DELEG = 4, + NFS4_OPEN_DELEGATE_WRITE_ATTRS_DELEG = 5, }; enum why_no_delegation4 { /* 
new to v4.1 */ @@ -507,6 +509,12 @@ enum { FATTR4_XATTR_SUPPORT = 82, }; +enum { + FATTR4_TIME_DELEG_ACCESS = 84, + FATTR4_TIME_DELEG_MODIFY = 85, + FATTR4_OPEN_ARGUMENTS = 86, +}; + /* * The following internal definitions enable processing the above * attribute bits within 32-bit word boundaries. @@ -586,6 +594,9 @@ enum { #define FATTR4_WORD2_SECURITY_LABEL BIT(FATTR4_SEC_LABEL - 64) #define FATTR4_WORD2_MODE_UMASK BIT(FATTR4_MODE_UMASK - 64) #define FATTR4_WORD2_XATTR_SUPPORT BIT(FATTR4_XATTR_SUPPORT - 64) +#define FATTR4_WORD2_TIME_DELEG_ACCESS BIT(FATTR4_TIME_DELEG_ACCESS - 64) +#define FATTR4_WORD2_TIME_DELEG_MODIFY BIT(FATTR4_TIME_DELEG_MODIFY - 64) +#define FATTR4_WORD2_OPEN_ARGUMENTS BIT(FATTR4_OPEN_ARGUMENTS - 64) /* MDS threshold bitmap bits */ #define THRESHOLD_RD (1UL << 0) diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 92de074e63b9..1df86ab98c77 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -140,6 +140,7 @@ struct nfs_server { struct rpc_clnt * client_acl; /* ACL RPC client handle */ struct nlm_host *nlm_host; /* NLM client handle */ struct nfs_iostats __percpu *io_stats; /* I/O statistics */ + wait_queue_head_t write_congestion_wait; /* wait until write congestion eases */ atomic_long_t writeback; /* number of writeback pages */ unsigned int write_congested;/* flag set when writeback gets too high */ unsigned int flags; /* various flags */ @@ -278,6 +279,9 @@ struct nfs_server { #define NFS_CAP_LGOPEN (1U << 5) #define NFS_CAP_CASE_INSENSITIVE (1U << 6) #define NFS_CAP_CASE_PRESERVING (1U << 7) +#define NFS_CAP_REBOOT_LAYOUTRETURN (1U << 8) +#define NFS_CAP_OPEN_XOR (1U << 12) +#define NFS_CAP_DELEGTIME (1U << 13) #define NFS_CAP_POSIX_LOCK (1U << 14) #define NFS_CAP_UIDGID_NOMAP (1U << 15) #define NFS_CAP_STATEID_NFSV41 (1U << 16) diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h index 1c315f854ea8..169b4ae30ff4 100644 --- a/include/linux/nfs_page.h +++ b/include/linux/nfs_page.h @@ -152,11 +152,8 @@ extern void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *, pgoff_t); extern size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, struct nfs_page *prev, struct nfs_page *req); -extern int nfs_wait_on_request(struct nfs_page *); extern void nfs_unlock_request(struct nfs_page *req); extern void nfs_unlock_and_release_request(struct nfs_page *); -extern struct nfs_page *nfs_page_group_lock_head(struct nfs_page *req); -extern int nfs_page_group_lock_subrequests(struct nfs_page *head); extern void nfs_join_page_group(struct nfs_page *head, struct nfs_commit_info *cinfo, struct inode *inode); @@ -208,8 +205,8 @@ static inline struct inode *nfs_page_to_inode(const struct nfs_page *req) struct folio *folio = nfs_page_to_folio(req); if (folio == NULL) - return page_file_mapping(req->wb_page)->host; - return folio_file_mapping(folio)->host; + return req->wb_page->mapping->host; + return folio->mapping->host; } /** diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index d09b9773b20c..45623af3e7b8 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -449,6 +449,22 @@ struct stateowner_id { __u32 uniquifier; }; +struct nfs4_open_delegation { + __u32 open_delegation_type; + union { + struct { + fmode_t type; + __u32 do_recall; + nfs4_stateid stateid; + unsigned long pagemod_limit; + }; + struct { + __u32 why_no_delegation; + __u32 will_notify; + }; + }; +}; + /* * Arguments to the open call. 
*/ @@ -468,7 +484,7 @@ struct nfs_openargs { nfs4_verifier verifier; /* EXCLUSIVE */ }; nfs4_stateid delegation; /* CLAIM_DELEGATE_CUR */ - fmode_t delegation_type; /* CLAIM_PREVIOUS */ + __u32 delegation_type; /* CLAIM_PREVIOUS */ } u; const struct qstr * name; const struct nfs_server *server; /* Needed for ID mapping */ @@ -490,13 +506,10 @@ struct nfs_openres { struct nfs_fattr * f_attr; struct nfs_seqid * seqid; const struct nfs_server *server; - fmode_t delegation_type; - nfs4_stateid delegation; - unsigned long pagemod_limit; - __u32 do_recall; __u32 attrset[NFS4_BITMAP_SIZE]; struct nfs4_string *owner; struct nfs4_string *group_owner; + struct nfs4_open_delegation delegation; __u32 access_request; __u32 access_supported; __u32 access_result; @@ -609,6 +622,13 @@ struct nfs_release_lockowner_res { struct nfs4_sequence_res seq_res; }; +struct nfs4_delegattr { + struct timespec64 atime; + struct timespec64 mtime; + bool atime_set; + bool mtime_set; +}; + struct nfs4_delegreturnargs { struct nfs4_sequence_args seq_args; const struct nfs_fh *fhandle; @@ -616,6 +636,7 @@ struct nfs4_delegreturnargs { const u32 *bitmask; u32 bitmask_store[NFS_BITMASK_SZ]; struct nfs4_layoutreturn_args *lr_args; + struct nfs4_delegattr *sattr_args; }; struct nfs4_delegreturnres { @@ -624,6 +645,8 @@ struct nfs4_delegreturnres { struct nfs_server *server; struct nfs4_layoutreturn_res *lr_res; int lr_ret; + bool sattr_res; + int sattr_ret; }; /* @@ -1190,6 +1213,14 @@ struct nfs4_statfs_res { struct nfs_fsstat *fsstat; }; +struct nfs4_open_caps { + u32 oa_share_access[1]; + u32 oa_share_deny[1]; + u32 oa_share_access_want[1]; + u32 oa_open_claim[1]; + u32 oa_createmode[1]; +}; + struct nfs4_server_caps_arg { struct nfs4_sequence_args seq_args; struct nfs_fh *fhandle; @@ -1206,6 +1237,7 @@ struct nfs4_server_caps_res { u32 fh_expire_type; u32 case_insensitive; u32 case_preserving; + struct nfs4_open_caps open_caps; }; #define NFS4_PATHNAME_MAXCOMPONENTS 512 @@ -1406,7 +1438,7 @@ struct nfs41_secinfo_no_name_args { struct nfs41_test_stateid_args { struct nfs4_sequence_args seq_args; - nfs4_stateid *stateid; + nfs4_stateid stateid; }; struct nfs41_test_stateid_res { @@ -1807,7 +1839,8 @@ struct nfs_rpc_ops { int open_flags, struct iattr *iattr, int *); - int (*have_delegation)(struct inode *, fmode_t); + int (*have_delegation)(struct inode *, fmode_t, int); + int (*return_delegation)(struct inode *); struct nfs_client *(*alloc_client) (const struct nfs_client_initdata *); struct nfs_client *(*init_client) (struct nfs_client *, const struct nfs_client_initdata *); diff --git a/include/linux/node.h b/include/linux/node.h index dfc004e4bee7..9a881c2208b3 100644 --- a/include/linux/node.h +++ b/include/linux/node.h @@ -16,7 +16,6 @@ #define _LINUX_NODE_H_ #include <linux/device.h> -#include <linux/cpumask.h> #include <linux/list.h> /** diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h index 5601d14e2886..dab6a1734a22 100644 --- a/include/linux/nsproxy.h +++ b/include/linux/nsproxy.h @@ -42,6 +42,17 @@ struct nsproxy { }; extern struct nsproxy init_nsproxy; +#define to_ns_common(__ns) \ + _Generic((__ns), \ + struct cgroup_namespace *: &(__ns->ns), \ + struct ipc_namespace *: &(__ns->ns), \ + struct net *: &(__ns->ns), \ + struct pid_namespace *: &(__ns->ns), \ + struct mnt_namespace *: &(__ns->ns), \ + struct time_namespace *: &(__ns->ns), \ + struct user_namespace *: &(__ns->ns), \ + struct uts_namespace *: &(__ns->ns)) + /* * A structure to encompass all bits needed to install * a partial or complete 
new set of namespaces. @@ -112,4 +123,6 @@ static inline void get_nsproxy(struct nsproxy *ns) refcount_inc(&ns->count); } +DEFINE_FREE(put_nsproxy, struct nsproxy *, if (_T) put_nsproxy(_T)) + #endif diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h index 4109f1bd6128..9f6acadfe0c8 100644 --- a/include/linux/nvme-fc-driver.h +++ b/include/linux/nvme-fc-driver.h @@ -620,7 +620,7 @@ enum { * * Structure used between LLDD and nvmet-fc layer to represent the exchange * context for a FC-NVME FCP I/O operation (e.g. a nvme sqe, the sqe-related - * memory transfers, and its assocated cqe transfer). + * memory transfers, and its associated cqe transfer). * * The structure is allocated by the LLDD whenever a FCP CMD IU is received * from the FC link. The address of the structure is passed to the nvmet-fc @@ -920,6 +920,9 @@ struct nvmet_fc_target_port { * further references to hosthandle. * Entrypoint is Mandatory if the lldd calls nvmet_fc_invalidate_host(). * + * @host_traddr: called by the transport to retrieve the node name and + * port name of the host port address. + * * @max_hw_queues: indicates the maximum number of hw queues the LLDD * supports for cpu affinitization. * Value is Mandatory. Must be at least 1. @@ -975,6 +978,7 @@ struct nvmet_fc_target_template { void (*ls_abort)(struct nvmet_fc_target_port *targetport, void *hosthandle, struct nvmefc_ls_req *lsreq); void (*host_release)(void *hosthandle); + int (*host_traddr)(void *hosthandle, u64 *wwnn, u64 *wwpn); u32 max_hw_queues; u16 max_sgl_segments; diff --git a/include/linux/nvme.h b/include/linux/nvme.h index c693ac344ec0..c12a329dd463 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -25,6 +25,9 @@ #define NVME_NSID_ALL 0xffffffff +/* Special NSSR value, 'NVMe' */ +#define NVME_SUBSYS_RESET 0x4E564D65 + enum nvme_subsys_type { /* Referral to another discovery type target subsystem */ NVME_NQN_DISC = 1, @@ -1848,6 +1851,7 @@ enum { /* * Generic Command Status: */ + NVME_SCT_GENERIC = 0x0, NVME_SC_SUCCESS = 0x0, NVME_SC_INVALID_OPCODE = 0x1, NVME_SC_INVALID_FIELD = 0x2, @@ -1895,6 +1899,7 @@ enum { /* * Command Specific Status: */ + NVME_SCT_COMMAND_SPECIFIC = 0x100, NVME_SC_CQ_INVALID = 0x100, NVME_SC_QID_INVALID = 0x101, NVME_SC_QUEUE_SIZE = 0x102, @@ -1968,6 +1973,7 @@ enum { /* * Media and Data Integrity Errors: */ + NVME_SCT_MEDIA_ERROR = 0x200, NVME_SC_WRITE_FAULT = 0x280, NVME_SC_READ_ERROR = 0x281, NVME_SC_GUARD_CHECK = 0x282, @@ -1980,6 +1986,7 @@ enum { /* * Path-related Errors: */ + NVME_SCT_PATH = 0x300, NVME_SC_INTERNAL_PATH_ERROR = 0x300, NVME_SC_ANA_PERSISTENT_LOSS = 0x301, NVME_SC_ANA_INACCESSIBLE = 0x302, @@ -1988,11 +1995,17 @@ enum { NVME_SC_HOST_PATH_ERROR = 0x370, NVME_SC_HOST_ABORTED_CMD = 0x371, - NVME_SC_CRD = 0x1800, - NVME_SC_MORE = 0x2000, - NVME_SC_DNR = 0x4000, + NVME_SC_MASK = 0x00ff, /* Status Code */ + NVME_SCT_MASK = 0x0700, /* Status Code Type */ + NVME_SCT_SC_MASK = NVME_SCT_MASK | NVME_SC_MASK, + + NVME_STATUS_CRD = 0x1800, /* Command Retry Delayed */ + NVME_STATUS_MORE = 0x2000, + NVME_STATUS_DNR = 0x4000, /* Do Not Retry */ }; +#define NVME_SCT(status) ((status) >> 8 & 7) + struct nvme_completion { /* * Used by Admin and Fabrics commands to return data: diff --git a/include/linux/objagg.h b/include/linux/objagg.h index 78021777df46..6df5b887dc54 100644 --- a/include/linux/objagg.h +++ b/include/linux/objagg.h @@ -8,7 +8,6 @@ struct objagg_ops { size_t obj_size; bool (*delta_check)(void *priv, const void *parent_obj, const void *obj); - int 
(*hints_obj_cmp)(const void *obj1, const void *obj2); void * (*delta_create)(void *priv, void *parent_obj, void *obj); void (*delta_destroy)(void *priv, void *delta_priv); void * (*root_create)(void *priv, void *obj, unsigned int root_id); diff --git a/include/linux/of.h b/include/linux/of.h index a0bedd038a05..13cf7a43b473 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -1639,7 +1639,7 @@ int of_changeset_add_prop_string(struct of_changeset *ocs, int of_changeset_add_prop_string_array(struct of_changeset *ocs, struct device_node *np, const char *prop_name, - const char **str_array, size_t sz); + const char * const *str_array, size_t sz); int of_changeset_add_prop_u32_array(struct of_changeset *ocs, struct device_node *np, const char *prop_name, @@ -1652,6 +1652,9 @@ static inline int of_changeset_add_prop_u32(struct of_changeset *ocs, return of_changeset_add_prop_u32_array(ocs, np, prop_name, &val, 1); } +int of_changeset_add_prop_bool(struct of_changeset *ocs, struct device_node *np, + const char *prop_name); + #else /* CONFIG_OF_DYNAMIC */ static inline int of_reconfig_notifier_register(struct notifier_block *nb) { diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index b9e914e1face..5769fe6e4950 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -30,16 +30,11 @@ * - Pages falling into physical memory gaps - not IORESOURCE_SYSRAM. Trying * to read/write these pages might end badly. Don't touch! * - The zero page(s) - * - Pages not added to the page allocator when onlining a section because - * they were excluded via the online_page_callback() or because they are - * PG_hwpoison. * - Pages allocated in the context of kexec/kdump (loaded kernel image, * control pages, vmcoreinfo) * - MMIO/DMA pages. Some architectures don't allow to ioremap pages that are * not marked PG_reserved (as they might be in use by somebody else who does * not respect the caching strategy). - * - Pages part of an offline section (struct pages of offline sections should - * not be trusted as they will be initialized when first onlined). * - MCA pages on ia64 * - Pages holding CPU notes for POWER Firmware Assisted Dump * - Device memory (e.g. PMEM, DAX, HMM) @@ -616,11 +611,6 @@ PAGEFLAG_FALSE(Uncached, uncached) PAGEFLAG(HWPoison, hwpoison, PF_ANY) TESTSCFLAG(HWPoison, hwpoison, PF_ANY) #define __PG_HWPOISON (1UL << PG_hwpoison) -#define MAGIC_HWPOISON 0x48575053U /* HWPS */ -extern void SetPageHWPoisonTakenOff(struct page *page); -extern void ClearPageHWPoisonTakenOff(struct page *page); -extern bool take_page_off_buddy(struct page *page); -extern bool put_page_back_buddy(struct page *page); #else PAGEFLAG_FALSE(HWPoison, hwpoison) #define __PG_HWPOISON 0 @@ -655,27 +645,28 @@ PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted) #endif /* - * On an anonymous page mapped into a user virtual memory area, - * page->mapping points to its anon_vma, not to a struct address_space; + * On an anonymous folio mapped into a user virtual memory area, + * folio->mapping points to its anon_vma, not to a struct address_space; * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h. * * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled, * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON - * bit; and then page->mapping points, not to an anon_vma, but to a private + * bit; and then folio->mapping points, not to an anon_vma, but to a private * structure which KSM associates with that merged page. See ksm.h. 
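The tagged-pointer scheme this comment describes packs a type discriminator into the low bits of folio->mapping, which are free because the pointed-to structures are at least word-aligned. A compilable user-space toy model; toy_folio and toy_test_anon are illustrative, with constants mirroring the PAGE_MAPPING_* definitions below:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_MAPPING_ANON     0x1
    #define PAGE_MAPPING_MOVABLE  0x2
    #define PAGE_MAPPING_FLAGS    (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)

    struct toy_folio {
            void *mapping;  /* address_space, anon_vma, or KSM private data */
    };

    /* Mirrors the spirit of folio_test_anon(): bit 0 set means the
     * pointer refers to an anon_vma rather than an address_space. */
    static bool toy_test_anon(const struct toy_folio *folio)
    {
            return ((uintptr_t)folio->mapping & PAGE_MAPPING_ANON) != 0;
    }

    int main(void)
    {
            /* Fake aligned anon_vma address with the anon tag set. */
            struct toy_folio f = {
                    .mapping = (void *)(uintptr_t)(0x1000 | PAGE_MAPPING_ANON)
            };

            printf("anon: %d\n", toy_test_anon(&f));  /* anon: 1 */
            return 0;
    }
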
* * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for non-lru movable - * page and then page->mapping points to a struct movable_operations. + * page and then folio->mapping points to a struct movable_operations. * - * Please note that, confusingly, "page_mapping" refers to the inode - * address_space which maps the page from disk; whereas "page_mapped" - * refers to user virtual address space into which the page is mapped. + * Please note that, confusingly, "folio_mapping" refers to the inode + * address_space which maps the folio from disk; whereas "folio_mapped" + * refers to user virtual address space into which the folio is mapped. * * For slab pages, since slab reuses the bits in struct page to store its - * internal states, the page->mapping does not exist as such, nor do these - * flags below. So in order to avoid testing non-existent bits, please - * make sure that PageSlab(page) actually evaluates to false before calling - * the following functions (e.g., PageAnon). See mm/slab.h. + * internal states, the folio->mapping does not exist as such, nor do + * these flags below. So in order to avoid testing non-existent bits, + * please make sure that folio_test_slab(folio) actually evaluates to + * false before calling the following functions (e.g., folio_test_anon). + * See mm/slab.h. */ #define PAGE_MAPPING_ANON 0x1 #define PAGE_MAPPING_MOVABLE 0x2 @@ -945,22 +936,28 @@ PAGEFLAG_FALSE(HasHWPoisoned, has_hwpoisoned) */ enum pagetype { - PG_buddy = 0x00000080, - PG_offline = 0x00000100, - PG_table = 0x00000200, - PG_guard = 0x00000400, - PG_hugetlb = 0x00000800, - PG_slab = 0x00001000, - - PAGE_TYPE_BASE = 0xf0000000, - /* Reserve 0x0000007f to catch underflows of _mapcount */ - PAGE_MAPCOUNT_RESERVE = -128, + PG_buddy = 0x40000000, + PG_offline = 0x20000000, + PG_table = 0x10000000, + PG_guard = 0x08000000, + PG_hugetlb = 0x04000000, + PG_slab = 0x02000000, + PG_zsmalloc = 0x01000000, + + PAGE_TYPE_BASE = 0x80000000, + + /* + * Reserve 0xffff0000 - 0xfffffffe to catch _mapcount underflows and + * allow owners that set a type to reuse the lower 16 bit for their own + * purposes. + */ + PAGE_MAPCOUNT_RESERVE = ~0x0000ffff, }; #define PageType(page, flag) \ - ((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE) + ((READ_ONCE(page->page_type) & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE) #define folio_test_type(folio, flag) \ - ((folio->page.page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE) + ((READ_ONCE(folio->page.page_type) & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE) static inline int page_type_has_type(unsigned int page_type) { @@ -969,7 +966,7 @@ static inline int page_type_has_type(unsigned int page_type) static inline int page_has_type(const struct page *page) { - return page_type_has_type(page->page_type); + return page_type_has_type(READ_ONCE(page->page_type)); } #define FOLIO_TYPE_OPS(lname, fname) \ @@ -1018,15 +1015,22 @@ PAGE_TYPE_OPS(Buddy, buddy, buddy) * The content of these pages is effectively stale. Such pages should not * be touched (read/write/dump/save) except by their owner. * + * When a memory block gets onlined, all pages are initialized with a + * refcount of 1 and PageOffline(). generic_online_page() will + * take care of clearing PageOffline(). + * * If a driver wants to allow to offline unmovable PageOffline() pages without * putting them back to the buddy, it can do so via the memory notifier by * decrementing the reference count in MEM_GOING_OFFLINE and incrementing the * reference count in MEM_CANCEL_OFFLINE. 
When offlining, the PageOffline() - * pages (now with a reference count of zero) are treated like free pages, - * allowing the containing memory block to get offlined. A driver that + * pages (now with a reference count of zero) are treated like free (unmanaged) + * pages, allowing the containing memory block to get offlined. A driver that * relies on this feature is aware that re-onlining the memory block will - * require to re-set the pages PageOffline() and not giving them to the - * buddy via online_page_callback_t. + * require not giving them to the buddy via generic_online_page(). + * + * Memory offlining code will not adjust the managed page count for any + * PageOffline() pages, treating them like they were never exposed to the + * buddy using generic_online_page(). * * There are drivers that mark a page PageOffline() and expect there won't be * any further access to page content. PFN walkers that read content of random @@ -1070,6 +1074,8 @@ FOLIO_TYPE_OPS(hugetlb, hugetlb) FOLIO_TEST_FLAG_FALSE(hugetlb) #endif +PAGE_TYPE_OPS(Zsmalloc, zsmalloc, zsmalloc) + /** * PageHuge - Determine if the page belongs to hugetlbfs * @page: The page to test. diff --git a/include/linux/page_counter.h b/include/linux/page_counter.h index 8cd858d912c4..904c52f97284 100644 --- a/include/linux/page_counter.h +++ b/include/linux/page_counter.h @@ -81,4 +81,8 @@ static inline void page_counter_reset_watermark(struct page_counter *counter) counter->watermark = page_counter_read(counter); } +void page_counter_calculate_protection(struct page_counter *root, + struct page_counter *counter, + bool recursive_protection); + #endif /* _LINUX_PAGE_COUNTER_H */ diff --git a/include/linux/page_ref.h b/include/linux/page_ref.h index 1acf5bac7f50..8c236c651d1d 100644 --- a/include/linux/page_ref.h +++ b/include/linux/page_ref.h @@ -230,7 +230,13 @@ static inline int folio_ref_dec_return(struct folio *folio) static inline bool page_ref_add_unless(struct page *page, int nr, int u) { - bool ret = atomic_add_unless(&page->_refcount, nr, u); + bool ret = false; + + rcu_read_lock(); + /* avoid writing to the vmemmap area being remapped */ + if (!page_is_fake_head(page) && page_ref_count(page) != u) + ret = atomic_add_unless(&page->_refcount, nr, u); + rcu_read_unlock(); if (page_ref_tracepoint_active(page_ref_mod_unless)) __page_ref_mod_unless(page, nr, ret); @@ -258,54 +264,9 @@ static inline bool folio_try_get(struct folio *folio) return folio_ref_add_unless(folio, 1, 0); } -static inline bool folio_ref_try_add_rcu(struct folio *folio, int count) -{ -#ifdef CONFIG_TINY_RCU - /* - * The caller guarantees the folio will not be freed from interrupt - * context, so (on !SMP) we only need preemption to be disabled - * and TINY_RCU does that for us. - */ -# ifdef CONFIG_PREEMPT_COUNT - VM_BUG_ON(!in_atomic() && !irqs_disabled()); -# endif - VM_BUG_ON_FOLIO(folio_ref_count(folio) == 0, folio); - folio_ref_add(folio, count); -#else - if (unlikely(!folio_ref_add_unless(folio, count, 0))) { - /* Either the folio has been freed, or will be freed. */ - return false; - } -#endif - return true; -} - -/** - * folio_try_get_rcu - Attempt to increase the refcount on a folio. - * @folio: The folio. - * - * This is a version of folio_try_get() optimised for non-SMP kernels. - * If you are still holding the rcu_read_lock() after looking up the - * page and know that the page cannot have its refcount decreased to - * zero in interrupt context, you can use this instead of folio_try_get(). 
- * - * Example users include get_user_pages_fast() (as pages are not unmapped - * from interrupt context) and the page cache lookups (as pages are not - * truncated from interrupt context). We also know that pages are not - * frozen in interrupt context for the purposes of splitting or migration. - * - * You can also use this function if you're holding a lock that prevents - * pages being frozen & removed; eg the i_pages lock for the page cache - * or the mmap_lock or page table lock for page tables. In this case, - * it will always succeed, and you could have used a plain folio_get(), - * but it's sometimes more convenient to have a common function called - * from both locked and RCU-protected contexts. - * - * Return: True if the reference count was successfully incremented. - */ -static inline bool folio_try_get_rcu(struct folio *folio) +static inline bool folio_ref_try_add(struct folio *folio, int count) { - return folio_ref_try_add_rcu(folio, 1); + return folio_ref_add_unless(folio, count, 0); } static inline int page_ref_freeze(struct page *page, int count) diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 59f1df0cde5a..483a191bb4df 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -208,7 +208,8 @@ enum mapping_flags { AS_RELEASE_ALWAYS, /* Call ->release_folio(), even if no private data */ AS_STABLE_WRITES, /* must wait for writeback before modifying folio contents */ - AS_UNMOVABLE, /* The mapping cannot be moved, ever */ + AS_INACCESSIBLE, /* Do not attempt direct R/W access to the mapping, + including to move the mapping */ }; /** @@ -309,20 +310,20 @@ static inline void mapping_clear_stable_writes(struct address_space *mapping) clear_bit(AS_STABLE_WRITES, &mapping->flags); } -static inline void mapping_set_unmovable(struct address_space *mapping) +static inline void mapping_set_inaccessible(struct address_space *mapping) { /* - * It's expected unmovable mappings are also unevictable. Compaction + * It's expected inaccessible mappings are also unevictable. Compaction * migrate scanner (isolate_migratepages_block()) relies on this to * reduce page locking. */ set_bit(AS_UNEVICTABLE, &mapping->flags); - set_bit(AS_UNMOVABLE, &mapping->flags); + set_bit(AS_INACCESSIBLE, &mapping->flags); } -static inline bool mapping_unmovable(struct address_space *mapping) +static inline bool mapping_inaccessible(struct address_space *mapping) { - return test_bit(AS_UNMOVABLE, &mapping->flags); + return test_bit(AS_INACCESSIBLE, &mapping->flags); } static inline gfp_t mapping_gfp_mask(struct address_space * mapping) @@ -354,11 +355,18 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask) * a good order (that's 1MB if you're using 4kB pages) */ #ifdef CONFIG_TRANSPARENT_HUGEPAGE -#define MAX_PAGECACHE_ORDER HPAGE_PMD_ORDER +#define PREFERRED_MAX_PAGECACHE_ORDER HPAGE_PMD_ORDER #else -#define MAX_PAGECACHE_ORDER 8 +#define PREFERRED_MAX_PAGECACHE_ORDER 8 #endif +/* + * xas_split_alloc() does not support arbitrary orders. This implies no + * 512MB THP on ARM64 with 64KB base page size. + */ +#define MAX_XAS_ORDER (XA_CHUNK_SHIFT * 2 - 1) +#define MAX_PAGECACHE_ORDER min(MAX_XAS_ORDER, PREFERRED_MAX_PAGECACHE_ORDER) + /** * mapping_set_large_folios() - Indicate the file supports large folios. * @mapping: The file. 
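A quick check of the arithmetic behind the new page-cache cap, assuming XA_CHUNK_SHIFT is 6 (its value when CONFIG_BASE_SMALL is off): MAX_XAS_ORDER = 6 * 2 - 1 = 11, so an arm64 kernel with 64KB base pages, where HPAGE_PMD_ORDER is 13, is clamped from order-13 (512MB) folios down to order-11 (128MB) in the page cache:

    #include <stdio.h>

    /* Assumed values: XA_CHUNK_SHIFT == 6 and, for arm64 with 64KB
     * base pages, HPAGE_PMD_ORDER == 13. */
    #define XA_CHUNK_SHIFT                 6
    #define MAX_XAS_ORDER                  (XA_CHUNK_SHIFT * 2 - 1)
    #define PREFERRED_MAX_PAGECACHE_ORDER  13
    #define MIN(a, b)                      ((a) < (b) ? (a) : (b))

    int main(void)
    {
            /* Prints 11: xas_split_alloc() cannot handle order-13. */
            printf("MAX_PAGECACHE_ORDER = %d\n",
                   MIN(MAX_XAS_ORDER, PREFERRED_MAX_PAGECACHE_ORDER));
            return 0;
    }
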
@@ -426,7 +434,6 @@ static inline void filemap_nr_thps_dec(struct address_space *mapping) #endif } -struct address_space *page_mapping(struct page *); struct address_space *folio_mapping(struct folio *); struct address_space *swapcache_mapping(struct folio *); @@ -792,7 +799,7 @@ static inline struct page *grab_cache_page_nowait(struct address_space *mapping, mapping_gfp_mask(mapping)); } -#define swapcache_index(folio) __page_file_index(&(folio)->page) +extern pgoff_t __folio_swap_cache_index(struct folio *folio); /** * folio_index - File index of a folio. @@ -807,9 +814,9 @@ static inline struct page *grab_cache_page_nowait(struct address_space *mapping, */ static inline pgoff_t folio_index(struct folio *folio) { - if (unlikely(folio_test_swapcache(folio))) - return swapcache_index(folio); - return folio->index; + if (unlikely(folio_test_swapcache(folio))) + return __folio_swap_cache_index(folio); + return folio->index; } /** @@ -932,11 +939,6 @@ static inline loff_t page_offset(struct page *page) return ((loff_t)page->index) << PAGE_SHIFT; } -static inline loff_t page_file_offset(struct page *page) -{ - return ((loff_t)page_index(page)) << PAGE_SHIFT; -} - /** * folio_pos - Returns the byte position of this folio in its file. * @folio: The folio. @@ -946,18 +948,6 @@ static inline loff_t folio_pos(struct folio *folio) return page_offset(&folio->page); } -/** - * folio_file_pos - Returns the byte position of this folio in its file. - * @folio: The folio. - * - * This differs from folio_pos() for folios which belong to a swap file. - * NFS is the only filesystem today which needs to use folio_file_pos(). - */ -static inline loff_t folio_file_pos(struct folio *folio) -{ - return page_file_offset(&folio->page); -} - /* * Get the offset in PAGE_SIZE (even for hugetlb folios). */ @@ -1311,8 +1301,7 @@ void page_cache_sync_readahead(struct address_space *mapping, * @mapping: address_space which holds the pagecache and I/O vectors * @ra: file_ra_state which holds the readahead state * @file: Used by the filesystem for authentication. - * @folio: The folio at @index which triggered the readahead call. - * @index: Index of first page to be read. + * @folio: The folio which triggered the readahead call. * @req_count: Total number of pages being read by the caller. 
* * page_cache_async_readahead() should be called when a page is used which @@ -1323,9 +1312,9 @@ void page_cache_sync_readahead(struct address_space *mapping, static inline void page_cache_async_readahead(struct address_space *mapping, struct file_ra_state *ra, struct file *file, - struct folio *folio, pgoff_t index, unsigned long req_count) + struct folio *folio, unsigned long req_count) { - DEFINE_READAHEAD(ractl, file, ra, mapping, index); + DEFINE_READAHEAD(ractl, file, ra, mapping, folio->index); page_cache_async_ra(&ractl, folio, req_count); } diff --git a/include/linux/panic.h b/include/linux/panic.h index 6717b15e798c..3130e0b5116b 100644 --- a/include/linux/panic.h +++ b/include/linux/panic.h @@ -77,9 +77,10 @@ static inline void set_arch_panic_timeout(int timeout, int arch_default_timeout) #define TAINT_FLAGS_MAX ((1UL << TAINT_FLAGS_COUNT) - 1) struct taint_flag { - char c_true; /* character printed when tainted */ - char c_false; /* character printed when not tainted */ - bool module; /* also show as a per-module taint flag */ + char c_true; /* character printed when tainted */ + char c_false; /* character printed when not tainted */ + bool module; /* also show as a per-module taint flag */ + const char *desc; /* verbose description of the set taint flag */ }; extern const struct taint_flag taint_flags[TAINT_FLAGS_COUNT]; @@ -90,6 +91,7 @@ enum lockdep_ok { }; extern const char *print_tainted(void); +extern const char *print_tainted_verbose(void); extern void add_taint(unsigned flag, enum lockdep_ok); extern int test_taint(unsigned flag); extern unsigned long get_taint(void); diff --git a/include/linux/parport.h b/include/linux/parport.h index fff39bc30629..464c2ad28039 100644 --- a/include/linux/parport.h +++ b/include/linux/parport.h @@ -252,13 +252,10 @@ struct parport { struct parport_driver { const char *name; - void (*attach) (struct parport *); void (*detach) (struct parport *); void (*match_port)(struct parport *); int (*probe)(struct pardevice *); struct device_driver driver; - bool devmodel; - struct list_head list; }; #define to_parport_driver(n) container_of(n, struct parport_driver, driver) @@ -301,9 +298,6 @@ int __must_check __parport_register_driver(struct parport_driver *, * to receive notifications about ports being found in the * system, as well as ports no longer available. * - * If devmodel is true then the new device model is used - * for registration. - * * The @driver structure is allocated by the caller and must not be * deallocated until after calling parport_unregister_driver(). * diff --git a/include/linux/path.h b/include/linux/path.h index 475225a03d0d..ca073e70decd 100644 --- a/include/linux/path.h +++ b/include/linux/path.h @@ -24,4 +24,13 @@ static inline void path_put_init(struct path *path) *path = (struct path) { }; } +/* + * Cleanup macro for use with __free(path_put). Avoids dereference and + * copying @path unlike DEFINE_FREE(). 
path_put() will handle the empty + * path correctly; just ensure @path is initialized: + * + * struct path path __free(path_put) = {}; + */ +#define __free_path_put path_put + #endif /* _LINUX_PATH_H */ diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h index acc5f96161fe..85bdf2adb760 100644 --- a/include/linux/pci-epc.h +++ b/include/linux/pci-epc.h @@ -197,6 +197,8 @@ struct pci_epc_features { #define to_pci_epc(device) container_of((device), struct pci_epc, dev) +#ifdef CONFIG_PCI_ENDPOINT + #define pci_epc_create(dev, ops) \ __pci_epc_create((dev), (ops), THIS_MODULE) #define devm_pci_epc_create(dev, ops) \ @@ -226,7 +228,8 @@ void pci_epc_linkup(struct pci_epc *epc); void pci_epc_linkdown(struct pci_epc *epc); void pci_epc_init_notify(struct pci_epc *epc); void pci_epc_notify_pending_init(struct pci_epc *epc, struct pci_epf *epf); -void pci_epc_bme_notify(struct pci_epc *epc); +void pci_epc_deinit_notify(struct pci_epc *epc); +void pci_epc_bus_master_enable_notify(struct pci_epc *epc); void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf, enum pci_epc_interface_type type); int pci_epc_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no, @@ -272,4 +275,14 @@ void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc, phys_addr_t *phys_addr, size_t size); void pci_epc_mem_free_addr(struct pci_epc *epc, phys_addr_t phys_addr, void __iomem *virt_addr, size_t size); + +#else +static inline void pci_epc_init_notify(struct pci_epc *epc) +{ +} + +static inline void pci_epc_deinit_notify(struct pci_epc *epc) +{ +} +#endif /* CONFIG_PCI_ENDPOINT */ #endif /* __LINUX_PCI_EPC_H */ diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h index adee6a1b35db..0639d4dc8986 100644 --- a/include/linux/pci-epf.h +++ b/include/linux/pci-epf.h @@ -70,16 +70,18 @@ struct pci_epf_ops { /** * struct pci_epc_event_ops - Callbacks for capturing the EPC events - * @core_init: Callback for the EPC initialization complete event + * @epc_init: Callback for the EPC initialization complete event + * @epc_deinit: Callback for the EPC deinitialization event * @link_up: Callback for the EPC link up event * @link_down: Callback for the EPC link down event - * @bme: Callback for the EPC BME (Bus Master Enable) event + * @bus_master_enable: Callback for the EPC Bus Master Enable event */ struct pci_epc_event_ops { - int (*core_init)(struct pci_epf *epf); + int (*epc_init)(struct pci_epf *epf); + void (*epc_deinit)(struct pci_epf *epf); int (*link_up)(struct pci_epf *epf); int (*link_down)(struct pci_epf *epf); - int (*bme)(struct pci_epf *epf); + int (*bus_master_enable)(struct pci_epf *epf); }; /** diff --git a/include/linux/pci-pwrctl.h b/include/linux/pci-pwrctl.h new file mode 100644 index 000000000000..45e9cfe740e4 --- /dev/null +++ b/include/linux/pci-pwrctl.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2024 Linaro Ltd. + */ + +#ifndef __PCI_PWRCTL_H__ +#define __PCI_PWRCTL_H__ + +#include <linux/notifier.h> + +struct device; +struct device_link; + +/* + * This is a simple framework for solving the issue of PCI devices that require + * certain resources (regulators, GPIOs, clocks) to be enabled before the + * device can actually be detected on the PCI bus.
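The new CONFIG_PCI_ENDPOINT guard above follows the usual compiled-out-stub idiom: real prototypes when the subsystem is built in, empty static inlines otherwise, so callers issuing the init/deinit notifications build without #ifdefs of their own. A generic, compilable sketch of the idiom; CONFIG_FOO, foo_dev and foo_notify() are hypothetical names, not from this diff:

    #include <stdio.h>

    struct foo_dev { int id; };

    #ifdef CONFIG_FOO
    void foo_notify(struct foo_dev *fdev);  /* real implementation elsewhere */
    #else
    static inline void foo_notify(struct foo_dev *fdev)
    {
            (void)fdev;  /* no-op when the feature is compiled out */
    }
    #endif

    int main(void)
    {
            struct foo_dev dev = { .id = 1 };

            foo_notify(&dev);  /* resolves to the stub unless CONFIG_FOO is set */
            printf("notified dev %d\n", dev.id);
            return 0;
    }
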
+ * + * The idea is to reuse the platform bus to populate OF nodes describing the + * PCI device and its resources, let these platform devices probe and enable + * relevant resources and then trigger a rescan of the PCI bus allowing for the + * same device (with a second associated struct device) to be registered with + * the PCI subsystem. + * + * To preserve a correct hierarchy for PCI power management and device reset, + * we create a device link between the power control platform device (parent) + * and the supplied PCI device (child). + */ + +/** + * struct pci_pwrctl - PCI device power control context. + * @dev: Address of the power controlling device. + * + * An object of this type must be allocated by the PCI power control device and + * passed to the pwrctl subsystem to trigger a bus rescan and setup a device + * link with the device once it's up. + */ +struct pci_pwrctl { + struct device *dev; + + /* Private: don't use. */ + struct notifier_block nb; + struct device_link *link; +}; + +int pci_pwrctl_device_set_ready(struct pci_pwrctl *pwrctl); +void pci_pwrctl_device_unset_ready(struct pci_pwrctl *pwrctl); +int devm_pci_pwrctl_device_set_ready(struct device *dev, + struct pci_pwrctl *pwrctl); + +#endif /* __PCI_PWRCTL_H__ */ diff --git a/include/linux/pci.h b/include/linux/pci.h index cafc5ab1cbcb..9e36b6c1810e 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -367,10 +367,11 @@ struct pci_dev { this is D0-D3, D0 being fully functional, and D3 being off. */ u8 pm_cap; /* PM capability offset */ - unsigned int imm_ready:1; /* Supports Immediate Readiness */ unsigned int pme_support:5; /* Bitmask of states from which PME# can be generated */ unsigned int pme_poll:1; /* Poll device's PME status bit */ + unsigned int pinned:1; /* Whether this dev is pinned */ + unsigned int imm_ready:1; /* Supports Immediate Readiness */ unsigned int d1_support:1; /* Low power state D1 is supported */ unsigned int d2_support:1; /* Low power state D2 is supported */ unsigned int no_d1d2:1; /* D1 and D2 are forbidden */ @@ -1549,10 +1550,7 @@ int __must_check pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res, resource_size_t size, resource_size_t align, resource_size_t min, unsigned long type_mask, - resource_size_t (*alignf)(void *, - const struct resource *, - resource_size_t, - resource_size_t), + resource_alignf alignf, void *alignf_data); @@ -2300,6 +2298,8 @@ int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name); int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask, const char *name); void pcim_iounmap_regions(struct pci_dev *pdev, int mask); +void __iomem *pcim_iomap_range(struct pci_dev *pdev, int bar, + unsigned long offset, unsigned long len); extern int pci_pci_problems; #define PCIPCI_FAIL 1 /* No PCI PCI DMA */ diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 942a587bb97e..e388c8b1cbc2 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2126,6 +2126,8 @@ #define PCI_VENDOR_ID_CHELSIO 0x1425 +#define PCI_VENDOR_ID_EDIMAX 0x1432 + #define PCI_VENDOR_ID_ADLINK 0x144a #define PCI_VENDOR_ID_SAMSUNG 0x144d @@ -2599,6 +2601,8 @@ #define PCI_VENDOR_ID_HYGON 0x1d94 +#define PCI_VENDOR_ID_META 0x1d9b + #define PCI_VENDOR_ID_FUNGIBLE 0x1dad #define PCI_VENDOR_ID_HXT 0x1dbf @@ -3112,6 +3116,7 @@ #define PCI_DEVICE_ID_INTEL_HDA_LNL_P 0xa828 #define PCI_DEVICE_ID_INTEL_S21152BB 0xb152 #define PCI_DEVICE_ID_INTEL_HDA_BMG 0xe2f7 +#define PCI_DEVICE_ID_INTEL_HDA_PTL 0xe428 #define PCI_DEVICE_ID_INTEL_HDA_CML_R 
0xf0c8 #define PCI_DEVICE_ID_INTEL_HDA_RKL_S 0xf1c8 diff --git a/include/linux/pcs/pcs-xpcs.h b/include/linux/pcs/pcs-xpcs.h index da3a6c30f6d2..b4a4eb6c8866 100644 --- a/include/linux/pcs/pcs-xpcs.h +++ b/include/linux/pcs/pcs-xpcs.h @@ -7,11 +7,12 @@ #ifndef __LINUX_PCS_XPCS_H #define __LINUX_PCS_XPCS_H +#include <linux/clk.h> +#include <linux/fwnode.h> +#include <linux/mdio.h> #include <linux/phy.h> #include <linux/phylink.h> - -#define NXP_SJA1105_XPCS_ID 0x00000010 -#define NXP_SJA1110_XPCS_ID 0x00000020 +#include <linux/types.h> /* AN mode */ #define DW_AN_C73 1 @@ -20,20 +21,46 @@ #define DW_AN_C37_1000BASEX 4 #define DW_10GBASER 5 -/* device vendor OUI */ -#define DW_OUI_WX 0x0018fc80 +struct dw_xpcs_desc; + +enum dw_xpcs_pcs_id { + DW_XPCS_ID_NATIVE = 0, + NXP_SJA1105_XPCS_ID = 0x00000010, + NXP_SJA1110_XPCS_ID = 0x00000020, + DW_XPCS_ID = 0x7996ced0, + DW_XPCS_ID_MASK = 0xffffffff, +}; -/* dev_flag */ -#define DW_DEV_TXGBE BIT(0) +enum dw_xpcs_pma_id { + DW_XPCS_PMA_ID_NATIVE = 0, + DW_XPCS_PMA_GEN1_3G_ID, + DW_XPCS_PMA_GEN2_3G_ID, + DW_XPCS_PMA_GEN2_6G_ID, + DW_XPCS_PMA_GEN4_3G_ID, + DW_XPCS_PMA_GEN4_6G_ID, + DW_XPCS_PMA_GEN5_10G_ID, + DW_XPCS_PMA_GEN5_12G_ID, + WX_TXGBE_XPCS_PMA_10G_ID = 0x0018fc80, +}; -struct xpcs_id; +struct dw_xpcs_info { + u32 pcs; + u32 pma; +}; + +enum dw_xpcs_clock { + DW_XPCS_CORE_CLK, + DW_XPCS_PAD_CLK, + DW_XPCS_NUM_CLKS, +}; struct dw_xpcs { + struct dw_xpcs_info info; + const struct dw_xpcs_desc *desc; struct mdio_device *mdiodev; - const struct xpcs_id *id; + struct clk_bulk_data clks[DW_XPCS_NUM_CLKS]; struct phylink_pcs pcs; phy_interface_t interface; - int dev_flag; }; int xpcs_get_an_mode(struct dw_xpcs *xpcs, phy_interface_t interface); @@ -46,6 +73,8 @@ int xpcs_config_eee(struct dw_xpcs *xpcs, int mult_fact_100ns, int enable); struct dw_xpcs *xpcs_create_mdiodev(struct mii_bus *bus, int addr, phy_interface_t interface); +struct dw_xpcs *xpcs_create_fwnode(struct fwnode_handle *fwnode, + phy_interface_t interface); void xpcs_destroy(struct dw_xpcs *xpcs); #endif /* __LINUX_PCS_XPCS_H */ diff --git a/include/linux/peci-cpu.h b/include/linux/peci-cpu.h index ff8ae9c26c80..601cdd086bf6 100644 --- a/include/linux/peci-cpu.h +++ b/include/linux/peci-cpu.h @@ -6,6 +6,30 @@ #include <linux/types.h> +/* Copied from x86 <asm/processor.h> */ +#define X86_VENDOR_INTEL 0 + +/* Copied from x86 <asm/cpu_device_id.h> */ +#define VFM_MODEL_BIT 0 +#define VFM_FAMILY_BIT 8 +#define VFM_VENDOR_BIT 16 +#define VFM_RSVD_BIT 24 + +#define VFM_MODEL_MASK GENMASK(VFM_FAMILY_BIT - 1, VFM_MODEL_BIT) +#define VFM_FAMILY_MASK GENMASK(VFM_VENDOR_BIT - 1, VFM_FAMILY_BIT) +#define VFM_VENDOR_MASK GENMASK(VFM_RSVD_BIT - 1, VFM_VENDOR_BIT) + +#define VFM_MODEL(vfm) (((vfm) & VFM_MODEL_MASK) >> VFM_MODEL_BIT) +#define VFM_FAMILY(vfm) (((vfm) & VFM_FAMILY_MASK) >> VFM_FAMILY_BIT) +#define VFM_VENDOR(vfm) (((vfm) & VFM_VENDOR_MASK) >> VFM_VENDOR_BIT) + +#define VFM_MAKE(_vendor, _family, _model) ( \ + ((_model) << VFM_MODEL_BIT) | \ + ((_family) << VFM_FAMILY_BIT) | \ + ((_vendor) << VFM_VENDOR_BIT) \ +) +/* End of copied code */ + #include "../../arch/x86/include/asm/intel-family.h" #define PECI_PCS_PKG_ID 0 /* Package Identifier Read */ diff --git a/include/linux/peci.h b/include/linux/peci.h index 90e241458ef6..3e0bc37591d6 100644 --- a/include/linux/peci.h +++ b/include/linux/peci.h @@ -59,8 +59,7 @@ static inline struct peci_controller *to_peci_controller(void *d) * struct peci_device - PECI device * @dev: device object to register PECI device to the device model * @info: 
PECI device characteristics - * @info.family: device family - * @info.model: device model + * @info.x86_vfm: device vendor-family-model * @info.peci_revision: PECI revision supported by the PECI device * @info.socket_id: the socket ID represented by the PECI device * @addr: address used on the PECI bus connected to the parent controller @@ -73,8 +72,7 @@ static inline struct peci_controller *to_peci_controller(void *d) struct peci_device { struct device dev; struct { - u16 family; - u8 model; + u32 x86_vfm; u8 peci_revision; u8 socket_id; } info; diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h index ec3573119923..8efce7414fad 100644 --- a/include/linux/percpu-defs.h +++ b/include/linux/percpu-defs.h @@ -475,6 +475,12 @@ do { \ raw_cpu_cmpxchg(pcp, oval, nval); \ }) +#define __this_cpu_try_cmpxchg(pcp, ovalp, nval) \ +({ \ + __this_cpu_preempt_check("try_cmpxchg"); \ + raw_cpu_try_cmpxchg(pcp, ovalp, nval); \ +}) + #define __this_cpu_sub(pcp, val) __this_cpu_add(pcp, -(typeof(pcp))(val)) #define __this_cpu_inc(pcp) __this_cpu_add(pcp, 1) #define __this_cpu_dec(pcp) __this_cpu_sub(pcp, 1) diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 03053de557cf..4b2047b78b67 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -6,7 +6,6 @@ #include <linux/mmdebug.h> #include <linux/preempt.h> #include <linux/smp.h> -#include <linux/cpumask.h> #include <linux/pfn.h> #include <linux/init.h> #include <linux/cleanup.h> diff --git a/include/linux/perf/arm_pmuv3.h b/include/linux/perf/arm_pmuv3.h index 46377e134d67..7867db04ec98 100644 --- a/include/linux/perf/arm_pmuv3.h +++ b/include/linux/perf/arm_pmuv3.h @@ -309,4 +309,6 @@ } \ } while (0) +#include <asm/arm_pmuv3.h> + #endif diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index a5304ae8c654..65ece0d5b4b6 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -781,11 +781,12 @@ struct perf_event { unsigned int pending_wakeup; unsigned int pending_kill; unsigned int pending_disable; - unsigned int pending_sigtrap; unsigned long pending_addr; /* SIGTRAP */ struct irq_work pending_irq; + struct irq_work pending_disable_irq; struct callback_head pending_task; unsigned int pending_work; + struct rcuwait pending_work_wait; atomic_t event_limit; @@ -962,7 +963,7 @@ struct perf_event_context { struct rcu_head rcu_head; /* - * Sum (event->pending_sigtrap + event->pending_work) + * Sum (event->pending_work + event->pending_work) * * The SIGTRAP is targeted at ctx->task, as such it won't do changing * that until the signal is delivered. @@ -970,12 +971,6 @@ struct perf_event_context { local_t nr_pending; }; -/* - * Number of contexts where an event can trigger: - * task, softirq, hardirq, nmi. 
- */ -#define PERF_NR_CONTEXTS 4 - struct perf_cpu_pmu_context { struct perf_event_pmu_context epc; struct perf_event_pmu_context *task_epc; diff --git a/include/linux/pgalloc_tag.h b/include/linux/pgalloc_tag.h index 9cacadbd61f8..18cd0c0c73d9 100644 --- a/include/linux/pgalloc_tag.h +++ b/include/linux/pgalloc_tag.h @@ -15,7 +15,7 @@ extern struct page_ext_operations page_alloc_tagging_ops; static inline union codetag_ref *codetag_ref_from_page_ext(struct page_ext *page_ext) { - return (void *)page_ext + page_alloc_tagging_ops.offset; + return (union codetag_ref *)page_ext_data(page_ext, &page_alloc_tagging_ops); } static inline struct page_ext *page_ext_from_codetag_ref(union codetag_ref *ref) @@ -71,6 +71,7 @@ static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) static inline void pgalloc_tag_split(struct page *page, unsigned int nr) { int i; + struct page_ext *first_page_ext; struct page_ext *page_ext; union codetag_ref *ref; struct alloc_tag *tag; @@ -78,7 +79,7 @@ static inline void pgalloc_tag_split(struct page *page, unsigned int nr) if (!mem_alloc_profiling_enabled()) return; - page_ext = page_ext_get(page); + first_page_ext = page_ext = page_ext_get(page); if (unlikely(!page_ext)) return; @@ -94,7 +95,7 @@ static inline void pgalloc_tag_split(struct page *page, unsigned int nr) page_ext = page_ext_next(page_ext); } out: - page_ext_put(page_ext); + page_ext_put(first_page_ext); } static inline struct alloc_tag *pgalloc_tag_get(struct page *page) diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index 18019f037bae..2a6a3cccfc36 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -729,13 +729,18 @@ static inline void clear_full_ptes(struct mm_struct *mm, unsigned long addr, * fault. This function updates TLB only, do nothing with cache or others. * It is the difference with function update_mmu_cache. */ -#ifndef __HAVE_ARCH_UPDATE_MMU_TLB +#ifndef update_mmu_tlb_range +static inline void update_mmu_tlb_range(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep, unsigned int nr) +{ +} +#endif + static inline void update_mmu_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { + update_mmu_tlb_range(vma, address, ptep, 1); } -#define __HAVE_ARCH_UPDATE_MMU_TLB -#endif /* * Some architectures may be able to avoid expensive synchronization @@ -1084,6 +1089,15 @@ static inline int pgd_same(pgd_t pgd_a, pgd_t pgd_b) }) #ifndef __HAVE_ARCH_DO_SWAP_PAGE +static inline void arch_do_swap_page_nr(struct mm_struct *mm, + struct vm_area_struct *vma, + unsigned long addr, + pte_t pte, pte_t oldpte, + int nr) +{ + +} +#else /* * Some architectures support metadata associated with a page. When a * page is being swapped out, this metadata must be saved so it can be @@ -1092,12 +1106,17 @@ static inline int pgd_same(pgd_t pgd_a, pgd_t pgd_b) * page as metadata for the page. arch_do_swap_page() can restore this * metadata when a page is swapped back in. 
*/ -static inline void arch_do_swap_page(struct mm_struct *mm, - struct vm_area_struct *vma, - unsigned long addr, - pte_t pte, pte_t oldpte) -{ - +static inline void arch_do_swap_page_nr(struct mm_struct *mm, + struct vm_area_struct *vma, + unsigned long addr, + pte_t pte, pte_t oldpte, + int nr) +{ + for (int i = 0; i < nr; i++) { + arch_do_swap_page(vma->vm_mm, vma, addr + i * PAGE_SIZE, + pte_advance_pfn(pte, i), + pte_advance_pfn(oldpte, i)); + } } #endif @@ -1888,9 +1907,12 @@ typedef unsigned int pgtbl_mod_mask; #ifndef pmd_leaf_size #define pmd_leaf_size(x) PMD_SIZE #endif +#ifndef __pte_leaf_size #ifndef pte_leaf_size #define pte_leaf_size(x) PAGE_SIZE #endif +#define __pte_leaf_size(x,y) pte_leaf_size(y) +#endif /* * We always define pmd_pfn for all archs as it's used in lots of generic diff --git a/include/linux/phy.h b/include/linux/phy.h index e6e83304558e..04ae5c811cfb 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -128,6 +128,7 @@ extern const int phy_10gbit_features_array[1]; * @PHY_INTERFACE_MODE_10GKR: 10GBASE-KR - with Clause 73 AN * @PHY_INTERFACE_MODE_QUSGMII: Quad Universal SGMII * @PHY_INTERFACE_MODE_1000BASEKX: 1000Base-KX - with Clause 73 AN + * @PHY_INTERFACE_MODE_10G_QXGMII: 10G-QXGMII - 4 ports over 10G USXGMII * @PHY_INTERFACE_MODE_MAX: Book keeping * * Describes the interface between the MAC and PHY. @@ -168,6 +169,7 @@ typedef enum { PHY_INTERFACE_MODE_10GKR, PHY_INTERFACE_MODE_QUSGMII, PHY_INTERFACE_MODE_1000BASEKX, + PHY_INTERFACE_MODE_10G_QXGMII, PHY_INTERFACE_MODE_MAX, } phy_interface_t; @@ -289,6 +291,8 @@ static inline const char *phy_modes(phy_interface_t interface) return "100base-x"; case PHY_INTERFACE_MODE_QUSGMII: return "qusgmii"; + case PHY_INTERFACE_MODE_10G_QXGMII: + return "10g-qxgmii"; default: return "unknown"; } @@ -612,6 +616,8 @@ struct macsec_ops; * handling shall be postponed until PHY has resumed * @irq_rerun: Flag indicating interrupts occurred while PHY was suspended, * requiring a rerun of the interrupt handler after resume + * @default_timestamp: Flag indicating whether we are using the phy + * timestamp as the default one * @interface: enum phy_interface_t value * @possible_interfaces: bitmap if interface modes that the attached PHY * will switch between depending on media speed. @@ -677,6 +683,8 @@ struct phy_device { unsigned irq_suspended:1; unsigned irq_rerun:1; + unsigned default_timestamp:1; + int rate_matching; enum phy_state state; @@ -1122,7 +1130,7 @@ struct phy_driver { u8 index, enum led_brightness value); /** - * @led_blink_set: Set a PHY LED brightness. Index indicates + * @led_blink_set: Set a PHY LED blinking. Index indicates * which of the PHYs led should be configured to blink. Delays * are in milliseconds and if both are zero then a sensible * default should be chosen. 
The call should adjust the @@ -1610,7 +1618,7 @@ static inline bool phy_rxtstamp(struct phy_device *phydev, struct sk_buff *skb, } static inline int phy_ts_info(struct phy_device *phydev, - struct ethtool_ts_info *tsinfo) + struct kernel_ethtool_ts_info *tsinfo) { return phydev->mii_ts->ts_info(phydev->mii_ts, tsinfo); } @@ -1622,6 +1630,21 @@ static inline void phy_txtstamp(struct phy_device *phydev, struct sk_buff *skb, } /** + * phy_is_default_hwtstamp - Is the PHY hwtstamp the default timestamp + * @phydev: Pointer to phy_device + * + * This is used to get default timestamping device taking into account + * the new API choice, which is selecting the timestamping from MAC by + * default if the phydev does not have default_timestamp flag enabled. + * + * Return: True if phy is the default hw timestamp, false otherwise. + */ +static inline bool phy_is_default_hwtstamp(struct phy_device *phydev) +{ + return phy_has_hwtstamp(phydev) && phydev->default_timestamp; +} + +/** * phy_is_internal - Convenience function for testing if a PHY is internal * @phydev: the phy_device struct */ diff --git a/include/linux/phylink.h b/include/linux/phylink.h index 5ea6b2ad2396..2381e07429a2 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -141,7 +141,8 @@ enum phylink_op_type { * @mac_requires_rxc: if true, the MAC always requires a receive clock from PHY. * The PHY driver should start the clock signal as soon as * possible and avoid stopping it during suspend events. - * @ovr_an_inband: if true, override PCS to MLO_AN_INBAND + * @default_an_inband: if true, defaults to MLO_AN_INBAND rather than + * MLO_AN_PHY. A fixed-link specification will override. * @get_fixed_state: callback to execute to determine the fixed link state, * if MAC link is at %MLO_AN_FIXED mode. 
* @supported_interfaces: bitmap describing which PHY_INTERFACE_MODE_xxx @@ -154,7 +155,7 @@ struct phylink_config { bool poll_fixed_state; bool mac_managed_pm; bool mac_requires_rxc; - bool ovr_an_inband; + bool default_an_inband; void (*get_fixed_state)(struct phylink_config *config, struct phylink_link_state *state); DECLARE_PHY_INTERFACE_MASK(supported_interfaces); @@ -653,6 +654,7 @@ static inline int phylink_get_link_timer_ns(phy_interface_t interface) case PHY_INTERFACE_MODE_SGMII: case PHY_INTERFACE_MODE_QSGMII: case PHY_INTERFACE_MODE_USXGMII: + case PHY_INTERFACE_MODE_10G_QXGMII: return 1600000; case PHY_INTERFACE_MODE_1000BASEX: diff --git a/include/linux/platform_data/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h index ecc47d5fe239..e574b790be6f 100644 --- a/include/linux/platform_data/cros_ec_commands.h +++ b/include/linux/platform_data/cros_ec_commands.h @@ -3463,6 +3463,34 @@ union __ec_align_offset1 ec_response_get_next_data_v1 { }; BUILD_ASSERT(sizeof(union ec_response_get_next_data_v1) == 16); +union __ec_align_offset1 ec_response_get_next_data_v3 { + uint8_t key_matrix[18]; + + /* Unaligned */ + uint32_t host_event; + uint64_t host_event64; + + struct __ec_todo_unpacked { + /* For aligning the fifo_info */ + uint8_t reserved[3]; + struct ec_response_motion_sense_fifo_info info; + } sensor_fifo; + + uint32_t buttons; + + uint32_t switches; + + uint32_t fp_events; + + uint32_t sysrq; + + /* CEC events from enum mkbp_cec_event */ + uint32_t cec_events; + + uint8_t cec_message[16]; +}; +BUILD_ASSERT(sizeof(union ec_response_get_next_data_v3) == 18); + struct ec_response_get_next_event { uint8_t event_type; /* Followed by event data if any */ @@ -3475,6 +3503,12 @@ struct ec_response_get_next_event_v1 { union ec_response_get_next_data_v1 data; } __ec_align1; +struct ec_response_get_next_event_v3 { + uint8_t event_type; + /* Followed by event data if any */ + union ec_response_get_next_data_v3 data; +} __ec_align1; + /* Bit indices for buttons and switches.*/ /* Buttons */ #define EC_MKBP_POWER_BUTTON 0 @@ -3809,16 +3843,61 @@ struct ec_params_i2c_write { * discharge the battery. */ #define EC_CMD_CHARGE_CONTROL 0x0096 -#define EC_VER_CHARGE_CONTROL 1 +#define EC_VER_CHARGE_CONTROL 3 enum ec_charge_control_mode { CHARGE_CONTROL_NORMAL = 0, CHARGE_CONTROL_IDLE, CHARGE_CONTROL_DISCHARGE, + /* Add no more entry below. */ + CHARGE_CONTROL_COUNT, +}; + +#define EC_CHARGE_MODE_TEXT \ + { \ + [CHARGE_CONTROL_NORMAL] = "NORMAL", \ + [CHARGE_CONTROL_IDLE] = "IDLE", \ + [CHARGE_CONTROL_DISCHARGE] = "DISCHARGE", \ + } + +enum ec_charge_control_cmd { + EC_CHARGE_CONTROL_CMD_SET = 0, + EC_CHARGE_CONTROL_CMD_GET, +}; + +enum ec_charge_control_flag { + EC_CHARGE_CONTROL_FLAG_NO_IDLE = BIT(0), }; struct ec_params_charge_control { - uint32_t mode; /* enum charge_control_mode */ + uint32_t mode; /* enum charge_control_mode */ + + /* Below are the fields added in V2. */ + uint8_t cmd; /* enum ec_charge_control_cmd. */ + uint8_t flags; /* enum ec_charge_control_flag (v3+) */ + /* + * Lower and upper thresholds for battery sustainer. This struct isn't + * named to avoid tainting foreign projects' name spaces. + * + * If charge mode is explicitly set (e.g. DISCHARGE), battery sustainer + * will be disabled. To disable battery sustainer, set mode=NORMAL, + * lower=-1, upper=-1. + */ + struct { + int8_t lower; /* Display SoC in percentage. */ + int8_t upper; /* Display SoC in percentage. 
*/ + } sustain_soc; +} __ec_align4; + +/* Added in v2 */ +struct ec_response_charge_control { + uint32_t mode; /* enum charge_control_mode */ + struct { /* Battery sustainer thresholds */ + int8_t lower; + int8_t upper; + } sustain_soc; + uint8_t flags; /* enum ec_charge_control_flag (v3+) */ + uint8_t reserved; } __ec_align4; /*****************************************************************************/ diff --git a/include/linux/platform_data/cros_ec_proto.h b/include/linux/platform_data/cros_ec_proto.h index 8865e350c12a..b34ed0cc1f8d 100644 --- a/include/linux/platform_data/cros_ec_proto.h +++ b/include/linux/platform_data/cros_ec_proto.h @@ -185,7 +185,7 @@ struct cros_ec_device { bool host_sleep_v1; struct blocking_notifier_head event_notifier; - struct ec_response_get_next_event_v1 event_data; + struct ec_response_get_next_event_v3 event_data; int event_size; u32 host_event_wake_mask; u32 last_resume_result; @@ -261,6 +261,10 @@ int cros_ec_get_sensor_count(struct cros_ec_dev *ec); int cros_ec_cmd(struct cros_ec_device *ec_dev, unsigned int version, int command, const void *outdata, size_t outsize, void *indata, size_t insize); +int cros_ec_cmd_readmem(struct cros_ec_device *ec_dev, u8 offset, u8 size, void *dest); + +int cros_ec_get_cmd_versions(struct cros_ec_device *ec_dev, u16 cmd); + /** * cros_ec_get_time_ns() - Return time in ns. * diff --git a/include/linux/platform_data/lenovo-yoga-c630.h b/include/linux/platform_data/lenovo-yoga-c630.h new file mode 100644 index 000000000000..5d1f9fb33cfc --- /dev/null +++ b/include/linux/platform_data/lenovo-yoga-c630.h @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022-2024, Linaro Ltd + * Authors: + * Bjorn Andersson + * Dmitry Baryshkov + */ + +#ifndef _LENOVO_YOGA_C630_DATA_H +#define _LENOVO_YOGA_C630_DATA_H + +struct yoga_c630_ec; +struct notifier_block; + +#define YOGA_C630_MOD_NAME "lenovo_yoga_c630" + +#define YOGA_C630_DEV_UCSI "ucsi" +#define YOGA_C630_DEV_PSY "psy" + +int yoga_c630_ec_read8(struct yoga_c630_ec *ec, u8 addr); +int yoga_c630_ec_read16(struct yoga_c630_ec *ec, u8 addr); + +int yoga_c630_ec_register_notify(struct yoga_c630_ec *ec, struct notifier_block *nb); +void yoga_c630_ec_unregister_notify(struct yoga_c630_ec *ec, struct notifier_block *nb); + +#define YOGA_C630_UCSI_WRITE_SIZE 8 +#define YOGA_C630_UCSI_CCI_SIZE 4 +#define YOGA_C630_UCSI_DATA_SIZE 16 +#define YOGA_C630_UCSI_READ_SIZE (YOGA_C630_UCSI_CCI_SIZE + YOGA_C630_UCSI_DATA_SIZE) + +u16 yoga_c630_ec_ucsi_get_version(struct yoga_c630_ec *ec); +int yoga_c630_ec_ucsi_write(struct yoga_c630_ec *ec, + const u8 req[YOGA_C630_UCSI_WRITE_SIZE]); +int yoga_c630_ec_ucsi_read(struct yoga_c630_ec *ec, + u8 resp[YOGA_C630_UCSI_READ_SIZE]); + +#define LENOVO_EC_EVENT_USB 0x20 +#define LENOVO_EC_EVENT_UCSI 0x21 +#define LENOVO_EC_EVENT_HPD 0x22 +#define LENOVO_EC_EVENT_BAT_STATUS 0x24 +#define LENOVO_EC_EVENT_BAT_INFO 0x25 +#define LENOVO_EC_EVENT_BAT_ADPT_STATUS 0x37 + +#endif diff --git a/include/linux/platform_data/mmc-pxamci.h b/include/linux/platform_data/mmc-pxamci.h index 7e44e84e7150..652f323b5ecc 100644 --- a/include/linux/platform_data/mmc-pxamci.h +++ b/include/linux/platform_data/mmc-pxamci.h @@ -7,6 +7,7 @@ struct device; struct mmc_host; +struct property_entry; struct pxamci_platform_data { unsigned int ocr_mask; /* available voltages */ @@ -18,7 +19,8 @@ struct pxamci_platform_data { bool gpio_card_ro_invert; /* gpio ro is inverted */ }; -extern void pxa_set_mci_info(struct pxamci_platform_data *info); +extern void 
pxa_set_mci_info(const struct pxamci_platform_data *info, + const struct property_entry *props); extern void pxa3xx_set_mci2_info(struct pxamci_platform_data *info); extern void pxa3xx_set_mci3_info(struct pxamci_platform_data *info); diff --git a/include/linux/platform_data/tmio.h b/include/linux/platform_data/tmio.h new file mode 100644 index 000000000000..b060124ba1ae --- /dev/null +++ b/include/linux/platform_data/tmio.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef MFD_TMIO_H +#define MFD_TMIO_H + +#include <linux/platform_device.h> +#include <linux/types.h> + +/* TMIO MMC platform flags */ + +/* + * Some controllers can support a 2-byte block size when the bus width is + * configured in 4-bit mode. + */ +#define TMIO_MMC_BLKSZ_2BYTES BIT(1) + +/* Some controllers can support SDIO IRQ signalling */ +#define TMIO_MMC_SDIO_IRQ BIT(2) + +/* Some features are only available or tested on R-Car Gen2 or later */ +#define TMIO_MMC_MIN_RCAR2 BIT(3) + +/* + * Some controllers require waiting for the SD bus to become idle before + * writing to some registers. + */ +#define TMIO_MMC_HAS_IDLE_WAIT BIT(4) + +/* + * Use the busy timeout feature. Probably all TMIO versions support it. Yet, + * we don't have documentation for old variants, so we enable only known good + * variants with this flag. Can be removed once all variants are known good. + */ +#define TMIO_MMC_USE_BUSY_TIMEOUT BIT(5) + +/* Some controllers have a CMD12 automatic issue/non-issue register */ +#define TMIO_MMC_HAVE_CMD12_CTRL BIT(7) + +/* Controller has some SDIO status bits which must be 1 */ +#define TMIO_MMC_SDIO_STATUS_SETBITS BIT(8) + +/* Some controllers have a 32-bit wide data port register */ +#define TMIO_MMC_32BIT_DATA_PORT BIT(9) + +/* Some controllers allow setting the SDx actual clock */ +#define TMIO_MMC_CLK_ACTUAL BIT(10) + +/* Some controllers have a CBSY bit */ +#define TMIO_MMC_HAVE_CBSY BIT(11) + +struct tmio_mmc_data { + void *chan_priv_tx; + void *chan_priv_rx; + unsigned int hclk; + unsigned long capabilities; + unsigned long capabilities2; + unsigned long flags; + u32 ocr_mask; /* available voltages */ + dma_addr_t dma_rx_offset; + unsigned int max_blk_count; + unsigned short max_segs; +}; +#endif diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h index 3eb5cd6773ad..0aeeae1c1943 100644 --- a/include/linux/platform_data/x86/asus-wmi.h +++ b/include/linux/platform_data/x86/asus-wmi.h @@ -51,6 +51,10 @@ #define ASUS_WMI_DEVID_LED6 0x00020016 #define ASUS_WMI_DEVID_MICMUTE_LED 0x00040017 +/* Disable Camera LED */ +#define ASUS_WMI_DEVID_CAMERA_LED_NEG 0x00060078 /* 0 = on (unused) */ +#define ASUS_WMI_DEVID_CAMERA_LED 0x00060079 /* 1 = on */ + /* Backlight and Brightness */ #define ASUS_WMI_DEVID_ALS_ENABLE 0x00050001 /* Ambient Light Sensor */ #define ASUS_WMI_DEVID_BACKLIGHT 0x00050011 diff --git a/include/linux/platform_data/x86/soc.h b/include/linux/platform_data/x86/soc.h index a5705189e2ac..f981907a5cb0 100644 --- a/include/linux/platform_data/x86/soc.h +++ b/include/linux/platform_data/x86/soc.h @@ -20,7 +20,7 @@ static inline bool soc_intel_is_##soc(void) \ { \ static const struct x86_cpu_id soc##_cpu_ids[] = { \ - X86_MATCH_INTEL_FAM6_MODEL(type, NULL), \ + X86_MATCH_VFM(type, NULL), \ {} \ }; \ const struct x86_cpu_id *id; \ @@ -31,11 +31,11 @@ static inline bool soc_intel_is_##soc(void) \ return false; \ } -SOC_INTEL_IS_CPU(byt, ATOM_SILVERMONT); -SOC_INTEL_IS_CPU(cht, ATOM_AIRMONT); -SOC_INTEL_IS_CPU(apl, ATOM_GOLDMONT);
-SOC_INTEL_IS_CPU(glk, ATOM_GOLDMONT_PLUS); -SOC_INTEL_IS_CPU(cml, KABYLAKE_L); +SOC_INTEL_IS_CPU(byt, INTEL_ATOM_SILVERMONT); +SOC_INTEL_IS_CPU(cht, INTEL_ATOM_AIRMONT); +SOC_INTEL_IS_CPU(apl, INTEL_ATOM_GOLDMONT); +SOC_INTEL_IS_CPU(glk, INTEL_ATOM_GOLDMONT_PLUS); +SOC_INTEL_IS_CPU(cml, INTEL_KABYLAKE_L); #undef SOC_INTEL_IS_CPU diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index f24546a3d3db..858c8e7851fb 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -16,7 +16,7 @@ #include <linux/of.h> #include <linux/notifier.h> #include <linux/spinlock.h> -#include <linux/cpumask.h> +#include <linux/cpumask_types.h> #include <linux/time64.h> /* @@ -175,6 +175,10 @@ struct generic_pm_domain { int (*set_performance_state)(struct generic_pm_domain *genpd, unsigned int state); struct gpd_dev_ops dev_ops; + int (*set_hwmode_dev)(struct generic_pm_domain *domain, + struct device *dev, bool enable); + bool (*get_hwmode_dev)(struct generic_pm_domain *domain, + struct device *dev); int (*attach_dev)(struct generic_pm_domain *domain, struct device *dev); void (*detach_dev)(struct generic_pm_domain *domain, @@ -237,6 +241,7 @@ struct generic_pm_domain_data { unsigned int performance_state; unsigned int default_pstate; unsigned int rpm_pstate; + bool hw_mode; void *data; }; @@ -267,6 +272,8 @@ int dev_pm_genpd_remove_notifier(struct device *dev); void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next); ktime_t dev_pm_genpd_get_next_hrtimer(struct device *dev); void dev_pm_genpd_synced_poweroff(struct device *dev); +int dev_pm_genpd_set_hwmode(struct device *dev, bool enable); +bool dev_pm_genpd_get_hwmode(struct device *dev); extern struct dev_power_governor simple_qos_governor; extern struct dev_power_governor pm_domain_always_on_gov; @@ -340,6 +347,16 @@ static inline ktime_t dev_pm_genpd_get_next_hrtimer(struct device *dev) static inline void dev_pm_genpd_synced_poweroff(struct device *dev) { } +static inline int dev_pm_genpd_set_hwmode(struct device *dev, bool enable) +{ + return -EOPNOTSUPP; +} + +static inline bool dev_pm_genpd_get_hwmode(struct device *dev) +{ + return false; +} + #define simple_qos_governor (*(struct dev_power_governor *)(NULL)) #define pm_domain_always_on_gov (*(struct dev_power_governor *)(NULL)) #endif diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index dd7c8441af42..6424692c30b7 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -474,6 +474,7 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpuma struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev); struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp); int of_get_required_opp_performance_state(struct device_node *np, int index); +bool dev_pm_opp_of_has_required_opp(struct device *dev); int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_table *opp_table); int dev_pm_opp_of_register_em(struct device *dev, struct cpumask *cpus); int dev_pm_opp_calc_power(struct device *dev, unsigned long *uW, @@ -552,6 +553,11 @@ static inline int of_get_required_opp_performance_state(struct device_node *np, return -EOPNOTSUPP; } +static inline bool dev_pm_opp_of_has_required_opp(struct device *dev) +{ + return false; +} + static inline int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_table *opp_table) { return -EOPNOTSUPP; diff --git a/include/linux/poison.h b/include/linux/poison.h index 1f0ee2459f2a..331a9a996fa8 100644 --- a/include/linux/poison.h +++ 
b/include/linux/poison.h @@ -38,11 +38,8 @@ * Magic nums for obj red zoning. * Placed in the first word before and the first word after an obj. */ -#define RED_INACTIVE 0x09F911029D74E35BULL /* when obj is inactive */ -#define RED_ACTIVE 0xD84156C5635688C0ULL /* when obj is active */ - -#define SLUB_RED_INACTIVE 0xbb -#define SLUB_RED_ACTIVE 0xcc +#define SLUB_RED_INACTIVE 0xbb /* when obj is inactive */ +#define SLUB_RED_ACTIVE 0xcc /* when obj is active */ /* ...and for poisoning */ #define POISON_INUSE 0x5a /* for use-uninitialised poisoning */ @@ -52,12 +49,6 @@ /********** arch/$ARCH/mm/init.c **********/ #define POISON_FREE_INITMEM 0xcc -/********** arch/ia64/hp/common/sba_iommu.c **********/ -/* - * arch/ia64/hp/common/sba_iommu.c uses a 16-byte poison string with a - * value of "SBAIOMMU POISON\0" for spill-over poisoning. - */ - /********** fs/jbd/journal.c **********/ #define JBD_POISON_FREE 0x5b #define JBD2_POISON_FREE 0x5c diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 8e5705a56b85..c852cc882501 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -319,6 +319,8 @@ struct power_supply { char *online_trig_name; struct led_trigger *charging_blink_full_solid_trig; char *charging_blink_full_solid_trig_name; + struct led_trigger *charging_orange_full_green_trig; + char *charging_orange_full_green_trig_name; #endif }; diff --git a/include/linux/preempt.h b/include/linux/preempt.h index 7233e9cf1bab..ce76f1a45722 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -481,4 +481,45 @@ DEFINE_LOCK_GUARD_0(preempt, preempt_disable(), preempt_enable()) DEFINE_LOCK_GUARD_0(preempt_notrace, preempt_disable_notrace(), preempt_enable_notrace()) DEFINE_LOCK_GUARD_0(migrate, migrate_disable(), migrate_enable()) +#ifdef CONFIG_PREEMPT_DYNAMIC + +extern bool preempt_model_none(void); +extern bool preempt_model_voluntary(void); +extern bool preempt_model_full(void); + +#else + +static inline bool preempt_model_none(void) +{ + return IS_ENABLED(CONFIG_PREEMPT_NONE); +} +static inline bool preempt_model_voluntary(void) +{ + return IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY); +} +static inline bool preempt_model_full(void) +{ + return IS_ENABLED(CONFIG_PREEMPT); +} + +#endif + +static inline bool preempt_model_rt(void) +{ + return IS_ENABLED(CONFIG_PREEMPT_RT); +} + +/* + * Does the preemption model allow non-cooperative preemption? + * + * For !CONFIG_PREEMPT_DYNAMIC kernels this is an exact match with + * CONFIG_PREEMPTION; for CONFIG_PREEMPT_DYNAMIC this doesn't work as the + * kernel is *built* with CONFIG_PREEMPTION=y but may run with e.g. the + * PREEMPT_NONE model. 
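The preempt_model_*() helpers above (and preempt_model_preemptible() just below), relocated here from sched.h later in this series, are typically consumed like this sketch; the foo_* loop is invented:

static void foo_drain_queue(void)
{
	while (foo_dequeue_and_process()) {
		/* Preemptible models reschedule on their own; cooperative
		 * models need an explicit resched point. */
		if (!preempt_model_preemptible())
			cond_resched();
	}
}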
+ */ +static inline bool preempt_model_preemptible(void) +{ + return preempt_model_full() || preempt_model_rt(); +} + #endif /* __LINUX_PREEMPT_H */ diff --git a/include/linux/printk.h b/include/linux/printk.h index 65c5184470f1..7239976698e4 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -60,6 +60,10 @@ static inline const char *printk_skip_headers(const char *buffer) #define CONSOLE_LOGLEVEL_DEFAULT CONFIG_CONSOLE_LOGLEVEL_DEFAULT #define CONSOLE_LOGLEVEL_QUIET CONFIG_CONSOLE_LOGLEVEL_QUIET +int match_devname_and_update_preferred_console(const char *match, + const char *name, + const short idx); + extern int console_printk[]; #define console_loglevel (console_printk[0]) diff --git a/include/linux/profile.h b/include/linux/profile.h index 04ae5ebcb637..2fb487f61d12 100644 --- a/include/linux/profile.h +++ b/include/linux/profile.h @@ -4,7 +4,6 @@ #include <linux/kernel.h> #include <linux/init.h> -#include <linux/cpumask.h> #include <linux/cache.h> #include <asm/errno.h> diff --git a/include/linux/pse-pd/pse.h b/include/linux/pse-pd/pse.h index 6eec24ffa866..591a53e082e6 100644 --- a/include/linux/pse-pd/pse.h +++ b/include/linux/pse-pd/pse.h @@ -9,6 +9,9 @@ #include <linux/list.h> #include <uapi/linux/ethtool.h> +/* Maximum current in uA according to IEEE 802.3-2022 Table 145-1 */ +#define MAX_PI_CURRENT 1920000 + struct phy_device; struct pse_controller_dev; @@ -36,12 +39,29 @@ struct pse_control_config { * functions. IEEE 802.3-2022 30.9.1.1.2 aPSEAdminState * @c33_pw_status: power detection status of the PSE. * IEEE 802.3-2022 30.9.1.1.5 aPSEPowerDetectionStatus: + * @c33_pw_class: detected class of a powered PD + * IEEE 802.3-2022 30.9.1.1.8 aPSEPowerClassification + * @c33_actual_pw: power currently delivered by the PSE in mW + * IEEE 802.3-2022 30.9.1.1.23 aPSEActualPower + * @c33_ext_state_info: extended state information of the PSE + * @c33_avail_pw_limit: available power limit of the PSE in mW + * IEEE 802.3-2022 145.2.5.4 pse_avail_pwr + * @c33_pw_limit_ranges: supported power limit configuration range. The driver + * is in charge of the memory allocation. + * @c33_pw_limit_nb_ranges: number of supported power limit configuration + * ranges */ struct pse_control_status { enum ethtool_podl_pse_admin_state podl_admin_state; enum ethtool_podl_pse_pw_d_status podl_pw_status; enum ethtool_c33_pse_admin_state c33_admin_state; enum ethtool_c33_pse_pw_d_status c33_pw_status; + u32 c33_pw_class; + u32 c33_actual_pw; + struct ethtool_c33_pse_ext_state_info c33_ext_state_info; + u32 c33_avail_pw_limit; + struct ethtool_c33_pse_pw_limit_range *c33_pw_limit_ranges; + u32 c33_pw_limit_nb_ranges; }; /** @@ -53,6 +73,14 @@ struct pse_control_status { * May also return negative errno. * @pi_enable: Configure the PSE PI as enabled. * @pi_disable: Configure the PSE PI as disabled. + * @pi_get_voltage: Return voltage similarly to get_voltage regulator + * callback. + * @pi_get_current_limit: Get the configured current limit similarly to + * get_current_limit regulator callback. + * @pi_set_current_limit: Configure the current limit similarly to + * set_current_limit regulator callback. + * Should not return an error when the current limit + * is set to MAX_PI_CURRENT.
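A provider-side sketch of the new @pi_set_current_limit callback; the foo_pse driver, its embedded pcdev member, and its register write are invented:

static int foo_pse_pi_set_current_limit(struct pse_controller_dev *pcdev,
					int id, int max_uA)
{
	struct foo_pse *priv = container_of(pcdev, struct foo_pse, pcdev);

	if (max_uA > MAX_PI_CURRENT)
		return -ERANGE;

	/* MAX_PI_CURRENT itself must be accepted, per the rule above. */
	return foo_pse_write_limit(priv, id, max_uA);
}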
*/ struct pse_controller_ops { int (*ethtool_get_status)(struct pse_controller_dev *pcdev, @@ -62,6 +90,11 @@ struct pse_controller_ops { int (*pi_is_enabled)(struct pse_controller_dev *pcdev, int id); int (*pi_enable)(struct pse_controller_dev *pcdev, int id); int (*pi_disable)(struct pse_controller_dev *pcdev, int id); + int (*pi_get_voltage)(struct pse_controller_dev *pcdev, int id); + int (*pi_get_current_limit)(struct pse_controller_dev *pcdev, + int id); + int (*pi_set_current_limit)(struct pse_controller_dev *pcdev, + int id, int max_uA); }; struct module; @@ -148,6 +181,11 @@ int pse_ethtool_get_status(struct pse_control *psec, int pse_ethtool_set_config(struct pse_control *psec, struct netlink_ext_ack *extack, const struct pse_control_config *config); +int pse_ethtool_set_pw_limit(struct pse_control *psec, + struct netlink_ext_ack *extack, + const unsigned int pw_limit); +int pse_ethtool_get_pw_limit(struct pse_control *psec, + struct netlink_ext_ack *extack); bool pse_has_podl(struct pse_control *psec); bool pse_has_c33(struct pse_control *psec); @@ -177,6 +215,19 @@ static inline int pse_ethtool_set_config(struct pse_control *psec, return -EOPNOTSUPP; } +static inline int pse_ethtool_set_pw_limit(struct pse_control *psec, + struct netlink_ext_ack *extack, + const unsigned int pw_limit) +{ + return -EOPNOTSUPP; +} + +static inline int pse_ethtool_get_pw_limit(struct pse_control *psec, + struct netlink_ext_ack *extack) +{ + return -EOPNOTSUPP; +} + static inline bool pse_has_podl(struct pse_control *psec) { return false; diff --git a/include/linux/psp-platform-access.h b/include/linux/psp-platform-access.h index c1dc87fc536b..1504fb012c05 100644 --- a/include/linux/psp-platform-access.h +++ b/include/linux/psp-platform-access.h @@ -6,8 +6,9 @@ #include <linux/psp.h> enum psp_platform_access_msg { - PSP_CMD_NONE = 0x0, - PSP_I2C_REQ_BUS_CMD = 0x64, + PSP_CMD_NONE = 0x0, + PSP_CMD_HSTI_QUERY = 0x14, + PSP_I2C_REQ_BUS_CMD = 0x64, PSP_DYNAMIC_BOOST_GET_NONCE, PSP_DYNAMIC_BOOST_SET_UID, PSP_DYNAMIC_BOOST_GET_PARAMETER, diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index 3705c2044fc0..903ddfea8585 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -658,6 +658,7 @@ struct sev_data_snp_launch_update { * @id_auth_paddr: system physical address of ID block authentication structure * @id_block_en: indicates whether ID block is present * @auth_key_en: indicates whether author key is present in authentication structure + * @vcek_disabled: indicates whether use of VCEK is allowed for attestation reports * @rsvd: reserved * @host_data: host-supplied data for guest, not interpreted by firmware */ @@ -667,7 +668,8 @@ struct sev_data_snp_launch_finish { u64 id_auth_paddr; u8 id_block_en:1; u8 auth_key_en:1; - u64 rsvd:62; + u8 vcek_disabled:1; + u64 rsvd:61; u8 host_data[32]; } __packed; diff --git a/include/linux/pwm.h b/include/linux/pwm.h index 60b92c2c75ef..f8c2dc12dbd3 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -4,9 +4,12 @@ #include <linux/device.h> #include <linux/err.h> +#include <linux/module.h> #include <linux/mutex.h> #include <linux/of.h> +MODULE_IMPORT_NS(PWM); + struct pwm_chip; /** @@ -249,9 +252,7 @@ struct pwm_capture { * @free: optional hook for freeing a PWM * @capture: capture and report PWM signal * @apply: atomically apply a new PWM config - * @get_state: get the current PWM state. This function is only - * called once per PWM device when the PWM chip is - * registered. + * @get_state: get the current PWM state. 
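With the "only called once" wording dropped from the @get_state documentation above, the core no longer promises a single call at registration time, so implementations should read live hardware state on every invocation; a skeletal sketch with an invented foo_* driver:

static const struct pwm_ops foo_pwm_ops = {
	.apply = foo_pwm_apply,
	/* Re-reads the hardware on each call rather than returning a
	 * snapshot taken when the chip was registered. */
	.get_state = foo_pwm_get_state,
};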
*/ struct pwm_ops { int (*request)(struct pwm_chip *chip, struct pwm_device *pwm); @@ -407,10 +408,6 @@ void pwmchip_remove(struct pwm_chip *chip); int __devm_pwmchip_add(struct device *dev, struct pwm_chip *chip, struct module *owner); #define devm_pwmchip_add(dev, chip) __devm_pwmchip_add(dev, chip, THIS_MODULE) -struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip, - unsigned int index, - const char *label); - struct pwm_device *of_pwm_xlate_with_flags(struct pwm_chip *chip, const struct of_phandle_args *args); struct pwm_device *of_pwm_single_xlate(struct pwm_chip *chip, @@ -505,14 +502,6 @@ static inline int devm_pwmchip_add(struct device *dev, struct pwm_chip *chip) return -EINVAL; } -static inline struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip, - unsigned int index, - const char *label) -{ - might_sleep(); - return ERR_PTR(-ENODEV); -} - static inline struct pwm_device *pwm_get(struct device *dev, const char *consumer) { @@ -574,13 +563,6 @@ static inline void pwm_apply_args(struct pwm_device *pwm) pwm_apply_might_sleep(pwm, &state); } -/* only for backwards-compatibility, new code should not use this */ -static inline int pwm_apply_state(struct pwm_device *pwm, - const struct pwm_state *state) -{ - return pwm_apply_might_sleep(pwm, state); -} - struct pwm_lookup { struct list_head list; const char *provider; diff --git a/include/linux/pwrseq/consumer.h b/include/linux/pwrseq/consumer.h new file mode 100644 index 000000000000..7d583b4f266e --- /dev/null +++ b/include/linux/pwrseq/consumer.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2024 Linaro Ltd. + */ + +#ifndef __POWER_SEQUENCING_CONSUMER_H__ +#define __POWER_SEQUENCING_CONSUMER_H__ + +#include <linux/err.h> + +struct device; +struct pwrseq_desc; + +#if IS_ENABLED(CONFIG_POWER_SEQUENCING) + +struct pwrseq_desc * __must_check +pwrseq_get(struct device *dev, const char *target); +void pwrseq_put(struct pwrseq_desc *desc); + +struct pwrseq_desc * __must_check +devm_pwrseq_get(struct device *dev, const char *target); + +int pwrseq_power_on(struct pwrseq_desc *desc); +int pwrseq_power_off(struct pwrseq_desc *desc); + +#else /* CONFIG_POWER_SEQUENCING */ + +static inline struct pwrseq_desc * __must_check +pwrseq_get(struct device *dev, const char *target) +{ + return ERR_PTR(-ENOSYS); +} + +static inline void pwrseq_put(struct pwrseq_desc *desc) +{ +} + +static inline struct pwrseq_desc * __must_check +devm_pwrseq_get(struct device *dev, const char *target) +{ + return ERR_PTR(-ENOSYS); +} + +static inline int pwrseq_power_on(struct pwrseq_desc *desc) +{ + return -ENOSYS; +} + +static inline int pwrseq_power_off(struct pwrseq_desc *desc) +{ + return -ENOSYS; +} + +#endif /* CONFIG_POWER_SEQUENCING */ + +#endif /* __POWER_SEQUENCING_CONSUMER_H__ */ diff --git a/include/linux/pwrseq/provider.h b/include/linux/pwrseq/provider.h new file mode 100644 index 000000000000..cbc3607cbfcf --- /dev/null +++ b/include/linux/pwrseq/provider.h @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2024 Linaro Ltd. + */ + +#ifndef __POWER_SEQUENCING_PROVIDER_H__ +#define __POWER_SEQUENCING_PROVIDER_H__ + +struct device; +struct module; +struct pwrseq_device; + +typedef int (*pwrseq_power_state_func)(struct pwrseq_device *); +typedef int (*pwrseq_match_func)(struct pwrseq_device *, struct device *); + +/** + * struct pwrseq_unit_data - Configuration of a single power sequencing + * unit. + * @name: Name of the unit. 
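A consumer-side sketch of the pwrseq API declared above, in a hypothetical driver probe; the "wlan" target name is only an example:

static int foo_wlan_probe(struct platform_device *pdev)
{
	struct pwrseq_desc *pwrseq;
	int ret;

	pwrseq = devm_pwrseq_get(&pdev->dev, "wlan");
	if (IS_ERR(pwrseq))
		return PTR_ERR(pwrseq);

	/* Runs the provider's power-on sequence up to the "wlan" target. */
	ret = pwrseq_power_on(pwrseq);
	if (ret)
		return ret;

	/* ... bring up the device ... */
	return 0;
}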
+ * @deps: Units that must be enabled before this one and disabled after it + * in the order they come in this array. Must be NULL-terminated. + * @enable: Callback running the part of the power-on sequence provided by + * this unit. + * @disable: Callback running the part of the power-off sequence provided + * by this unit. + */ +struct pwrseq_unit_data { + const char *name; + const struct pwrseq_unit_data **deps; + pwrseq_power_state_func enable; + pwrseq_power_state_func disable; +}; + +/** + * struct pwrseq_target_data - Configuration of a power sequencing target. + * @name: Name of the target. + * @unit: Final unit that this target must reach in order to be considered + * enabled. + * @post_enable: Callback run after the target unit has been enabled, *after* + * the state lock has been released. It's useful for implementing + * boot-up delays without blocking other users from powering up + * using the same power sequencer. + */ +struct pwrseq_target_data { + const char *name; + const struct pwrseq_unit_data *unit; + pwrseq_power_state_func post_enable; +}; + +/** + * struct pwrseq_config - Configuration used for registering a new provider. + * @parent: Parent device for the sequencer. Must be set. + * @owner: Module providing this device. + * @drvdata: Private driver data. + * @match: Provider callback used to match the consumer device to the sequencer. + * @targets: Array of targets for this power sequencer. Must be NULL-terminated. + */ +struct pwrseq_config { + struct device *parent; + struct module *owner; + void *drvdata; + pwrseq_match_func match; + const struct pwrseq_target_data **targets; +}; + +struct pwrseq_device * +pwrseq_device_register(const struct pwrseq_config *config); +void pwrseq_device_unregister(struct pwrseq_device *pwrseq); +struct pwrseq_device * +devm_pwrseq_device_register(struct device *dev, + const struct pwrseq_config *config); + +void *pwrseq_device_get_drvdata(struct pwrseq_device *pwrseq); + +#endif /* __POWER_SEQUENCING_PROVIDER_H__ */ diff --git a/include/linux/randomize_kstack.h b/include/linux/randomize_kstack.h index 6d92b68efbf6..1d982dbdd0d0 100644 --- a/include/linux/randomize_kstack.h +++ b/include/linux/randomize_kstack.h @@ -32,13 +32,19 @@ DECLARE_PER_CPU(u32, kstack_offset); #endif /* - * Use, at most, 10 bits of entropy. We explicitly cap this to keep the - * "VLA" from being unbounded (see above). 10 bits leaves enough room for - * per-arch offset masks to reduce entropy (by removing higher bits, since - * high entropy may overly constrain usable stack space), and for - * compiler/arch-specific stack alignment to remove the lower bits. + * Use, at most, 6 bits of entropy (on 64-bit; 8 on 32-bit). This cap is + * to keep the "VLA" from being unbounded (see above). Additionally clear + * the bottom 4 bits (on 64-bit systems, 2 for 32-bit), since stack + * alignment will always be at least word size. This makes the compiler + * code gen better when it is applying the actual per-arch alignment to + * the final offset. The resulting randomness is reasonable without overly + * constraining usable stack space. 
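Working out the masks that follow: on 64-bit, 0b1111110000 (0x3F0) keeps bits 4-9, giving 2^6 = 64 possible offsets at 16-byte granularity (0 to 1008 bytes); on 32-bit, 0b1111111100 (0x3FC) keeps bits 2-9, giving 2^8 = 256 possible offsets at 4-byte granularity (0 to 1020 bytes).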
*/ -#define KSTACK_OFFSET_MAX(x) ((x) & 0x3FF) +#ifdef CONFIG_64BIT +#define KSTACK_OFFSET_MAX(x) ((x) & 0b1111110000) +#else +#define KSTACK_OFFSET_MAX(x) ((x) & 0b1111111100) +#endif /** * add_random_kstack_offset - Increase stack utilization by previously diff --git a/include/linux/rcu_segcblist.h b/include/linux/rcu_segcblist.h index 659d13a7ddaa..ba95c06675e1 100644 --- a/include/linux/rcu_segcblist.h +++ b/include/linux/rcu_segcblist.h @@ -80,36 +80,35 @@ struct rcu_cblist { * | SEGCBLIST_RCU_CORE | SEGCBLIST_LOCKING | SEGCBLIST_OFFLOADED | * | | * | Callbacks processed by rcu_core() from softirqs or local | - * | rcuc kthread, while holding nocb_lock. Waking up CB and GP kthreads, | - * | allowing nocb_timer to be armed. | + * | rcuc kthread, while holding nocb_lock. Waking up CB and GP kthreads. | * ---------------------------------------------------------------------------- * | * v - * ----------------------------------- - * | | - * v v - * --------------------------------------- ----------------------------------| - * | SEGCBLIST_RCU_CORE | | | SEGCBLIST_RCU_CORE | | - * | SEGCBLIST_LOCKING | | | SEGCBLIST_LOCKING | | - * | SEGCBLIST_OFFLOADED | | | SEGCBLIST_OFFLOADED | | - * | SEGCBLIST_KTHREAD_CB | | SEGCBLIST_KTHREAD_GP | - * | | | | - * | | | | - * | CB kthread woke up and | | GP kthread woke up and | - * | acknowledged SEGCBLIST_OFFLOADED. | | acknowledged SEGCBLIST_OFFLOADED| - * | Processes callbacks concurrently | | | - * | with rcu_core(), holding | | | - * | nocb_lock. | | | - * --------------------------------------- ----------------------------------- - * | | - * ----------------------------------- + * ---------------------------------------------------------------------------- + * | SEGCBLIST_RCU_CORE | SEGCBLIST_LOCKING | SEGCBLIST_OFFLOADED | + * | + unparked CB kthread | + * | | + * | CB kthread got unparked and processes callbacks concurrently with | + * | rcu_core(), holding nocb_lock. | + * --------------------------------------------------------------------------- + * | + * v + * ---------------------------------------------------------------------------| + * | SEGCBLIST_RCU_CORE | | + * | SEGCBLIST_LOCKING | | + * | SEGCBLIST_OFFLOADED | | + * | SEGCBLIST_KTHREAD_GP | + * | + unparked CB kthread | + * | | + * | GP kthread woke up and acknowledged nocb_lock. | + * ---------------------------------------- ----------------------------------- * | * v * |--------------------------------------------------------------------------| - * | SEGCBLIST_LOCKING | | - * | SEGCBLIST_OFFLOADED | | + * | SEGCBLIST_LOCKING | | + * | SEGCBLIST_OFFLOADED | | * | SEGCBLIST_KTHREAD_GP | | - * | SEGCBLIST_KTHREAD_CB | + * | + unparked CB kthread | * | | * | Kthreads handle callbacks holding nocb_lock, local rcu_core() stops | * | handling callbacks. Enable bypass queueing. | @@ -125,8 +124,8 @@ struct rcu_cblist { * |--------------------------------------------------------------------------| * | SEGCBLIST_LOCKING | | * | SEGCBLIST_OFFLOADED | | - * | SEGCBLIST_KTHREAD_CB | | * | SEGCBLIST_KTHREAD_GP | + * | + unparked CB kthread | * | | * | CB/GP kthreads handle callbacks holding nocb_lock, local rcu_core() | * | ignores callbacks. Bypass enqueue is enabled. 
| @@ -137,11 +136,11 @@ struct rcu_cblist { * | SEGCBLIST_RCU_CORE | | * | SEGCBLIST_LOCKING | | * | SEGCBLIST_OFFLOADED | | - * | SEGCBLIST_KTHREAD_CB | | * | SEGCBLIST_KTHREAD_GP | + * | + unparked CB kthread | * | | * | CB/GP kthreads handle callbacks holding nocb_lock, local rcu_core() | - * | handles callbacks concurrently. Bypass enqueue is enabled. | + * | handles callbacks concurrently. Bypass enqueue is disabled. | * | Invoke RCU core so we make sure not to preempt it in the middle with | * | leaving some urgent work unattended within a jiffy. | * ---------------------------------------------------------------------------- @@ -150,42 +149,31 @@ struct rcu_cblist { * |--------------------------------------------------------------------------| * | SEGCBLIST_RCU_CORE | | * | SEGCBLIST_LOCKING | | - * | SEGCBLIST_KTHREAD_CB | | * | SEGCBLIST_KTHREAD_GP | + * | + unparked CB kthread | * | | * | CB/GP kthreads and local rcu_core() handle callbacks concurrently | - * | holding nocb_lock. Wake up CB and GP kthreads if necessary. Disable | - * | bypass enqueue. | + * | holding nocb_lock. Wake up GP kthread if necessary. | * ---------------------------------------------------------------------------- * | * v - * ----------------------------------- - * | | - * v v - * ---------------------------------------------------------------------------| - * | | | - * | SEGCBLIST_RCU_CORE | | SEGCBLIST_RCU_CORE | | - * | SEGCBLIST_LOCKING | | SEGCBLIST_LOCKING | | - * | SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP | - * | | | - * | GP kthread woke up and | CB kthread woke up and | - * | acknowledged the fact that | acknowledged the fact that | - * | SEGCBLIST_OFFLOADED got cleared. | SEGCBLIST_OFFLOADED got cleared. | - * | | The CB kthread goes to sleep | - * | The callbacks from the target CPU | until it ever gets re-offloaded. | - * | will be ignored from the GP kthread | | - * | loop. | | + * |--------------------------------------------------------------------------| + * | SEGCBLIST_RCU_CORE | | + * | SEGCBLIST_LOCKING | | + * | + unparked CB kthread | + * | | + * | GP kthread woke up and acknowledged the fact that SEGCBLIST_OFFLOADED | + * | got cleared. The callbacks from the target CPU will be ignored from the| + * | GP kthread loop. | * ---------------------------------------------------------------------------- - * | | - * ----------------------------------- * | * v * ---------------------------------------------------------------------------- * | SEGCBLIST_RCU_CORE | SEGCBLIST_LOCKING | + * | + parked CB kthread | * | | - * | Callbacks processed by rcu_core() from softirqs or local | - * | rcuc kthread, while holding nocb_lock. Forbid nocb_timer to be armed. | - * | Flush pending nocb_timer. Flush nocb bypass callbacks. | + * | CB kthread is parked. Callbacks processed by rcu_core() from softirqs or | + * | local rcuc kthread, while holding nocb_lock. 
| * ---------------------------------------------------------------------------- * | * v diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index dfd2399f2cde..13f6f00aecf9 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -29,7 +29,6 @@ #include <linux/lockdep.h> #include <linux/cleanup.h> #include <asm/processor.h> -#include <linux/cpumask.h> #include <linux/context_tracking_irq.h> #define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b)) @@ -209,7 +208,6 @@ void synchronize_rcu_tasks_rude(void); #define rcu_note_voluntary_context_switch(t) rcu_tasks_qs(t, false) void exit_tasks_rcu_start(void); -void exit_tasks_rcu_stop(void); void exit_tasks_rcu_finish(void); #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */ #define rcu_tasks_classic_qs(t, preempt) do { } while (0) @@ -218,7 +216,6 @@ void exit_tasks_rcu_finish(void); #define call_rcu_tasks call_rcu #define synchronize_rcu_tasks synchronize_rcu static inline void exit_tasks_rcu_start(void) { } -static inline void exit_tasks_rcu_stop(void) { } static inline void exit_tasks_rcu_finish(void) { } #endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */ @@ -421,11 +418,71 @@ static inline void rcu_preempt_sleep_check(void) { } "Illegal context switch in RCU-sched read-side critical section"); \ } while (0) +// See RCU_LOCKDEP_WARN() for an explanation of the double call to +// debug_lockdep_rcu_enabled(). +static inline bool lockdep_assert_rcu_helper(bool c) +{ + return debug_lockdep_rcu_enabled() && + (c || !rcu_is_watching() || !rcu_lockdep_current_cpu_online()) && + debug_lockdep_rcu_enabled(); +} + +/** + * lockdep_assert_in_rcu_read_lock - WARN if not protected by rcu_read_lock() + * + * Splats if lockdep is enabled and there is no rcu_read_lock() in effect. + */ +#define lockdep_assert_in_rcu_read_lock() \ + WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_lock_map))) + +/** + * lockdep_assert_in_rcu_read_lock_bh - WARN if not protected by rcu_read_lock_bh() + * + * Splats if lockdep is enabled and there is no rcu_read_lock_bh() in effect. + * Note that local_bh_disable() and friends do not suffice here, instead an + * actual rcu_read_lock_bh() is required. + */ +#define lockdep_assert_in_rcu_read_lock_bh() \ + WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_bh_lock_map))) + +/** + * lockdep_assert_in_rcu_read_lock_sched - WARN if not protected by rcu_read_lock_sched() + * + * Splats if lockdep is enabled and there is no rcu_read_lock_sched() + * in effect. Note that preempt_disable() and friends do not suffice here, + * instead an actual rcu_read_lock_sched() is required. + */ +#define lockdep_assert_in_rcu_read_lock_sched() \ + WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_sched_lock_map))) + +/** + * lockdep_assert_in_rcu_reader - WARN if not within some type of RCU reader + * + * Splats if lockdep is enabled and there is no RCU reader of any + * type in effect. Note that regions of code protected by things like + * preempt_disable, local_bh_disable(), and local_irq_disable() all qualify + * as RCU readers. + * + * Note that this will never trigger in PREEMPT_NONE or PREEMPT_VOLUNTARY + * kernels that are not also built with PREEMPT_COUNT. But if you have + * lockdep enabled, you might as well also enable PREEMPT_COUNT. 
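A usage sketch for the broadest of these asserts, lockdep_assert_in_rcu_reader(), defined just below; the lookup function and FOO_SLOTS are invented:

static struct foo *foo_lookup(struct foo_table *t, u32 key)
{
	/* Document and check the calling convention: any reader type
	 * suffices here, whether rcu_read_lock(), a BH-disabled region,
	 * or a preempt-disabled region. */
	lockdep_assert_in_rcu_reader();

	return rcu_dereference_raw(t->slots[key % FOO_SLOTS]);
}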
+ */ +#define lockdep_assert_in_rcu_reader() \ + WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_lock_map) && \ + !lock_is_held(&rcu_bh_lock_map) && \ + !lock_is_held(&rcu_sched_lock_map) && \ + preemptible())) + #else /* #ifdef CONFIG_PROVE_RCU */ #define RCU_LOCKDEP_WARN(c, s) do { } while (0 && (c)) #define rcu_sleep_check() do { } while (0) +#define lockdep_assert_in_rcu_read_lock() do { } while (0) +#define lockdep_assert_in_rcu_read_lock_bh() do { } while (0) +#define lockdep_assert_in_rcu_read_lock_sched() do { } while (0) +#define lockdep_assert_in_rcu_reader() do { } while (0) + #endif /* #else #ifdef CONFIG_PROVE_RCU */ /* diff --git a/include/linux/regmap.h b/include/linux/regmap.h index a6bc2980a98b..122e38161acb 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -1237,6 +1237,8 @@ int regmap_noinc_read(struct regmap *map, unsigned int reg, void *val, size_t val_len); int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val, size_t val_count); +int regmap_multi_reg_read(struct regmap *map, unsigned int *reg, void *val, + size_t val_count); int regmap_update_bits_base(struct regmap *map, unsigned int reg, unsigned int mask, unsigned int val, bool *change, bool async, bool force); @@ -1607,7 +1609,7 @@ struct regmap_irq_chip { unsigned int main_status; unsigned int num_main_status_bits; - struct regmap_irq_sub_irq_map *sub_reg_offsets; + const struct regmap_irq_sub_irq_map *sub_reg_offsets; int num_main_regs; unsigned int status_base; diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index 59d0b9a79e6e..d986ec13092e 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -128,11 +128,11 @@ struct regulator; * * @supply: The name of the supply. Initialised by the user before * using the bulk regulator APIs. + * @consumer: The regulator consumer for the supply. This will be managed + * by the bulk API. * @init_load_uA: After getting the regulator, regulator_set_load() will be * called with this load. Initialised by the user before * using the bulk regulator APIs. - * @consumer: The regulator consumer for the supply. This will be managed - * by the bulk API. * * The regulator APIs provide a series of regulator_bulk_() API calls as * a convenience to consumers which require multiple supplies. 
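A quick sketch of the newly added regmap_multi_reg_read(), which reads a scattered set of registers in one call; the register addresses are invented:

unsigned int regs[] = { 0x01, 0x10, 0x23 };	/* need not be contiguous */
unsigned int vals[ARRAY_SIZE(regs)];
int ret;

ret = regmap_multi_reg_read(map, regs, vals, ARRAY_SIZE(regs));
if (ret)
	return ret;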
This @@ -140,8 +140,8 @@ struct regulator; */ struct regulator_bulk_data { const char *supply; - int init_load_uA; struct regulator *consumer; + int init_load_uA; /* private: Internal use */ int ret; @@ -250,6 +250,7 @@ int regulator_get_hardware_vsel_register(struct regulator *regulator, unsigned *vsel_mask); int regulator_list_hardware_vsel(struct regulator *regulator, unsigned selector); +int regulator_hardware_enable(struct regulator *regulator, bool enable); /* regulator notifier block */ int regulator_register_notifier(struct regulator *regulator, @@ -571,6 +572,12 @@ static inline int regulator_list_hardware_vsel(struct regulator *regulator, return -EOPNOTSUPP; } +static inline int regulator_hardware_enable(struct regulator *regulator, + bool enable) +{ + return -EOPNOTSUPP; +} + static inline int regulator_register_notifier(struct regulator *regulator, struct notifier_block *nb) { diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index a365f67131ec..b0875b99e811 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -2,6 +2,7 @@ #ifndef _RESCTRL_H #define _RESCTRL_H +#include <linux/cacheinfo.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/pid.h> @@ -58,11 +59,45 @@ struct resctrl_staged_config { bool have_new_ctrl; }; +enum resctrl_domain_type { + RESCTRL_CTRL_DOMAIN, + RESCTRL_MON_DOMAIN, +}; + /** - * struct rdt_domain - group of CPUs sharing a resctrl resource + * struct rdt_domain_hdr - common header for different domain types * @list: all instances of this resource * @id: unique id for this instance + * @type: type of this instance * @cpu_mask: which CPUs share this resource + */ +struct rdt_domain_hdr { + struct list_head list; + int id; + enum resctrl_domain_type type; + struct cpumask cpu_mask; +}; + +/** + * struct rdt_ctrl_domain - group of CPUs sharing a resctrl control resource + * @hdr: common header for different domain types + * @plr: pseudo-locked region (if any) associated with domain + * @staged_config: parsed configuration to be applied + * @mbps_val: When mba_sc is enabled, this holds the array of user + * specified control values for mba_sc in MBps, indexed + * by closid + */ +struct rdt_ctrl_domain { + struct rdt_domain_hdr hdr; + struct pseudo_lock_region *plr; + struct resctrl_staged_config staged_config[CDP_NUM_TYPES]; + u32 *mbps_val; +}; + +/** + * struct rdt_mon_domain - group of CPUs sharing a resctrl monitor resource + * @hdr: common header for different domain types + * @ci: cache info for this domain * @rmid_busy_llc: bitmap of which limbo RMIDs are above threshold * @mbm_total: saved state for MBM total bandwidth * @mbm_local: saved state for MBM local bandwidth @@ -70,16 +105,10 @@ struct resctrl_staged_config { * @cqm_limbo: worker to periodically read CQM h/w counters * @mbm_work_cpu: worker CPU for MBM h/w counters * @cqm_work_cpu: worker CPU for CQM h/w counters - * @plr: pseudo-locked region (if any) associated with domain - * @staged_config: parsed configuration to be applied - * @mbps_val: When mba_sc is enabled, this holds the array of user - * specified control values for mba_sc in MBps, indexed - * by closid */ -struct rdt_domain { - struct list_head list; - int id; - struct cpumask cpu_mask; +struct rdt_mon_domain { + struct rdt_domain_hdr hdr; + struct cacheinfo *ci; unsigned long *rmid_busy_llc; struct mbm_state *mbm_total; struct mbm_state *mbm_local; @@ -87,9 +116,6 @@ struct rdt_domain { struct delayed_work cqm_limbo; int mbm_work_cpu; int cqm_work_cpu; - struct pseudo_lock_region 
*plr; - struct resctrl_staged_config staged_config[CDP_NUM_TYPES]; - u32 *mbps_val; }; /** @@ -150,16 +176,24 @@ struct resctrl_membw { struct rdt_parse_data; struct resctrl_schema; +enum resctrl_scope { + RESCTRL_L2_CACHE = 2, + RESCTRL_L3_CACHE = 3, + RESCTRL_L3_NODE, +}; + /** * struct rdt_resource - attributes of a resctrl resource * @rid: The index of the resource * @alloc_capable: Is allocation available on this machine * @mon_capable: Is monitor feature available on this machine * @num_rmid: Number of RMIDs available - * @cache_level: Which cache level defines scope of this resource + * @ctrl_scope: Scope of this resource for control functions + * @mon_scope: Scope of this resource for monitor functions * @cache: Cache allocation related data * @membw: If the component has bandwidth controls, their properties. - * @domains: RCU list of all domains for this resource + * @ctrl_domains: RCU list of all control domains for this resource + * @mon_domains: RCU list of all monitor domains for this resource * @name: Name to use in "schemata" file. * @data_width: Character width of data when displaying * @default_ctrl: Specifies default cache cbm or memory B/W percent. @@ -174,17 +208,19 @@ struct rdt_resource { bool alloc_capable; bool mon_capable; int num_rmid; - int cache_level; + enum resctrl_scope ctrl_scope; + enum resctrl_scope mon_scope; struct resctrl_cache cache; struct resctrl_membw membw; - struct list_head domains; + struct list_head ctrl_domains; + struct list_head mon_domains; char *name; int data_width; u32 default_ctrl; const char *format_str; int (*parse_ctrlval)(struct rdt_parse_data *data, struct resctrl_schema *s, - struct rdt_domain *d); + struct rdt_ctrl_domain *d); struct list_head evt_list; unsigned long fflags; bool cdp_capable; @@ -218,13 +254,15 @@ int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid); * Update the ctrl_val and apply this config right now. * Must be called on one of the domain's CPUs. */ -int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_domain *d, +int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_ctrl_domain *d, u32 closid, enum resctrl_conf_type t, u32 cfg_val); -u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d, +u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_ctrl_domain *d, u32 closid, enum resctrl_conf_type type); -int resctrl_online_domain(struct rdt_resource *r, struct rdt_domain *d); -void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d); +int resctrl_online_ctrl_domain(struct rdt_resource *r, struct rdt_ctrl_domain *d); +int resctrl_online_mon_domain(struct rdt_resource *r, struct rdt_mon_domain *d); +void resctrl_offline_ctrl_domain(struct rdt_resource *r, struct rdt_ctrl_domain *d); +void resctrl_offline_mon_domain(struct rdt_resource *r, struct rdt_mon_domain *d); void resctrl_online_cpu(unsigned int cpu); void resctrl_offline_cpu(unsigned int cpu); @@ -253,7 +291,7 @@ void resctrl_offline_cpu(unsigned int cpu); * Return: * 0 on success, or -EIO, -EINVAL etc on error. */ -int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d, +int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_mon_domain *d, u32 closid, u32 rmid, enum resctrl_event_id eventid, u64 *val, void *arch_mon_ctx); @@ -286,7 +324,7 @@ static inline void resctrl_arch_rmid_read_context_check(void) * * This can be called from any CPU. 
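With the rdt_domain split above, domain walks go through the common header's list node; a sketch, where r is a struct rdt_resource and locking is elided:

struct rdt_ctrl_domain *cd;
struct rdt_mon_domain *md;

list_for_each_entry(cd, &r->ctrl_domains, hdr.list)
	pr_debug("ctrl domain %d\n", cd->hdr.id);

list_for_each_entry(md, &r->mon_domains, hdr.list)
	pr_debug("mon domain %d (L%u cache scope)\n",
		 md->hdr.id, md->ci->level);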
*/ -void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_domain *d, +void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_mon_domain *d, u32 closid, u32 rmid, enum resctrl_event_id eventid); @@ -299,7 +337,7 @@ void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_domain *d, * * This can be called from any CPU. */ -void resctrl_arch_reset_rmid_all(struct rdt_resource *r, struct rdt_domain *d); +void resctrl_arch_reset_rmid_all(struct rdt_resource *r, struct rdt_mon_domain *d); extern unsigned int resctrl_rmid_realloc_threshold; extern unsigned int resctrl_rmid_realloc_limit; diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 7229b9baf20d..0978c64f49d8 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -200,6 +200,9 @@ static inline void __folio_rmap_sanity_checks(struct folio *folio, /* hugetlb folios are handled separately. */ VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio); + /* When (un)mapping zeropages, we should never touch ref+mapcount. */ + VM_WARN_ON_FOLIO(is_zero_folio(folio), folio); + /* * TODO: we get driver-allocated folios that have nothing to do with * the rmap using vm_insert_page(); therefore, we cannot assume that @@ -241,7 +244,7 @@ void folio_add_anon_rmap_ptes(struct folio *, struct page *, int nr_pages, void folio_add_anon_rmap_pmd(struct folio *, struct page *, struct vm_area_struct *, unsigned long address, rmap_t flags); void folio_add_new_anon_rmap(struct folio *, struct vm_area_struct *, - unsigned long address); + unsigned long address, rmap_t flags); void folio_add_file_rmap_ptes(struct folio *, struct page *, int nr_pages, struct vm_area_struct *); #define folio_add_file_rmap_pte(folio, page, vma) \ @@ -681,16 +684,6 @@ struct page_vma_mapped_walk { unsigned int flags; }; -#define DEFINE_PAGE_VMA_WALK(name, _page, _vma, _address, _flags) \ - struct page_vma_mapped_walk name = { \ - .pfn = page_to_pfn(_page), \ - .nr_pages = compound_nr(_page), \ - .pgoff = page_to_pgoff(_page), \ - .vma = _vma, \ - .address = _address, \ - .flags = _flags, \ - } - #define DEFINE_FOLIO_VMA_WALK(name, _folio, _vma, _address, _flags) \ struct page_vma_mapped_walk name = { \ .pfn = folio_pfn(_folio), \ @@ -710,6 +703,30 @@ static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw) spin_unlock(pvmw->ptl); } +/** + * page_vma_mapped_walk_restart - Restart the page table walk. + * @pvmw: Pointer to struct page_vma_mapped_walk. + * + * It restarts the page table walk when changes occur in the page + * table, such as splitting a PMD. Ensures that the PTL held during + * the previous walk is released and resets the state to allow for + * a new walk starting at the current address stored in pvmw->address. 
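A usage sketch of the restart helper documented above; the decision to split mid-walk is illustrative:

while (page_vma_mapped_walk(&pvmw)) {
	if (!pvmw.pte) {
		/* Mapped by a PMD; suppose we choose to split it. */
		split_huge_pmd(pvmw.vma, pvmw.pmd, pvmw.address);
		page_vma_mapped_walk_restart(&pvmw);
		continue;
	}
	/* ... operate on the PTE at pvmw.address ... */
}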
+ */ +static inline void +page_vma_mapped_walk_restart(struct page_vma_mapped_walk *pvmw) +{ + WARN_ON_ONCE(!pvmw->pmd && !pvmw->pte); + + if (likely(pvmw->ptl)) + spin_unlock(pvmw->ptl); + else + WARN_ON_ONCE(1); + + pvmw->ptl = NULL; + pvmw->pmd = NULL; + pvmw->pte = NULL; +} + bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw); /* @@ -730,8 +747,6 @@ int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff, void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked); -unsigned long page_mapped_in_vma(struct page *page, struct vm_area_struct *vma); - /* * rmap_walk_control: To control rmap traversing for specific needs * @@ -787,8 +802,4 @@ static inline int folio_mkclean(struct folio *folio) } #endif /* CONFIG_MMU */ -static inline int page_mkclean(struct page *page) -{ - return folio_mkclean(page_folio(page)); -} #endif /* _LINUX_RMAP_H */ diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index 77df3d7b18a6..e61d164622db 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h @@ -332,7 +332,7 @@ static inline void sg_dma_unmark_bus_address(struct scatterlist *sg) * Description: * Returns true if the scatterlist was marked for SWIOTLB bouncing. Not all * elements may have been bounced, so the caller would have to check - * individual SG entries with is_swiotlb_buffer(). + * individual SG entries with swiotlb_find_pool(). */ static inline bool sg_dma_is_swiotlb(struct scatterlist *sg) { diff --git a/include/linux/sched.h b/include/linux/sched.h index 61591ac6eab6..f8d150343d42 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -13,7 +13,7 @@ #include <asm/processor.h> #include <linux/thread_info.h> #include <linux/preempt.h> -#include <linux/cpumask.h> +#include <linux/cpumask_types.h> #include <linux/cache.h> #include <linux/irqflags_types.h> @@ -36,6 +36,7 @@ #include <linux/signal_types.h> #include <linux/syscall_user_dispatch_types.h> #include <linux/mm_types_task.h> +#include <linux/netdevice_xmit.h> #include <linux/task_io_accounting.h> #include <linux/posix-timers_types.h> #include <linux/restart_block.h> @@ -53,6 +54,7 @@ struct bio_list; struct blk_plug; struct bpf_local_storage; struct bpf_run_ctx; +struct bpf_net_context; struct capture_control; struct cfs_rq; struct fs_struct; @@ -734,6 +736,12 @@ enum perf_event_task_context { perf_nr_task_contexts, }; +/* + * Number of contexts where an event can trigger: + * task, softirq, hardirq, nmi. + */ +#define PERF_NR_CONTEXTS 4 + struct wake_q_node { struct wake_q_node *next; }; @@ -934,7 +942,7 @@ struct task_struct { #ifndef TIF_RESTORE_SIGMASK unsigned restore_sigmask:1; #endif -#ifdef CONFIG_MEMCG +#ifdef CONFIG_MEMCG_V1 unsigned in_user_fault:1; #endif #ifdef CONFIG_LRU_GEN @@ -975,7 +983,9 @@ struct task_struct { /* delay due to memory thrashing */ unsigned in_thrashing:1; #endif - +#ifdef CONFIG_PREEMPT_RT + struct netdev_xmit net_xmit; +#endif unsigned long atomic_flags; /* Flags requiring atomic access. 
*/ struct restart_block restart_block; @@ -1256,6 +1266,7 @@ struct task_struct { unsigned int futex_state; #endif #ifdef CONFIG_PERF_EVENTS + u8 perf_recursion[PERF_NR_CONTEXTS]; struct perf_event_context *perf_event_ctxp; struct mutex perf_event_mutex; struct list_head perf_event_list; @@ -1402,7 +1413,7 @@ struct task_struct { int curr_ret_depth; /* Stack of return addresses for return function tracing: */ - struct ftrace_ret_stack *ret_stack; + unsigned long *ret_stack; /* Timestamp for last schedule: */ unsigned long long ftrace_timestamp; @@ -1447,17 +1458,18 @@ struct task_struct { unsigned int kcov_softirq; #endif -#ifdef CONFIG_MEMCG +#ifdef CONFIG_MEMCG_V1 struct mem_cgroup *memcg_in_oom; +#endif +#ifdef CONFIG_MEMCG /* Number of pages to reclaim on returning to userland: */ unsigned int memcg_nr_pages_over_high; /* Used by memcontrol for targeted memcg charge: */ struct mem_cgroup *active_memcg; -#endif -#ifdef CONFIG_MEMCG_KMEM + /* Cache for current->cgroups->memcg->objcg lookups: */ struct obj_cgroup *objcg; #endif @@ -1506,6 +1518,8 @@ struct task_struct { /* Used for BPF run context */ struct bpf_run_ctx *bpf_ctx; #endif + /* Used by BPF for per-TASK xdp storage */ + struct bpf_net_context *bpf_net_context; #ifdef CONFIG_GCC_PLUGIN_STACKLEAK unsigned long lowest_stack; @@ -1604,7 +1618,7 @@ static inline char task_index_to_char(unsigned int state) { static const char state_char[] = "RSDTtXZPI"; - BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1); + BUILD_BUG_ON(TASK_REPORT_MAX * 2 != 1 << (sizeof(state_char) - 1)); return state_char[state]; } @@ -1778,7 +1792,8 @@ static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpuma } static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) { - if (!cpumask_test_cpu(0, new_mask)) + /* Opencoded cpumask_test_cpu(0, new_mask) to avoid dependency on cpumask.h */ + if ((*cpumask_bits(new_mask) & 1) == 0) return -EINVAL; return 0; } @@ -2064,47 +2079,6 @@ extern int __cond_resched_rwlock_write(rwlock_t *lock); __cond_resched_rwlock_write(lock); \ }) -#ifdef CONFIG_PREEMPT_DYNAMIC - -extern bool preempt_model_none(void); -extern bool preempt_model_voluntary(void); -extern bool preempt_model_full(void); - -#else - -static inline bool preempt_model_none(void) -{ - return IS_ENABLED(CONFIG_PREEMPT_NONE); -} -static inline bool preempt_model_voluntary(void) -{ - return IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY); -} -static inline bool preempt_model_full(void) -{ - return IS_ENABLED(CONFIG_PREEMPT); -} - -#endif - -static inline bool preempt_model_rt(void) -{ - return IS_ENABLED(CONFIG_PREEMPT_RT); -} - -/* - * Does the preemption model allow non-cooperative preemption? - * - * For !CONFIG_PREEMPT_DYNAMIC kernels this is an exact match with - * CONFIG_PREEMPTION; for CONFIG_PREEMPT_DYNAMIC this doesn't work as the - * kernel is *built* with CONFIG_PREEMPTION=y but may run with e.g. the - * PREEMPT_NONE model. 
- */ -static inline bool preempt_model_preemptible(void) -{ - return preempt_model_full() || preempt_model_rt(); -} - static __always_inline bool need_resched(void) { return unlikely(tif_need_resched()); @@ -2192,13 +2166,13 @@ static inline int sched_core_idle_cpu(int cpu) { return idle_cpu(cpu); } extern void sched_set_stop_task(int cpu, struct task_struct *stop); #ifdef CONFIG_MEM_ALLOC_PROFILING -static inline struct alloc_tag *alloc_tag_save(struct alloc_tag *tag) +static __always_inline struct alloc_tag *alloc_tag_save(struct alloc_tag *tag) { swap(current->alloc_tag, tag); return tag; } -static inline void alloc_tag_restore(struct alloc_tag *tag, struct alloc_tag *old) +static __always_inline void alloc_tag_restore(struct alloc_tag *tag, struct alloc_tag *old) { #ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG WARN(current->alloc_tag != tag, "current->alloc_tag was changed:\n"); diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h index 75303c126285..6a4a3cec4638 100644 --- a/include/linux/screen_info.h +++ b/include/linux/screen_info.h @@ -49,6 +49,16 @@ static inline u64 __screen_info_lfb_size(const struct screen_info *si, unsigned return lfb_size; } +static inline bool __screen_info_vbe_mode_nonvga(const struct screen_info *si) +{ + /* + * VESA modes typically run on VGA hardware. Set bit 5 signals that this + * is not the case. Drivers can then not make use of VGA resources. See + * Sec 4.4 of the VBE 2.0 spec. + */ + return si->vesa_attributes & BIT(5); +} + static inline unsigned int __screen_info_video_type(unsigned int type) { switch (type) { diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index 8bd4fda6e027..2fb266ea69fa 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -7,7 +7,6 @@ #include <linux/string_helpers.h> #include <linux/bug.h> #include <linux/mutex.h> -#include <linux/cpumask.h> #include <linux/nodemask.h> #include <linux/fs.h> #include <linux/cred.h> diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h index 1c89611e0e06..0f2f50b8d28e 100644 --- a/include/linux/serial_sci.h +++ b/include/linux/serial_sci.h @@ -37,6 +37,7 @@ enum { SCIx_SH7705_SCIF_REGTYPE, SCIx_HSCIF_REGTYPE, SCIx_RZ_SCIFA_REGTYPE, + SCIx_RZV2H_SCIF_REGTYPE, SCIx_NR_REGTYPES, }; diff --git a/include/linux/sfp.h b/include/linux/sfp.h index a45da7eef9a2..b14be59550e3 100644 --- a/include/linux/sfp.h +++ b/include/linux/sfp.h @@ -284,6 +284,12 @@ enum { SFF8024_ID_QSFP_8438 = 0x0c, SFF8024_ID_QSFP_8436_8636 = 0x0d, SFF8024_ID_QSFP28_8636 = 0x11, + SFF8024_ID_QSFP_DD = 0x18, + SFF8024_ID_OSFP = 0x19, + SFF8024_ID_DSFP = 0x1B, + SFF8024_ID_QSFP_PLUS_CMIS = 0x1E, + SFF8024_ID_SFP_DD_CMIS = 0x1F, + SFF8024_ID_SFP_PLUS_CMIS = 0x20, SFF8024_ENCODING_UNSPEC = 0x00, SFF8024_ENCODING_8B10B = 0x01, diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index 3fb18f7eb73e..1d06b1e5408a 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -113,12 +113,21 @@ int shmem_unuse(unsigned int type); #ifdef CONFIG_TRANSPARENT_HUGEPAGE extern bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force, struct mm_struct *mm, unsigned long vm_flags); +unsigned long shmem_allowable_huge_orders(struct inode *inode, + struct vm_area_struct *vma, pgoff_t index, + bool global_huge); #else static __always_inline bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force, struct mm_struct *mm, unsigned long vm_flags) { return false; } +static inline unsigned long shmem_allowable_huge_orders(struct 
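Since alloc_tag_save()/alloc_tag_restore() above simply swap current->alloc_tag, a caller brackets an allocation window with the pair. A hedged sketch, assuming a caller-supplied tag and using the _noprof allocator variant so the window's tag is not displaced by a new callsite tag:

	#include <linux/sched.h>
	#include <linux/slab.h>

	static void *alloc_under_tag(struct alloc_tag *tag, size_t size)
	{
		struct alloc_tag *old = alloc_tag_save(tag);
		/* attributed to @tag via current->alloc_tag */
		void *p = kmalloc_noprof(size, GFP_KERNEL);

		alloc_tag_restore(tag, old);
		return p;
	}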
inode *inode, + struct vm_area_struct *vma, pgoff_t index, + bool global_huge) +{ + return 0; +} #endif #ifdef CONFIG_SHMEM diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 1c2902eaebd3..29c3ea5b6e93 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -706,6 +706,13 @@ typedef unsigned int sk_buff_data_t; typedef unsigned char *sk_buff_data_t; #endif +enum skb_tstamp_type { + SKB_CLOCK_REALTIME, + SKB_CLOCK_MONOTONIC, + SKB_CLOCK_TAI, + __SKB_CLOCK_MAX = SKB_CLOCK_TAI, +}; + /** * DOC: Basic sk_buff geometry * @@ -823,10 +830,8 @@ typedef unsigned char *sk_buff_data_t; * @dst_pending_confirm: need to confirm neighbour * @decrypted: Decrypted SKB * @slow_gro: state present at GRO time, slower prepare step required - * @mono_delivery_time: When set, skb->tstamp has the - * delivery_time in mono clock base (i.e. EDT). Otherwise, the - * skb->tstamp has the (rcv) timestamp at ingress and - * delivery_time at egress. + * @tstamp_type: clock base of skb->tstamp, see enum skb_tstamp_type; + * values other than SKB_CLOCK_REALTIME carry a delivery time (EDT). * @napi_id: id of the NAPI struct this skb came from * @sender_cpu: (aka @napi_id) source CPU in XPS * @alloc_cpu: CPU which did the skb allocation. @@ -954,7 +959,7 @@ struct sk_buff { /* private: */ __u8 __mono_tc_offset[0]; /* public: */ - __u8 mono_delivery_time:1; /* See SKB_MONO_DELIVERY_TIME_MASK */ + __u8 tstamp_type:2; /* See skb_tstamp_type */ #ifdef CONFIG_NET_XGRESS __u8 tc_at_ingress:1; /* See TC_AT_INGRESS_MASK */ __u8 tc_skip_classify:1; @@ -1084,15 +1089,16 @@ struct sk_buff { #endif #define PKT_TYPE_OFFSET offsetof(struct sk_buff, __pkt_type_offset) -/* if you move tc_at_ingress or mono_delivery_time +/* if you move tc_at_ingress or tstamp_type * around, you also must adapt these constants. */ #ifdef __BIG_ENDIAN_BITFIELD -#define SKB_MONO_DELIVERY_TIME_MASK (1 << 7) -#define TC_AT_INGRESS_MASK (1 << 6) +#define SKB_TSTAMP_TYPE_MASK (3 << 6) +#define SKB_TSTAMP_TYPE_RSHIFT (6) +#define TC_AT_INGRESS_MASK (1 << 5) #else -#define SKB_MONO_DELIVERY_TIME_MASK (1 << 0) -#define TC_AT_INGRESS_MASK (1 << 1) +#define SKB_TSTAMP_TYPE_MASK (3) +#define TC_AT_INGRESS_MASK (1 << 2) #endif #define SKB_BF_MONO_TC_OFFSET offsetof(struct sk_buff, __mono_tc_offset) @@ -1245,8 +1251,14 @@ static inline bool skb_data_unref(const struct sk_buff *skb, return true; } -void __fix_address -kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason); +void __fix_address sk_skb_reason_drop(struct sock *sk, struct sk_buff *skb, + enum skb_drop_reason reason); + +static inline void +kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason) +{ + sk_skb_reason_drop(NULL, skb, reason); +} /** * kfree_skb - free an sk_buff with 'NOT_SPECIFIED' reason @@ -1492,8 +1504,14 @@ __skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4) __skb_set_hash(skb, hash, true, is_l4); } -void __skb_get_hash(struct sk_buff *skb); -u32 __skb_get_hash_symmetric(const struct sk_buff *skb); +u32 __skb_get_hash_symmetric_net(const struct net *net, const struct sk_buff *skb); + +static inline u32 __skb_get_hash_symmetric(const struct sk_buff *skb) +{ + return __skb_get_hash_symmetric_net(NULL, skb); +} + +void __skb_get_hash_net(const struct net *net, struct sk_buff *skb); u32 skb_get_poff(const struct sk_buff *skb); u32 __skb_get_poff(const struct sk_buff *skb, const void *data, const struct flow_keys_basic *keys, int hlen); @@ -1572,10 +1590,18 @@ void skb_flow_dissect_hash(const struct sk_buff *skb, struct flow_dissector *flow_dissector, void *target_container); +static
inline __u32 skb_get_hash_net(const struct net *net, struct sk_buff *skb) +{ + if (!skb->l4_hash && !skb->sw_hash) + __skb_get_hash_net(net, skb); + + return skb->hash; +} + static inline __u32 skb_get_hash(struct sk_buff *skb) { if (!skb->l4_hash && !skb->sw_hash) - __skb_get_hash(skb); + __skb_get_hash_net(NULL, skb); return skb->hash; } @@ -1677,6 +1703,9 @@ int __zerocopy_sg_from_iter(struct msghdr *msg, struct sock *sk, struct sk_buff *skb, struct iov_iter *from, size_t length); +int zerocopy_fill_skb_from_iter(struct sk_buff *skb, + struct iov_iter *from, size_t length); + static inline int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len) { @@ -3400,6 +3429,10 @@ static inline struct page *__dev_alloc_pages_noprof(gfp_t gfp_mask, } #define __dev_alloc_pages(...) alloc_hooks(__dev_alloc_pages_noprof(__VA_ARGS__)) +/* + * This specialized allocator has to be a macro for its allocations to be + * accounted separately (to have a separate alloc_tag). + */ #define dev_alloc_pages(_order) __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, _order) /** @@ -3416,6 +3449,10 @@ static inline struct page *__dev_alloc_page_noprof(gfp_t gfp_mask) } #define __dev_alloc_page(...) alloc_hooks(__dev_alloc_page_noprof(__VA_ARGS__)) +/* + * This specialized allocator has to be a macro for its allocations to be + * accounted separately (to have a separate alloc_tag). + */ #define dev_alloc_page() dev_alloc_pages(0) /** @@ -4179,7 +4216,7 @@ static inline void skb_get_new_timestampns(const struct sk_buff *skb, static inline void __net_timestamp(struct sk_buff *skb) { skb->tstamp = ktime_get_real(); - skb->mono_delivery_time = 0; + skb->tstamp_type = SKB_CLOCK_REALTIME; } static inline ktime_t net_timedelta(ktime_t t) @@ -4188,10 +4225,36 @@ static inline ktime_t net_timedelta(ktime_t t) } static inline void skb_set_delivery_time(struct sk_buff *skb, ktime_t kt, - bool mono) + u8 tstamp_type) { skb->tstamp = kt; - skb->mono_delivery_time = kt && mono; + + if (kt) + skb->tstamp_type = tstamp_type; + else + skb->tstamp_type = SKB_CLOCK_REALTIME; +} + +static inline void skb_set_delivery_type_by_clockid(struct sk_buff *skb, + ktime_t kt, clockid_t clockid) +{ + u8 tstamp_type = SKB_CLOCK_REALTIME; + + switch (clockid) { + case CLOCK_REALTIME: + break; + case CLOCK_MONOTONIC: + tstamp_type = SKB_CLOCK_MONOTONIC; + break; + case CLOCK_TAI: + tstamp_type = SKB_CLOCK_TAI; + break; + default: + WARN_ON_ONCE(1); + kt = 0; + } + + skb_set_delivery_time(skb, kt, tstamp_type); } DECLARE_STATIC_KEY_FALSE(netstamp_needed_key); @@ -4201,8 +4264,8 @@ DECLARE_STATIC_KEY_FALSE(netstamp_needed_key); */ static inline void skb_clear_delivery_time(struct sk_buff *skb) { - if (skb->mono_delivery_time) { - skb->mono_delivery_time = 0; + if (skb->tstamp_type) { + skb->tstamp_type = SKB_CLOCK_REALTIME; if (static_branch_unlikely(&netstamp_needed_key)) skb->tstamp = ktime_get_real(); else @@ -4212,7 +4275,7 @@ static inline void skb_clear_delivery_time(struct sk_buff *skb) static inline void skb_clear_tstamp(struct sk_buff *skb) { - if (skb->mono_delivery_time) + if (skb->tstamp_type) return; skb->tstamp = 0; @@ -4220,7 +4283,7 @@ static inline void skb_clear_tstamp(struct sk_buff *skb) static inline ktime_t skb_tstamp(const struct sk_buff *skb) { - if (skb->mono_delivery_time) + if (skb->tstamp_type) return 0; return skb->tstamp; @@ -4228,7 +4291,7 @@ static inline ktime_t skb_tstamp(const struct sk_buff *skb) static inline ktime_t skb_tstamp_cond(const struct sk_buff *skb, bool cond) { - if 
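With the two-bit tstamp_type, setting a delivery time names its clock base explicitly, and skb_set_delivery_type_by_clockid() maps POSIX clockids onto it. A small sketch of the two equivalent call styles (the wrapper function and 'edt' are illustrative):

	#include <linux/skbuff.h>

	static void stamp_edt_mono(struct sk_buff *skb, ktime_t edt)
	{
		/* name the base directly ... */
		skb_set_delivery_time(skb, edt, SKB_CLOCK_MONOTONIC);

		/* ... or derive it from a clockid; for a non-zero edt both
		 * leave skb->tstamp == edt and
		 * skb->tstamp_type == SKB_CLOCK_MONOTONIC. */
		skb_set_delivery_type_by_clockid(skb, edt, CLOCK_MONOTONIC);
	}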
(!skb->mono_delivery_time && skb->tstamp) + if (skb->tstamp_type != SKB_CLOCK_MONOTONIC && skb->tstamp) return skb->tstamp; if (static_branch_unlikely(&netstamp_needed_key) || cond) diff --git a/include/linux/skbuff_ref.h b/include/linux/skbuff_ref.h index 11f0a4063403..16c241a23472 100644 --- a/include/linux/skbuff_ref.h +++ b/include/linux/skbuff_ref.h @@ -32,13 +32,13 @@ static inline void skb_frag_ref(struct sk_buff *skb, int f) __skb_frag_ref(&skb_shinfo(skb)->frags[f]); } -bool napi_pp_put_page(struct page *page); +bool napi_pp_put_page(netmem_ref netmem); static inline void skb_page_unref(struct page *page, bool recycle) { #ifdef CONFIG_PAGE_POOL - if (recycle && napi_pp_put_page(page)) + if (recycle && napi_pp_put_page(page_to_netmem(page))) return; #endif put_page(page); diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index c9efda9df285..d9b03e0746e7 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -414,6 +414,11 @@ void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock); int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock, struct sk_msg *msg); +/* + * This specialized allocator has to be a macro for its allocations to be + * accounted separately (to have a separate alloc_tag). The typecast is + * intentional to enforce typesafety. + */ #define sk_psock_init_link() \ ((struct sk_psock_link *)kzalloc(sizeof(struct sk_psock_link), \ GFP_ATOMIC | __GFP_NOWARN)) diff --git a/include/linux/slab.h b/include/linux/slab.h index 7247e217e21b..eb2bf4629157 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -41,7 +41,7 @@ enum _slab_flag_bits { #ifdef CONFIG_FAILSLAB _SLAB_FAILSLAB, #endif -#ifdef CONFIG_MEMCG_KMEM +#ifdef CONFIG_MEMCG _SLAB_ACCOUNT, #endif #ifdef CONFIG_KASAN_GENERIC @@ -171,7 +171,7 @@ enum _slab_flag_bits { # define SLAB_FAILSLAB __SLAB_FLAG_UNUSED #endif /* Account to memcg */ -#ifdef CONFIG_MEMCG_KMEM +#ifdef CONFIG_MEMCG # define SLAB_ACCOUNT __SLAB_FLAG_BIT(_SLAB_ACCOUNT) #else # define SLAB_ACCOUNT __SLAB_FLAG_UNUSED @@ -407,7 +407,7 @@ enum kmalloc_cache_type { #ifndef CONFIG_ZONE_DMA KMALLOC_DMA = KMALLOC_NORMAL, #endif -#ifndef CONFIG_MEMCG_KMEM +#ifndef CONFIG_MEMCG KMALLOC_CGROUP = KMALLOC_NORMAL, #endif KMALLOC_RANDOM_START = KMALLOC_NORMAL, @@ -420,14 +420,15 @@ enum kmalloc_cache_type { #ifdef CONFIG_ZONE_DMA KMALLOC_DMA, #endif -#ifdef CONFIG_MEMCG_KMEM +#ifdef CONFIG_MEMCG KMALLOC_CGROUP, #endif NR_KMALLOC_TYPES }; -extern struct kmem_cache * -kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1]; +typedef struct kmem_cache * kmem_buckets[KMALLOC_SHIFT_HIGH + 1]; + +extern kmem_buckets kmalloc_caches[NR_KMALLOC_TYPES]; /* * Define gfp bits that should not be set for KMALLOC_NORMAL. @@ -435,7 +436,7 @@ kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1]; #define KMALLOC_NOT_NORMAL_BITS \ (__GFP_RECLAIMABLE | \ (IS_ENABLED(CONFIG_ZONE_DMA) ? __GFP_DMA : 0) | \ - (IS_ENABLED(CONFIG_MEMCG_KMEM) ? __GFP_ACCOUNT : 0)) + (IS_ENABLED(CONFIG_MEMCG) ? 
__GFP_ACCOUNT : 0)) extern unsigned long random_kmalloc_seed; @@ -463,7 +464,7 @@ static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags, unsigne */ if (IS_ENABLED(CONFIG_ZONE_DMA) && (flags & __GFP_DMA)) return KMALLOC_DMA; - if (!IS_ENABLED(CONFIG_MEMCG_KMEM) || (flags & __GFP_RECLAIMABLE)) + if (!IS_ENABLED(CONFIG_MEMCG) || (flags & __GFP_RECLAIMABLE)) return KMALLOC_RECLAIM; else return KMALLOC_CGROUP; @@ -528,9 +529,6 @@ static_assert(PAGE_SHIFT <= 20); #include <linux/alloc_tag.h> -void *__kmalloc_noprof(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1); -#define __kmalloc(...) alloc_hooks(__kmalloc_noprof(__VA_ARGS__)) - /** * kmem_cache_alloc - Allocate an object * @cachep: The cache to allocate from. @@ -551,6 +549,10 @@ void *kmem_cache_alloc_lru_noprof(struct kmem_cache *s, struct list_lru *lru, void kmem_cache_free(struct kmem_cache *s, void *objp); +kmem_buckets *kmem_buckets_create(const char *name, slab_flags_t flags, + unsigned int useroffset, unsigned int usersize, + void (*ctor)(void *)); + /* * Bulk allocation and freeing operations. These are accelerated in an * allocator specific way to avoid taking locks repeatedly or building @@ -568,31 +570,49 @@ static __always_inline void kfree_bulk(size_t size, void **p) kmem_cache_free_bulk(NULL, size, p); } -void *__kmalloc_node_noprof(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment - __alloc_size(1); -#define __kmalloc_node(...) alloc_hooks(__kmalloc_node_noprof(__VA_ARGS__)) - void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t flags, int node) __assume_slab_alignment __malloc; #define kmem_cache_alloc_node(...) alloc_hooks(kmem_cache_alloc_node_noprof(__VA_ARGS__)) -void *kmalloc_trace_noprof(struct kmem_cache *s, gfp_t flags, size_t size) - __assume_kmalloc_alignment __alloc_size(3); +/* + * These macros allow declaring a kmem_buckets * parameter alongside size, which + * can be compiled out with CONFIG_SLAB_BUCKETS=n so that a large number of call + * sites don't have to pass NULL. + */ +#ifdef CONFIG_SLAB_BUCKETS +#define DECL_BUCKET_PARAMS(_size, _b) size_t (_size), kmem_buckets *(_b) +#define PASS_BUCKET_PARAMS(_size, _b) (_size), (_b) +#define PASS_BUCKET_PARAM(_b) (_b) +#else +#define DECL_BUCKET_PARAMS(_size, _b) size_t (_size) +#define PASS_BUCKET_PARAMS(_size, _b) (_size) +#define PASS_BUCKET_PARAM(_b) NULL +#endif + +/* + * The following functions are not to be used directly and are intended only + * for internal use from kmalloc() and kmalloc_node() + * with the exception of kunit tests + */ + +void *__kmalloc_noprof(size_t size, gfp_t flags) + __assume_kmalloc_alignment __alloc_size(1); -void *kmalloc_node_trace_noprof(struct kmem_cache *s, gfp_t gfpflags, - int node, size_t size) __assume_kmalloc_alignment - __alloc_size(4); -#define kmalloc_trace(...) alloc_hooks(kmalloc_trace_noprof(__VA_ARGS__)) +void *__kmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node) + __assume_kmalloc_alignment __alloc_size(1); -#define kmalloc_node_trace(...) alloc_hooks(kmalloc_node_trace_noprof(__VA_ARGS__)) +void *__kmalloc_cache_noprof(struct kmem_cache *s, gfp_t flags, size_t size) + __assume_kmalloc_alignment __alloc_size(3); -void *kmalloc_large_noprof(size_t size, gfp_t flags) __assume_page_alignment - __alloc_size(1); -#define kmalloc_large(...) 
alloc_hooks(kmalloc_large_noprof(__VA_ARGS__)) +void *__kmalloc_cache_node_noprof(struct kmem_cache *s, gfp_t gfpflags, + int node, size_t size) + __assume_kmalloc_alignment __alloc_size(4); -void *kmalloc_large_node_noprof(size_t size, gfp_t flags, int node) __assume_page_alignment - __alloc_size(1); -#define kmalloc_large_node(...) alloc_hooks(kmalloc_large_node_noprof(__VA_ARGS__)) +void *__kmalloc_large_noprof(size_t size, gfp_t flags) + __assume_page_alignment __alloc_size(1); + +void *__kmalloc_large_node_noprof(size_t size, gfp_t flags, int node) + __assume_page_alignment __alloc_size(1); /** * kmalloc - allocate kernel memory @@ -604,7 +624,8 @@ void *kmalloc_large_node_noprof(size_t size, gfp_t flags, int node) __assume_pag * * The allocated object address is aligned to at least ARCH_KMALLOC_MINALIGN * bytes. For @size of power of two bytes, the alignment is also guaranteed - * to be at least to the size. + * to be at least to the size. For other sizes, the alignment is guaranteed to + * be at least the largest power-of-two divisor of @size. * * The @flags argument may be one of the GFP flags defined at * include/linux/gfp_types.h and described at @@ -654,10 +675,10 @@ static __always_inline __alloc_size(1) void *kmalloc_noprof(size_t size, gfp_t f unsigned int index; if (size > KMALLOC_MAX_CACHE_SIZE) - return kmalloc_large_noprof(size, flags); + return __kmalloc_large_noprof(size, flags); index = kmalloc_index(size); - return kmalloc_trace_noprof( + return __kmalloc_cache_noprof( kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index], flags, size); } @@ -665,20 +686,26 @@ static __always_inline __alloc_size(1) void *kmalloc_noprof(size_t size, gfp_t f } #define kmalloc(...) alloc_hooks(kmalloc_noprof(__VA_ARGS__)) +#define kmem_buckets_alloc(_b, _size, _flags) \ + alloc_hooks(__kmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE)) + +#define kmem_buckets_alloc_track_caller(_b, _size, _flags) \ + alloc_hooks(__kmalloc_node_track_caller_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE, _RET_IP_)) + static __always_inline __alloc_size(1) void *kmalloc_node_noprof(size_t size, gfp_t flags, int node) { if (__builtin_constant_p(size) && size) { unsigned int index; if (size > KMALLOC_MAX_CACHE_SIZE) - return kmalloc_large_node_noprof(size, flags, node); + return __kmalloc_large_node_noprof(size, flags, node); index = kmalloc_index(size); - return kmalloc_node_trace_noprof( + return __kmalloc_cache_node_noprof( kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index], flags, node, size); } - return __kmalloc_node_noprof(size, flags, node); + return __kmalloc_node_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node); } #define kmalloc_node(...) alloc_hooks(kmalloc_node_noprof(__VA_ARGS__)) @@ -729,8 +756,10 @@ static inline __realloc_size(2, 3) void * __must_check krealloc_array_noprof(voi */ #define kcalloc(n, size, flags) kmalloc_array(n, size, (flags) | __GFP_ZERO) -void *kmalloc_node_track_caller_noprof(size_t size, gfp_t flags, int node, - unsigned long caller) __alloc_size(1); +void *__kmalloc_node_track_caller_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node, + unsigned long caller) __alloc_size(1); +#define kmalloc_node_track_caller_noprof(size, flags, node, caller) \ + __kmalloc_node_track_caller_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node, caller) #define kmalloc_node_track_caller(...) 
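The reworded kmalloc() alignment guarantee is easiest to see with numbers; a worked example (illustrative, not part of the header):

	#include <linux/slab.h>

	static void alignment_examples(void)
	{
		/* 128 is a power of two: aligned to at least 128 bytes. */
		void *a = kmalloc(128, GFP_KERNEL);

		/* 96 = 32 * 3: the largest power-of-two divisor is 32, so
		 * the pointer is at least 32-byte aligned. */
		void *b = kmalloc(96, GFP_KERNEL);

		kfree(a);
		kfree(b);
	}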
\ alloc_hooks(kmalloc_node_track_caller_noprof(__VA_ARGS__, _RET_IP_)) @@ -756,7 +785,7 @@ static inline __alloc_size(1, 2) void *kmalloc_array_node_noprof(size_t n, size_ return NULL; if (__builtin_constant_p(n) && __builtin_constant_p(size)) return kmalloc_node_noprof(bytes, flags, node); - return __kmalloc_node_noprof(bytes, flags, node); + return __kmalloc_node_noprof(PASS_BUCKET_PARAMS(bytes, NULL), flags, node); } #define kmalloc_array_node(...) alloc_hooks(kmalloc_array_node_noprof(__VA_ARGS__)) @@ -780,7 +809,9 @@ static inline __alloc_size(1) void *kzalloc_noprof(size_t size, gfp_t flags) #define kzalloc(...) alloc_hooks(kzalloc_noprof(__VA_ARGS__)) #define kzalloc_node(_size, _flags, _node) kmalloc_node(_size, (_flags)|__GFP_ZERO, _node) -extern void *kvmalloc_node_noprof(size_t size, gfp_t flags, int node) __alloc_size(1); +void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node) __alloc_size(1); +#define kvmalloc_node_noprof(size, flags, node) \ + __kvmalloc_node_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node) #define kvmalloc_node(...) alloc_hooks(kvmalloc_node_noprof(__VA_ARGS__)) #define kvmalloc(_size, _flags) kvmalloc_node(_size, _flags, NUMA_NO_NODE) @@ -788,6 +819,8 @@ extern void *kvmalloc_node_noprof(size_t size, gfp_t flags, int node) __alloc_si #define kvzalloc(_size, _flags) kvmalloc(_size, (_flags)|__GFP_ZERO) #define kvzalloc_node(_size, _flags, _node) kvmalloc_node(_size, (_flags)|__GFP_ZERO, _node) +#define kmem_buckets_valloc(_b, _size, _flags) \ + alloc_hooks(__kvmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE)) static inline __alloc_size(1, 2) void * kvmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags, int node) diff --git a/include/linux/soc/apple/rtkit.h b/include/linux/soc/apple/rtkit.h index 8c9ca857ccf6..c06d17599ae7 100644 --- a/include/linux/soc/apple/rtkit.h +++ b/include/linux/soc/apple/rtkit.h @@ -69,7 +69,7 @@ struct apple_rtkit; * Initializes the internal state required to handle RTKit. This * should usually be called within _probe. * - * @dev: Pointer to the device node this coprocessor is assocated with + * @dev: Pointer to the device node this coprocessor is associated with * @cookie: opaque cookie passed to all functions defined in rtkit_ops * @mbox_name: mailbox name used to communicate with the co-processor * @mbox_idx: mailbox index to be used if mbox_name is NULL @@ -83,7 +83,7 @@ struct apple_rtkit *devm_apple_rtkit_init(struct device *dev, void *cookie, * Non-devm version of devm_apple_rtkit_init. Must be freed with * apple_rtkit_free. 
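Taken together, kmem_buckets_create() and the kmem_buckets_alloc()/kmem_buckets_valloc() wrappers let a subsystem route its variable-size allocations into a dedicated set of kmalloc caches. A hedged sketch, with the subsystem name and callers invented for illustration:

	#include <linux/slab.h>

	static kmem_buckets *my_buckets;	/* illustrative name */

	static int __init my_subsys_init(void)
	{
		my_buckets = kmem_buckets_create("my_subsys", SLAB_ACCOUNT,
						 0, 0, NULL);
		return my_buckets ? 0 : -ENOMEM;
	}

	static void *my_subsys_alloc(size_t size)
	{
		/* degrades to plain kmalloc behavior when CONFIG_SLAB_BUCKETS=n */
		return kmem_buckets_alloc(my_buckets, size, GFP_KERNEL);
	}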
* - * @dev: Pointer to the device node this coprocessor is assocated with + * @dev: Pointer to the device node this coprocessor is associated with * @cookie: opaque cookie passed to all functions defined in rtkit_ops * @mbox_name: mailbox name used to communicate with the co-processor * @mbox_idx: mailbox index to be used if mbox_name is NULL diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h index d4a8e34505e6..5bee6f7fc400 100644 --- a/include/linux/soc/mediatek/mtk-cmdq.h +++ b/include/linux/soc/mediatek/mtk-cmdq.h @@ -25,6 +25,31 @@ struct cmdq_pkt; +enum cmdq_logic_op { + CMDQ_LOGIC_ASSIGN = 0, + CMDQ_LOGIC_ADD = 1, + CMDQ_LOGIC_SUBTRACT = 2, + CMDQ_LOGIC_MULTIPLY = 3, + CMDQ_LOGIC_XOR = 8, + CMDQ_LOGIC_NOT = 9, + CMDQ_LOGIC_OR = 10, + CMDQ_LOGIC_AND = 11, + CMDQ_LOGIC_LEFT_SHIFT = 12, + CMDQ_LOGIC_RIGHT_SHIFT = 13, + CMDQ_LOGIC_MAX, +}; + +struct cmdq_operand { + /* true if the operand is a register, false if an immediate value */ + bool reg; + union { + /* register index */ + u16 idx; + /* immediate value */ + u16 value; + }; +}; + struct cmdq_client_reg { u8 subsys; u16 offset; @@ -273,6 +298,23 @@ int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value, u32 mask); /** + * cmdq_pkt_logic_command() - Append logic command to the CMDQ packet, ask GCE to + * execute an instruction that stores the result of a logic operation + * on the left and right operands into result_reg_idx. + * @pkt: the CMDQ packet + * @result_reg_idx: SPR index that stores the operation result of left_operand and right_operand + * @left_operand: left operand + * @s_op: the logic operator enum + * @right_operand: right operand + * + * Return: 0 for success; else the error code is returned + */ +int cmdq_pkt_logic_command(struct cmdq_pkt *pkt, u16 result_reg_idx, + struct cmdq_operand *left_operand, + enum cmdq_logic_op s_op, + struct cmdq_operand *right_operand); + +/** * cmdq_pkt_assign() - Append logic assign command to the CMDQ packet, ask GCE * to execute an instruction that set a constant value into * internal register and use as value, mask or address in diff --git a/include/linux/soc/qcom/llcc-qcom.h b/include/linux/soc/qcom/llcc-qcom.h index 1a886666bbb6..9e9f528b1370 100644 --- a/include/linux/soc/qcom/llcc-qcom.h +++ b/include/linux/soc/qcom/llcc-qcom.h @@ -115,7 +115,8 @@ struct llcc_edac_reg_offset { /** * struct llcc_drv_data - Data associated with the llcc driver * @regmaps: regmaps associated with the llcc device - * @bcast_regmap: regmap associated with llcc broadcast offset + * @bcast_regmap: regmap associated with llcc broadcast OR offset + * @bcast_and_regmap: regmap associated with llcc broadcast AND offset * @cfg: pointer to the data structure for slice configuration * @edac_reg_offset: Offset of the LLCC EDAC registers * @lock: mutex associated with each slice @@ -129,6 +130,7 @@ struct llcc_edac_reg_offset { struct llcc_drv_data { struct regmap **regmaps; struct regmap *bcast_regmap; + struct regmap *bcast_and_regmap; const struct llcc_slice_config *cfg; const struct llcc_edac_reg_offset *edac_reg_offset; struct mutex lock; diff --git a/include/linux/soc/qcom/smem.h b/include/linux/soc/qcom/smem.h index a36a3b9d4929..0943bf419e11 100644 --- a/include/linux/soc/qcom/smem.h +++ b/include/linux/soc/qcom/smem.h @@ -13,5 +13,6 @@ int qcom_smem_get_free_space(unsigned host); phys_addr_t qcom_smem_virt_to_phys(void *p); int qcom_smem_get_soc_id(u32 *id); +int qcom_smem_get_feature_code(u32 *code); #endif diff --git a/include/linux/soc/qcom/socinfo.h b/include/linux/soc/qcom/socinfo.h index 
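A hedged usage sketch of cmdq_pkt_logic_command(): compute SPR1 = SPR0 + 4 in the GCE, using one register operand and one immediate (packet creation elided; the register indices are illustrative):

	static int cmdq_add_four(struct cmdq_pkt *pkt)
	{
		struct cmdq_operand lop = { .reg = true, .idx = 0 };	/* SPR0 */
		struct cmdq_operand rop = { .reg = false, .value = 4 };	/* immediate */

		/* SPR1 = SPR0 + 4 */
		return cmdq_pkt_logic_command(pkt, 1, &lop, CMDQ_LOGIC_ADD, &rop);
	}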
e78777bb0f4a..608950443eee 100644 --- a/include/linux/soc/qcom/socinfo.h +++ b/include/linux/soc/qcom/socinfo.h @@ -3,6 +3,8 @@ #ifndef __QCOM_SOCINFO_H__ #define __QCOM_SOCINFO_H__ +#include <linux/types.h> + /* * SMEM item id, used to acquire handles to respective * SMEM region. @@ -12,6 +14,14 @@ #define SMEM_SOCINFO_BUILD_ID_LENGTH 32 #define SMEM_SOCINFO_CHIP_ID_LENGTH 32 +/* + * SoC version type with major number in the upper 16 bits and minor + * number in the lower 16 bits. + */ +#define SOCINFO_MAJOR(ver) (((ver) >> 16) & 0xffff) +#define SOCINFO_MINOR(ver) ((ver) & 0xffff) +#define SOCINFO_VERSION(maj, min) ((((maj) & 0xffff) << 16)|((min) & 0xffff)) + /* Socinfo SMEM item structure */ struct socinfo { __le32 fmt; @@ -74,4 +84,28 @@ struct socinfo { __le32 boot_core; }; +/* Internal feature codes */ +enum qcom_socinfo_feature_code { + /* External feature codes */ + SOCINFO_FC_UNKNOWN = 0x0, + SOCINFO_FC_AA, + SOCINFO_FC_AB, + SOCINFO_FC_AC, + SOCINFO_FC_AD, + SOCINFO_FC_AE, + SOCINFO_FC_AF, + SOCINFO_FC_AG, + SOCINFO_FC_AH, +}; + +/* Internal feature codes */ +/* Valid values: 0 <= n <= 0xf */ +#define SOCINFO_FC_Yn(n) (0xf1 + (n)) +#define SOCINFO_FC_INT_MAX SOCINFO_FC_Yn(0xf) + +/* Product codes */ +#define SOCINFO_PC_UNKNOWN 0 +#define SOCINFO_PCn(n) ((n) + 1) +#define SOCINFO_PC_RESERVE (BIT(31) - 1) + #endif diff --git a/include/linux/soc/samsung/exynos-regs-pmu.h b/include/linux/soc/samsung/exynos-regs-pmu.h index aa840ed043e1..f411c176536d 100644 --- a/include/linux/soc/samsung/exynos-regs-pmu.h +++ b/include/linux/soc/samsung/exynos-regs-pmu.h @@ -657,4 +657,8 @@ #define EXYNOS5433_PAD_RETENTION_UFS_OPTION (0x3268) #define EXYNOS5433_PAD_RETENTION_FSYSGENIO_OPTION (0x32A8) +/* For Tensor GS101 */ +#define GS101_SYSIP_DAT0 (0x810) +#define GS101_SYSTEM_CONFIGURATION (0x3A00) + #endif /* __LINUX_SOC_EXYNOS_REGS_PMU_H */ diff --git a/include/linux/socket.h b/include/linux/socket.h index 89d16b90370b..df9cdb8bbfb8 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -76,7 +76,7 @@ struct msghdr { __kernel_size_t msg_controllen; /* ancillary data buffer length */ struct kiocb *msg_iocb; /* ptr to iocb for async requests */ struct ubuf_info *msg_ubuf; - int (*sg_from_iter)(struct sock *sk, struct sk_buff *skb, + int (*sg_from_iter)(struct sk_buff *skb, struct iov_iter *from, size_t length); }; @@ -442,11 +442,14 @@ extern int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr, extern int __sys_socket(int family, int type, int protocol); extern struct file *__sys_socket_file(int family, int type, int protocol); extern int __sys_bind(int fd, struct sockaddr __user *umyaddr, int addrlen); +extern int __sys_bind_socket(struct socket *sock, struct sockaddr_storage *address, + int addrlen); extern int __sys_connect_file(struct file *file, struct sockaddr_storage *addr, int addrlen, int file_flags); extern int __sys_connect(int fd, struct sockaddr __user *uservaddr, int addrlen); extern int __sys_listen(int fd, int backlog); +extern int __sys_listen_socket(struct socket *sock, int backlog); extern int __sys_getsockname(int fd, struct sockaddr __user *usockaddr, int __user *usockaddr_len); extern int __sys_getpeername(int fd, struct sockaddr __user *usockaddr, diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 98fdef6e28f2..d7a16e0adf44 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -447,7 +447,6 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch * @cur_msg_need_completion: Flag used 
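The SOCINFO_* helpers above pack a major.minor pair into one u32, major in the high half and minor in the low half; a worked example of the round trip (illustrative function):

	static void socinfo_version_example(void)
	{
		u32 ver = SOCINFO_VERSION(1, 2);	/* packs to 0x00010002 */

		/* SOCINFO_MAJOR(ver) == 1, SOCINFO_MINOR(ver) == 2 */
		(void)ver;
	}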
internally to opportunistically skip * the @cur_msg_completion. This flag is used to signal the context that * is running spi_finalize_current_message() that it needs to complete() - * @cur_msg_mapped: message has been mapped for DMA * @fallback: fallback to PIO if DMA transfer return failure with * SPI_TRANS_FAIL_NO_START. * @last_cs_mode_high: was (mode & SPI_CS_HIGH) true on the last call to set_cs. @@ -533,6 +532,9 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch * @queue_empty: signal green light for opportunistically skipping the queue * for spi_sync transfers. * @must_async: disable all fast paths in the core + * @defer_optimize_message: set to true if controller cannot pre-optimize messages + * and needs to defer the optimization step until the message is actually + * being transferred * * Each SPI controller can communicate with one or more @spi_device * children. These make a small bus, sharing MOSI, MISO and SCK signals @@ -708,7 +710,6 @@ struct spi_controller { bool running; bool rt; bool auto_runtime_pm; - bool cur_msg_mapped; bool fallback; bool last_cs_mode_high; s8 last_cs[SPI_CS_CNT_MAX]; @@ -776,6 +777,7 @@ struct spi_controller { /* Flag for enabling opportunistic skipping of the queue in spi_sync */ bool queue_empty; bool must_async; + bool defer_optimize_message; }; static inline void *spi_controller_get_devdata(struct spi_controller *ctlr) @@ -981,6 +983,8 @@ struct spi_res { * transfer this transfer. Set to 0 if the SPI bus driver does * not support it. * @transfer_list: transfers are sequenced through @spi_message.transfers + * @tx_sg_mapped: If true, the @tx_sg is mapped for DMA + * @rx_sg_mapped: If true, the @rx_sg is mapped for DMA * @tx_sg: Scatterlist for transmit, currently not for client use * @rx_sg: Scatterlist for receive, currently not for client use * @ptp_sts_word_pre: The word (subject to bits_per_word semantics) offset @@ -1077,10 +1081,13 @@ struct spi_transfer { #define SPI_TRANS_FAIL_IO BIT(1) u16 error; - dma_addr_t tx_dma; - dma_addr_t rx_dma; + bool tx_sg_mapped; + bool rx_sg_mapped; + struct sg_table tx_sg; struct sg_table rx_sg; + dma_addr_t tx_dma; + dma_addr_t rx_dma; unsigned dummy_data:1; unsigned cs_off:1; @@ -1269,6 +1276,8 @@ static inline void spi_message_free(struct spi_message *m) extern int spi_optimize_message(struct spi_device *spi, struct spi_message *msg); extern void spi_unoptimize_message(struct spi_message *msg); +extern int devm_spi_optimize_message(struct device *dev, struct spi_device *spi, + struct spi_message *msg); extern int spi_setup(struct spi_device *spi); extern int spi_async(struct spi_device *spi, struct spi_message *message); diff --git a/include/linux/spi/spi_bitbang.h b/include/linux/spi/spi_bitbang.h index b930eca2ef7b..d4cb83195f7a 100644 --- a/include/linux/spi/spi_bitbang.h +++ b/include/linux/spi/spi_bitbang.h @@ -4,6 +4,8 @@ #include <linux/workqueue.h> +typedef u32 (*spi_bb_txrx_word_fn)(struct spi_device *, unsigned int, u32, u8, unsigned int); + struct spi_bitbang { struct mutex lock; u8 busy; @@ -28,9 +30,8 @@ struct spi_bitbang { int (*txrx_bufs)(struct spi_device *spi, struct spi_transfer *t); /* txrx_word[SPI_MODE_*]() just looks like a shift register */ - u32 (*txrx_word[4])(struct spi_device *spi, - unsigned nsecs, - u32 word, u8 bits, unsigned flags); + spi_bb_txrx_word_fn txrx_word[SPI_MODE_X_MASK + 1]; + int (*set_line_direction)(struct spi_device *spi, bool output); }; diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 
3fcd20de6ca8..63dd8cf3c3c2 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -462,11 +462,10 @@ static __always_inline int spin_is_contended(spinlock_t *lock) */ static inline int spin_needbreak(spinlock_t *lock) { -#ifdef CONFIG_PREEMPTION + if (!preempt_model_preemptible()) + return 0; + return spin_is_contended(lock); -#else - return 0; -#endif } /* @@ -479,11 +478,10 @@ static inline int spin_needbreak(spinlock_t *lock) */ static inline int rwlock_needbreak(rwlock_t *lock) { -#ifdef CONFIG_PREEMPTION + if (!preempt_model_preemptible()) + return 0; + return rwlock_is_contended(lock); -#else - return 0; -#endif } /* diff --git a/include/linux/srcu.h b/include/linux/srcu.h index 236610e4a8fa..835bbb2d1f88 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h @@ -57,10 +57,45 @@ void cleanup_srcu_struct(struct srcu_struct *ssp); int __srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp); void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp); void synchronize_srcu(struct srcu_struct *ssp); + +#define SRCU_GET_STATE_COMPLETED 0x1 + +/** + * get_completed_synchronize_srcu - Return a pre-completed polled state cookie + * + * Returns a value that poll_state_synchronize_srcu() will always treat + * as a cookie whose grace period has already completed. + */ +static inline unsigned long get_completed_synchronize_srcu(void) +{ + return SRCU_GET_STATE_COMPLETED; +} + unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp); unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp); bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie); +// Maximum number of unsigned long values corresponding to +// not-yet-completed SRCU grace periods. +#define NUM_ACTIVE_SRCU_POLL_OLDSTATE 2 + +/** + * same_state_synchronize_srcu - Are two old-state values identical? + * @oldstate1: First old-state value. + * @oldstate2: Second old-state value. + * + * The two old-state values must have been obtained from either + * get_state_synchronize_srcu(), start_poll_synchronize_srcu(), or + * get_completed_synchronize_srcu(). Returns @true if the two values are + * identical and @false otherwise. This allows structures whose lifetimes + * are tracked by old-state values to push these values to a list header, + * allowing those structures to be slightly smaller. + */ +static inline bool same_state_synchronize_srcu(unsigned long oldstate1, unsigned long oldstate2) +{ + return oldstate1 == oldstate2; +} + #ifdef CONFIG_NEED_SRCU_NMI_SAFE int __srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp); void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx) __releases(ssp); @@ -343,6 +378,20 @@ static inline void smp_mb__after_srcu_read_unlock(void) /* __srcu_read_unlock has smp_mb() internally so nothing to do here. */ } +/** + * smp_mb__after_srcu_read_lock - ensure full ordering after srcu_read_lock + * + * Converts the preceding srcu_read_lock into a two-way memory barrier. + * + * Call this after srcu_read_lock, to guarantee that all memory operations + * that occur after smp_mb__after_srcu_read_lock will appear to happen after + * the preceding srcu_read_lock. + */ +static inline void smp_mb__after_srcu_read_lock(void) +{ + /* __srcu_read_lock has smp_mb() internally so nothing to do here. 
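The polled SRCU grace-period pieces above combine as in this hedged sketch: a writer captures a cookie when retiring an object, and a later reclaim pass frees it only once a full grace period has elapsed (the structure and field names are invented for illustration):

	#include <linux/srcu.h>

	struct lazy_obj {
		unsigned long gp_cookie;	/* illustrative field */
		struct lazy_obj *next;
	};

	static void retire_obj(struct srcu_struct *ssp, struct lazy_obj *obj)
	{
		/* starts a grace period if one isn't already in flight */
		obj->gp_cookie = start_poll_synchronize_srcu(ssp);
	}

	static bool can_free_obj(struct srcu_struct *ssp, struct lazy_obj *obj)
	{
		/* also true for cookies from get_completed_synchronize_srcu() */
		return poll_state_synchronize_srcu(ssp, obj->gp_cookie);
	}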
*/ +} + DEFINE_LOCK_GUARD_1(srcu, struct srcu_struct, _T->idx = srcu_read_lock(_T->lock), srcu_read_unlock(_T->lock, _T->idx), diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index f92c195c76ed..84e13bd5df28 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -13,7 +13,7 @@ #define __STMMAC_PLATFORM_DATA #include <linux/platform_device.h> -#include <linux/phy.h> +#include <linux/phylink.h> #define MTL_MAX_RX_QUEUES 8 #define MTL_MAX_TX_QUEUES 8 @@ -82,8 +82,8 @@ struct stmmac_priv; struct stmmac_mdio_bus_data { unsigned int phy_mask; - unsigned int has_xpcs; - unsigned int xpcs_an_inband; + unsigned int pcs_mask; + unsigned int default_an_inband; int *irqs; int probed_phy_irq; bool needs_reset; @@ -271,6 +271,8 @@ struct plat_stmmacenet_data { void (*dump_debug_regs)(void *priv); int (*pcs_init)(struct stmmac_priv *priv); void (*pcs_exit)(struct stmmac_priv *priv); + struct phylink_pcs *(*select_pcs)(struct stmmac_priv *priv, + phy_interface_t interface); void *bsp_priv; struct clk *stmmac_clk; struct clk *pclk; diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h index ea7a74ea7389..3132262a404d 100644 --- a/include/linux/stop_machine.h +++ b/include/linux/stop_machine.h @@ -3,7 +3,7 @@ #define _LINUX_STOP_MACHINE #include <linux/cpu.h> -#include <linux/cpumask.h> +#include <linux/cpumask_types.h> #include <linux/smp.h> #include <linux/list.h> diff --git a/include/linux/sunrpc/rdma_rn.h b/include/linux/sunrpc/rdma_rn.h new file mode 100644 index 000000000000..7d032ca057af --- /dev/null +++ b/include/linux/sunrpc/rdma_rn.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2024, Oracle and/or its affiliates. + */ + +#ifndef _LINUX_SUNRPC_RDMA_RN_H +#define _LINUX_SUNRPC_RDMA_RN_H + +#include <rdma/ib_verbs.h> + +/** + * rpcrdma_notification - request removal notification + */ +struct rpcrdma_notification { + void (*rn_done)(struct rpcrdma_notification *rn); + u32 rn_index; +}; + +int rpcrdma_rn_register(struct ib_device *device, + struct rpcrdma_notification *rn, + void (*done)(struct rpcrdma_notification *rn)); +void rpcrdma_rn_unregister(struct ib_device *device, + struct rpcrdma_notification *rn); +int rpcrdma_ib_client_register(void); +void rpcrdma_ib_client_unregister(void); + +#endif /* _LINUX_SUNRPC_RDMA_RN_H */ diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 23617da0e565..a7d0406b9ef5 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -85,6 +85,7 @@ struct svc_serv { char * sv_name; /* service name */ unsigned int sv_nrpools; /* number of thread pools */ + bool sv_is_pooled; /* is this a pooled service? */ struct svc_pool * sv_pools; /* array of thread pools */ int (*sv_threadfn)(void *data); @@ -398,6 +399,8 @@ struct svc_procedure { /* * Function prototypes. 
*/ +int sunrpc_set_pool_mode(const char *val); +int sunrpc_get_pool_mode(char *val, size_t size); int svc_rpcb_setup(struct svc_serv *serv, struct net *net); void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net); int svc_bind(struct svc_serv *serv, struct net *net); diff --git a/include/linux/swap.h b/include/linux/swap.h index bd450023b9a4..ba7ea95d1c57 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -354,7 +354,8 @@ static inline swp_entry_t page_swap_entry(struct page *page) } /* linux/mm/workingset.c */ -bool workingset_test_recent(void *shadow, bool file, bool *workingset); +bool workingset_test_recent(void *shadow, bool file, bool *workingset, + bool flush); void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages); void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg); void workingset_refault(struct folio *folio, void *shadow); @@ -404,10 +405,13 @@ extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, #define MEMCG_RECLAIM_MAY_SWAP (1 << 1) #define MEMCG_RECLAIM_PROACTIVE (1 << 2) +#define MIN_SWAPPINESS 0 +#define MAX_SWAPPINESS 200 extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, unsigned long nr_pages, gfp_t gfp_mask, - unsigned int reclaim_options); + unsigned int reclaim_options, + int *swappiness); extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem, gfp_t gfp_mask, bool noswap, pg_data_t *pgdat, @@ -477,7 +481,7 @@ extern int add_swap_count_continuation(swp_entry_t, gfp_t); extern void swap_shmem_alloc(swp_entry_t); extern int swap_duplicate(swp_entry_t); extern int swapcache_prepare(swp_entry_t); -extern void swap_free(swp_entry_t); +extern void swap_free_nr(swp_entry_t entry, int nr_pages); extern void swapcache_free_entries(swp_entry_t *entries, int n); extern void free_swap_and_cache_nr(swp_entry_t entry, int nr); int swap_type_of(dev_t device, sector_t offset); @@ -555,7 +559,7 @@ static inline int swapcache_prepare(swp_entry_t swp) return 0; } -static inline void swap_free(swp_entry_t swp) +static inline void swap_free_nr(swp_entry_t entry, int nr_pages) { } @@ -603,6 +607,11 @@ static inline void free_swap_and_cache(swp_entry_t entry) free_swap_and_cache_nr(entry, 1); } +static inline void swap_free(swp_entry_t entry) +{ + swap_free_nr(entry, 1); +} + #ifdef CONFIG_MEMCG static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg) { diff --git a/include/linux/swapops.h b/include/linux/swapops.h index a5c560a2f8c2..cb468e418ea1 100644 --- a/include/linux/swapops.h +++ b/include/linux/swapops.h @@ -334,7 +334,7 @@ static inline bool is_migration_entry_dirty(swp_entry_t entry) extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, unsigned long address); -extern void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte); +extern void migration_entry_wait_huge(struct vm_area_struct *vma, unsigned long addr, pte_t *pte); #else /* CONFIG_MIGRATION */ static inline swp_entry_t make_readable_migration_entry(pgoff_t offset) { @@ -359,7 +359,7 @@ static inline int is_migration_entry(swp_entry_t swp) static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, unsigned long address) { } static inline void migration_entry_wait_huge(struct vm_area_struct *vma, - pte_t *pte) { } + unsigned long addr, pte_t *pte) { } static inline int is_writable_migration_entry(swp_entry_t entry) { return 0; diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index 14bc10c1bb23..3dae0f592063 100644 --- 
a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -42,24 +42,6 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask, int (*remap)(void *tlb, unsigned long nslabs)); extern void __init swiotlb_update_mem_attributes(void); -phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys, - size_t mapping_size, - unsigned int alloc_aligned_mask, enum dma_data_direction dir, - unsigned long attrs); - -extern void swiotlb_tbl_unmap_single(struct device *hwdev, - phys_addr_t tlb_addr, - size_t mapping_size, - enum dma_data_direction dir, - unsigned long attrs); - -void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr, - size_t size, enum dma_data_direction dir); -void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr, - size_t size, enum dma_data_direction dir); -dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys, - size_t size, enum dma_data_direction dir, unsigned long attrs); - #ifdef CONFIG_SWIOTLB /** @@ -143,37 +125,27 @@ struct io_tlb_mem { #endif }; -#ifdef CONFIG_SWIOTLB_DYNAMIC - -struct io_tlb_pool *swiotlb_find_pool(struct device *dev, phys_addr_t paddr); - -#else - -static inline struct io_tlb_pool *swiotlb_find_pool(struct device *dev, - phys_addr_t paddr) -{ - return &dev->dma_io_tlb_mem->defpool; -} - -#endif +struct io_tlb_pool *__swiotlb_find_pool(struct device *dev, phys_addr_t paddr); /** - * is_swiotlb_buffer() - check if a physical address belongs to a swiotlb + * swiotlb_find_pool() - find swiotlb pool to which a physical address belongs * @dev: Device which has mapped the buffer. * @paddr: Physical address within the DMA buffer. * - * Check if @paddr points into a bounce buffer. + * Find the swiotlb pool that @paddr points into. * * Return: - * * %true if @paddr points into a bounce buffer - * * %false otherwise + * * pool address if @paddr points into a bounce buffer + * * NULL if @paddr does not point into a bounce buffer. As such, this function + * can be used to determine if @paddr denotes a swiotlb bounce buffer. */ -static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr) +static inline struct io_tlb_pool *swiotlb_find_pool(struct device *dev, + phys_addr_t paddr) { struct io_tlb_mem *mem = dev->dma_io_tlb_mem; if (!mem) - return false; + return NULL; #ifdef CONFIG_SWIOTLB_DYNAMIC /* @@ -182,16 +154,19 @@ static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr) * If a SWIOTLB address is checked on another CPU, then it was * presumably loaded by the device driver from an unspecified private * data structure. Make sure that this load is ordered before reading - * dev->dma_uses_io_tlb here and mem->pools in swiotlb_find_pool(). + * dev->dma_uses_io_tlb here and mem->pools in __swiotlb_find_pool(). * * This barrier pairs with smp_mb() in swiotlb_find_slots(). 
*/ smp_rmb(); - return READ_ONCE(dev->dma_uses_io_tlb) && - swiotlb_find_pool(dev, paddr); + if (READ_ONCE(dev->dma_uses_io_tlb)) + return __swiotlb_find_pool(dev, paddr); #else - return paddr >= mem->defpool.start && paddr < mem->defpool.end; + if (paddr >= mem->defpool.start && paddr < mem->defpool.end) + return &mem->defpool; #endif + + return NULL; } static inline bool is_swiotlb_force_bounce(struct device *dev) @@ -219,9 +194,10 @@ static inline void swiotlb_dev_init(struct device *dev) { } -static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr) +static inline struct io_tlb_pool *swiotlb_find_pool(struct device *dev, + phys_addr_t paddr) { - return false; + return NULL; } static inline bool is_swiotlb_force_bounce(struct device *dev) { @@ -260,6 +236,49 @@ static inline phys_addr_t default_swiotlb_limit(void) } #endif /* CONFIG_SWIOTLB */ +phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys, + size_t mapping_size, unsigned int alloc_aligned_mask, + enum dma_data_direction dir, unsigned long attrs); +dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys, + size_t size, enum dma_data_direction dir, unsigned long attrs); + +void __swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr, + size_t mapping_size, enum dma_data_direction dir, + unsigned long attrs, struct io_tlb_pool *pool); +static inline void swiotlb_tbl_unmap_single(struct device *dev, + phys_addr_t addr, size_t size, enum dma_data_direction dir, + unsigned long attrs) +{ + struct io_tlb_pool *pool = swiotlb_find_pool(dev, addr); + + if (unlikely(pool)) + __swiotlb_tbl_unmap_single(dev, addr, size, dir, attrs, pool); +} + +void __swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr, + size_t size, enum dma_data_direction dir, + struct io_tlb_pool *pool); +static inline void swiotlb_sync_single_for_device(struct device *dev, + phys_addr_t addr, size_t size, enum dma_data_direction dir) +{ + struct io_tlb_pool *pool = swiotlb_find_pool(dev, addr); + + if (unlikely(pool)) + __swiotlb_sync_single_for_device(dev, addr, size, dir, pool); +} + +void __swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr, + size_t size, enum dma_data_direction dir, + struct io_tlb_pool *pool); +static inline void swiotlb_sync_single_for_cpu(struct device *dev, + phys_addr_t addr, size_t size, enum dma_data_direction dir) +{ + struct io_tlb_pool *pool = swiotlb_find_pool(dev, addr); + + if (unlikely(pool)) + __swiotlb_sync_single_for_cpu(dev, addr, size, dir, pool); +} + extern void swiotlb_print_info(void); #ifdef CONFIG_DMA_RESTRICTED_POOL diff --git a/include/linux/switchtec.h b/include/linux/switchtec.h index 8d8fac1626bd..cdb58d61c152 100644 --- a/include/linux/switchtec.h +++ b/include/linux/switchtec.h @@ -521,6 +521,6 @@ static inline struct switchtec_dev *to_stdev(struct device *dev) return container_of(dev, struct switchtec_dev, dev); } -extern struct class *switchtec_class; +extern const struct class switchtec_class; #endif diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 63424af87bba..4bcf6754738d 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -859,9 +859,15 @@ asmlinkage long sys_prlimit64(pid_t pid, unsigned int resource, const struct rlimit64 __user *new_rlim, struct rlimit64 __user *old_rlim); asmlinkage long sys_fanotify_init(unsigned int flags, unsigned int event_f_flags); +#if defined(CONFIG_ARCH_SPLIT_ARG64) +asmlinkage long sys_fanotify_mark(int fanotify_fd, unsigned int flags, + 
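The boolean is_swiotlb_buffer() test is gone: swiotlb_find_pool() now doubles as the predicate and returns the pool, which the wrappers above pass straight to the __swiotlb_* slow paths so the lookup happens only once. A sketch of the same pattern in a caller (the wrapper function is illustrative):

	#include <linux/swiotlb.h>

	static void my_sync_for_cpu(struct device *dev, phys_addr_t paddr,
				    size_t size)
	{
		/* one lookup: NULL means "not a bounce buffer",
		 * non-NULL is the pool to pass to the slow path */
		struct io_tlb_pool *pool = swiotlb_find_pool(dev, paddr);

		if (pool)
			__swiotlb_sync_single_for_cpu(dev, paddr, size,
						      DMA_FROM_DEVICE, pool);
	}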
unsigned int mask_1, unsigned int mask_2, + int dfd, const char __user * pathname); +#else asmlinkage long sys_fanotify_mark(int fanotify_fd, unsigned int flags, u64 mask, int fd, const char __user *pathname); +#endif asmlinkage long sys_name_to_handle_at(int dfd, const char __user *name, struct file_handle __user *handle, int __user *mnt_id, int flag); @@ -973,6 +979,8 @@ asmlinkage long sys_lsm_list_modules(u64 __user *ids, u32 __user *size, u32 flag /* x86 */ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int on); +asmlinkage long sys_uretprobe(void); + /* pciconfig: alpha, arm, arm64, ia64, sparc */ asmlinkage long sys_pciconfig_read(unsigned long bus, unsigned long dfn, unsigned long off, unsigned long len, diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index 09db2f2e6488..54fbec062772 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -237,7 +237,7 @@ extern struct ctl_table_header *register_sysctl_mount_point(const char *path); void do_sysctl_args(void); bool sysctl_is_alias(char *param); -int do_proc_douintvec(struct ctl_table *table, int write, +int do_proc_douintvec(const struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos, int (*conv)(unsigned long *lvalp, unsigned int *valp, diff --git a/include/linux/t10-pi.h b/include/linux/t10-pi.h index 1773610010eb..2c59fe3efcd4 100644 --- a/include/linux/t10-pi.h +++ b/include/linux/t10-pi.h @@ -39,8 +39,11 @@ struct t10_pi_tuple { static inline u32 t10_pi_ref_tag(struct request *rq) { - unsigned int shift = rq->q->limits.integrity.interval_exp; + unsigned int shift = ilog2(queue_logical_block_size(rq->q)); + if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) && + rq->q->limits.integrity.interval_exp) + shift = rq->q->limits.integrity.interval_exp; return blk_rq_pos(rq) >> (shift - SECTOR_SHIFT) & 0xffffffff; } @@ -61,8 +64,11 @@ static inline u64 lower_48_bits(u64 n) static inline u64 ext_pi_ref_tag(struct request *rq) { - unsigned int shift = rq->q->limits.integrity.interval_exp; + unsigned int shift = ilog2(queue_logical_block_size(rq->q)); + if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) && + rq->q->limits.integrity.interval_exp) + shift = rq->q->limits.integrity.interval_exp; return lower_48_bits(blk_rq_pos(rq) >> (shift - SECTOR_SHIFT)); } diff --git a/include/linux/task_work.h b/include/linux/task_work.h index 795ef5a68429..cf5e7e891a77 100644 --- a/include/linux/task_work.h +++ b/include/linux/task_work.h @@ -18,6 +18,7 @@ enum task_work_notify_mode { TWA_RESUME, TWA_SIGNAL, TWA_SIGNAL_NO_IPI, + TWA_NMI_CURRENT, }; static inline bool task_work_pending(struct task_struct *task) @@ -30,7 +31,8 @@ int task_work_add(struct task_struct *task, struct callback_head *twork, struct callback_head *task_work_cancel_match(struct task_struct *task, bool (*match)(struct callback_head *, void *data), void *data); -struct callback_head *task_work_cancel(struct task_struct *, task_work_func_t); +struct callback_head *task_work_cancel_func(struct task_struct *, task_work_func_t); +bool task_work_cancel(struct task_struct *task, struct callback_head *cb); void task_work_run(void); static inline void exit_task_work(struct task_struct *task) diff --git a/include/linux/thermal.h b/include/linux/thermal.h index f1155c0439c4..25fbf960b474 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -79,6 +79,9 @@ struct thermal_trip { #define THERMAL_TRIP_FLAG_RW (THERMAL_TRIP_FLAG_RW_TEMP | \ THERMAL_TRIP_FLAG_RW_HYST) +#define THERMAL_TRIP_PRIV_TO_INT(_val_) (uintptr_t)(_val_) +#define 
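The new fallback in t10_pi_ref_tag() and ext_pi_ref_tag() derives the shift from the logical block size when no integrity interval is configured; a worked example of the arithmetic:

	/*
	 * With 4096-byte logical blocks and no integrity interval, shift =
	 * ilog2(4096) = 12, so the reference tag advances once per 4 KiB,
	 * i.e. once every 8 512-byte sectors:
	 *
	 *	ref = blk_rq_pos(rq) >> (12 - SECTOR_SHIFT) & 0xffffffff;
	 *
	 * For 512-byte blocks the shift is 9 and the tag is simply the
	 * sector number truncated to 32 bits.
	 */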
THERMAL_INT_TO_TRIP_PRIV(_val_) (void *)(uintptr_t)(_val_) + struct thermal_zone_device; struct thermal_zone_device_ops { @@ -90,7 +93,8 @@ struct thermal_zone_device_ops { int (*set_trips) (struct thermal_zone_device *, int, int); int (*change_mode) (struct thermal_zone_device *, enum thermal_device_mode); - int (*set_trip_temp) (struct thermal_zone_device *, int, int); + int (*set_trip_temp) (struct thermal_zone_device *, + const struct thermal_trip *, int); int (*get_crit_temp) (struct thermal_zone_device *, int *); int (*set_emul_temp) (struct thermal_zone_device *, int); int (*get_trend) (struct thermal_zone_device *, @@ -198,8 +202,6 @@ static inline void devm_thermal_of_zone_unregister(struct device *dev, } #endif -int __thermal_zone_get_trip(struct thermal_zone_device *tz, int trip_id, - struct thermal_trip *trip); int thermal_zone_get_trip(struct thermal_zone_device *tz, int trip_id, struct thermal_trip *trip); int for_each_thermal_trip(struct thermal_zone_device *tz, @@ -221,7 +223,8 @@ struct thermal_zone_device *thermal_zone_device_register_with_trips( int num_trips, void *devdata, const struct thermal_zone_device_ops *ops, const struct thermal_zone_params *tzp, - int passive_delay, int polling_delay); + unsigned int passive_delay, + unsigned int polling_delay); struct thermal_zone_device *thermal_tripless_zone_device_register( const char *type, @@ -261,7 +264,7 @@ thermal_of_cooling_device_register(struct device_node *np, const char *, void *, struct thermal_cooling_device * devm_thermal_of_cooling_device_register(struct device *dev, struct device_node *np, - char *type, void *devdata, + const char *type, void *devdata, const struct thermal_cooling_device_ops *ops); void thermal_cooling_device_update(struct thermal_cooling_device *); void thermal_cooling_device_unregister(struct thermal_cooling_device *); @@ -269,6 +272,9 @@ struct thermal_zone_device *thermal_zone_get_zone_by_name(const char *name); int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp); int thermal_zone_get_slope(struct thermal_zone_device *tz); int thermal_zone_get_offset(struct thermal_zone_device *tz); +bool thermal_trip_is_bound_to_cdev(struct thermal_zone_device *tz, + const struct thermal_trip *trip, + struct thermal_cooling_device *cdev); int thermal_zone_device_enable(struct thermal_zone_device *tz); int thermal_zone_device_disable(struct thermal_zone_device *tz); @@ -305,7 +311,7 @@ thermal_of_cooling_device_register(struct device_node *np, static inline struct thermal_cooling_device * devm_thermal_of_cooling_device_register(struct device *dev, struct device_node *np, - char *type, void *devdata, + const char *type, void *devdata, const struct thermal_cooling_device_ops *ops) { return ERR_PTR(-ENODEV); diff --git a/include/linux/tick.h b/include/linux/tick.h index 4924a33700b7..72744638c5b0 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -139,7 +139,6 @@ extern void tick_nohz_irq_exit(void); extern bool tick_nohz_idle_got_tick(void); extern ktime_t tick_nohz_get_next_hrtimer(void); extern ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next); -extern unsigned long tick_nohz_get_idle_calls(void); extern unsigned long tick_nohz_get_idle_calls_cpu(int cpu); extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time); extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time); diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h index 0ea7823b7f31..fc12a9ba2c88 100644 --- a/include/linux/timekeeping.h +++ b/include/linux/timekeeping.h @@ 
-310,12 +310,18 @@ struct system_device_crosststamp { * timekeeping code to verify comparability of two cycle values. * The default ID, CSID_GENERIC, does not identify a specific * clocksource. + * @use_nsecs: @cycles is in nanoseconds. */ struct system_counterval_t { u64 cycles; enum clocksource_ids cs_id; + bool use_nsecs; }; +extern bool ktime_real_to_base_clock(ktime_t treal, + enum clocksource_ids base_id, u64 *cycles); +extern bool timekeeping_clocksource_has_base(enum clocksource_ids id); + /* * Get cross timestamp between system clock and device clock */ diff --git a/include/linux/torture.h b/include/linux/torture.h index 1541454da03e..c2e979f82f8d 100644 --- a/include/linux/torture.h +++ b/include/linux/torture.h @@ -14,7 +14,7 @@ #include <linux/cache.h> #include <linux/spinlock.h> #include <linux/threads.h> -#include <linux/cpumask.h> +#include <linux/cpumask_types.h> #include <linux/seqlock.h> #include <linux/lockdep.h> #include <linux/completion.h> diff --git a/include/linux/tpm.h b/include/linux/tpm.h index 21a67dc9efe8..e93ee8d936a9 100644 --- a/include/linux/tpm.h +++ b/include/linux/tpm.h @@ -490,9 +490,16 @@ static inline void tpm_buf_append_empty_auth(struct tpm_buf *buf, u32 handle) { } #endif + +static inline struct tpm2_auth *tpm2_chip_auth(struct tpm_chip *chip) +{ #ifdef CONFIG_TCG_TPM2_HMAC + return chip->auth; +#else + return NULL; +#endif +} -int tpm2_start_auth_session(struct tpm_chip *chip); void tpm_buf_append_name(struct tpm_chip *chip, struct tpm_buf *buf, u32 handle, u8 *name); void tpm_buf_append_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf, @@ -504,9 +511,27 @@ static inline void tpm_buf_append_hmac_session_opt(struct tpm_chip *chip, u8 *passphrase, int passphraselen) { - tpm_buf_append_hmac_session(chip, buf, attributes, passphrase, - passphraselen); + struct tpm_header *head; + int offset; + + if (tpm2_chip_auth(chip)) { + tpm_buf_append_hmac_session(chip, buf, attributes, passphrase, passphraselen); + } else { + offset = buf->handles * 4 + TPM_HEADER_SIZE; + head = (struct tpm_header *)buf->data; + + /* + * If the only sessions are optional, the command tag must change to + * TPM2_ST_NO_SESSIONS. 
+ */ + if (tpm_buf_length(buf) == offset) + head->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS); + } } + +#ifdef CONFIG_TCG_TPM2_HMAC + +int tpm2_start_auth_session(struct tpm_chip *chip); void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf); int tpm_buf_check_hmac_response(struct tpm_chip *chip, struct tpm_buf *buf, int rc); @@ -521,56 +546,6 @@ static inline int tpm2_start_auth_session(struct tpm_chip *chip) static inline void tpm2_end_auth_session(struct tpm_chip *chip) { } -static inline void tpm_buf_append_name(struct tpm_chip *chip, - struct tpm_buf *buf, - u32 handle, u8 *name) -{ - tpm_buf_append_u32(buf, handle); - /* count the number of handles in the upper bits of flags */ - buf->handles++; -} -static inline void tpm_buf_append_hmac_session(struct tpm_chip *chip, - struct tpm_buf *buf, - u8 attributes, u8 *passphrase, - int passphraselen) -{ - /* offset tells us where the sessions area begins */ - int offset = buf->handles * 4 + TPM_HEADER_SIZE; - u32 len = 9 + passphraselen; - - if (tpm_buf_length(buf) != offset) { - /* not the first session so update the existing length */ - len += get_unaligned_be32(&buf->data[offset]); - put_unaligned_be32(len, &buf->data[offset]); - } else { - tpm_buf_append_u32(buf, len); - } - /* auth handle */ - tpm_buf_append_u32(buf, TPM2_RS_PW); - /* nonce */ - tpm_buf_append_u16(buf, 0); - /* attributes */ - tpm_buf_append_u8(buf, 0); - /* passphrase */ - tpm_buf_append_u16(buf, passphraselen); - tpm_buf_append(buf, passphrase, passphraselen); -} -static inline void tpm_buf_append_hmac_session_opt(struct tpm_chip *chip, - struct tpm_buf *buf, - u8 attributes, - u8 *passphrase, - int passphraselen) -{ - int offset = buf->handles * 4 + TPM_HEADER_SIZE; - struct tpm_header *head = (struct tpm_header *) buf->data; - - /* - * if the only sessions are optional, the command tag - * must change to TPM2_ST_NO_SESSIONS - */ - if (tpm_buf_length(buf) == offset) - head->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS); -} static inline void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf) { diff --git a/include/linux/trace_recursion.h b/include/linux/trace_recursion.h index 24ea8ac049b4..ae04054a1be3 100644 --- a/include/linux/trace_recursion.h +++ b/include/linux/trace_recursion.h @@ -44,35 +44,6 @@ enum { */ TRACE_IRQ_BIT, - /* Set if the function is in the set_graph_function file */ - TRACE_GRAPH_BIT, - - /* - * In the very unlikely case that an interrupt came in - * at a start of graph tracing, and we want to trace - * the function in that interrupt, the depth can be greater - * than zero, because of the preempted start of a previous - * trace. In an even more unlikely case, depth could be 2 - * if a softirq interrupted the start of graph tracing, - * followed by an interrupt preempting a start of graph - * tracing in the softirq, and depth can even be 3 - * if an NMI came in at the start of an interrupt function - * that preempted a softirq start of a function that - * preempted normal context!!!! Luckily, it can't be - * greater than 3, so the next two bits are a mask - * of what the depth is when we set TRACE_GRAPH_BIT - */ - - TRACE_GRAPH_DEPTH_START_BIT, - TRACE_GRAPH_DEPTH_END_BIT, - - /* - * To implement set_graph_notrace, if this bit is set, we ignore - * function graph tracing of called functions, until the return - * function is called to clear it. - */ - TRACE_GRAPH_NOTRACE_BIT, - /* Used to prevent recursion recording from recursing. 
*/ TRACE_RECORD_RECURSION_BIT, }; @@ -81,16 +52,6 @@ enum { #define trace_recursion_clear(bit) do { (current)->trace_recursion &= ~(1<<(bit)); } while (0) #define trace_recursion_test(bit) ((current)->trace_recursion & (1<<(bit))) -#define trace_recursion_depth() \ - (((current)->trace_recursion >> TRACE_GRAPH_DEPTH_START_BIT) & 3) -#define trace_recursion_set_depth(depth) \ - do { \ - current->trace_recursion &= \ - ~(3 << TRACE_GRAPH_DEPTH_START_BIT); \ - current->trace_recursion |= \ - ((depth) & 3) << TRACE_GRAPH_DEPTH_START_BIT; \ - } while (0) - #define TRACE_CONTEXT_BITS 4 #define TRACE_FTRACE_START TRACE_FTRACE_BIT diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 689b6d71590e..6be396bb4297 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -16,7 +16,6 @@ #include <linux/srcu.h> #include <linux/errno.h> #include <linux/types.h> -#include <linux/cpumask.h> #include <linux/rcupdate.h> #include <linux/tracepoint-defs.h> #include <linux/static_call.h> diff --git a/include/linux/tsm.h b/include/linux/tsm.h index de8324a2223c..11b0c525be30 100644 --- a/include/linux/tsm.h +++ b/include/linux/tsm.h @@ -4,6 +4,7 @@ #include <linux/sizes.h> #include <linux/types.h> +#include <linux/uuid.h> #define TSM_INBLOB_MAX 64 #define TSM_OUTBLOB_MAX SZ_32K @@ -19,11 +20,17 @@ * @privlevel: optional privilege level to associate with @outblob * @inblob_len: sizeof @inblob * @inblob: arbitrary input data + * @service_provider: optional name of where to obtain the tsm report blob + * @service_guid: optional service-provider service guid to attest + * @service_manifest_version: optional service-provider service manifest version requested */ struct tsm_desc { unsigned int privlevel; size_t inblob_len; u8 inblob[TSM_INBLOB_MAX]; + char *service_provider; + guid_t service_guid; + unsigned int service_manifest_version; }; /** @@ -33,6 +40,8 @@ struct tsm_desc { * @outblob: generated evidence to provider to the attestation agent * @auxblob_len: sizeof(@auxblob) * @auxblob: (optional) auxiliary data to the report (e.g. 
certificate data) + * @manifestblob_len: sizeof(@manifestblob) + * @manifestblob: (optional) manifest data associated with the report */ struct tsm_report { struct tsm_desc desc; @@ -40,6 +49,42 @@ struct tsm_report { u8 *outblob; size_t auxblob_len; u8 *auxblob; + size_t manifestblob_len; + u8 *manifestblob; +}; + +/** + * enum tsm_attr_index - index used to reference report attributes + * @TSM_REPORT_GENERATION: index of the report generation number attribute + * @TSM_REPORT_PROVIDER: index of the provider name attribute + * @TSM_REPORT_PRIVLEVEL: index of the desired privilege level attribute + * @TSM_REPORT_PRIVLEVEL_FLOOR: index of the minimum allowed privilege level attribute + * @TSM_REPORT_SERVICE_PROVIDER: index of the service provider identifier attribute + * @TSM_REPORT_SERVICE_GUID: index of the service GUID attribute + * @TSM_REPORT_SERVICE_MANIFEST_VER: index of the service manifest version attribute + */ +enum tsm_attr_index { + TSM_REPORT_GENERATION, + TSM_REPORT_PROVIDER, + TSM_REPORT_PRIVLEVEL, + TSM_REPORT_PRIVLEVEL_FLOOR, + TSM_REPORT_SERVICE_PROVIDER, + TSM_REPORT_SERVICE_GUID, + TSM_REPORT_SERVICE_MANIFEST_VER, +}; + +/** + * enum tsm_bin_attr_index - index used to reference binary report attributes + * @TSM_REPORT_INBLOB: index of the binary report input attribute + * @TSM_REPORT_OUTBLOB: index of the binary report output attribute + * @TSM_REPORT_AUXBLOB: index of the binary auxiliary data attribute + * @TSM_REPORT_MANIFESTBLOB: index of the binary manifest data attribute + */ +enum tsm_bin_attr_index { + TSM_REPORT_INBLOB, + TSM_REPORT_OUTBLOB, + TSM_REPORT_AUXBLOB, + TSM_REPORT_MANIFESTBLOB, }; /** @@ -48,22 +93,20 @@ struct tsm_report { * @privlevel_floor: convey base privlevel for nested scenarios * @report_new: Populate @report with the report blob and auxblob * (optional), return 0 on successful population, or -errno otherwise + * @report_attr_visible: show or hide a report attribute entry + * @report_bin_attr_visible: show or hide a report binary attribute entry * * Implementation specific ops, only one is expected to be registered at * a time i.e. only one of "sev-guest", "tdx-guest", etc. */ struct tsm_ops { const char *name; - const unsigned int privlevel_floor; + unsigned int privlevel_floor; int (*report_new)(struct tsm_report *report, void *data); + bool (*report_attr_visible)(int n); + bool (*report_bin_attr_visible)(int n); }; -extern const struct config_item_type tsm_report_default_type; - -/* publish @privlevel, @privlevel_floor, and @auxblob attributes */ -extern const struct config_item_type tsm_report_extra_type; - -int tsm_register(const struct tsm_ops *ops, void *priv, - const struct config_item_type *type); +int tsm_register(const struct tsm_ops *ops, void *priv); int tsm_unregister(const struct tsm_ops *ops); #endif /* __TSM_H */
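
Not part of the patch: a minimal sketch of how a provider adapts to the reworked registration in the tsm.h hunk above. The configfs item-type argument to tsm_register() is gone, and per-attribute visibility moves into the new ->report_attr_visible() and ->report_bin_attr_visible() callbacks. All foo_* names and stub bodies below are hypothetical.

#include <linux/tsm.h>

/* Hypothetical stub: a real provider would fill report->outblob here. */
static int foo_report_new(struct tsm_report *report, void *data)
{
	return -ENXIO;
}

/* Expose only the attributes this provider actually implements. */
static bool foo_report_attr_visible(int n)
{
	switch (n) {
	case TSM_REPORT_GENERATION:
	case TSM_REPORT_PROVIDER:
	case TSM_REPORT_PRIVLEVEL:
		return true;
	}
	return false;
}

static bool foo_report_bin_attr_visible(int n)
{
	return n == TSM_REPORT_INBLOB || n == TSM_REPORT_OUTBLOB;
}

static const struct tsm_ops foo_ops = {
	.name = "foo-guest",
	.report_new = foo_report_new,
	.report_attr_visible = foo_report_attr_visible,
	.report_bin_attr_visible = foo_report_bin_attr_visible,
};

static int foo_register(void)
{
	/* was: tsm_register(&foo_ops, NULL, &tsm_report_default_type); */
	return tsm_register(&foo_ops, NULL);
}
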
diff --git a/include/linux/turris-omnia-mcu-interface.h b/include/linux/turris-omnia-mcu-interface.h new file mode 100644 index 000000000000..2da8cbeb158a --- /dev/null +++ b/include/linux/turris-omnia-mcu-interface.h @@ -0,0 +1,249 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * CZ.NIC's Turris Omnia MCU I2C interface commands definitions + * + * 2024 by Marek Behún <kabel@kernel.org> + */ + +#ifndef __TURRIS_OMNIA_MCU_INTERFACE_H +#define __TURRIS_OMNIA_MCU_INTERFACE_H + +#include <linux/bitfield.h> +#include <linux/bits.h> + +enum omnia_commands_e { + OMNIA_CMD_GET_STATUS_WORD = 0x01, /* slave sends status word back */ + OMNIA_CMD_GENERAL_CONTROL = 0x02, + OMNIA_CMD_LED_MODE = 0x03, /* default/user */ + OMNIA_CMD_LED_STATE = 0x04, /* LED on/off */ + OMNIA_CMD_LED_COLOR = 0x05, /* LED number + RED + GREEN + BLUE */ + OMNIA_CMD_USER_VOLTAGE = 0x06, + OMNIA_CMD_SET_BRIGHTNESS = 0x07, + OMNIA_CMD_GET_BRIGHTNESS = 0x08, + OMNIA_CMD_GET_RESET = 0x09, + OMNIA_CMD_GET_FW_VERSION_APP = 0x0A, /* 20B git hash number */ + OMNIA_CMD_SET_WATCHDOG_STATE = 0x0B, /* 0 - disable + * 1 - enable / ping + * after boot watchdog is started + * with 2 minutes timeout + */ + + /* OMNIA_CMD_WATCHDOG_STATUS = 0x0C, not implemented anymore */ + + OMNIA_CMD_GET_WATCHDOG_STATE = 0x0D, + OMNIA_CMD_GET_FW_VERSION_BOOT = 0x0E, /* 20B Git hash number */ + OMNIA_CMD_GET_FW_CHECKSUM = 0x0F, /* 4B length, 4B checksum */ + + /* available if FEATURES_SUPPORTED bit set in status word */ + OMNIA_CMD_GET_FEATURES = 0x10, + + /* available if EXT_CMD bit set in features */ + OMNIA_CMD_GET_EXT_STATUS_DWORD = 0x11, + OMNIA_CMD_EXT_CONTROL = 0x12, + OMNIA_CMD_GET_EXT_CONTROL_STATUS = 0x13, + + /* available if NEW_INT_API bit set in features */ + OMNIA_CMD_GET_INT_AND_CLEAR = 0x14, + OMNIA_CMD_GET_INT_MASK = 0x15, + OMNIA_CMD_SET_INT_MASK = 0x16, + + /* available if FLASHING bit set in features */ + OMNIA_CMD_FLASH = 0x19, + + /* available if WDT_PING bit set in features */ + OMNIA_CMD_SET_WDT_TIMEOUT = 0x20, + OMNIA_CMD_GET_WDT_TIMELEFT = 0x21, + + /* available if POWEROFF_WAKEUP bit set in features */ + OMNIA_CMD_SET_WAKEUP = 0x22, + OMNIA_CMD_GET_UPTIME_AND_WAKEUP = 0x23, + OMNIA_CMD_POWER_OFF = 0x24, + + /* available if USB_OVC_PROT_SETTING bit set in features */ + OMNIA_CMD_SET_USB_OVC_PROT = 0x25, + OMNIA_CMD_GET_USB_OVC_PROT = 0x26, + + /* available if TRNG bit set in features */ + OMNIA_CMD_TRNG_COLLECT_ENTROPY = 0x28, + + /* available if CRYPTO bit set in features */ + OMNIA_CMD_CRYPTO_GET_PUBLIC_KEY = 0x29, + OMNIA_CMD_CRYPTO_SIGN_MESSAGE = 0x2A, + OMNIA_CMD_CRYPTO_COLLECT_SIGNATURE = 0x2B, + + /* available if BOARD_INFO bit set in features */ + OMNIA_CMD_BOARD_INFO_GET = 0x2C, + OMNIA_CMD_BOARD_INFO_BURN = 0x2D, + + /* available only at address 0x2b (LED-controller) */ + /* available only if LED_GAMMA_CORRECTION bit set in features */ + 
OMNIA_CMD_SET_GAMMA_CORRECTION = 0x30, + OMNIA_CMD_GET_GAMMA_CORRECTION = 0x31, + + /* available only at address 0x2b (LED-controller) */ + /* available only if PER_LED_CORRECTION bit set in features */ + /* available only if FROM_BIT_16_INVALID bit NOT set in features */ + OMNIA_CMD_SET_LED_CORRECTIONS = 0x32, + OMNIA_CMD_GET_LED_CORRECTIONS = 0x33, +}; + +enum omnia_flashing_commands_e { + OMNIA_FLASH_CMD_UNLOCK = 0x01, + OMNIA_FLASH_CMD_SIZE_AND_CSUM = 0x02, + OMNIA_FLASH_CMD_PROGRAM = 0x03, + OMNIA_FLASH_CMD_RESET = 0x04, +}; + +enum omnia_sts_word_e { + OMNIA_STS_MCU_TYPE_MASK = GENMASK(1, 0), + OMNIA_STS_MCU_TYPE_STM32 = FIELD_PREP_CONST(OMNIA_STS_MCU_TYPE_MASK, 0), + OMNIA_STS_MCU_TYPE_GD32 = FIELD_PREP_CONST(OMNIA_STS_MCU_TYPE_MASK, 1), + OMNIA_STS_MCU_TYPE_MKL = FIELD_PREP_CONST(OMNIA_STS_MCU_TYPE_MASK, 2), + OMNIA_STS_FEATURES_SUPPORTED = BIT(2), + OMNIA_STS_USER_REGULATOR_NOT_SUPPORTED = BIT(3), + OMNIA_STS_CARD_DET = BIT(4), + OMNIA_STS_MSATA_IND = BIT(5), + OMNIA_STS_USB30_OVC = BIT(6), + OMNIA_STS_USB31_OVC = BIT(7), + OMNIA_STS_USB30_PWRON = BIT(8), + OMNIA_STS_USB31_PWRON = BIT(9), + OMNIA_STS_ENABLE_4V5 = BIT(10), + OMNIA_STS_BUTTON_MODE = BIT(11), + OMNIA_STS_BUTTON_PRESSED = BIT(12), + OMNIA_STS_BUTTON_COUNTER_MASK = GENMASK(15, 13), +}; + +enum omnia_ctl_byte_e { + OMNIA_CTL_LIGHT_RST = BIT(0), + OMNIA_CTL_HARD_RST = BIT(1), + /* BIT(2) is currently reserved */ + OMNIA_CTL_USB30_PWRON = BIT(3), + OMNIA_CTL_USB31_PWRON = BIT(4), + OMNIA_CTL_ENABLE_4V5 = BIT(5), + OMNIA_CTL_BUTTON_MODE = BIT(6), + OMNIA_CTL_BOOTLOADER = BIT(7), +}; + +enum omnia_features_e { + OMNIA_FEAT_PERIPH_MCU = BIT(0), + OMNIA_FEAT_EXT_CMDS = BIT(1), + OMNIA_FEAT_WDT_PING = BIT(2), + OMNIA_FEAT_LED_STATE_EXT_MASK = GENMASK(4, 3), + OMNIA_FEAT_LED_STATE_EXT = FIELD_PREP_CONST(OMNIA_FEAT_LED_STATE_EXT_MASK, 1), + OMNIA_FEAT_LED_STATE_EXT_V32 = FIELD_PREP_CONST(OMNIA_FEAT_LED_STATE_EXT_MASK, 2), + OMNIA_FEAT_LED_GAMMA_CORRECTION = BIT(5), + OMNIA_FEAT_NEW_INT_API = BIT(6), + OMNIA_FEAT_BOOTLOADER = BIT(7), + OMNIA_FEAT_FLASHING = BIT(8), + OMNIA_FEAT_NEW_MESSAGE_API = BIT(9), + OMNIA_FEAT_BRIGHTNESS_INT = BIT(10), + OMNIA_FEAT_POWEROFF_WAKEUP = BIT(11), + OMNIA_FEAT_CAN_OLD_MESSAGE_API = BIT(12), + OMNIA_FEAT_TRNG = BIT(13), + OMNIA_FEAT_CRYPTO = BIT(14), + OMNIA_FEAT_BOARD_INFO = BIT(15), + + /* + * Originally the features command replied with only 16 bits. If more were + * read, either the I2C transaction failed or 0xff bytes were sent. + * Therefore to consider bits 16 - 31 valid, one bit (20) was reserved + * to be zero. 
+ */ + + /* Bits 16 - 19 correspond to bits 0 - 3 of status word */ + OMNIA_FEAT_MCU_TYPE_MASK = GENMASK(17, 16), + OMNIA_FEAT_MCU_TYPE_STM32 = FIELD_PREP_CONST(OMNIA_FEAT_MCU_TYPE_MASK, 0), + OMNIA_FEAT_MCU_TYPE_GD32 = FIELD_PREP_CONST(OMNIA_FEAT_MCU_TYPE_MASK, 1), + OMNIA_FEAT_MCU_TYPE_MKL = FIELD_PREP_CONST(OMNIA_FEAT_MCU_TYPE_MASK, 2), + OMNIA_FEAT_FEATURES_SUPPORTED = BIT(18), + OMNIA_FEAT_USER_REGULATOR_NOT_SUPPORTED = BIT(19), + + /* must not be set */ + OMNIA_FEAT_FROM_BIT_16_INVALID = BIT(20), + + OMNIA_FEAT_PER_LED_CORRECTION = BIT(21), + OMNIA_FEAT_USB_OVC_PROT_SETTING = BIT(22), +}; + +enum omnia_ext_sts_dword_e { + OMNIA_EXT_STS_SFP_nDET = BIT(0), + OMNIA_EXT_STS_LED_STATES_MASK = GENMASK(31, 12), + OMNIA_EXT_STS_WLAN0_MSATA_LED = BIT(12), + OMNIA_EXT_STS_WLAN1_LED = BIT(13), + OMNIA_EXT_STS_WLAN2_LED = BIT(14), + OMNIA_EXT_STS_WPAN0_LED = BIT(15), + OMNIA_EXT_STS_WPAN1_LED = BIT(16), + OMNIA_EXT_STS_WPAN2_LED = BIT(17), + OMNIA_EXT_STS_WAN_LED0 = BIT(18), + OMNIA_EXT_STS_WAN_LED1 = BIT(19), + OMNIA_EXT_STS_LAN0_LED0 = BIT(20), + OMNIA_EXT_STS_LAN0_LED1 = BIT(21), + OMNIA_EXT_STS_LAN1_LED0 = BIT(22), + OMNIA_EXT_STS_LAN1_LED1 = BIT(23), + OMNIA_EXT_STS_LAN2_LED0 = BIT(24), + OMNIA_EXT_STS_LAN2_LED1 = BIT(25), + OMNIA_EXT_STS_LAN3_LED0 = BIT(26), + OMNIA_EXT_STS_LAN3_LED1 = BIT(27), + OMNIA_EXT_STS_LAN4_LED0 = BIT(28), + OMNIA_EXT_STS_LAN4_LED1 = BIT(29), + OMNIA_EXT_STS_LAN5_LED0 = BIT(30), + OMNIA_EXT_STS_LAN5_LED1 = BIT(31), +}; + +enum omnia_ext_ctl_e { + OMNIA_EXT_CTL_nRES_MMC = BIT(0), + OMNIA_EXT_CTL_nRES_LAN = BIT(1), + OMNIA_EXT_CTL_nRES_PHY = BIT(2), + OMNIA_EXT_CTL_nPERST0 = BIT(3), + OMNIA_EXT_CTL_nPERST1 = BIT(4), + OMNIA_EXT_CTL_nPERST2 = BIT(5), + OMNIA_EXT_CTL_PHY_SFP = BIT(6), + OMNIA_EXT_CTL_PHY_SFP_AUTO = BIT(7), + OMNIA_EXT_CTL_nVHV_CTRL = BIT(8), +}; + +enum omnia_int_e { + OMNIA_INT_CARD_DET = BIT(0), + OMNIA_INT_MSATA_IND = BIT(1), + OMNIA_INT_USB30_OVC = BIT(2), + OMNIA_INT_USB31_OVC = BIT(3), + OMNIA_INT_BUTTON_PRESSED = BIT(4), + OMNIA_INT_SFP_nDET = BIT(5), + OMNIA_INT_BRIGHTNESS_CHANGED = BIT(6), + OMNIA_INT_TRNG = BIT(7), + OMNIA_INT_MESSAGE_SIGNED = BIT(8), + + OMNIA_INT_LED_STATES_MASK = GENMASK(31, 12), + OMNIA_INT_WLAN0_MSATA_LED = BIT(12), + OMNIA_INT_WLAN1_LED = BIT(13), + OMNIA_INT_WLAN2_LED = BIT(14), + OMNIA_INT_WPAN0_LED = BIT(15), + OMNIA_INT_WPAN1_LED = BIT(16), + OMNIA_INT_WPAN2_LED = BIT(17), + OMNIA_INT_WAN_LED0 = BIT(18), + OMNIA_INT_WAN_LED1 = BIT(19), + OMNIA_INT_LAN0_LED0 = BIT(20), + OMNIA_INT_LAN0_LED1 = BIT(21), + OMNIA_INT_LAN1_LED0 = BIT(22), + OMNIA_INT_LAN1_LED1 = BIT(23), + OMNIA_INT_LAN2_LED0 = BIT(24), + OMNIA_INT_LAN2_LED1 = BIT(25), + OMNIA_INT_LAN3_LED0 = BIT(26), + OMNIA_INT_LAN3_LED1 = BIT(27), + OMNIA_INT_LAN4_LED0 = BIT(28), + OMNIA_INT_LAN4_LED1 = BIT(29), + OMNIA_INT_LAN5_LED0 = BIT(30), + OMNIA_INT_LAN5_LED1 = BIT(31), +}; + +enum omnia_cmd_poweroff_e { + OMNIA_CMD_POWER_OFF_POWERON_BUTTON = BIT(0), + OMNIA_CMD_POWER_OFF_MAGIC = 0xdead, +}; + +enum omnia_cmd_usb_ovc_prot_e { + OMNIA_CMD_xET_USB_OVC_PROT_PORT_MASK = GENMASK(3, 0), + OMNIA_CMD_xET_USB_OVC_PROT_ENABLE = BIT(4), +}; + +#endif /* __TURRIS_OMNIA_MCU_INTERFACE_H */ diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h index f46e0ca0169c..b503fafb7fb3 100644 --- a/include/linux/uprobes.h +++ b/include/linux/uprobes.h @@ -138,6 +138,9 @@ extern bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check c extern bool arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs); extern void arch_uprobe_copy_ixol(struct page 
*page, unsigned long vaddr, void *src, unsigned long len); +extern void uprobe_handle_trampoline(struct pt_regs *regs); +extern void *arch_uprobe_trampoline(unsigned long *psize); +extern unsigned long uprobe_get_trampoline_vaddr(void); #else /* !CONFIG_UPROBES */ struct uprobes_state { }; diff --git a/include/linux/usb.h b/include/linux/usb.h index 1913a13833f2..832997a9da0a 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -1171,6 +1171,7 @@ extern ssize_t usb_show_dynids(struct usb_dynids *dynids, char *buf); * post_reset method is called. * @post_reset: Called by usb_reset_device() after the device * has been reset + * @shutdown: Called at shut-down time to quiesce the device. * @id_table: USB drivers use ID table to support hotplugging. * Export this with MODULE_DEVICE_TABLE(usb,...). This must be set * or your driver's probe function will never get called. @@ -1222,6 +1223,8 @@ struct usb_driver { int (*pre_reset)(struct usb_interface *intf); int (*post_reset)(struct usb_interface *intf); + void (*shutdown)(struct usb_interface *intf); + const struct usb_device_id *id_table; const struct attribute_group **dev_groups; diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h index 56dda8e1562d..df33333650a0 100644 --- a/include/linux/usb/gadget.h +++ b/include/linux/usb/gadget.h @@ -229,18 +229,18 @@ struct usb_ep { const char *name; const struct usb_ep_ops *ops; + const struct usb_endpoint_descriptor *desc; + const struct usb_ss_ep_comp_descriptor *comp_desc; struct list_head ep_list; struct usb_ep_caps caps; bool claimed; bool enabled; - unsigned maxpacket:16; - unsigned maxpacket_limit:16; - unsigned max_streams:16; unsigned mult:2; unsigned maxburst:5; u8 address; - const struct usb_endpoint_descriptor *desc; - const struct usb_ss_ep_comp_descriptor *comp_desc; + u16 maxpacket; + u16 maxpacket_limit; + u16 max_streams; }; /*-------------------------------------------------------------------------*/ diff --git a/include/linux/usb/tcpci.h b/include/linux/usb/tcpci.h index 47a86b8a4a50..0ab39b6ea205 100644 --- a/include/linux/usb/tcpci.h +++ b/include/linux/usb/tcpci.h @@ -47,6 +47,9 @@ #define TCPC_SINK_FAST_ROLE_SWAP BIT(0) #define TCPC_CONFIG_STD_OUTPUT 0x18 +#define TCPC_CONFIG_STD_OUTPUT_ORIENTATION_MASK BIT(0) +#define TCPC_CONFIG_STD_OUTPUT_ORIENTATION_NORMAL 0 +#define TCPC_CONFIG_STD_OUTPUT_ORIENTATION_FLIPPED 1 #define TCPC_TCPC_CTRL 0x19 #define TCPC_TCPC_CTRL_ORIENTATION BIT(0) @@ -127,6 +130,7 @@ #define TCPC_DEV_CAP_2 0x26 #define TCPC_STD_INPUT_CAP 0x28 #define TCPC_STD_OUTPUT_CAP 0x29 +#define TCPC_STD_OUTPUT_CAP_ORIENTATION BIT(0) #define TCPC_MSG_HDR_INFO 0x2e #define TCPC_MSG_HDR_INFO_DATA_ROLE BIT(3) @@ -209,6 +213,9 @@ struct tcpci; * swap following Discover Identity on SOP' occurs. * Return true when the TCPM is allowed to request a Vconn swap * after Discovery Identity on SOP. + * @set_orientation: + * Optional; Enable setting the connector orientation + * CONFIG_STANDARD_OUTPUT (0x18) bit0. 
*/ struct tcpci_data { struct regmap *regmap; @@ -216,6 +223,7 @@ struct tcpci_data { unsigned char auto_discharge_disconnect:1; unsigned char vbus_vsafe0v:1; unsigned char cable_comm_capable:1; + unsigned char set_orientation:1; int (*init)(struct tcpci *tcpci, struct tcpci_data *data); int (*set_vconn)(struct tcpci *tcpci, struct tcpci_data *data, diff --git a/include/linux/usb/typec.h b/include/linux/usb/typec.h index b35b427561ab..549275f8ac1b 100644 --- a/include/linux/usb/typec.h +++ b/include/linux/usb/typec.h @@ -167,6 +167,9 @@ struct typec_port *typec_altmode2port(struct typec_altmode *alt); void typec_altmode_update_active(struct typec_altmode *alt, bool active); +void typec_altmode_set_ops(struct typec_altmode *alt, + const struct typec_altmode_ops *ops); + enum typec_plug_index { TYPEC_PLUG_SOP_P, TYPEC_PLUG_SOP_PP, diff --git a/include/linux/vfio_pci_core.h b/include/linux/vfio_pci_core.h index f87067438ed4..fbb472dd99b3 100644 --- a/include/linux/vfio_pci_core.h +++ b/include/linux/vfio_pci_core.h @@ -137,23 +137,26 @@ bool vfio_pci_core_range_intersect_range(loff_t buf_start, size_t buf_cnt, loff_t *buf_offset, size_t *intersect_count, size_t *register_offset); -#define VFIO_IOWRITE_DECLATION(size) \ +#define VFIO_IOWRITE_DECLARATION(size) \ int vfio_pci_core_iowrite##size(struct vfio_pci_core_device *vdev, \ bool test_mem, u##size val, void __iomem *io); -VFIO_IOWRITE_DECLATION(8) -VFIO_IOWRITE_DECLATION(16) -VFIO_IOWRITE_DECLATION(32) +VFIO_IOWRITE_DECLARATION(8) +VFIO_IOWRITE_DECLARATION(16) +VFIO_IOWRITE_DECLARATION(32) #ifdef iowrite64 -VFIO_IOWRITE_DECLATION(64) +VFIO_IOWRITE_DECLARATION(64) #endif -#define VFIO_IOREAD_DECLATION(size) \ +#define VFIO_IOREAD_DECLARATION(size) \ int vfio_pci_core_ioread##size(struct vfio_pci_core_device *vdev, \ bool test_mem, u##size *val, void __iomem *io); -VFIO_IOREAD_DECLATION(8) -VFIO_IOREAD_DECLATION(16) -VFIO_IOREAD_DECLATION(32) +VFIO_IOREAD_DECLARATION(8) +VFIO_IOREAD_DECLARATION(16) +VFIO_IOREAD_DECLARATION(32) +#ifdef ioread64 +VFIO_IOREAD_DECLARATION(64) +#endif #endif /* VFIO_PCI_CORE_H */ diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index da9b271b54db..ab4b9a3fef6b 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -19,6 +19,20 @@ struct virtio_shm_region { typedef void vq_callback_t(struct virtqueue *); /** + * struct virtqueue_info - Info for a virtqueue passed to find_vqs(). + * @name: virtqueue description. Used mainly for debugging, NULL for + * a virtqueue unused by the driver. + * @callback: A callback to invoke on a used buffer notification. + * NULL for a virtqueue that does not need a callback. + * @ctx: A flag to indicate to maintain an extra context per virtqueue. + */ +struct virtqueue_info { + const char *name; + vq_callback_t *callback; + bool ctx; +}; + +/** * struct virtio_config_ops - operations for configuring a virtio device * Note: Do not assume that a transport implements all of the operations * getting/setting a value as a simple read/write! 
Generally speaking, @@ -53,10 +67,7 @@ typedef void vq_callback_t(struct virtqueue *); * vdev: the virtio_device * nvqs: the number of virtqueues to find * vqs: on success, includes new virtqueues - * callbacks: array of callbacks, for each virtqueue - * include a NULL entry for vqs that do not need a callback - * names: array of virtqueue names (mainly for debugging) - * include a NULL entry for vqs unused by driver + * vqs_info: array of virtqueue info structures * Returns 0 on success or error status * @del_vqs: free virtqueues found by find_vqs(). * @synchronize_cbs: synchronize with the virtqueue callbacks (optional) @@ -105,9 +116,9 @@ struct virtio_config_ops { u8 (*get_status)(struct virtio_device *vdev); void (*set_status)(struct virtio_device *vdev, u8 status); void (*reset)(struct virtio_device *vdev); - int (*find_vqs)(struct virtio_device *, unsigned nvqs, - struct virtqueue *vqs[], vq_callback_t *callbacks[], - const char * const names[], const bool *ctx, + int (*find_vqs)(struct virtio_device *vdev, unsigned int nvqs, + struct virtqueue *vqs[], + struct virtqueue_info vqs_info[], struct irq_affinity *desc); void (*del_vqs)(struct virtio_device *); void (*synchronize_cbs)(struct virtio_device *); @@ -117,7 +128,7 @@ struct virtio_config_ops { int (*set_vq_affinity)(struct virtqueue *vq, const struct cpumask *cpu_mask); const struct cpumask *(*get_vq_affinity)(struct virtio_device *vdev, - int index); + int index); bool (*get_shm_region)(struct virtio_device *vdev, struct virtio_shm_region *region, u8 id); int (*disable_vq_and_reset)(struct virtqueue *vq); @@ -211,38 +222,29 @@ static inline bool virtio_has_dma_quirk(const struct virtio_device *vdev) } static inline +int virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs, + struct virtqueue *vqs[], + struct virtqueue_info vqs_info[], + struct irq_affinity *desc) +{ + return vdev->config->find_vqs(vdev, nvqs, vqs, vqs_info, desc); +} + +static inline struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev, vq_callback_t *c, const char *n) { - vq_callback_t *callbacks[] = { c }; - const char *names[] = { n }; + struct virtqueue_info vqs_info[] = { + { n, c }, + }; struct virtqueue *vq; - int err = vdev->config->find_vqs(vdev, 1, &vq, callbacks, names, NULL, - NULL); + int err = virtio_find_vqs(vdev, 1, &vq, vqs_info, NULL); + if (err < 0) return ERR_PTR(err); return vq; } -static inline -int virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs, - struct virtqueue *vqs[], vq_callback_t *callbacks[], - const char * const names[], - struct irq_affinity *desc) -{ - return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, NULL, desc); -} - -static inline -int virtio_find_vqs_ctx(struct virtio_device *vdev, unsigned nvqs, - struct virtqueue *vqs[], vq_callback_t *callbacks[], - const char * const names[], const bool *ctx, - struct irq_affinity *desc) -{ - return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, ctx, - desc); -} - /** * virtio_synchronize_cbs - synchronize with virtqueue callbacks * @dev: the virtio device diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h index 4dfa9b69ca8d..d1d7825318c3 100644 --- a/include/linux/virtio_net.h +++ b/include/linux/virtio_net.h @@ -56,6 +56,7 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, unsigned int thlen = 0; unsigned int p_off = 0; unsigned int ip_proto; + u64 ret, remainder, gso_size; if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { @@ -98,6 +99,16 @@ 
static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, u32 off = __virtio16_to_cpu(little_endian, hdr->csum_offset); u32 needed = start + max_t(u32, thlen, off + sizeof(__sum16)); + if (hdr->gso_size) { + gso_size = __virtio16_to_cpu(little_endian, hdr->gso_size); + ret = div64_u64_rem(skb->len, gso_size, &remainder); + if (!(ret && (hdr->gso_size > needed) && + ((remainder > needed) || (remainder == 0)))) { + return -EINVAL; + } + skb_shinfo(skb)->tx_flags |= SKBFL_SHARED_FRAG; + } + if (!pskb_may_pull(skb, needed)) return -EINVAL; diff --git a/include/linux/vmstat.h index 735eae6e272c..16b0cfa80502 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -624,4 +624,8 @@ static inline void lruvec_stat_sub_folio(struct folio *folio, { lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio)); } + +void __meminit mod_node_early_perpage_metadata(int nid, long delta); +void __meminit store_early_perpage_metadata(void); + #endif /* _LINUX_VMSTAT_H */ diff --git a/include/linux/w1.h index 9a2a0ef39018..064805bfae3f 100644 --- a/include/linux/w1.h +++ b/include/linux/w1.h @@ -85,7 +85,8 @@ typedef void (*w1_slave_found_callback)(struct w1_master *, u64); * * @data: the first parameter in all the functions below * - * @read_bit: Sample the line level @return the level read (0 or 1) + * @read_bit: Sample the line level + * @return the level read (0 or 1) * * @write_bit: Sets the line level * @@ -95,7 +96,7 @@ typedef void (*w1_slave_found_callback)(struct w1_master *, u64); * touch_bit(1) = write-1 / read cycle * @return the bit read (0 or 1) * - * @read_byte: Reads a bytes. Same as 8 touch_bit(1) calls. + * @read_byte: Reads a byte. Same as 8 touch_bit(1) calls. * @return the byte read * * @write_byte: Writes a byte. Same as 8 touch_bit(x) calls. @@ -114,7 +115,7 @@ typedef void (*w1_slave_found_callback)(struct w1_master *, u64); * @set_pullup: Put out a strong pull-up pulse of the specified duration. * @return -1=Error, 0=completed * - * @search: Really nice hardware can handles the different types of ROM search + * @search: Really nice hardware can handle the different types of ROM search * w1_master* is passed to the slave found callback. * u8 is search_type, W1_SEARCH or W1_ALARM_SEARCH * diff --git a/include/linux/wmi.h index 63cca3b58d6d..3275470b5531 100644 --- a/include/linux/wmi.h +++ b/include/linux/wmi.h @@ -16,12 +16,16 @@ * struct wmi_device - WMI device structure * @dev: Device associated with this WMI device * @setable: True for devices implementing the Set Control Method + * @driver_override: Driver name to force a match; do not set directly, + * because core frees it; use driver_set_override() to + * set or clear it. * * This represents WMI devices discovered by the WMI driver core. */ struct wmi_device { struct device dev; bool setable; + const char *driver_override; }; /** diff --git a/include/linux/wordpart.h index 4ca1ba66d2f0..5a7b97bb7c95 100644 --- a/include/linux/wordpart.h +++ b/include/linux/wordpart.h @@ -39,6 +39,14 @@ */ #define REPEAT_BYTE(x) ((~0ul / 0xff) * (x)) +/** + * REPEAT_BYTE_U32 - repeat the value @x multiple times as a u32 value + * @x: value to repeat + * + * NOTE: @x is not checked for > 0xff; larger values produce odd results. + */ +#define REPEAT_BYTE_U32(x) lower_32_bits(REPEAT_BYTE(x)) + /* Set bits in the first 'n' bytes when loaded from memory */ #ifdef __LITTLE_ENDIAN # define aligned_byte_mask(n) ((1UL << 8*(n))-1)
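
Not part of the patch: a worked illustration of the new REPEAT_BYTE_U32() helper added in the wordpart.h hunk above. The macros are restated here as a standalone userspace snippet so the arithmetic can be checked; lower_32_bits() is an illustrative re-statement, not the kernel's definition.

#include <stdint.h>
#include <stdio.h>

/* Restated from the hunk above, for illustration only. */
#define REPEAT_BYTE(x)		((~0ul / 0xff) * (x))
#define lower_32_bits(n)	((uint32_t)((n) & 0xffffffffUL))
#define REPEAT_BYTE_U32(x)	lower_32_bits(REPEAT_BYTE(x))

int main(void)
{
	/* ~0ul / 0xff == 0x0101010101010101 on a 64-bit platform, so: */
	printf("%#lx\n", REPEAT_BYTE(0x2a));	/* 0x2a2a2a2a2a2a2a2a */
	printf("%#x\n", REPEAT_BYTE_U32(0x2a));	/* 0x2a2a2a2a */
	/* As the NOTE warns, @x > 0xff smears across byte lanes: */
	printf("%#lx\n", REPEAT_BYTE(0x1ff));	/* 0x1010101010100ff-style result */
	return 0;
}
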
diff --git a/include/linux/workqueue.h index d9968bfc8eac..4eb8f9563136 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -12,7 +12,7 @@ #include <linux/lockdep.h> #include <linux/threads.h> #include <linux/atomic.h> -#include <linux/cpumask.h> +#include <linux/cpumask_types.h> #include <linux/rcupdate.h> #include <linux/workqueue_types.h> diff --git a/include/linux/zswap.h index 2a85b941db97..6cecb4a4f68b 100644 --- a/include/linux/zswap.h +++ b/include/linux/zswap.h @@ -35,7 +35,8 @@ void zswap_swapoff(int type); void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg); void zswap_lruvec_state_init(struct lruvec *lruvec); void zswap_folio_swapin(struct folio *folio); -bool is_zswap_enabled(void); +bool zswap_is_enabled(void); +bool zswap_never_enabled(void); #else struct zswap_lruvec_state {}; @@ -60,11 +61,16 @@ static inline void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg) {} static inline void zswap_lruvec_state_init(struct lruvec *lruvec) {} static inline void zswap_folio_swapin(struct folio *folio) {} -static inline bool is_zswap_enabled(void) +static inline bool zswap_is_enabled(void) { return false; } +static inline bool zswap_never_enabled(void) +{ + return true; +} + #endif #endif /* _LINUX_ZSWAP_H */
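
Not part of the patch: a sketch of driver-side usage of the consolidated find_vqs() interface from the virtio_config.h hunk above. The separate callbacks[]/names[]/ctx arrays collapse into a single struct virtqueue_info array passed to the new virtio_find_vqs() wrapper. The foo_* names are hypothetical driver details.

#include <linux/virtio.h>
#include <linux/virtio_config.h>

static void foo_rx_done(struct virtqueue *vq) { /* process used buffers */ }
static void foo_tx_done(struct virtqueue *vq) { /* reclaim sent buffers */ }

static int foo_find_vqs(struct virtio_device *vdev, struct virtqueue *vqs[])
{
	struct virtqueue_info vqs_info[] = {
		{ .name = "rx", .callback = foo_rx_done },
		{ .name = "tx", .callback = foo_tx_done, .ctx = true },
		{ .name = NULL },	/* third vq unused by this driver */
	};

	/* Replaces the old callbacks[]/names[]/ctx calling convention. */
	return virtio_find_vqs(vdev, 3, vqs, vqs_info, NULL);
}
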