Diffstat (limited to 'include')
615 files changed, 19203 insertions, 7851 deletions
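The bulk of this diff threads a new p4d page-table level between pgd and pud. For orientation, here is a minimal sketch (not part of the patch) of how a generic page-table walk is expected to look once the p4d level exists; pgd_offset(), pmd_offset() and pte_offset_map() are assumed from the existing kernel API, and error handling is kept to the none-or-clear-bad checks declared below:

/*
 * Illustrative sketch only. On architectures where the level is folded
 * (pgtable-nop4d.h below), p4d_offset() just returns the pgd slot cast
 * to a p4d_t *, so the extra step compiles away.
 */
static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd = pgd_offset(mm, addr);      /* assumed existing API */
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;

        if (pgd_none_or_clear_bad(pgd))
                return NULL;
        p4d = p4d_offset(pgd, addr);            /* new level; no-op when folded */
        if (p4d_none_or_clear_bad(p4d))
                return NULL;
        pud = pud_offset(p4d, addr);            /* now takes a p4d_t *, per this diff */
        if (pud_none_or_clear_bad(pud))
                return NULL;
        pmd = pmd_offset(pud, addr);
        if (pmd_none_or_clear_bad(pmd))
                return NULL;
        return pte_offset_map(pmd, addr);       /* assumed existing API */
}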
diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h index 5bdab6bffd23..928fd66b1271 100644 --- a/include/asm-generic/4level-fixup.h +++ b/include/asm-generic/4level-fixup.h @@ -15,7 +15,6 @@ ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \ NULL: pmd_offset(pud, address)) -#define pud_alloc(mm, pgd, address) (pgd) #define pud_offset(pgd, start) (pgd) #define pud_none(pud) 0 #define pud_bad(pud) 0 @@ -35,4 +34,6 @@ #undef pud_addr_end #define pud_addr_end(addr, end) (end) +#include <asm-generic/5level-fixup.h> + #endif diff --git a/include/asm-generic/5level-fixup.h b/include/asm-generic/5level-fixup.h new file mode 100644 index 000000000000..b5ca82dc4175 --- /dev/null +++ b/include/asm-generic/5level-fixup.h @@ -0,0 +1,41 @@ +#ifndef _5LEVEL_FIXUP_H +#define _5LEVEL_FIXUP_H + +#define __ARCH_HAS_5LEVEL_HACK +#define __PAGETABLE_P4D_FOLDED + +#define P4D_SHIFT PGDIR_SHIFT +#define P4D_SIZE PGDIR_SIZE +#define P4D_MASK PGDIR_MASK +#define PTRS_PER_P4D 1 + +#define p4d_t pgd_t + +#define pud_alloc(mm, p4d, address) \ + ((unlikely(pgd_none(*(p4d))) && __pud_alloc(mm, p4d, address)) ? \ + NULL : pud_offset(p4d, address)) + +#define p4d_alloc(mm, pgd, address) (pgd) +#define p4d_offset(pgd, start) (pgd) +#define p4d_none(p4d) 0 +#define p4d_bad(p4d) 0 +#define p4d_present(p4d) 1 +#define p4d_ERROR(p4d) do { } while (0) +#define p4d_clear(p4d) pgd_clear(p4d) +#define p4d_val(p4d) pgd_val(p4d) +#define p4d_populate(mm, p4d, pud) pgd_populate(mm, p4d, pud) +#define p4d_page(p4d) pgd_page(p4d) +#define p4d_page_vaddr(p4d) pgd_page_vaddr(p4d) + +#define __p4d(x) __pgd(x) +#define set_p4d(p4dp, p4d) set_pgd(p4dp, p4d) + +#undef p4d_free_tlb +#define p4d_free_tlb(tlb, x, addr) do { } while (0) +#define p4d_free(mm, x) do { } while (0) +#define __p4d_free_tlb(tlb, x, addr) do { } while (0) + +#undef p4d_addr_end +#define p4d_addr_end(addr, end) (end) + +#endif diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h index 9ed8b987185b..3f38eb03649c 100644 --- a/include/asm-generic/atomic.h +++ b/include/asm-generic/atomic.h @@ -223,6 +223,7 @@ static inline void atomic_dec(atomic_t *v) #define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v))) #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) +#ifndef __atomic_add_unless static inline int __atomic_add_unless(atomic_t *v, int a, int u) { int c, old; @@ -231,5 +232,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) c = old; return c; } +#endif #endif /* __ASM_GENERIC_ATOMIC_H */ diff --git a/include/asm-generic/kprobes.h b/include/asm-generic/kprobes.h new file mode 100644 index 000000000000..57af9f21d148 --- /dev/null +++ b/include/asm-generic/kprobes.h @@ -0,0 +1,25 @@ +#ifndef _ASM_GENERIC_KPROBES_H +#define _ASM_GENERIC_KPROBES_H + +#if defined(__KERNEL__) && !defined(__ASSEMBLY__) +#ifdef CONFIG_KPROBES +/* + * Blacklist ganerating macro. Specify functions which is not probed + * by using this macro. 
+ */ +# define __NOKPROBE_SYMBOL(fname) \ +static unsigned long __used \ + __attribute__((__section__("_kprobe_blacklist"))) \ + _kbl_addr_##fname = (unsigned long)fname; +# define NOKPROBE_SYMBOL(fname) __NOKPROBE_SYMBOL(fname) +/* Use this to forbid a kprobes attach on very low level functions */ +# define __kprobes __attribute__((__section__(".kprobes.text"))) +# define nokprobe_inline __always_inline +#else +# define NOKPROBE_SYMBOL(fname) +# define __kprobes +# define nokprobe_inline inline +#endif +#endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */ + +#endif /* _ASM_GENERIC_KPROBES_H */ diff --git a/include/asm-generic/pgtable-nop4d-hack.h b/include/asm-generic/pgtable-nop4d-hack.h new file mode 100644 index 000000000000..752fb7511750 --- /dev/null +++ b/include/asm-generic/pgtable-nop4d-hack.h @@ -0,0 +1,62 @@ +#ifndef _PGTABLE_NOP4D_HACK_H +#define _PGTABLE_NOP4D_HACK_H + +#ifndef __ASSEMBLY__ +#include <asm-generic/5level-fixup.h> + +#define __PAGETABLE_PUD_FOLDED + +/* + * Having the pud type consist of a pgd gets the size right, and allows + * us to conceptually access the pgd entry that this pud is folded into + * without casting. + */ +typedef struct { pgd_t pgd; } pud_t; + +#define PUD_SHIFT PGDIR_SHIFT +#define PTRS_PER_PUD 1 +#define PUD_SIZE (1UL << PUD_SHIFT) +#define PUD_MASK (~(PUD_SIZE-1)) + +/* + * The "pgd_xxx()" functions here are trivial for a folded two-level + * setup: the pud is never bad, and a pud always exists (as it's folded + * into the pgd entry) + */ +static inline int pgd_none(pgd_t pgd) { return 0; } +static inline int pgd_bad(pgd_t pgd) { return 0; } +static inline int pgd_present(pgd_t pgd) { return 1; } +static inline void pgd_clear(pgd_t *pgd) { } +#define pud_ERROR(pud) (pgd_ERROR((pud).pgd)) + +#define pgd_populate(mm, pgd, pud) do { } while (0) +/* + * (puds are folded into pgds so this doesn't get actually called, + * but the define is needed for a generic inline function.) + */ +#define set_pgd(pgdptr, pgdval) set_pud((pud_t *)(pgdptr), (pud_t) { pgdval }) + +static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address) +{ + return (pud_t *)pgd; +} + +#define pud_val(x) (pgd_val((x).pgd)) +#define __pud(x) ((pud_t) { __pgd(x) }) + +#define pgd_page(pgd) (pud_page((pud_t){ pgd })) +#define pgd_page_vaddr(pgd) (pud_page_vaddr((pud_t){ pgd })) + +/* + * allocating and freeing a pud is trivial: the 1-entry pud is + * inside the pgd, so has no extra memory associated with it. 
+ */ +#define pud_alloc_one(mm, address) NULL +#define pud_free(mm, x) do { } while (0) +#define __pud_free_tlb(tlb, x, a) do { } while (0) + +#undef pud_addr_end +#define pud_addr_end(addr, end) (end) + +#endif /* __ASSEMBLY__ */ +#endif /* _PGTABLE_NOP4D_HACK_H */ diff --git a/include/asm-generic/pgtable-nop4d.h b/include/asm-generic/pgtable-nop4d.h new file mode 100644 index 000000000000..de364ecb8df6 --- /dev/null +++ b/include/asm-generic/pgtable-nop4d.h @@ -0,0 +1,56 @@ +#ifndef _PGTABLE_NOP4D_H +#define _PGTABLE_NOP4D_H + +#ifndef __ASSEMBLY__ + +#define __PAGETABLE_P4D_FOLDED + +typedef struct { pgd_t pgd; } p4d_t; + +#define P4D_SHIFT PGDIR_SHIFT +#define PTRS_PER_P4D 1 +#define P4D_SIZE (1UL << P4D_SHIFT) +#define P4D_MASK (~(P4D_SIZE-1)) + +/* + * The "pgd_xxx()" functions here are trivial for a folded two-level + * setup: the p4d is never bad, and a p4d always exists (as it's folded + * into the pgd entry) + */ +static inline int pgd_none(pgd_t pgd) { return 0; } +static inline int pgd_bad(pgd_t pgd) { return 0; } +static inline int pgd_present(pgd_t pgd) { return 1; } +static inline void pgd_clear(pgd_t *pgd) { } +#define p4d_ERROR(p4d) (pgd_ERROR((p4d).pgd)) + +#define pgd_populate(mm, pgd, p4d) do { } while (0) +/* + * (p4ds are folded into pgds so this doesn't get actually called, + * but the define is needed for a generic inline function.) + */ +#define set_pgd(pgdptr, pgdval) set_p4d((p4d_t *)(pgdptr), (p4d_t) { pgdval }) + +static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address) +{ + return (p4d_t *)pgd; +} + +#define p4d_val(x) (pgd_val((x).pgd)) +#define __p4d(x) ((p4d_t) { __pgd(x) }) + +#define pgd_page(pgd) (p4d_page((p4d_t){ pgd })) +#define pgd_page_vaddr(pgd) (p4d_page_vaddr((p4d_t){ pgd })) + +/* + * allocating and freeing a p4d is trivial: the 1-entry p4d is + * inside the pgd, so has no extra memory associated with it. + */ +#define p4d_alloc_one(mm, address) NULL +#define p4d_free(mm, x) do { } while (0) +#define __p4d_free_tlb(tlb, x, a) do { } while (0) + +#undef p4d_addr_end +#define p4d_addr_end(addr, end) (end) + +#endif /* __ASSEMBLY__ */ +#endif /* _PGTABLE_NOP4D_H */ diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h index 810431d8351b..c2b9b96d6268 100644 --- a/include/asm-generic/pgtable-nopud.h +++ b/include/asm-generic/pgtable-nopud.h @@ -3,52 +3,57 @@ #ifndef __ASSEMBLY__ +#ifdef __ARCH_USE_5LEVEL_HACK +#include <asm-generic/pgtable-nop4d-hack.h> +#else +#include <asm-generic/pgtable-nop4d.h> + #define __PAGETABLE_PUD_FOLDED /* - * Having the pud type consist of a pgd gets the size right, and allows - * us to conceptually access the pgd entry that this pud is folded into + * Having the pud type consist of a p4d gets the size right, and allows + * us to conceptually access the p4d entry that this pud is folded into * without casting. 
*/ -typedef struct { pgd_t pgd; } pud_t; +typedef struct { p4d_t p4d; } pud_t; -#define PUD_SHIFT PGDIR_SHIFT +#define PUD_SHIFT P4D_SHIFT #define PTRS_PER_PUD 1 #define PUD_SIZE (1UL << PUD_SHIFT) #define PUD_MASK (~(PUD_SIZE-1)) /* - * The "pgd_xxx()" functions here are trivial for a folded two-level + * The "p4d_xxx()" functions here are trivial for a folded two-level * setup: the pud is never bad, and a pud always exists (as it's folded - * into the pgd entry) + * into the p4d entry) */ -static inline int pgd_none(pgd_t pgd) { return 0; } -static inline int pgd_bad(pgd_t pgd) { return 0; } -static inline int pgd_present(pgd_t pgd) { return 1; } -static inline void pgd_clear(pgd_t *pgd) { } -#define pud_ERROR(pud) (pgd_ERROR((pud).pgd)) +static inline int p4d_none(p4d_t p4d) { return 0; } +static inline int p4d_bad(p4d_t p4d) { return 0; } +static inline int p4d_present(p4d_t p4d) { return 1; } +static inline void p4d_clear(p4d_t *p4d) { } +#define pud_ERROR(pud) (p4d_ERROR((pud).p4d)) -#define pgd_populate(mm, pgd, pud) do { } while (0) +#define p4d_populate(mm, p4d, pud) do { } while (0) /* - * (puds are folded into pgds so this doesn't get actually called, + * (puds are folded into p4ds so this doesn't get actually called, * but the define is needed for a generic inline function.) */ -#define set_pgd(pgdptr, pgdval) set_pud((pud_t *)(pgdptr), (pud_t) { pgdval }) +#define set_p4d(p4dptr, p4dval) set_pud((pud_t *)(p4dptr), (pud_t) { p4dval }) -static inline pud_t * pud_offset(pgd_t * pgd, unsigned long address) +static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address) { - return (pud_t *)pgd; + return (pud_t *)p4d; } -#define pud_val(x) (pgd_val((x).pgd)) -#define __pud(x) ((pud_t) { __pgd(x) } ) +#define pud_val(x) (p4d_val((x).p4d)) +#define __pud(x) ((pud_t) { __p4d(x) }) -#define pgd_page(pgd) (pud_page((pud_t){ pgd })) -#define pgd_page_vaddr(pgd) (pud_page_vaddr((pud_t){ pgd })) +#define p4d_page(p4d) (pud_page((pud_t){ p4d })) +#define p4d_page_vaddr(p4d) (pud_page_vaddr((pud_t){ p4d })) /* * allocating and freeing a pud is trivial: the 1-entry pud is - * inside the pgd, so has no extra memory associated with it. + * inside the p4d, so has no extra memory associated with it. 
*/ #define pud_alloc_one(mm, address) NULL #define pud_free(mm, x) do { } while (0) @@ -58,4 +63,5 @@ static inline pud_t * pud_offset(pgd_t * pgd, unsigned long address) #define pud_addr_end(addr, end) (end) #endif /* __ASSEMBLY__ */ +#endif /* !__ARCH_USE_5LEVEL_HACK */ #endif /* _PGTABLE_NOPUD_H */ diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index 18af2bcefe6a..1fad160f35de 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -10,9 +10,9 @@ #include <linux/bug.h> #include <linux/errno.h> -#if 4 - defined(__PAGETABLE_PUD_FOLDED) - defined(__PAGETABLE_PMD_FOLDED) != \ - CONFIG_PGTABLE_LEVELS -#error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{PUD,PMD}_FOLDED +#if 5 - defined(__PAGETABLE_P4D_FOLDED) - defined(__PAGETABLE_PUD_FOLDED) - \ + defined(__PAGETABLE_PMD_FOLDED) != CONFIG_PGTABLE_LEVELS +#error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{P4D,PUD,PMD}_FOLDED #endif /* @@ -36,6 +36,9 @@ extern int ptep_set_access_flags(struct vm_area_struct *vma, extern int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp, pmd_t entry, int dirty); +extern int pudp_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pud_t *pudp, + pud_t entry, int dirty); #else static inline int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp, @@ -44,6 +47,13 @@ static inline int pmdp_set_access_flags(struct vm_area_struct *vma, BUILD_BUG(); return 0; } +static inline int pudp_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pud_t *pudp, + pud_t entry, int dirty) +{ + BUILD_BUG(); + return 0; +} #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif @@ -121,8 +131,8 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, } #endif -#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR #ifdef CONFIG_TRANSPARENT_HUGEPAGE +#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long address, pmd_t *pmdp) @@ -131,20 +141,40 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, pmd_clear(pmdp); return pmd; } +#endif /* __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR */ +#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR +static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm, + unsigned long address, + pud_t *pudp) +{ + pud_t pud = *pudp; + + pud_clear(pudp); + return pud; +} +#endif /* __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR */ #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ -#endif -#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL #ifdef CONFIG_TRANSPARENT_HUGEPAGE +#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm, unsigned long address, pmd_t *pmdp, int full) { return pmdp_huge_get_and_clear(mm, address, pmdp); } -#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif +#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR_FULL +static inline pud_t pudp_huge_get_and_clear_full(struct mm_struct *mm, + unsigned long address, pud_t *pudp, + int full) +{ + return pudp_huge_get_and_clear(mm, address, pudp); +} +#endif +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long address, pte_t *ptep, @@ -181,6 +211,9 @@ extern pte_t ptep_clear_flush(struct vm_area_struct *vma, extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp); +extern pud_t pudp_huge_clear_flush(struct 
vm_area_struct *vma, + unsigned long address, + pud_t *pudp); #endif #ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT @@ -192,6 +225,30 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres } #endif +#ifndef pte_savedwrite +#define pte_savedwrite pte_write +#endif + +#ifndef pte_mk_savedwrite +#define pte_mk_savedwrite pte_mkwrite +#endif + +#ifndef pte_clear_savedwrite +#define pte_clear_savedwrite pte_wrprotect +#endif + +#ifndef pmd_savedwrite +#define pmd_savedwrite pmd_write +#endif + +#ifndef pmd_mk_savedwrite +#define pmd_mk_savedwrite pmd_mkwrite +#endif + +#ifndef pmd_clear_savedwrite +#define pmd_clear_savedwrite pmd_wrprotect +#endif + #ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT #ifdef CONFIG_TRANSPARENT_HUGEPAGE static inline void pmdp_set_wrprotect(struct mm_struct *mm, @@ -208,6 +265,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif +#ifndef __HAVE_ARCH_PUDP_SET_WRPROTECT +#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD +static inline void pudp_set_wrprotect(struct mm_struct *mm, + unsigned long address, pud_t *pudp) +{ + pud_t old_pud = *pudp; + + set_pud_at(mm, address, pudp, pud_wrprotect(old_pud)); +} +#else +static inline void pudp_set_wrprotect(struct mm_struct *mm, + unsigned long address, pud_t *pudp) +{ + BUILD_BUG(); +} +#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ +#endif #ifndef pmdp_collapse_flush #ifdef CONFIG_TRANSPARENT_HUGEPAGE @@ -273,12 +347,23 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) { return pmd_val(pmd_a) == pmd_val(pmd_b); } + +static inline int pud_same(pud_t pud_a, pud_t pud_b) +{ + return pud_val(pud_a) == pud_val(pud_b); +} #else /* CONFIG_TRANSPARENT_HUGEPAGE */ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) { BUILD_BUG(); return 0; } + +static inline int pud_same(pud_t pud_a, pud_t pud_b) +{ + BUILD_BUG(); + return 0; +} #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif @@ -339,6 +424,13 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) (__boundary - 1 < (end) - 1)? __boundary: (end); \ }) +#ifndef p4d_addr_end +#define p4d_addr_end(addr, end) \ +({ unsigned long __boundary = ((addr) + P4D_SIZE) & P4D_MASK; \ + (__boundary - 1 < (end) - 1)? __boundary: (end); \ +}) +#endif + #ifndef pud_addr_end #define pud_addr_end(addr, end) \ ({ unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK; \ @@ -359,6 +451,7 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) * Do the tests inline, but report and clear the bad entry in mm/memory.c. 
*/ void pgd_clear_bad(pgd_t *); +void p4d_clear_bad(p4d_t *); void pud_clear_bad(pud_t *); void pmd_clear_bad(pmd_t *); @@ -373,6 +466,17 @@ static inline int pgd_none_or_clear_bad(pgd_t *pgd) return 0; } +static inline int p4d_none_or_clear_bad(p4d_t *p4d) +{ + if (p4d_none(*p4d)) + return 1; + if (unlikely(p4d_bad(*p4d))) { + p4d_clear_bad(p4d); + return 1; + } + return 0; +} + static inline int pud_none_or_clear_bad(pud_t *pud) { if (pud_none(*pud)) @@ -640,6 +744,15 @@ static inline int pmd_write(pmd_t pmd) #endif /* __HAVE_ARCH_PMD_WRITE */ #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +#if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \ + (defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ + !defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) +static inline int pud_trans_huge(pud_t pud) +{ + return 0; +} +#endif + #ifndef pmd_read_atomic static inline pmd_t pmd_read_atomic(pmd_t *pmdp) { @@ -750,11 +863,30 @@ static inline int pmd_protnone(pmd_t pmd) #endif /* CONFIG_MMU */ #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP + +#ifndef __PAGETABLE_P4D_FOLDED +int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot); +int p4d_clear_huge(p4d_t *p4d); +#else +static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot) +{ + return 0; +} +static inline int p4d_clear_huge(p4d_t *p4d) +{ + return 0; +} +#endif /* !__PAGETABLE_P4D_FOLDED */ + int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot); int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot); int pud_clear_huge(pud_t *pud); int pmd_clear_huge(pmd_t *pmd); #else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */ +static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot) +{ + return 0; +} static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot) { return 0; @@ -763,6 +895,10 @@ static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot) { return 0; } +static inline int p4d_clear_huge(p4d_t *p4d) +{ + return 0; +} static inline int pud_clear_huge(pud_t *pud) { return 0; @@ -785,8 +921,10 @@ static inline int pmd_clear_huge(pmd_t *pmd) * e.g. see arch/arc: flush_pmd_tlb_range */ #define flush_pmd_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end) +#define flush_pud_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end) #else #define flush_pmd_tlb_range(vma, addr, end) BUILD_BUG() +#define flush_pud_tlb_range(vma, addr, end) BUILD_BUG() #endif #endif diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 7eed8cf3130a..8afa4335e5b2 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -232,6 +232,20 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb, __tlb_remove_pmd_tlb_entry(tlb, pmdp, address); \ } while (0) +/** + * tlb_remove_pud_tlb_entry - remember a pud mapping for later tlb + * invalidation. This is a nop so far, because only x86 needs it. 
+ */ +#ifndef __tlb_remove_pud_tlb_entry +#define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0) +#endif + +#define tlb_remove_pud_tlb_entry(tlb, pudp, address) \ + do { \ + __tlb_adjust_range(tlb, address, HPAGE_PUD_SIZE); \ + __tlb_remove_pud_tlb_entry(tlb, pudp, address); \ + } while (0) + /* * For things like page tables caches (ie caching addresses "inside" the * page tables, like x86 does), for legacy reasons, flushing an @@ -256,6 +270,12 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb, __pte_free_tlb(tlb, ptep, address); \ } while (0) +#define pmd_free_tlb(tlb, pmdp, address) \ + do { \ + __tlb_adjust_range(tlb, address, PAGE_SIZE); \ + __pmd_free_tlb(tlb, pmdp, address); \ + } while (0) + #ifndef __ARCH_HAS_4LEVEL_HACK #define pud_free_tlb(tlb, pudp, address) \ do { \ @@ -264,11 +284,13 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb, } while (0) #endif -#define pmd_free_tlb(tlb, pmdp, address) \ +#ifndef __ARCH_HAS_5LEVEL_HACK +#define p4d_free_tlb(tlb, pudp, address) \ do { \ - __tlb_adjust_range(tlb, address, PAGE_SIZE); \ - __pmd_free_tlb(tlb, pmdp, address); \ + __tlb_adjust_range(tlb, address, PAGE_SIZE); \ + __p4d_free_tlb(tlb, pudp, address); \ } while (0) +#endif #define tlb_migrate_finish(mm) do {} while (0) diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index 404e9558e879..436c4c2683c7 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -191,9 +191,25 @@ static inline unsigned int crypto_queue_len(struct crypto_queue *queue) return queue->qlen; } -/* These functions require the input/output to be aligned as u32. */ void crypto_inc(u8 *a, unsigned int size); -void crypto_xor(u8 *dst, const u8 *src, unsigned int size); +void __crypto_xor(u8 *dst, const u8 *src, unsigned int size); + +static inline void crypto_xor(u8 *dst, const u8 *src, unsigned int size) +{ + if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && + __builtin_constant_p(size) && + (size % sizeof(unsigned long)) == 0) { + unsigned long *d = (unsigned long *)dst; + unsigned long *s = (unsigned long *)src; + + while (size > 0) { + *d++ ^= *s++; + size -= sizeof(unsigned long); + } + } else { + __crypto_xor(dst, src, size); + } +} int blkcipher_walk_done(struct blkcipher_desc *desc, struct blkcipher_walk *walk, int err); @@ -344,13 +360,18 @@ static inline struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb, return crypto_attr_alg(tb[1], type, mask); } +static inline int crypto_requires_off(u32 type, u32 mask, u32 off) +{ + return (type ^ off) & mask & off; +} + /* * Returns CRYPTO_ALG_ASYNC if type/mask requires the use of sync algorithms. * Otherwise returns zero. 
*/ static inline int crypto_requires_sync(u32 type, u32 mask) { - return (type ^ CRYPTO_ALG_ASYNC) & mask & CRYPTO_ALG_ASYNC; + return crypto_requires_off(type, mask, CRYPTO_ALG_ASYNC); } noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size); diff --git a/include/crypto/chacha20.h b/include/crypto/chacha20.h index 20d20f681a72..445fc45f4b5b 100644 --- a/include/crypto/chacha20.h +++ b/include/crypto/chacha20.h @@ -5,6 +5,7 @@ #ifndef _CRYPTO_CHACHA20_H #define _CRYPTO_CHACHA20_H +#include <crypto/skcipher.h> #include <linux/types.h> #include <linux/crypto.h> @@ -18,9 +19,8 @@ struct chacha20_ctx { void chacha20_block(u32 *state, void *stream); void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv); -int crypto_chacha20_setkey(struct crypto_tfm *tfm, const u8 *key, +int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keysize); -int crypto_chacha20_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, - struct scatterlist *src, unsigned int nbytes); +int crypto_chacha20_crypt(struct skcipher_request *req); #endif diff --git a/include/crypto/hash.h b/include/crypto/hash.h index 216a2b876147..b5727bcd2336 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -329,6 +329,16 @@ static inline unsigned int crypto_ahash_digestsize(struct crypto_ahash *tfm) return crypto_hash_alg_common(tfm)->digestsize; } +/** + * crypto_ahash_statesize() - obtain size of the ahash state + * @tfm: cipher handle + * + * Return the size of the ahash state. With the crypto_ahash_export() + * function, the caller can export the state into a buffer whose size is + * defined with this function. + * + * Return: size of the ahash state + */ static inline unsigned int crypto_ahash_statesize(struct crypto_ahash *tfm) { return crypto_hash_alg_common(tfm)->statesize; @@ -369,11 +379,7 @@ static inline struct crypto_ahash *crypto_ahash_reqtfm( * crypto_ahash_reqsize() - obtain size of the request data structure * @tfm: cipher handle * - * Return the size of the ahash state size. With the crypto_ahash_export - * function, the caller can export the state into a buffer whose size is - * defined with this function. - * - * Return: size of the ahash state + * Return: size of the request data */ static inline unsigned int crypto_ahash_reqsize(struct crypto_ahash *tfm) { @@ -453,7 +459,7 @@ int crypto_ahash_digest(struct ahash_request *req); * * This function exports the hash state of the ahash_request handle into the * caller-allocated output buffer out which must have sufficient size (e.g. by - * calling crypto_ahash_reqsize). + * calling crypto_ahash_statesize()). 
* * Return: 0 if the export was successful; < 0 if an error occurred */ diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h index a2bfd7843f18..e2b9c6fe2714 100644 --- a/include/crypto/if_alg.h +++ b/include/crypto/if_alg.h @@ -73,7 +73,7 @@ int af_alg_unregister_type(const struct af_alg_type *type); int af_alg_release(struct socket *sock); void af_alg_release_parent(struct sock *sk); -int af_alg_accept(struct sock *sk, struct socket *newsock); +int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern); int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len); void af_alg_free_sg(struct af_alg_sgl *sgl); diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h index 8735979ed341..e42f7063f245 100644 --- a/include/crypto/internal/skcipher.h +++ b/include/crypto/internal/skcipher.h @@ -66,7 +66,7 @@ struct skcipher_walk { int flags; unsigned int blocksize; - unsigned int chunksize; + unsigned int stride; unsigned int alignmask; }; diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h index 750b14f1ada4..562001cb412b 100644 --- a/include/crypto/skcipher.h +++ b/include/crypto/skcipher.h @@ -115,6 +115,9 @@ struct crypto_skcipher { * IV of exactly that size to perform the encrypt or decrypt operation. * @chunksize: Equal to the block size except for stream ciphers such as * CTR where it is set to the underlying block size. + * @walksize: Equal to the chunk size except in cases where the algorithm is + * considerably more efficient if it can operate on multiple chunks + * in parallel. Should be a multiple of chunksize. * @base: Definition of a generic crypto algorithm. * * All fields except @ivsize are mandatory and must be filled. @@ -131,6 +134,7 @@ struct skcipher_alg { unsigned int max_keysize; unsigned int ivsize; unsigned int chunksize; + unsigned int walksize; struct crypto_alg base; }; @@ -289,6 +293,19 @@ static inline unsigned int crypto_skcipher_alg_chunksize( return alg->chunksize; } +static inline unsigned int crypto_skcipher_alg_walksize( + struct skcipher_alg *alg) +{ + if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) == + CRYPTO_ALG_TYPE_BLKCIPHER) + return alg->base.cra_blocksize; + + if (alg->base.cra_ablkcipher.encrypt) + return alg->base.cra_blocksize; + + return alg->walksize; +} + /** * crypto_skcipher_chunksize() - obtain chunk size * @tfm: cipher handle @@ -307,6 +324,23 @@ static inline unsigned int crypto_skcipher_chunksize( } /** + * crypto_skcipher_walksize() - obtain walk size + * @tfm: cipher handle + * + * In some cases, algorithms can only perform optimally when operating on + * multiple blocks in parallel. 
This is reflected by the walksize, which + * must be a multiple of the chunksize (or equal if the concern does not + * apply) + * + * Return: walk size in bytes + */ +static inline unsigned int crypto_skcipher_walksize( + struct crypto_skcipher *tfm) +{ + return crypto_skcipher_alg_walksize(crypto_skcipher_alg(tfm)); +} + +/** * crypto_skcipher_blocksize() - obtain block size of cipher * @tfm: cipher handle * diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h index bae79f3c4d28..b080a171a23f 100644 --- a/include/drm/bridge/dw_hdmi.h +++ b/include/drm/bridge/dw_hdmi.h @@ -27,6 +27,16 @@ enum dw_hdmi_devtype { RK3288_HDMI, }; +enum dw_hdmi_phy_type { + DW_HDMI_PHY_DWC_HDMI_TX_PHY = 0x00, + DW_HDMI_PHY_DWC_MHL_PHY_HEAC = 0xb2, + DW_HDMI_PHY_DWC_MHL_PHY = 0xc2, + DW_HDMI_PHY_DWC_HDMI_3D_TX_PHY_HEAC = 0xe2, + DW_HDMI_PHY_DWC_HDMI_3D_TX_PHY = 0xf2, + DW_HDMI_PHY_DWC_HDMI20_TX_PHY = 0xf3, + DW_HDMI_PHY_VENDOR_PHY = 0xfe, +}; + struct dw_hdmi_mpll_config { unsigned long mpixelclock; struct { @@ -56,10 +66,11 @@ struct dw_hdmi_plat_data { struct drm_display_mode *mode); }; -void dw_hdmi_unbind(struct device *dev, struct device *master, void *data); -int dw_hdmi_bind(struct device *dev, struct device *master, - void *data, struct drm_encoder *encoder, - struct resource *iores, int irq, +int dw_hdmi_probe(struct platform_device *pdev, + const struct dw_hdmi_plat_data *plat_data); +void dw_hdmi_remove(struct platform_device *pdev); +void dw_hdmi_unbind(struct device *dev); +int dw_hdmi_bind(struct platform_device *pdev, struct drm_encoder *encoder, const struct dw_hdmi_plat_data *plat_data); void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate); diff --git a/include/drm/bridge/mhl.h b/include/drm/bridge/mhl.h index 3629b2734db6..fbdfc8d7f3c7 100644 --- a/include/drm/bridge/mhl.h +++ b/include/drm/bridge/mhl.h @@ -15,6 +15,8 @@ #ifndef __MHL_H__ #define __MHL_H__ +#include <linux/types.h> + /* Device Capabilities Registers */ enum { MHL_DCAP_DEV_STATE, @@ -288,4 +290,87 @@ enum { /* Unsupported/unrecognized key code */ #define MHL_UCPE_STATUS_INEFFECTIVE_KEY_CODE 0x01 +enum mhl_burst_id { + MHL_BURST_ID_3D_VIC = 0x10, + MHL_BURST_ID_3D_DTD = 0x11, + MHL_BURST_ID_HEV_VIC = 0x20, + MHL_BURST_ID_HEV_DTDA = 0x21, + MHL_BURST_ID_HEV_DTDB = 0x22, + MHL_BURST_ID_VC_ASSIGN = 0x38, + MHL_BURST_ID_VC_CONFIRM = 0x39, + MHL_BURST_ID_AUD_DELAY = 0x40, + MHL_BURST_ID_ADT_BURSTID = 0x41, + MHL_BURST_ID_BIST_SETUP = 0x51, + MHL_BURST_ID_BIST_RETURN_STAT = 0x52, + MHL_BURST_ID_EMSC_SUPPORT = 0x61, + MHL_BURST_ID_HID_PAYLOAD = 0x62, + MHL_BURST_ID_BLK_RCV_BUFFER_INFO = 0x63, + MHL_BURST_ID_BITS_PER_PIXEL_FMT = 0x64, +}; + +struct mhl_burst_blk_rcv_buffer_info { + __be16 id; + __le16 size; +} __packed; + +struct mhl3_burst_header { + __be16 id; + u8 checksum; + u8 total_entries; + u8 sequence_index; +} __packed; + +struct mhl_burst_bits_per_pixel_fmt { + struct mhl3_burst_header hdr; + u8 num_entries; + struct { + u8 stream_id; + u8 pixel_format; + } __packed desc[0]; +} __packed; + +struct mhl_burst_emsc_support { + struct mhl3_burst_header hdr; + u8 num_entries; + __be16 burst_id[0]; +} __packed; + +struct mhl_burst_audio_descr { + struct mhl3_burst_header hdr; + u8 flags; + u8 short_desc[9]; +} __packed; + +/* + * MHL3 infoframe related definitions + */ + +#define MHL3_IEEE_OUI 0x7ca61d +#define MHL3_INFOFRAME_SIZE 15 + +enum mhl3_video_format { + MHL3_VIDEO_FORMAT_NONE, + MHL3_VIDEO_FORMAT_3D, + MHL3_VIDEO_FORMAT_MULTI_VIEW, + MHL3_VIDEO_FORMAT_DUAL_3D +}; + +enum 
mhl3_3d_format_type { + MHL3_3D_FORMAT_TYPE_FS, /* frame sequential */ + MHL3_3D_FORMAT_TYPE_TB, /* top-bottom */ + MHL3_3D_FORMAT_TYPE_LR, /* left-right */ + MHL3_3D_FORMAT_TYPE_FS_TB, /* frame sequential, top-bottom */ + MHL3_3D_FORMAT_TYPE_FS_LR, /* frame sequential, left-right */ + MHL3_3D_FORMAT_TYPE_TB_LR /* top-bottom, left-right */ +}; + +struct mhl3_infoframe { + unsigned char version; + enum mhl3_video_format video_format; + enum mhl3_3d_format_type format_type; + bool sep_audio; + int hev_format; + int av_delay; +}; + #endif /* __MHL_H__ */ diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 9c4ee144b5f6..6105c050d7bc 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -360,6 +360,7 @@ struct drm_ioctl_desc { /* Event queued up for userspace to read */ struct drm_pending_event { struct completion *completion; + void (*completion_release)(struct completion *completion); struct drm_event *event; struct dma_fence *fence; struct list_head link; @@ -635,6 +636,19 @@ struct drm_device { int switch_power_state; }; +/** + * drm_drv_uses_atomic_modeset - check if the driver implements + * atomic_commit() + * @dev: DRM device + * + * This check is useful if drivers do not have DRIVER_ATOMIC set but + * have atomic modesetting internally implemented. + */ +static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev) +{ + return dev->mode_config.funcs->atomic_commit != NULL; +} + #include <drm/drm_irq.h> #define DRM_SWITCH_POWER_ON 0 @@ -718,11 +732,6 @@ int drm_noop(struct drm_device *dev, void *data, int drm_invalid_op(struct drm_device *dev, void *data, struct drm_file *file_priv); -/* Cache management (drm_cache.c) */ -void drm_clflush_pages(struct page *pages[], unsigned long num_pages); -void drm_clflush_sg(struct sg_table *st); -void drm_clflush_virt_range(void *addr, unsigned long length); - /* * These are exported to drivers so that they can implement fencing using * DMA quiscent + idle. DMA quiescent usually requires the hardware lock. @@ -782,21 +791,6 @@ extern void drm_sysfs_hotplug_event(struct drm_device *dev); /*@}*/ -/* PCI section */ -static __inline__ int drm_pci_device_is_agp(struct drm_device *dev) -{ - if (dev->driver->device_is_agp != NULL) { - int err = (*dev->driver->device_is_agp) (dev); - - if (err != 2) { - return err; - } - } - - return pci_find_capability(dev->pdev, PCI_CAP_ID_AGP); -} -void drm_pci_agp_destroy(struct drm_device *dev); - extern int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver); extern void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver); #ifdef CONFIG_PCI diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h index 56814e8ae7ea..052ab161b239 100644 --- a/include/drm/drm_atomic.h +++ b/include/drm/drm_atomic.h @@ -123,7 +123,8 @@ struct drm_crtc_commit { /** * @commit_entry: * - * Entry on the per-CRTC commit_list. Protected by crtc->commit_lock. + * Entry on the per-CRTC &drm_crtc.commit_list. Protected by + * $drm_crtc.commit_lock. 
*/ struct list_head commit_entry; @@ -145,6 +146,7 @@ struct __drm_crtcs_state { struct drm_crtc_state *state; struct drm_crtc_commit *commit; s32 __user *out_fence_ptr; + unsigned last_vblank_count; }; struct __drm_connnectors_state { @@ -188,12 +190,31 @@ struct drm_atomic_state { struct work_struct commit_work; }; -void drm_crtc_commit_put(struct drm_crtc_commit *commit); +void __drm_crtc_commit_free(struct kref *kref); + +/** + * drm_crtc_commit_get - acquire a reference to the CRTC commit + * @commit: CRTC commit + * + * Increases the reference of @commit. + */ static inline void drm_crtc_commit_get(struct drm_crtc_commit *commit) { kref_get(&commit->ref); } +/** + * drm_crtc_commit_put - release a reference to the CRTC commmit + * @commit: CRTC commit + * + * This releases a reference to @commit which is freed after removing the + * final reference. No locking required and callable from any context. + */ +static inline void drm_crtc_commit_put(struct drm_crtc_commit *commit) +{ + kref_put(&commit->ref, __drm_crtc_commit_free); +} + struct drm_atomic_state * __must_check drm_atomic_state_alloc(struct drm_device *dev); void drm_atomic_state_clear(struct drm_atomic_state *state); @@ -369,12 +390,6 @@ int __must_check drm_atomic_nonblocking_commit(struct drm_atomic_state *state); void drm_state_dump(struct drm_device *dev, struct drm_printer *p); -#ifdef CONFIG_DEBUG_FS -struct drm_minor; -int drm_atomic_debugfs_init(struct drm_minor *minor); -int drm_atomic_debugfs_cleanup(struct drm_minor *minor); -#endif - #define for_each_connector_in_state(__state, connector, connector_state, __i) \ for ((__i) = 0; \ (__i) < (__state)->num_connector && \ @@ -403,7 +418,7 @@ int drm_atomic_debugfs_cleanup(struct drm_minor *minor); * drm_atomic_crtc_needs_modeset - compute combined modeset need * @state: &drm_crtc_state for the CRTC * - * To give drivers flexibility struct &drm_crtc_state has 3 booleans to track + * To give drivers flexibility &struct drm_crtc_state has 3 booleans to track * whether the state CRTC changed enough to need a full modeset cycle: * connectors_changed, mode_changed and active_changed. This helper simply * combines these three to compute the overall need for a modeset for @state. @@ -415,7 +430,8 @@ int drm_atomic_debugfs_cleanup(struct drm_minor *minor); * * For example if the CRTC mode has changed, and the hardware is able to enact * the requested mode change without going through a full modeset, the driver - * should clear mode_changed during its ->atomic_check. + * should clear mode_changed in its &drm_mode_config_funcs.atomic_check + * implementation. 
*/ static inline bool drm_atomic_crtc_needs_modeset(const struct drm_crtc_state *state) @@ -424,5 +440,4 @@ drm_atomic_crtc_needs_modeset(const struct drm_crtc_state *state) state->connectors_changed; } - #endif /* DRM_ATOMIC_H_ */ diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h index 7ff92b09fd9c..d066e9491ae3 100644 --- a/include/drm/drm_atomic_helper.h +++ b/include/drm/drm_atomic_helper.h @@ -48,9 +48,6 @@ int drm_atomic_helper_commit(struct drm_device *dev, int drm_atomic_helper_wait_for_fences(struct drm_device *dev, struct drm_atomic_state *state, bool pre_swap); -bool drm_atomic_helper_framebuffer_changed(struct drm_device *dev, - struct drm_atomic_state *old_state, - struct drm_crtc *crtc); void drm_atomic_helper_wait_for_vblanks(struct drm_device *dev, struct drm_atomic_state *old_state); @@ -124,6 +121,12 @@ int drm_atomic_helper_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_pending_vblank_event *event, uint32_t flags); +int drm_atomic_helper_page_flip_target( + struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event, + uint32_t flags, + uint32_t target); int drm_atomic_helper_connector_dpms(struct drm_connector *connector, int mode); struct drm_encoder * @@ -174,7 +177,8 @@ int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc, * * This iterates over the current state, useful (for example) when applying * atomic state after it has been checked and swapped. To iterate over the - * planes which *will* be attached (for ->atomic_check()) see + * planes which *will* be attached (more useful in code called from + * &drm_mode_config_funcs.atomic_check) see * drm_atomic_crtc_state_for_each_plane(). */ #define drm_atomic_crtc_for_each_plane(plane, crtc) \ @@ -186,8 +190,9 @@ int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc, * @crtc_state: the incoming crtc-state * * Similar to drm_crtc_for_each_plane(), but iterates the planes that will be - * attached if the specified state is applied. Useful during (for example) - * ->atomic_check() operations, to validate the incoming state. + * attached if the specified state is applied. Useful during for example + * in code called from &drm_mode_config_funcs.atomic_check operations, to + * validate the incoming state. */ #define drm_atomic_crtc_state_for_each_plane(plane, crtc_state) \ drm_for_each_plane_mask(plane, (crtc_state)->state->dev, (crtc_state)->plane_mask) @@ -199,8 +204,9 @@ int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc, * @crtc_state: the incoming crtc-state * * Similar to drm_crtc_for_each_plane(), but iterates the planes that will be - * attached if the specified state is applied. Useful during (for example) - * ->atomic_check() operations, to validate the incoming state. + * attached if the specified state is applied. Useful during for example + * in code called from &drm_mode_config_funcs.atomic_check operations, to + * validate the incoming state. * * Compared to just drm_atomic_crtc_state_for_each_plane() this also fills in a * const plane_state. This is useful when a driver just wants to peek at other diff --git a/include/drm/drm_auth.h b/include/drm/drm_auth.h index 610223b0481b..1eb4a52cad8d 100644 --- a/include/drm/drm_auth.h +++ b/include/drm/drm_auth.h @@ -33,10 +33,7 @@ * * @refcount: Refcount for this master object. * @dev: Link back to the DRM device - * @unique: Unique identifier: e.g. busid. Protected by drm_global_mutex. - * @unique_len: Length of unique field. Protected by drm_global_mutex. 
- * @magic_map: Map of used authentication tokens. Protected by struct_mutex. - * @lock: DRI lock information. + * @lock: DRI1 lock information. * @driver_priv: Pointer to driver-private information. * * Note that master structures are only relevant for the legacy/primary device @@ -45,8 +42,20 @@ struct drm_master { struct kref refcount; struct drm_device *dev; + /** + * @unique: Unique identifier: e.g. busid. Protected by + * &drm_device.master_mutex. + */ char *unique; + /** + * @unique_len: Length of unique field. Protected by + * &drm_device.master_mutex. + */ int unique_len; + /** + * @magic_map: Map of used authentication tokens. Protected by + * &drm_device.master_mutex. + */ struct idr magic_map; struct drm_lock_data lock; void *driver_priv; diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h index 530a1d6e8cde..fdd82fcbf168 100644 --- a/include/drm/drm_bridge.h +++ b/include/drm/drm_bridge.h @@ -96,9 +96,10 @@ struct drm_bridge_funcs { * This callback should disable the bridge. It is called right before * the preceding element in the display pipe is disabled. If the * preceding element is a bridge this means it's called before that - * bridge's ->disable() function. If the preceding element is a - * &drm_encoder it's called right before the encoder's ->disable(), - * ->prepare() or ->dpms() hook from struct &drm_encoder_helper_funcs. + * bridge's @disable vfunc. If the preceding element is a &drm_encoder + * it's called right before the &drm_encoder_helper_funcs.disable, + * &drm_encoder_helper_funcs.prepare or &drm_encoder_helper_funcs.dpms + * hook. * * The bridge can assume that the display pipe (i.e. clocks and timing * signals) feeding it is still running when this callback is called. @@ -110,12 +111,13 @@ struct drm_bridge_funcs { /** * @post_disable: * - * This callback should disable the bridge. It is called right after - * the preceding element in the display pipe is disabled. If the - * preceding element is a bridge this means it's called after that - * bridge's ->post_disable() function. If the preceding element is a - * &drm_encoder it's called right after the encoder's ->disable(), - * ->prepare() or ->dpms() hook from struct &drm_encoder_helper_funcs. + * This callback should disable the bridge. It is called right after the + * preceding element in the display pipe is disabled. If the preceding + * element is a bridge this means it's called after that bridge's + * @post_disable function. If the preceding element is a &drm_encoder + * it's called right after the encoder's + * &drm_encoder_helper_funcs.disable, &drm_encoder_helper_funcs.prepare + * or &drm_encoder_helper_funcs.dpms hook. * * The bridge must assume that the display pipe (i.e. clocks and timing * singals) feeding it is no longer running when this callback is @@ -129,9 +131,11 @@ struct drm_bridge_funcs { * @mode_set: * * This callback should set the given mode on the bridge. It is called - * after the ->mode_set() callback for the preceding element in the - * display pipeline has been called already. The display pipe (i.e. - * clocks and timing signals) is off when this function is called. + * after the @mode_set callback for the preceding element in the display + * pipeline has been called already. If the bridge is the first element + * then this would be &drm_encoder_helper_funcs.mode_set. The display + * pipe (i.e. clocks and timing signals) is off when this function is + * called. 
*/ void (*mode_set)(struct drm_bridge *bridge, struct drm_display_mode *mode, @@ -142,9 +146,10 @@ struct drm_bridge_funcs { * This callback should enable the bridge. It is called right before * the preceding element in the display pipe is enabled. If the * preceding element is a bridge this means it's called before that - * bridge's ->pre_enable() function. If the preceding element is a - * &drm_encoder it's called right before the encoder's ->enable(), - * ->commit() or ->dpms() hook from struct &drm_encoder_helper_funcs. + * bridge's @pre_enable function. If the preceding element is a + * &drm_encoder it's called right before the encoder's + * &drm_encoder_helper_funcs.enable, &drm_encoder_helper_funcs.commit or + * &drm_encoder_helper_funcs.dpms hook. * * The display pipe (i.e. clocks and timing signals) feeding this bridge * will not yet be running when this callback is called. The bridge must @@ -161,9 +166,10 @@ struct drm_bridge_funcs { * This callback should enable the bridge. It is called right after * the preceding element in the display pipe is enabled. If the * preceding element is a bridge this means it's called after that - * bridge's ->enable() function. If the preceding element is a - * &drm_encoder it's called right after the encoder's ->enable(), - * ->commit() or ->dpms() hook from struct &drm_encoder_helper_funcs. + * bridge's @enable function. If the preceding element is a + * &drm_encoder it's called right after the encoder's + * &drm_encoder_helper_funcs.enable, &drm_encoder_helper_funcs.commit or + * &drm_encoder_helper_funcs.dpms hook. * * The bridge can assume that the display pipe (i.e. clocks and timing * signals) feeding it is running when this callback is called. This @@ -201,8 +207,8 @@ struct drm_bridge { int drm_bridge_add(struct drm_bridge *bridge); void drm_bridge_remove(struct drm_bridge *bridge); struct drm_bridge *of_drm_find_bridge(struct device_node *np); -int drm_bridge_attach(struct drm_device *dev, struct drm_bridge *bridge); -void drm_bridge_detach(struct drm_bridge *bridge); +int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge, + struct drm_bridge *previous); bool drm_bridge_mode_fixup(struct drm_bridge *bridge, const struct drm_display_mode *mode, diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h index cebecff536a3..beab0f0d0cfb 100644 --- a/include/drm/drm_cache.h +++ b/include/drm/drm_cache.h @@ -33,7 +33,11 @@ #ifndef _DRM_CACHE_H_ #define _DRM_CACHE_H_ +#include <linux/scatterlist.h> + void drm_clflush_pages(struct page *pages[], unsigned long num_pages); +void drm_clflush_sg(struct sg_table *st); +void drm_clflush_virt_range(void *addr, unsigned long length); static inline bool drm_arch_can_wc_memory(void) { diff --git a/include/drm/drm_color_mgmt.h b/include/drm/drm_color_mgmt.h index c767238ac9d5..bce4a532836d 100644 --- a/include/drm/drm_color_mgmt.h +++ b/include/drm/drm_color_mgmt.h @@ -25,6 +25,8 @@ #include <linux/ctype.h> +uint32_t drm_color_lut_extract(uint32_t user_input, uint32_t bit_precision); + void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc, uint degamma_lut_size, bool has_ctm, @@ -33,29 +35,4 @@ void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc, int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc, int gamma_size); -/** - * drm_color_lut_extract - clamp&round LUT entries - * @user_input: input value - * @bit_precision: number of bits the hw LUT supports - * - * Extract a degamma/gamma LUT value provided by user (in the form of - * &drm_color_lut entries) and round it to the 
precision supported by the - * hardware. - */ -static inline uint32_t drm_color_lut_extract(uint32_t user_input, - uint32_t bit_precision) -{ - uint32_t val = user_input; - uint32_t max = 0xffff >> (16 - bit_precision); - - /* Round only if we're not using full precision. */ - if (bit_precision < 16) { - val += 1UL << (16 - bit_precision - 1); - val >>= 16 - bit_precision; - } - - return clamp_val(val, 0, max); -} - - #endif diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h index 045a97cbeba2..e5e1eddd19fb 100644 --- a/include/drm/drm_connector.h +++ b/include/drm/drm_connector.h @@ -94,7 +94,7 @@ enum subpixel_order { * * Describes a given display (e.g. CRT or flat panel) and its limitations. For * fixed display sinks like built-in panels there's not much difference between - * this and struct &drm_connector. But for sinks with a real cable this + * this and &struct drm_connector. But for sinks with a real cable this * structure is meant to describe all the things at the other end of the cable. * * For sinks which provide an EDID this can be filled out by calling @@ -117,7 +117,7 @@ struct drm_display_info { /** * @pixel_clock: Maximum pixel clock supported by the sink, in units of - * 100Hz. This mismatches the clok in &drm_display_mode (which is in + * 100Hz. This mismatches the clock in &drm_display_mode (which is in * kHZ), because that's what the EDID uses as base unit. */ unsigned int pixel_clock; @@ -331,15 +331,15 @@ struct drm_connector_funcs { * * Entry point for output detection and basic mode validation. The * driver should reprobe the output if needed (e.g. when hotplug - * handling is unreliable), add all detected modes to connector->modes + * handling is unreliable), add all detected modes to &drm_connector.modes * and filter out any the device can't support in any configuration. It * also needs to filter out any modes wider or higher than the * parameters max_width and max_height indicate. * * The drivers must also prune any modes no longer valid from - * connector->modes. Furthermore it must update connector->status and - * connector->edid. If no EDID has been received for this output - * connector->edid must be NULL. + * &drm_connector.modes. Furthermore it must update + * &drm_connector.status and &drm_connector.edid. If no EDID has been + * received for this output connector->edid must be NULL. * * Drivers using the probe helpers should use * drm_helper_probe_single_connector_modes() or @@ -348,7 +348,7 @@ struct drm_connector_funcs { * * RETURNS: * - * The number of modes detected and filled into connector->modes. + * The number of modes detected and filled into &drm_connector.modes. */ int (*fill_modes)(struct drm_connector *connector, uint32_t max_width, uint32_t max_height); @@ -381,7 +381,7 @@ struct drm_connector_funcs { * core drm connector interfaces. Everything added from this callback * should be unregistered in the early_unregister callback. * - * This is called while holding drm_connector->mutex. + * This is called while holding &drm_connector.mutex. * * Returns: * @@ -398,7 +398,7 @@ struct drm_connector_funcs { * early in the driver unload sequence to disable userspace access * before data structures are torndown. * - * This is called while holding drm_connector->mutex. + * This is called while holding &drm_connector.mutex. */ void (*early_unregister)(struct drm_connector *connector); @@ -418,17 +418,17 @@ struct drm_connector_funcs { * Duplicate the current atomic state for this connector and return it. 
* The core and helpers guarantee that any atomic state duplicated with * this hook and still owned by the caller (i.e. not transferred to the - * driver by calling ->atomic_commit() from struct - * &drm_mode_config_funcs) will be cleaned up by calling the - * @atomic_destroy_state hook in this structure. + * driver by calling &drm_mode_config_funcs.atomic_commit) will be + * cleaned up by calling the @atomic_destroy_state hook in this + * structure. * - * Atomic drivers which don't subclass struct &drm_connector_state should use + * Atomic drivers which don't subclass &struct drm_connector_state should use * drm_atomic_helper_connector_duplicate_state(). Drivers that subclass the * state structure to extend it with driver-private state should use * __drm_atomic_helper_connector_duplicate_state() to make sure shared state is * duplicated in a consistent fashion across drivers. * - * It is an error to call this hook before connector->state has been + * It is an error to call this hook before &drm_connector.state has been * initialized correctly. * * NOTE: @@ -525,7 +525,7 @@ struct drm_connector_funcs { /** * @atomic_print_state: * - * If driver subclasses struct &drm_connector_state, it should implement + * If driver subclasses &struct drm_connector_state, it should implement * this optional hook for printing additional driver specific state. * * Do not call this directly, use drm_atomic_connector_print_state() @@ -563,9 +563,6 @@ struct drm_cmdline_mode { * @interlace_allowed: can this connector handle interlaced modes? * @doublescan_allowed: can this connector handle doublescan? * @stereo_allowed: can this connector handle stereo modes? - * @modes: modes available on this connector (from fill_modes() + user) - * @status: one of the drm_connector_status enums (connected, not, or unknown) - * @probed_modes: list of modes derived directly from the display * @funcs: connector control functions * @edid_blob_ptr: DRM property containing EDID if present * @properties: property tracking for this connector @@ -612,8 +609,8 @@ struct drm_connector { /** * @mutex: Lock for general connector state, but currently only protects - * @registered. Most of the connector state is still protected by the - * mutex in &drm_mode_config. + * @registered. Most of the connector state is still protected by + * &drm_mode_config.mutex. */ struct mutex mutex; @@ -635,19 +632,37 @@ struct drm_connector { * Protected by @mutex. */ bool registered; - struct list_head modes; /* list of modes on this connector */ + /** + * @modes: + * Modes available on this connector (from fill_modes() + user). + * Protected by &drm_mode_config.mutex. + */ + struct list_head modes; + + /** + * @status: + * One of the drm_connector_status enums (connected, not, or unknown). + * Protected by &drm_mode_config.mutex. + */ enum drm_connector_status status; - /* these are modes added by probing with DDC or the BIOS */ + /** + * @probed_modes: + * These are modes added by probing with DDC or the BIOS, before + * filtering is applied. Used by the probe helpers. Protected by + * &drm_mode_config.mutex. + */ struct list_head probed_modes; /** * @display_info: Display information is filled from EDID information * when a display is detected. For non hot-pluggable displays such as * flat panels in embedded systems, the driver should initialize the - * display_info.width_mm and display_info.height_mm fields with the - * physical size of the display. 
+ * &drm_display_info.width_mm and &drm_display_info.height_mm fields + * with the physical size of the display. + * + * Protected by &drm_mode_config.mutex. */ struct drm_display_info display_info; const struct drm_connector_funcs *funcs; @@ -853,12 +868,46 @@ void drm_mode_put_tile_group(struct drm_device *dev, * @dev: the DRM device * * Iterate over all connectors of @dev. + * + * WARNING: + * + * This iterator is not safe against hotadd/removal of connectors and is + * deprecated. Use drm_for_each_connector_iter() instead. */ #define drm_for_each_connector(connector, dev) \ - for (assert_drm_connector_list_read_locked(&(dev)->mode_config), \ - connector = list_first_entry(&(dev)->mode_config.connector_list, \ - struct drm_connector, head); \ - &connector->head != (&(dev)->mode_config.connector_list); \ - connector = list_next_entry(connector, head)) + list_for_each_entry(connector, &(dev)->mode_config.connector_list, head) + +/** + * struct drm_connector_list_iter - connector_list iterator + * + * This iterator tracks state needed to be able to walk the connector_list + * within struct drm_mode_config. Only use together with + * drm_connector_list_iter_get(), drm_connector_list_iter_put() and + * drm_connector_list_iter_next() respectively the convenience macro + * drm_for_each_connector_iter(). + */ +struct drm_connector_list_iter { +/* private: */ + struct drm_device *dev; + struct drm_connector *conn; +}; + +void drm_connector_list_iter_get(struct drm_device *dev, + struct drm_connector_list_iter *iter); +struct drm_connector * +drm_connector_list_iter_next(struct drm_connector_list_iter *iter); +void drm_connector_list_iter_put(struct drm_connector_list_iter *iter); + +/** + * drm_for_each_connector_iter - connector_list iterator macro + * @connector: &struct drm_connector pointer used as cursor + * @iter: &struct drm_connector_list_iter + * + * Note that @connector is only valid within the list body, if you want to use + * @connector after calling drm_connector_list_iter_put() then you need to grab + * your own reference first using drm_connector_reference(). + */ +#define drm_for_each_connector_iter(connector, iter) \ + while ((connector = drm_connector_list_iter_next(iter))) #endif diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 946672f97e1e..8f0b195e4a59 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -39,7 +39,6 @@ #include <drm/drm_framebuffer.h> #include <drm/drm_modes.h> #include <drm/drm_connector.h> -#include <drm/drm_encoder.h> #include <drm/drm_property.h> #include <drm/drm_bridge.h> #include <drm/drm_edid.h> @@ -68,14 +67,12 @@ static inline uint64_t I642U64(int64_t val) } struct drm_crtc; -struct drm_encoder; struct drm_pending_vblank_event; struct drm_plane; struct drm_bridge; struct drm_atomic_state; struct drm_crtc_helper_funcs; -struct drm_encoder_helper_funcs; struct drm_plane_helper_funcs; /** @@ -84,8 +81,8 @@ struct drm_plane_helper_funcs; * @enable: whether the CRTC should be enabled, gates all other state * @active: whether the CRTC is actively displaying (used for DPMS) * @planes_changed: planes on this crtc are updated - * @mode_changed: crtc_state->mode or crtc_state->enable has been changed - * @active_changed: crtc_state->active has been toggled. + * @mode_changed: @mode or @enable has been changed + * @active_changed: @active has been toggled. 
* @connectors_changed: connectors to this crtc have been updated * @zpos_changed: zpos values of planes on this crtc have been updated * @color_mgmt_changed: color management properties have changed (degamma or @@ -93,8 +90,6 @@ struct drm_plane_helper_funcs; * @plane_mask: bitmask of (1 << drm_plane_index(plane)) of attached planes * @connector_mask: bitmask of (1 << drm_connector_index(connector)) of attached connectors * @encoder_mask: bitmask of (1 << drm_encoder_index(encoder)) of attached encoders - * @last_vblank_count: for helpers and drivers to capture the vblank of the - * update to ensure framebuffer cleanup isn't done too early * @adjusted_mode: for use by helpers and drivers to compute adjusted mode timings * @mode: current mode timings * @mode_blob: &drm_property_blob for @mode @@ -107,9 +102,10 @@ struct drm_plane_helper_funcs; * * Note that the distinction between @enable and @active is rather subtile: * Flipping @active while @enable is set without changing anything else may - * never return in a failure from the ->atomic_check callback. Userspace assumes - * that a DPMS On will always succeed. In other words: @enable controls resource - * assignment, @active controls the actual hardware state. + * never return in a failure from the &drm_mode_config_funcs.atomic_check + * callback. Userspace assumes that a DPMS On will always succeed. In other + * words: @enable controls resource assignment, @active controls the actual + * hardware state. * * The three booleans active_changed, connectors_changed and mode_changed are * intended to indicate whether a full modeset is needed, rather than strictly @@ -140,9 +136,6 @@ struct drm_crtc_state { u32 connector_mask; u32 encoder_mask; - /* last_vblank_count: for vblank waits before cleanup */ - u32 last_vblank_count; - /* adjusted_mode: for use by helpers and drivers */ struct drm_display_mode adjusted_mode; @@ -157,6 +150,15 @@ struct drm_crtc_state { struct drm_property_blob *gamma_lut; /** + * @target_vblank: + * + * Target vertical blank period when a page flip + * should take effect. + */ + + u32 target_vblank; + + /** * @event: * * Optional pointer to a DRM event to signal upon completion of the @@ -323,7 +325,7 @@ struct drm_crtc_funcs { * * This is the main legacy entry point to change the modeset state on a * CRTC. All the details of the desired configuration are passed in a - * struct &drm_mode_set - see there for details. + * &struct drm_mode_set - see there for details. * * Drivers implementing atomic modeset should use * drm_atomic_helper_set_config() to implement this hook. @@ -345,8 +347,8 @@ struct drm_crtc_funcs { * through the DRM_MODE_PAGE_FLIP_ASYNC flag). When an application * requests a page flip the DRM core verifies that the new frame buffer * is large enough to be scanned out by the CRTC in the currently - * configured mode and then calls the CRTC ->page_flip() operation with a - * pointer to the new frame buffer. + * configured mode and then calls this hook with a pointer to the new + * frame buffer. * * The driver must wait for any pending rendering to the new framebuffer * to complete before executing the flip. It should also wait for any @@ -354,7 +356,7 @@ struct drm_crtc_funcs { * shared dma-buf. * * An application can request to be notified when the page flip has - * completed. The drm core will supply a struct &drm_event in the event + * completed. The drm core will supply a &struct drm_event in the event * parameter in this case. 
This can be handled by the
	 * drm_crtc_send_vblank_event() function, which the driver should call on
	 * the provided event upon completion of the flip. Note that if
@@ -381,7 +383,7 @@ struct drm_crtc_funcs {
	 * RETURNS:
	 *
	 * 0 on success or a negative error code on failure. Note that if a
-	 * ->page_flip() operation is already pending the callback should return
+	 * page flip operation is already pending the callback should return
	 * -EBUSY. Pageflips on a disabled CRTC (either by setting a NULL mode
	 * or just runtime disabled through DPMS or the new atomic
	 * "ACTIVE" state) should result in an -EINVAL error code. Note that
@@ -433,19 +435,19 @@ struct drm_crtc_funcs {
	 * @atomic_duplicate_state:
	 *
	 * Duplicate the current atomic state for this CRTC and return it.
-	 * The core and helpers gurantee that any atomic state duplicated with
+	 * The core and helpers guarantee that any atomic state duplicated with
	 * this hook and still owned by the caller (i.e. not transferred to the
-	 * driver by calling ->atomic_commit() from struct
-	 * &drm_mode_config_funcs) will be cleaned up by calling the
-	 * @atomic_destroy_state hook in this structure.
+	 * driver by calling &drm_mode_config_funcs.atomic_commit) will be
+	 * cleaned up by calling the @atomic_destroy_state hook in this
+	 * structure.
	 *
-	 * Atomic drivers which don't subclass struct &drm_crtc should use
+	 * Atomic drivers which don't subclass &struct drm_crtc_state should use
	 * drm_atomic_helper_crtc_duplicate_state(). Drivers that subclass the
	 * state structure to extend it with driver-private state should use
	 * __drm_atomic_helper_crtc_duplicate_state() to make sure shared state is
	 * duplicated in a consistent fashion across drivers.
	 *
-	 * It is an error to call this hook before crtc->state has been
+	 * It is an error to call this hook before &drm_crtc.state has been
	 * initialized correctly.
	 *
	 * NOTE:
@@ -558,7 +560,7 @@ struct drm_crtc_funcs {
	 *
	 * This optional hook should be used to unregister the additional
	 * userspace interfaces attached to the crtc from
-	 * late_unregister(). It is called from drm_dev_unregister(),
+	 * @late_register. It is called from drm_dev_unregister(),
	 * early in the driver unload sequence to disable userspace access
	 * before data structures are torn down.
	 */
@@ -591,7 +593,7 @@ struct drm_crtc_funcs {
	/**
	 * @atomic_print_state:
	 *
-	 * If driver subclasses struct &drm_crtc_state, it should implement
+	 * If driver subclasses &struct drm_crtc_state, it should implement
	 * this optional hook for printing additional driver specific state.
	 *
	 * Do not call this directly, use drm_atomic_crtc_print_state()
@@ -639,8 +641,8 @@ struct drm_crtc {
	 *
	 * This provides a read lock for the overall crtc state (mode, dpms
	 * state, ...) and a write lock for everything which can be updated
-	 * without a full modeset (fb, cursor data, crtc properties ...). Full
-	 * modeset also need to grab dev->mode_config.connection_mutex.
+	 * without a full modeset (fb, cursor data, crtc properties ...). A full
+	 * modeset also needs to grab &drm_mode_config.connection_mutex.
	 */
	struct drm_modeset_lock mutex;
@@ -772,10 +774,8 @@ struct drm_crtc {
 * @connectors: array of connectors to drive with this CRTC if possible
 * @num_connectors: size of @connectors array
 *
- * Represents a single crtc the connectors that it drives with what mode
- * and from which framebuffer it scans out from.
- *
- * This is used to set modes.
+ * This represents a modeset configuration for the legacy SETCRTC ioctl and is
+ * also used internally.
Atomic drivers instead use &drm_atomic_state. */ struct drm_mode_set { struct drm_framebuffer *fb; @@ -824,14 +824,21 @@ static inline uint32_t drm_crtc_mask(const struct drm_crtc *crtc) return 1 << drm_crtc_index(crtc); } -void drm_crtc_get_hv_timing(const struct drm_display_mode *mode, - int *hdisplay, int *vdisplay); int drm_crtc_force_disable(struct drm_crtc *crtc); int drm_crtc_force_disable_all(struct drm_device *dev); int drm_mode_set_config_internal(struct drm_mode_set *set); +struct drm_crtc *drm_crtc_from_index(struct drm_device *dev, int idx); -/* Helpers */ +/** + * drm_crtc_find - look up a CRTC object from its ID + * @dev: DRM device + * @id: &drm_mode_object ID + * + * This can be used to look up a CRTC from its userspace ID. Only used by + * drivers for legacy IOCTLs and interface, nowadays extensions to the KMS + * userspace interface should be done using &drm_property. + */ static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev, uint32_t id) { @@ -840,21 +847,14 @@ static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev, return mo ? obj_to_crtc(mo) : NULL; } +/** + * drm_for_each_crtc - iterate over all CRTCs + * @crtc: a &struct drm_crtc as the loop cursor + * @dev: the &struct drm_device + * + * Iterate over all CRTCs of @dev. + */ #define drm_for_each_crtc(crtc, dev) \ list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head) -static inline void -assert_drm_connector_list_read_locked(struct drm_mode_config *mode_config) -{ - /* - * The connector hotadd/remove code currently grabs both locks when - * updating lists. Hence readers need only hold either of them to be - * safe and the check amounts to - * - * WARN_ON(not_holding(A) && not_holding(B)). - */ - WARN_ON(!mutex_is_locked(&mode_config->mutex) && - !drm_modeset_is_locked(&mode_config->connection_mutex)); -} - #endif /* __DRM_CRTC_H__ */ diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h index 982c299e435a..d026f5017c33 100644 --- a/include/drm/drm_crtc_helper.h +++ b/include/drm/drm_crtc_helper.h @@ -73,6 +73,5 @@ extern void drm_kms_helper_hotplug_event(struct drm_device *dev); extern void drm_kms_helper_poll_disable(struct drm_device *dev); extern void drm_kms_helper_poll_enable(struct drm_device *dev); -extern void drm_kms_helper_poll_enable_locked(struct drm_device *dev); #endif diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index 55bbeb0ff594..04681359a6f5 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h @@ -194,7 +194,8 @@ # define DP_PSR_SETUP_TIME_0 (6 << 1) # define DP_PSR_SETUP_TIME_MASK (7 << 1) # define DP_PSR_SETUP_TIME_SHIFT 1 - +# define DP_PSR2_SU_Y_COORDINATE_REQUIRED (1 << 4) /* eDP 1.4a */ +# define DP_PSR2_SU_GRANULARITY_REQUIRED (1 << 5) /* eDP 1.4b */ /* * 0x80-0x8f describe downstream port capabilities, but there are two layouts * based on whether DP_DETAILED_CAP_INFO_AVAILABLE was set. 
If it was not,
@@ -568,6 +569,16 @@
 #define DP_RECEIVER_ALPM_STATUS		    0x200b  /* eDP 1.4 */
 # define DP_ALPM_LOCK_TIMEOUT_ERROR	    (1 << 0)

+#define DP_DPRX_FEATURE_ENUMERATION_LIST    0x2210  /* DP 1.3 */
+# define DP_GTC_CAP					(1 << 0)  /* DP 1.3 */
+# define DP_SST_SPLIT_SDP_CAP				(1 << 1)  /* DP 1.4 */
+# define DP_AV_SYNC_CAP					(1 << 2)  /* DP 1.3 */
+# define DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED	(1 << 3)  /* DP 1.3 */
+# define DP_VSC_EXT_VESA_SDP_SUPPORTED			(1 << 4)  /* DP 1.4 */
+# define DP_VSC_EXT_VESA_SDP_CHAINING_SUPPORTED		(1 << 5)  /* DP 1.4 */
+# define DP_VSC_EXT_CEA_SDP_SUPPORTED			(1 << 6)  /* DP 1.4 */
+# define DP_VSC_EXT_CEA_SDP_CHAINING_SUPPORTED		(1 << 7)  /* DP 1.4 */
+
 /* DP 1.2 Sideband message defines */
 /* peer device type - DP 1.2a Table 2-92 */
 #define DP_PEER_DEVICE_NONE		0x0
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 003207670597..f4b4d154b98e 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -414,7 +414,7 @@ struct drm_dp_mst_topology_mgr {
	/**
	 * @dev: device pointer for adding i2c devices etc.
	 */
-	struct device *dev;
+	struct drm_device *dev;
	/**
	 * @cbs: callbacks for connector addition and destruction.
	 */
@@ -493,8 +493,8 @@ struct drm_dp_mst_topology_mgr {
	int total_pbn;
	/**
-	 * @qlock: protects @tx_msg_downq, the tx_slots in struct
-	 * &drm_dp_mst_branch and txmsg->state once they are queued
+	 * @qlock: protects @tx_msg_downq, the &drm_dp_mst_branch.tx_slots and
+	 * &drm_dp_sideband_msg_tx.state once they are queued
	 */
	struct mutex qlock;
	/**
@@ -508,8 +508,7 @@ struct drm_dp_mst_topology_mgr {
	struct mutex payload_lock;
	/**
	 * @proposed_vcpis: Array of pointers for the new VCPI allocation. The
-	 * VCPI structure itself is embedded into the corresponding
-	 * &drm_dp_mst_port structure.
+	 * VCPI structure itself is &drm_dp_mst_port.vcpi.
	 */
	struct drm_dp_vcpi **proposed_vcpis;
	/**
@@ -556,7 +555,10 @@ struct drm_dp_mst_topology_mgr {
	struct work_struct destroy_connector_work;
 };

-int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, struct device *dev, struct drm_dp_aux *aux, int max_dpcd_transaction_bytes, int max_payloads, int conn_base_id);
+int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
+				 struct drm_device *dev, struct drm_dp_aux *aux,
+				 int max_dpcd_transaction_bytes,
+				 int max_payloads, int conn_base_id);

 void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr);
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index c4fc49583dc0..5699f42195fe 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -64,16 +64,55 @@ struct drm_mode_create_dumb;
 * structure for GEM drivers.
 */
struct drm_driver {
+
+	/**
+	 * @load:
+	 *
+	 * Backward-compatible driver callback to complete
+	 * initialization steps after the driver is registered.  For
+	 * this reason, it may suffer from race conditions and its use is
+	 * deprecated for new drivers.  It is therefore only supported
+	 * for existing drivers not yet converted to the new scheme.
+	 * See drm_dev_init() and drm_dev_register() for the proper and
+	 * race-free way to set up a &struct drm_device.
+	 *
+	 * Returns:
+	 *
+	 * Zero on success, non-zero value on failure.
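+	 *
+	 * A hedged sketch only (not part of this patch): the race-free setup
+	 * recommended above could look roughly like this in a hypothetical
+	 * driver, where foo_driver and foo_hw_init() are made up and error
+	 * handling is omitted:
+	 *
+	 *	drm = drm_dev_alloc(&foo_driver, parent);
+	 *	foo_hw_init(drm);
+	 *	drm_dev_register(drm, 0);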
+ */ int (*load) (struct drm_device *, unsigned long flags); - int (*firstopen) (struct drm_device *); int (*open) (struct drm_device *, struct drm_file *); void (*preclose) (struct drm_device *, struct drm_file *file_priv); void (*postclose) (struct drm_device *, struct drm_file *); void (*lastclose) (struct drm_device *); - int (*unload) (struct drm_device *); - int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv); - int (*dma_quiescent) (struct drm_device *); - int (*context_dtor) (struct drm_device *dev, int context); + + /** + * @unload: + * + * Reverse the effects of the driver load callback. Ideally, + * the clean up performed by the driver should happen in the + * reverse order of the initialization. Similarly to the load + * hook, this handler is deprecated and its usage should be + * dropped in favor of an open-coded teardown function at the + * driver layer. See drm_dev_unregister() and drm_dev_unref() + * for the proper way to remove a &struct drm_device. + * + * The unload() hook is called right after unregistering + * the device. + * + */ + void (*unload) (struct drm_device *); + + /** + * @release: + * + * Optional callback for destroying device data after the final + * reference is released, i.e. the device is being destroyed. Drivers + * using this callback are responsible for calling drm_dev_fini() + * to finalize the device and then freeing the struct themselves. + */ + void (*release) (struct drm_device *); + int (*set_busid)(struct drm_device *dev, struct drm_master *master); /** @@ -119,20 +158,6 @@ struct drm_driver { void (*disable_vblank) (struct drm_device *dev, unsigned int pipe); /** - * @device_is_agp: - * - * Called by drm_device_is_agp(). Typically used to determine if a card - * is really attached to AGP or not. - * - * Returns: - * - * One of three values is returned depending on whether or not the - * card is absolutely not AGP (return of 0), absolutely is AGP - * (return of 1), or may or may not be AGP (return of 2). - */ - int (*device_is_agp) (struct drm_device *dev); - - /** * @get_scanout_position: * * Called by vblank timestamping code. @@ -282,7 +307,7 @@ struct drm_driver { /** * @gem_free_object_unlocked: deconstructor for drm_gem_objects * - * This is for drivers which are not encumbered with dev->struct_mutex + * This is for drivers which are not encumbered with &drm_device.struct_mutex * legacy locking schemes. Use this hook instead of @gem_free_object. */ void (*gem_free_object_unlocked) (struct drm_gem_object *obj); @@ -327,9 +352,6 @@ struct drm_driver { int (*gem_prime_mmap)(struct drm_gem_object *obj, struct vm_area_struct *vma); - /* vga arb irq handler */ - void (*vgaarb_irq)(struct drm_device *dev, bool state); - /** * @dumb_create: * @@ -398,13 +420,20 @@ struct drm_driver { char *date; u32 driver_features; - int dev_priv_size; const struct drm_ioctl_desc *ioctls; int num_ioctls; const struct file_operations *fops; + /* Everything below here is for legacy driver, never use! */ + /* private: */ + /* List of devices hanging off this driver with stealth attach. 
*/ struct list_head legacy_dev_list; + int (*firstopen) (struct drm_device *); + int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv); + int (*dma_quiescent) (struct drm_device *); + int (*context_dtor) (struct drm_device *dev, int context); + int dev_priv_size; }; extern __printf(6, 7) @@ -419,6 +448,8 @@ extern unsigned int drm_debug; int drm_dev_init(struct drm_device *dev, struct drm_driver *driver, struct device *parent); +void drm_dev_fini(struct drm_device *dev); + struct drm_device *drm_dev_alloc(struct drm_driver *driver, struct device *parent); int drm_dev_register(struct drm_device *dev, unsigned long flags); diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index 38eabf65f19d..577d5063e63d 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h @@ -24,6 +24,7 @@ #define __DRM_EDID_H__ #include <linux/types.h> +#include <linux/hdmi.h> struct drm_device; struct i2c_adapter; @@ -248,6 +249,7 @@ struct detailed_timing { # define DRM_ELD_AUD_SYNCH_DELAY_MAX 0xfa /* 500 ms */ #define DRM_ELD_SPEAKER 7 +# define DRM_ELD_SPEAKER_MASK 0x7f # define DRM_ELD_SPEAKER_RLRC (1 << 6) # define DRM_ELD_SPEAKER_FLRC (1 << 5) # define DRM_ELD_SPEAKER_RC (1 << 4) @@ -322,8 +324,6 @@ struct cea_sad { struct drm_encoder; struct drm_connector; struct drm_display_mode; -struct hdmi_avi_infoframe; -struct hdmi_vendor_infoframe; void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid); int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads); @@ -346,6 +346,11 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame, int drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame, const struct drm_display_mode *mode); +void +drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame, + const struct drm_display_mode *mode, + enum hdmi_quantization_range rgb_quant_range, + bool rgb_quant_range_selectable); /** * drm_eld_mnl - Get ELD monitor name length in bytes. @@ -414,6 +419,18 @@ static inline int drm_eld_size(const uint8_t *eld) } /** + * drm_eld_get_spk_alloc - Get speaker allocation + * @eld: pointer to an ELD memory structure + * + * The returned value is the speakers mask. User has to use %DRM_ELD_SPEAKER + * field definitions to identify speakers. 
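+ *
+ * A hedged decoding sketch (illustrative only, reusing the
+ * DRM_ELD_SPEAKER_* bit definitions above):
+ *
+ *	u8 spk = drm_eld_get_spk_alloc(eld);
+ *
+ *	if (spk & DRM_ELD_SPEAKER_RLRC)
+ *		pr_debug("sink has rear left/right of center speakers\n");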
+ */ +static inline u8 drm_eld_get_spk_alloc(const uint8_t *eld) +{ + return eld[DRM_ELD_SPEAKER] & DRM_ELD_SPEAKER_MASK; +} + +/** * drm_eld_get_conn_type - Get device type hdmi/dp connected * @eld: pointer to an ELD memory structure * @@ -442,6 +459,8 @@ enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code); bool drm_detect_hdmi_monitor(struct edid *edid); bool drm_detect_monitor_audio(struct edid *edid); bool drm_rgb_quant_range_selectable(struct edid *edid); +enum hdmi_quantization_range +drm_default_rgb_quant_range(const struct drm_display_mode *mode); int drm_add_modes_noedid(struct drm_connector *connector, int hdisplay, int vdisplay); void drm_set_preferred_mode(struct drm_connector *connector, diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h index c7438ff0d609..8d8245ec0181 100644 --- a/include/drm/drm_encoder.h +++ b/include/drm/drm_encoder.h @@ -25,8 +25,12 @@ #include <linux/list.h> #include <linux/ctype.h> +#include <drm/drm_crtc.h> +#include <drm/drm_mode.h> #include <drm/drm_mode_object.h> +struct drm_encoder; + /** * struct drm_encoder_funcs - encoder controls * @@ -71,7 +75,7 @@ struct drm_encoder_funcs { * * This optional hook should be used to unregister the additional * userspace interfaces attached to the encoder from - * late_unregister(). It is called from drm_dev_unregister(), + * @late_register. It is called from drm_dev_unregister(), * early in the driver unload sequence to disable userspace access * before data structures are torndown. */ @@ -188,9 +192,6 @@ static inline unsigned int drm_encoder_index(struct drm_encoder *encoder) return encoder->index; } -/* FIXME: We have an include file mess still, drm_crtc.h needs untangling. */ -static inline uint32_t drm_crtc_mask(const struct drm_crtc *crtc); - /** * drm_encoder_crtc_ok - can a given crtc drive a given encoder? 
* @encoder: encoder to test diff --git a/include/drm/drm_encoder_slave.h b/include/drm/drm_encoder_slave.h index 82cdf611393d..1107b4b1c599 100644 --- a/include/drm/drm_encoder_slave.h +++ b/include/drm/drm_encoder_slave.h @@ -29,6 +29,7 @@ #include <drm/drmP.h> #include <drm/drm_crtc.h> +#include <drm/drm_encoder.h> /** * struct drm_encoder_slave_funcs - Entry points exposed by a slave encoder driver diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h index 3b00f6480b83..a5ecc0a58260 100644 --- a/include/drm/drm_fb_cma_helper.h +++ b/include/drm/drm_fb_cma_helper.h @@ -16,19 +16,17 @@ struct drm_plane; struct drm_plane_state; struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev, - unsigned int preferred_bpp, unsigned int num_crtc, - unsigned int max_conn_count, const struct drm_fb_helper_funcs *funcs); + unsigned int preferred_bpp, unsigned int max_conn_count, + const struct drm_framebuffer_funcs *funcs); struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev, - unsigned int preferred_bpp, unsigned int num_crtc, - unsigned int max_conn_count); + unsigned int preferred_bpp, unsigned int max_conn_count); void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma); void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma); void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma); void drm_fbdev_cma_set_suspend(struct drm_fbdev_cma *fbdev_cma, int state); -int drm_fbdev_cma_create_with_funcs(struct drm_fb_helper *helper, - struct drm_fb_helper_surface_size *sizes, - const struct drm_framebuffer_funcs *funcs); +void drm_fbdev_cma_set_suspend_unlocked(struct drm_fbdev_cma *fbdev_cma, + int state); void drm_fb_cma_destroy(struct drm_framebuffer *fb); int drm_fb_cma_create_handle(struct drm_framebuffer *fb, diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h index 975deedd593e..6f5acebb266a 100644 --- a/include/drm/drm_fb_helper.h +++ b/include/drm/drm_fb_helper.h @@ -181,7 +181,7 @@ struct drm_fb_helper_connector { * * This is the main structure used by the fbdev helpers. Drivers supporting * fbdev emulation should embedded this into their overall driver structure. - * Drivers must also fill out a struct &drm_fb_helper_funcs with a few + * Drivers must also fill out a &struct drm_fb_helper_funcs with a few * operations. 
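 *
 * A hedged setup sketch (hypothetical driver code: foo_fb_funcs and
 * max_conn are made up, error handling is omitted):
 *
 *	drm_fb_helper_prepare(dev, helper, &foo_fb_funcs);
 *	drm_fb_helper_init(dev, helper, max_conn);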
*/ struct drm_fb_helper { @@ -236,8 +236,7 @@ struct drm_fb_helper { void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper, const struct drm_fb_helper_funcs *funcs); int drm_fb_helper_init(struct drm_device *dev, - struct drm_fb_helper *helper, int crtc_count, - int max_conn); + struct drm_fb_helper *helper, int max_conn); void drm_fb_helper_fini(struct drm_fb_helper *helper); int drm_fb_helper_blank(int blank, struct fb_info *info); int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, @@ -295,8 +294,7 @@ struct drm_display_mode * drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, int width, int height); struct drm_display_mode * -drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn, - int width, int height); +drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn); int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector); int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, @@ -309,7 +307,7 @@ static inline void drm_fb_helper_prepare(struct drm_device *dev, } static inline int drm_fb_helper_init(struct drm_device *dev, - struct drm_fb_helper *helper, int crtc_count, + struct drm_fb_helper *helper, int max_conn) { return 0; diff --git a/include/drm/drm_flip_work.h b/include/drm/drm_flip_work.h index d387cf06ae05..21c3d512d25c 100644 --- a/include/drm/drm_flip_work.h +++ b/include/drm/drm_flip_work.h @@ -54,7 +54,7 @@ typedef void (*drm_flip_func_t)(struct drm_flip_work *work, void *val); /** * struct drm_flip_task - flip work task * @node: list entry element - * @data: data to pass to work->func + * @data: data to pass to &drm_flip_work.func */ struct drm_flip_task { struct list_head node; diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h index a232e7f0c869..dd1e3e99dcff 100644 --- a/include/drm/drm_framebuffer.h +++ b/include/drm/drm_framebuffer.h @@ -40,8 +40,8 @@ struct drm_framebuffer_funcs { * * Clean up framebuffer resources, specifically also unreference the * backing storage. The core guarantees to call this function for every - * framebuffer successfully created by ->fb_create() in - * &drm_mode_config_funcs. Drivers must also call + * framebuffer successfully created by calling + * &drm_mode_config_funcs.fb_create. Drivers must also call * drm_framebuffer_cleanup() to release DRM core resources for this * framebuffer. */ @@ -51,7 +51,7 @@ struct drm_framebuffer_funcs { * @create_handle: * * Create a buffer handle in the driver-specific buffer manager (either - * GEM or TTM) valid for the passed-in struct &drm_file. This is used by + * GEM or TTM) valid for the passed-in &struct drm_file. This is used by * the core to implement the GETFB IOCTL, which returns (for * sufficiently priviledged user) also a native buffer handle. This can * be used for seamless transitions between modesetting clients by @@ -112,8 +112,8 @@ struct drm_framebuffer { */ struct drm_device *dev; /** - * @head: Place on the dev->mode_config.fb_list, access protected by - * dev->mode_config.fb_lock. + * @head: Place on the &drm_mode_config.fb_list, access protected by + * &drm_mode_config.fb_lock. 
*/ struct list_head head; @@ -122,6 +122,10 @@ struct drm_framebuffer { */ struct drm_mode_object base; /** + * @format: framebuffer format information + */ + const struct drm_format_info *format; + /** * @funcs: framebuffer vfunc table */ const struct drm_framebuffer_funcs *funcs; @@ -145,7 +149,7 @@ struct drm_framebuffer { * * This should not be used to specifiy x/y pixel offsets into the buffer * data (even for linear buffers). Specifying an x/y pixel offset is - * instead done through the source rectangle in struct &drm_plane_state. + * instead done through the source rectangle in &struct drm_plane_state. */ unsigned int offsets[4]; /** @@ -166,28 +170,11 @@ struct drm_framebuffer { */ unsigned int height; /** - * @depth: Depth in bits per pixel for RGB formats. 0 for everything - * else. Legacy information derived from @pixel_format, it's suggested to use - * the DRM FOURCC codes and helper functions directly instead. - */ - unsigned int depth; - /** - * @bits_per_pixel: Storage used bits per pixel for RGB formats. 0 for - * everything else. Legacy information derived from @pixel_format, it's - * suggested to use the DRM FOURCC codes and helper functions directly - * instead. - */ - int bits_per_pixel; - /** * @flags: Framebuffer flags like DRM_MODE_FB_INTERLACED or * DRM_MODE_FB_MODIFIERS. */ int flags; /** - * @pixel_format: DRM FOURCC code describing the pixel format. - */ - uint32_t pixel_format; /* fourcc format */ - /** * @hot_x: X coordinate of the cursor hotspot. Used by the legacy cursor * IOCTL when the driver supports cursor through a DRM_PLANE_TYPE_CURSOR * universal plane. @@ -200,8 +187,7 @@ struct drm_framebuffer { */ int hot_y; /** - * @filp_head: Placed on struct &drm_file fbs list_head, protected by - * fbs_lock in the same structure. + * @filp_head: Placed on &drm_file.fbs, protected by &drm_file.fbs_lock. */ struct list_head filp_head; }; @@ -273,8 +259,8 @@ static inline void drm_framebuffer_assign(struct drm_framebuffer **p, * @fb: the loop cursor * @dev: the DRM device * - * Iterate over all framebuffers of @dev. User must hold the fb_lock from - * &drm_mode_config. + * Iterate over all framebuffers of @dev. User must hold + * &drm_mode_config.fb_lock. */ #define drm_for_each_fb(fb, dev) \ for (WARN_ON(!mutex_is_locked(&(dev)->mode_config.fb_lock)), \ @@ -282,4 +268,10 @@ static inline void drm_framebuffer_assign(struct drm_framebuffer **p, struct drm_framebuffer, head); \ &fb->head != (&(dev)->mode_config.fb_list); \ fb = list_next_entry(fb, head)) + +int drm_framebuffer_plane_width(int width, + const struct drm_framebuffer *fb, int plane); +int drm_framebuffer_plane_height(int height, + const struct drm_framebuffer *fb, int plane); + #endif diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h index 9f63736e6163..449a41b56ffc 100644 --- a/include/drm/drm_gem.h +++ b/include/drm/drm_gem.h @@ -63,7 +63,7 @@ struct drm_gem_object { * drops to 0 any global names (e.g. the id in the flink namespace) will * be cleared. * - * Protected by dev->object_name_lock. + * Protected by &drm_device.object_name_lock. */ unsigned handle_count; @@ -106,8 +106,8 @@ struct drm_gem_object { * @name: * * Global name for this object, starts at 1. 0 means unnamed. - * Access is covered by dev->object_name_lock. This is used by the GEM_FLINK - * and GEM_OPEN ioctls. + * Access is covered by &drm_device.object_name_lock. This is used by + * the GEM_FLINK and GEM_OPEN ioctls. */ int name; @@ -150,7 +150,7 @@ struct drm_gem_object { * through importing or exporting). 
We break the resulting reference * loop when the last gem handle for this object is released. * - * Protected by obj->object_name_lock. + * Protected by &drm_device.object_name_lock. */ struct dma_buf *dma_buf; @@ -163,7 +163,7 @@ struct drm_gem_object { * attachment point for the device. This is invariant over the lifetime * of a gem object. * - * The driver's ->gem_free_object callback is responsible for cleaning + * The &drm_driver.gem_free_object callback is responsible for cleaning * up the dma_buf attachment and references acquired at import time. * * Note that the drm gem/prime core does not depend upon drivers setting @@ -204,7 +204,7 @@ drm_gem_object_reference(struct drm_gem_object *obj) * @obj: GEM buffer object * * This function is meant to be used by drivers which are not encumbered with - * dev->struct_mutex legacy locking and which are using the + * &drm_device.struct_mutex legacy locking and which are using the * gem_free_object_unlocked callback. It avoids all the locking checks and * locking overhead of drm_gem_object_unreference() and * drm_gem_object_unreference_unlocked(). @@ -212,8 +212,8 @@ drm_gem_object_reference(struct drm_gem_object *obj) * Drivers should never call this directly in their code. Instead they should * wrap it up into a ``driver_gem_object_unreference(struct driver_gem_object * *obj)`` wrapper function, and use that. Shared code should never call this, to - * avoid breaking drivers by accident which still depend upon dev->struct_mutex - * locking. + * avoid breaking drivers by accident which still depend upon + * &drm_device.struct_mutex locking. */ static inline void __drm_gem_object_unreference(struct drm_gem_object *obj) diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h index acd6af8a8e67..2abcd5190cc1 100644 --- a/include/drm/drm_gem_cma_helper.h +++ b/include/drm/drm_gem_cma_helper.h @@ -53,6 +53,23 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm, extern const struct vm_operations_struct drm_gem_cma_vm_ops; +#ifndef CONFIG_MMU +unsigned long drm_gem_cma_get_unmapped_area(struct file *filp, + unsigned long addr, + unsigned long len, + unsigned long pgoff, + unsigned long flags); +#else +static inline unsigned long drm_gem_cma_get_unmapped_area(struct file *filp, + unsigned long addr, + unsigned long len, + unsigned long pgoff, + unsigned long flags) +{ + return -EINVAL; +} +#endif + #ifdef CONFIG_DEBUG_FS void drm_gem_cma_describe(struct drm_gem_cma_object *obj, struct seq_file *m); #endif diff --git a/include/drm/drm_irq.h b/include/drm/drm_irq.h index 293d08caab60..2fb880462a57 100644 --- a/include/drm/drm_irq.h +++ b/include/drm/drm_irq.h @@ -51,8 +51,8 @@ struct drm_pending_vblank_event { * * Note that for historical reasons - the vblank handling code is still shared * with legacy/non-kms drivers - this is a free-standing structure not directly - * connected to struct &drm_crtc. But all public interface functions are taking - * a struct &drm_crtc to hide this implementation detail. + * connected to &struct drm_crtc. But all public interface functions are taking + * a &struct drm_crtc to hide this implementation detail. */ struct drm_vblank_crtc { /** @@ -67,7 +67,7 @@ struct drm_vblank_crtc { * @disable_timer: Disable timer for the delayed vblank disabling * hysteresis logic. Vblank disabling is controlled through the * drm_vblank_offdelay module option and the setting of the - * max_vblank_count value in the &drm_device structure. + * &drm_device.max_vblank_count value. 
	 */
	struct timer_list disable_timer;
@@ -92,7 +92,7 @@ struct drm_vblank_crtc {
	 */
	atomic_t refcount;		/* number of users of vblank interrupts per crtc */
	/**
-	 * @last: Protected by dev->vbl_lock, used for wraparound handling.
+	 * @last: Protected by &drm_device.vbl_lock, used for wraparound handling.
	 */
	u32 last;
	/**
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index 0b8371795aeb..2ef16bf25826 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -1,6 +1,7 @@
/**************************************************************************
 *
 * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX. USA.
+ * Copyright 2016 Intel Corporation
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -39,50 +40,132 @@
 #include <linux/bug.h>
 #include <linux/rbtree.h>
 #include <linux/kernel.h>
+#include <linux/mm_types.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
-#ifdef CONFIG_DEBUG_FS
-#include <linux/seq_file.h>
-#endif
 #ifdef CONFIG_DRM_DEBUG_MM
 #include <linux/stackdepot.h>
 #endif
+#include <drm/drm_print.h>

-enum drm_mm_search_flags {
-	DRM_MM_SEARCH_DEFAULT =		0,
-	DRM_MM_SEARCH_BEST =		1 << 0,
-	DRM_MM_SEARCH_BELOW =		1 << 1,
-};
+#ifdef CONFIG_DRM_DEBUG_MM
+#define DRM_MM_BUG_ON(expr) BUG_ON(expr)
+#else
+#define DRM_MM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
+#endif

-enum drm_mm_allocator_flags {
-	DRM_MM_CREATE_DEFAULT =		0,
-	DRM_MM_CREATE_TOP =		1 << 0,
-};
+/**
+ * enum drm_mm_insert_mode - control search and allocation behaviour
+ *
+ * The &struct drm_mm range manager supports finding a suitable hole using
+ * a number of search trees. These trees are organised by size, by address and
+ * in most recent eviction order. This allows the user to find either the
+ * smallest hole to reuse, the lowest or highest address to reuse, or simply
+ * reuse the most recent eviction that fits. When allocating the &drm_mm_node
+ * from within the hole, the &drm_mm_insert_mode also dictates whether to
+ * allocate the lowest matching address or the highest.
+ */
+enum drm_mm_insert_mode {
+	/**
+	 * @DRM_MM_INSERT_BEST:
+	 *
+	 * Search for the smallest hole (within the search range) that fits
+	 * the desired node.
+	 *
+	 * Allocates the node from the bottom of the found hole.
+	 */
+	DRM_MM_INSERT_BEST = 0,
+
+	/**
+	 * @DRM_MM_INSERT_LOW:
+	 *
+	 * Search for the lowest hole (address closest to 0, within the search
+	 * range) that fits the desired node.
+	 *
+	 * Allocates the node from the bottom of the found hole.
+	 */
+	DRM_MM_INSERT_LOW,

-#define DRM_MM_BOTTOMUP DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT
-#define DRM_MM_TOPDOWN DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP
+	/**
+	 * @DRM_MM_INSERT_HIGH:
+	 *
+	 * Search for the highest hole (address closest to U64_MAX, within the
+	 * search range) that fits the desired node.
+	 *
+	 * Allocates the node from the *top* of the found hole. The specified
+	 * alignment for the node is applied to the base of the node
+	 * (&drm_mm_node.start).
+	 */
+	DRM_MM_INSERT_HIGH,

+	/**
+	 * @DRM_MM_INSERT_EVICT:
+	 *
+	 * Search for the most recently evicted hole (within the search range)
+	 * that fits the desired node. This is appropriate for use immediately
+	 * after performing an eviction scan (see drm_mm_scan_init()) and
+	 * removing the selected nodes to form a hole.
+	 *
+	 * Allocates the node from the bottom of the found hole.
+	 */
+	DRM_MM_INSERT_EVICT,
+};
+
+/**
+ * struct drm_mm_node - allocated block in the DRM allocator
+ *
+ * This represents an allocated block in a &drm_mm allocator.
Except for
+ * pre-reserved nodes inserted using drm_mm_reserve_node() the structure is
+ * entirely opaque and should only be accessed through the provided functions.
+ * Since allocation of these nodes is entirely handled by the driver, they can
+ * be embedded.
+ */
 struct drm_mm_node {
-	struct list_head node_list;
-	struct list_head hole_stack;
-	struct rb_node rb;
-	unsigned hole_follows : 1;
-	unsigned scanned_block : 1;
-	unsigned scanned_prev_free : 1;
-	unsigned scanned_next_free : 1;
-	unsigned scanned_preceeds_hole : 1;
-	unsigned allocated : 1;
+	/** @color: Opaque driver-private tag. */
	unsigned long color;
+	/** @start: Start address of the allocated block. */
	u64 start;
+	/** @size: Size of the allocated block. */
	u64 size;
-	u64 __subtree_last;
+	/* private: */
	struct drm_mm *mm;
+	struct list_head node_list;
+	struct list_head hole_stack;
+	struct rb_node rb;
+	struct rb_node rb_hole_size;
+	struct rb_node rb_hole_addr;
+	u64 __subtree_last;
+	u64 hole_size;
+	bool allocated : 1;
+	bool scanned_block : 1;
 #ifdef CONFIG_DRM_DEBUG_MM
	depot_stack_handle_t stack;
 #endif
 };

+/**
+ * struct drm_mm - DRM allocator
+ *
+ * DRM range allocator with a few special functions and features geared towards
+ * managing GPU memory. Except for the @color_adjust callback the structure is
+ * entirely opaque and should only be accessed through the provided functions
+ * and macros. This structure can be embedded into larger driver structures.
+ */
 struct drm_mm {
+	/**
+	 * @color_adjust:
+	 *
+	 * Optional driver callback to further apply restrictions on a hole. The
+	 * node argument points at the node containing the hole from which the
+	 * block would be allocated (see drm_mm_hole_follows() and friends). The
+	 * other arguments are the size of the block to be allocated. The driver
+	 * can adjust the start and end as needed to e.g. insert guard pages.
+	 */
+	void (*color_adjust)(const struct drm_mm_node *node,
+			     unsigned long color,
+			     u64 *start, u64 *end);
+
+	/* private: */
	/* List of all memory nodes that immediately precede a free hole. */
	struct list_head hole_stack;
	/* head_node.node_list is the list of all memory nodes, ordered
@@ -90,33 +173,53 @@ struct drm_mm {
	struct drm_mm_node head_node;
	/* Keep an interval_tree for fast lookup of drm_mm_nodes by address. */
	struct rb_root interval_tree;
+	struct rb_root holes_size;
+	struct rb_root holes_addr;

-	unsigned int scan_check_range : 1;
-	unsigned scan_alignment;
-	unsigned long scan_color;
-	u64 scan_size;
-	u64 scan_hit_start;
-	u64 scan_hit_end;
-	unsigned scanned_blocks;
-	u64 scan_start;
-	u64 scan_end;
-	struct drm_mm_node *prev_scanned_node;
-
-	void (*color_adjust)(struct drm_mm_node *node, unsigned long color,
-			     u64 *start, u64 *end);
+	unsigned long scan_active;
+};
+
+/**
+ * struct drm_mm_scan - DRM allocator eviction roster data
+ *
+ * This structure tracks data needed for the eviction roster set up using
+ * drm_mm_scan_init(), and used with drm_mm_scan_add_block() and
+ * drm_mm_scan_remove_block(). The structure is entirely opaque and should only
+ * be accessed through the provided functions and macros. It is meant to be
+ * allocated temporarily by the driver on the stack.
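+ *
+ * A hedged usage sketch (illustrative only: the embedding foo_node type,
+ * driver_lru list and lru_link member are made up, and the mandatory
+ * cleanup pass with drm_mm_scan_remove_block() is elided):
+ *
+ *	struct drm_mm_scan scan;
+ *	struct foo_node *entry;
+ *
+ *	drm_mm_scan_init(&scan, mm, size, 0, 0, DRM_MM_INSERT_EVICT);
+ *	list_for_each_entry(entry, &driver_lru, lru_link)
+ *		if (drm_mm_scan_add_block(&scan, &entry->node))
+ *			break;	/* a large enough hole can now be formed */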
+ */ +struct drm_mm_scan { + /* private: */ + struct drm_mm *mm; + + u64 size; + u64 alignment; + u64 remainder_mask; + + u64 range_start; + u64 range_end; + + u64 hit_start; + u64 hit_end; + + unsigned long color; + enum drm_mm_insert_mode mode; }; /** * drm_mm_node_allocated - checks whether a node is allocated * @node: drm_mm_node to check * - * Drivers should use this helpers for proper encapusulation of drm_mm + * Drivers are required to clear a node prior to using it with the + * drm_mm range manager. + * + * Drivers should use this helper for proper encapsulation of drm_mm * internals. * * Returns: * True if the @node is allocated. */ -static inline bool drm_mm_node_allocated(struct drm_mm_node *node) +static inline bool drm_mm_node_allocated(const struct drm_mm_node *node) { return node->allocated; } @@ -125,18 +228,38 @@ static inline bool drm_mm_node_allocated(struct drm_mm_node *node) * drm_mm_initialized - checks whether an allocator is initialized * @mm: drm_mm to check * - * Drivers should use this helpers for proper encapusulation of drm_mm + * Drivers should clear the struct drm_mm prior to initialisation if they + * want to use this function. + * + * Drivers should use this helper for proper encapsulation of drm_mm * internals. * * Returns: * True if the @mm is initialized. */ -static inline bool drm_mm_initialized(struct drm_mm *mm) +static inline bool drm_mm_initialized(const struct drm_mm *mm) { return mm->hole_stack.next; } -static inline u64 __drm_mm_hole_node_start(struct drm_mm_node *hole_node) +/** + * drm_mm_hole_follows - checks whether a hole follows this node + * @node: drm_mm_node to check + * + * Holes are embedded into the drm_mm using the tail of a drm_mm_node. + * If you wish to know whether a hole follows this particular node, + * query this function. See also drm_mm_hole_node_start() and + * drm_mm_hole_node_end(). + * + * Returns: + * True if a hole follows the @node. + */ +static inline bool drm_mm_hole_follows(const struct drm_mm_node *node) +{ + return node->hole_size; +} + +static inline u64 __drm_mm_hole_node_start(const struct drm_mm_node *hole_node) { return hole_node->start + hole_node->size; } @@ -145,20 +268,20 @@ static inline u64 __drm_mm_hole_node_start(struct drm_mm_node *hole_node) * drm_mm_hole_node_start - computes the start of the hole following @node * @hole_node: drm_mm_node which implicitly tracks the following hole * - * This is useful for driver-sepific debug dumpers. Otherwise drivers should not - * inspect holes themselves. Drivers must check first whether a hole indeed - * follows by looking at node->hole_follows. + * This is useful for driver-specific debug dumpers. Otherwise drivers should + * not inspect holes themselves. Drivers must check first whether a hole indeed + * follows by looking at drm_mm_hole_follows() * * Returns: * Start of the subsequent hole. 
*/ -static inline u64 drm_mm_hole_node_start(struct drm_mm_node *hole_node) +static inline u64 drm_mm_hole_node_start(const struct drm_mm_node *hole_node) { - BUG_ON(!hole_node->hole_follows); + DRM_MM_BUG_ON(!drm_mm_hole_follows(hole_node)); return __drm_mm_hole_node_start(hole_node); } -static inline u64 __drm_mm_hole_node_end(struct drm_mm_node *hole_node) +static inline u64 __drm_mm_hole_node_end(const struct drm_mm_node *hole_node) { return list_next_entry(hole_node, node_list)->start; } @@ -167,148 +290,162 @@ static inline u64 __drm_mm_hole_node_end(struct drm_mm_node *hole_node) * drm_mm_hole_node_end - computes the end of the hole following @node * @hole_node: drm_mm_node which implicitly tracks the following hole * - * This is useful for driver-sepific debug dumpers. Otherwise drivers should not - * inspect holes themselves. Drivers must check first whether a hole indeed - * follows by looking at node->hole_follows. + * This is useful for driver-specific debug dumpers. Otherwise drivers should + * not inspect holes themselves. Drivers must check first whether a hole indeed + * follows by looking at drm_mm_hole_follows(). * * Returns: * End of the subsequent hole. */ -static inline u64 drm_mm_hole_node_end(struct drm_mm_node *hole_node) +static inline u64 drm_mm_hole_node_end(const struct drm_mm_node *hole_node) { return __drm_mm_hole_node_end(hole_node); } /** + * drm_mm_nodes - list of nodes under the drm_mm range manager + * @mm: the struct drm_mm range manger + * + * As the drm_mm range manager hides its node_list deep with its + * structure, extracting it looks painful and repetitive. This is + * not expected to be used outside of the drm_mm_for_each_node() + * macros and similar internal functions. + * + * Returns: + * The node list, may be empty. + */ +#define drm_mm_nodes(mm) (&(mm)->head_node.node_list) + +/** * drm_mm_for_each_node - iterator to walk over all allocated nodes - * @entry: drm_mm_node structure to assign to in each iteration step - * @mm: drm_mm allocator to walk + * @entry: &struct drm_mm_node to assign to in each iteration step + * @mm: &drm_mm allocator to walk * * This iterator walks over all nodes in the range allocator. It is implemented - * with list_for_each, so not save against removal of elements. + * with list_for_each(), so not save against removal of elements. */ -#define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \ - &(mm)->head_node.node_list, \ - node_list) - -#define __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, backwards) \ - for (entry = list_entry((backwards) ? (mm)->hole_stack.prev : (mm)->hole_stack.next, struct drm_mm_node, hole_stack); \ - &entry->hole_stack != &(mm)->hole_stack ? \ - hole_start = drm_mm_hole_node_start(entry), \ - hole_end = drm_mm_hole_node_end(entry), \ - 1 : 0; \ - entry = list_entry((backwards) ? entry->hole_stack.prev : entry->hole_stack.next, struct drm_mm_node, hole_stack)) +#define drm_mm_for_each_node(entry, mm) \ + list_for_each_entry(entry, drm_mm_nodes(mm), node_list) + +/** + * drm_mm_for_each_node_safe - iterator to walk over all allocated nodes + * @entry: &struct drm_mm_node to assign to in each iteration step + * @next: &struct drm_mm_node to store the next step + * @mm: &drm_mm allocator to walk + * + * This iterator walks over all nodes in the range allocator. It is implemented + * with list_for_each_safe(), so save against removal of elements. 
+ */
+#define drm_mm_for_each_node_safe(entry, next, mm) \
+	list_for_each_entry_safe(entry, next, drm_mm_nodes(mm), node_list)

 /**
 * drm_mm_for_each_hole - iterator to walk over all holes
- * @entry: drm_mm_node used internally to track progress
- * @mm: drm_mm allocator to walk
+ * @pos: &drm_mm_node used internally to track progress
+ * @mm: &drm_mm allocator to walk
 * @hole_start: ulong variable to assign the hole start to on each iteration
 * @hole_end: ulong variable to assign the hole end to on each iteration
 *
 * This iterator walks over all holes in the range allocator. It is implemented
- * with list_for_each, so not save against removal of elements. @entry is used
+ * with list_for_each(), so it is not safe against removal of elements. @pos is used
 * internally and will not reflect a real drm_mm_node for the very first hole.
 * Hence users of this iterator may not access it.
 *
 * Implementation Note:
 * We need to inline list_for_each_entry in order to be able to set hole_start
 * and hole_end on each iteration while keeping the macro sane.
- *
- * The __drm_mm_for_each_hole version is similar, but with added support for
- * going backwards.
 */
-#define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \
-	__drm_mm_for_each_hole(entry, mm, hole_start, hole_end, 0)
+#define drm_mm_for_each_hole(pos, mm, hole_start, hole_end) \
+	for (pos = list_first_entry(&(mm)->hole_stack, \
+				    typeof(*pos), hole_stack); \
+	     &pos->hole_stack != &(mm)->hole_stack ? \
+	     hole_start = drm_mm_hole_node_start(pos), \
+	     hole_end = hole_start + pos->hole_size, \
+	     1 : 0; \
+	     pos = list_next_entry(pos, hole_stack))

 /*
 * Basic range manager support (drm_mm.c)
 */
 int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
+int drm_mm_insert_node_in_range(struct drm_mm *mm,
+				struct drm_mm_node *node,
+				u64 size,
+				u64 alignment,
+				unsigned long color,
+				u64 start,
+				u64 end,
+				enum drm_mm_insert_mode mode);

-int drm_mm_insert_node_generic(struct drm_mm *mm,
-			       struct drm_mm_node *node,
-			       u64 size,
-			       unsigned alignment,
-			       unsigned long color,
-			       enum drm_mm_search_flags sflags,
-			       enum drm_mm_allocator_flags aflags);
 /**
- * drm_mm_insert_node - search for space and insert @node
+ * drm_mm_insert_node_generic - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocate node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
- * @flags: flags to fine-tune the allocation
+ * @color: opaque tag value to use for this node
+ * @mode: fine-tune the allocation search and placement
 *
- * This is a simplified version of drm_mm_insert_node_generic() with @color set
- * to 0.
+ * This is a simplified version of drm_mm_insert_node_in_range() with no
+ * range restrictions applied.
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
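+ *
+ * A hedged allocation sketch (illustrative only: size and the eviction
+ * helper foo_evict() are made up; kzalloc() satisfies the clearing rule):
+ *
+ *	struct drm_mm_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
+ *	int err;
+ *
+ *	err = drm_mm_insert_node_generic(mm, node, size, PAGE_SIZE, 0,
+ *					 DRM_MM_INSERT_BEST);
+ *	if (err == -ENOSPC)
+ *		foo_evict();	/* e.g. run an eviction scan, then retry */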
*/ -static inline int drm_mm_insert_node(struct drm_mm *mm, - struct drm_mm_node *node, - u64 size, - unsigned alignment, - enum drm_mm_search_flags flags) +static inline int +drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node, + u64 size, u64 alignment, + unsigned long color, + enum drm_mm_insert_mode mode) { - return drm_mm_insert_node_generic(mm, node, size, alignment, 0, flags, - DRM_MM_CREATE_DEFAULT); + return drm_mm_insert_node_in_range(mm, node, + size, alignment, color, + 0, U64_MAX, mode); } -int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, - struct drm_mm_node *node, - u64 size, - unsigned alignment, - unsigned long color, - u64 start, - u64 end, - enum drm_mm_search_flags sflags, - enum drm_mm_allocator_flags aflags); /** - * drm_mm_insert_node_in_range - ranged search for space and insert @node + * drm_mm_insert_node - search for space and insert @node * @mm: drm_mm to allocate from * @node: preallocate node to insert * @size: size of the allocation - * @alignment: alignment of the allocation - * @start: start of the allowed range for this node - * @end: end of the allowed range for this node - * @flags: flags to fine-tune the allocation * - * This is a simplified version of drm_mm_insert_node_in_range_generic() with - * @color set to 0. + * This is a simplified version of drm_mm_insert_node_generic() with @color set + * to 0. * * The preallocated node must be cleared to 0. * * Returns: * 0 on success, -ENOSPC if there's no suitable hole. */ -static inline int drm_mm_insert_node_in_range(struct drm_mm *mm, - struct drm_mm_node *node, - u64 size, - unsigned alignment, - u64 start, - u64 end, - enum drm_mm_search_flags flags) +static inline int drm_mm_insert_node(struct drm_mm *mm, + struct drm_mm_node *node, + u64 size) { - return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, - 0, start, end, flags, - DRM_MM_CREATE_DEFAULT); + return drm_mm_insert_node_generic(mm, node, size, 0, 0, 0); } void drm_mm_remove_node(struct drm_mm_node *node); void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new); -void drm_mm_init(struct drm_mm *mm, - u64 start, - u64 size); +void drm_mm_init(struct drm_mm *mm, u64 start, u64 size); void drm_mm_takedown(struct drm_mm *mm); -bool drm_mm_clean(struct drm_mm *mm); + +/** + * drm_mm_clean - checks whether an allocator is clean + * @mm: drm_mm allocator to check + * + * Returns: + * True if the allocator is completely free, false if there's still a node + * allocated in it. 
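+ *
+ * A hedged sketch of the intended use at takedown time (illustrative only):
+ *
+ *	WARN_ON(!drm_mm_clean(mm));
+ *	drm_mm_takedown(mm);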
+ */
+static inline bool drm_mm_clean(const struct drm_mm *mm)
+{
+	return list_empty(drm_mm_nodes(mm));
+}

 struct drm_mm_node *
-__drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last);
+__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last);

 /**
 * drm_mm_for_each_node_in_range - iterator to walk over a range of
@@ -329,22 +466,49 @@ __drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last);
	     node__ && node__->start < (end__);				\
	     node__ = list_next_entry(node__, node_list))

-void drm_mm_init_scan(struct drm_mm *mm,
-		      u64 size,
-		      unsigned alignment,
-		      unsigned long color);
-void drm_mm_init_scan_with_range(struct drm_mm *mm,
-				 u64 size,
-				 unsigned alignment,
-				 unsigned long color,
-				 u64 start,
-				 u64 end);
-bool drm_mm_scan_add_block(struct drm_mm_node *node);
-bool drm_mm_scan_remove_block(struct drm_mm_node *node);
-
-void drm_mm_debug_table(struct drm_mm *mm, const char *prefix);
-#ifdef CONFIG_DEBUG_FS
-int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm);
-#endif
+void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
+				 struct drm_mm *mm,
+				 u64 size, u64 alignment, unsigned long color,
+				 u64 start, u64 end,
+				 enum drm_mm_insert_mode mode);
+
+/**
+ * drm_mm_scan_init - initialize lru scanning
+ * @scan: scan state
+ * @mm: drm_mm to scan
+ * @size: size of the allocation
+ * @alignment: alignment of the allocation
+ * @color: opaque tag value to use for the allocation
+ * @mode: fine-tune the allocation search and placement
+ *
+ * This is a simplified version of drm_mm_scan_init_with_range() with no range
+ * restrictions applied.
+ *
+ * This simply sets up the scanning routines with the parameters for the desired
+ * hole.
+ *
+ * Warning:
+ * As long as the scan list is non-empty, no other operations than
+ * adding/removing nodes to/from the scan list are allowed.
+ */
+static inline void drm_mm_scan_init(struct drm_mm_scan *scan,
+				    struct drm_mm *mm,
+				    u64 size,
+				    u64 alignment,
+				    unsigned long color,
+				    enum drm_mm_insert_mode mode)
+{
+	drm_mm_scan_init_with_range(scan, mm,
+				    size, alignment, color,
+				    0, U64_MAX, mode);
+}
+
+bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
+			   struct drm_mm_node *node);
+bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
+			      struct drm_mm_node *node);
+struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan);
+
+void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p);

 #endif
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index 137432386310..26ff46ab26fb 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -47,7 +47,7 @@ struct drm_mode_config_funcs {
	 *
	 * Create a new framebuffer object. The core does basic checks on the
	 * requested metadata, but most of that is left to the driver. See
-	 * struct &drm_mode_fb_cmd2 for details.
+	 * &struct drm_mode_fb_cmd2 for details.
	 *
	 * If the parameters are deemed valid and the backing storage objects in
	 * the underlying memory manager all exist, then the driver allocates
@@ -132,10 +132,10 @@ struct drm_mode_config_funcs {
	 * that before calling this hook.
	 *
	 * See the documentation of @atomic_commit for an exhaustive list of
-	 * error conditions which don't have to be checked at the
-	 * ->atomic_check() stage?
+	 * error conditions which don't have to be checked in this
+	 * callback.
	 *
-	 * See the documentation for struct &drm_atomic_state for how exactly
+	 * See the documentation for &struct drm_atomic_state for how exactly
	 * an atomic modeset update is described.
 *
	 * Drivers using the atomic helpers can implement this hook using
@@ -171,7 +171,7 @@ struct drm_mode_config_funcs {
	 * calling this function, and that nothing has been changed in the
	 * interim.
	 *
-	 * See the documentation for struct &drm_atomic_state for how exactly
+	 * See the documentation for &struct drm_atomic_state for how exactly
	 * an atomic modeset update is described.
	 *
	 * Drivers using the atomic helpers can implement this hook using
@@ -198,10 +198,10 @@ struct drm_mode_config_funcs {
	 * completed. These events are per-CRTC and can be distinguished by the
	 * CRTC index supplied in &drm_event to userspace.
	 *
-	 * The drm core will supply a struct &drm_event in the event
-	 * member of each CRTC's &drm_crtc_state structure. See the
-	 * documentation for &drm_crtc_state for more details about the precise
-	 * semantics of this event.
+	 * The drm core will supply a &struct drm_event in each CRTC's
+	 * &drm_crtc_state.event. See the documentation for
+	 * &drm_crtc_state.event for more details about the precise semantics of
+	 * this event.
	 *
	 * NOTE:
	 *
@@ -365,7 +365,13 @@ struct drm_mode_config {
	struct list_head fb_list;

	/**
-	 * @num_connector: Number of connectors on this device.
+	 * @connector_list_lock: Protects @num_connector and
+	 * @connector_list.
+	 */
+	spinlock_t connector_list_lock;
+	/**
+	 * @num_connector: Number of connectors on this device. Protected by
+	 * @connector_list_lock.
	 */
	int num_connector;
	/**
@@ -373,7 +379,9 @@ struct drm_mode_config {
	 */
	struct ida connector_ida;
	/**
-	 * @connector_list: List of connector objects.
+	 * @connector_list: List of connector objects. Protected by
+	 * @connector_list_lock. Only use drm_for_each_connector_iter() and
+	 * &struct drm_connector_list_iter to walk this list.
	 */
	struct list_head connector_list;
	int num_encoder;
diff --git a/include/drm/drm_mode_object.h b/include/drm/drm_mode_object.h
index 43460b21d112..2c017adf6d74 100644
--- a/include/drm/drm_mode_object.h
+++ b/include/drm/drm_mode_object.h
@@ -86,10 +86,15 @@ struct drm_object_properties {
	 *
	 * Note that atomic drivers do not store mutable properties in this
	 * array, but only the decoded values in the corresponding state
-	 * structure. The decoding is done using the ->atomic_get_property and
-	 * ->atomic_set_property hooks of the corresponding object. Hence atomic
-	 * drivers should not use drm_object_property_set_value() and
-	 * drm_object_property_get_value() on mutable objects, i.e. those
+	 * structure. The decoding is done using the
+	 * &drm_crtc_funcs.atomic_get_property and
+	 * &drm_crtc_funcs.atomic_set_property hooks for &struct drm_crtc. For
+	 * &struct drm_plane the hooks are &drm_plane_funcs.atomic_get_property
+	 * and &drm_plane_funcs.atomic_set_property. And for
+	 * &struct drm_connector the hooks are
+	 * &drm_connector_funcs.atomic_get_property and
+	 * &drm_connector_funcs.atomic_set_property.
+	 *
+	 * Hence atomic drivers should not use drm_object_property_set_value()
+	 * and drm_object_property_get_value() on mutable objects, i.e. those
	 * without the DRM_MODE_PROP_IMMUTABLE flag set.
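+	 *
+	 * For properties that are immutable the legacy helpers remain valid;
+	 * a hedged sketch (illustrative only, obj and prop are made up):
+	 *
+	 *	uint64_t val;
+	 *
+	 *	drm_object_property_get_value(obj, prop, &val);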
*/ uint64_t values[DRM_OBJECT_MAX_PROPERTY]; diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h index 9934d91619c1..6dd34280e892 100644 --- a/include/drm/drm_modes.h +++ b/include/drm/drm_modes.h @@ -459,6 +459,8 @@ int of_get_drm_display_mode(struct device_node *np, void drm_mode_set_name(struct drm_display_mode *mode); int drm_mode_hsync(const struct drm_display_mode *mode); int drm_mode_vrefresh(const struct drm_display_mode *mode); +void drm_mode_get_hv_timing(const struct drm_display_mode *mode, + int *hdisplay, int *vdisplay); void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags); diff --git a/include/drm/drm_modeset_helper.h b/include/drm/drm_modeset_helper.h index b8051d5abe10..cb0ec92e11e6 100644 --- a/include/drm/drm_modeset_helper.h +++ b/include/drm/drm_modeset_helper.h @@ -27,7 +27,8 @@ void drm_helper_move_panel_connectors_to_head(struct drm_device *); -void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb, +void drm_helper_mode_fill_fb_struct(struct drm_device *dev, + struct drm_framebuffer *fb, const struct drm_mode_fb_cmd2 *mode_cmd); int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h index 69c3974bf133..091c42205667 100644 --- a/include/drm/drm_modeset_helper_vtables.h +++ b/include/drm/drm_modeset_helper_vtables.h @@ -30,6 +30,7 @@ #define __DRM_MODESET_HELPER_VTABLES_H__ #include <drm/drm_crtc.h> +#include <drm/drm_encoder.h> /** * DOC: overview @@ -110,9 +111,9 @@ struct drm_crtc_helper_funcs { * This callback is used to validate a mode. The parameter mode is the * display mode that userspace requested, adjusted_mode is the mode the * encoders need to be fed with. Note that this is the inverse semantics - * of the meaning for the &drm_encoder and &drm_bridge - * ->mode_fixup() functions. If the CRTC cannot support the requested - * conversion from mode to adjusted_mode it should reject the modeset. + * of the meaning for the &drm_encoder and &drm_bridge_funcs.mode_fixup + * vfunc. If the CRTC cannot support the requested conversion from mode + * to adjusted_mode it should reject the modeset. * * This function is used by both legacy CRTC helpers and atomic helpers. * With atomic helpers it is optional. @@ -133,17 +134,18 @@ struct drm_crtc_helper_funcs { * * Also beware that neither core nor helpers filter modes before * passing them to the driver: While the list of modes that is - * advertised to userspace is filtered using the connector's - * ->mode_valid() callback, neither the core nor the helpers do any - * filtering on modes passed in from userspace when setting a mode. It - * is therefore possible for userspace to pass in a mode that was - * previously filtered out using ->mode_valid() or add a custom mode - * that wasn't probed from EDID or similar to begin with. Even though - * this is an advanced feature and rarely used nowadays, some users rely - * on being able to specify modes manually so drivers must be prepared - * to deal with it. Specifically this means that all drivers need not - * only validate modes in ->mode_valid() but also in ->mode_fixup() to - * make sure invalid modes passed in from userspace are rejected. + * advertised to userspace is filtered using the + * &drm_connector.mode_valid callback, neither the core nor the helpers + * do any filtering on modes passed in from userspace when setting a + * mode. 
It is therefore possible for userspace to pass in a mode that + * was previously filtered out using &drm_connector.mode_valid or add a + * custom mode that wasn't probed from EDID or similar to begin with. + * Even though this is an advanced feature and rarely used nowadays, + * some users rely on being able to specify modes manually so drivers + * must be prepared to deal with it. Specifically this means that all + * drivers need not only validate modes in &drm_connector.mode_valid but + * also in this or in the &drm_encoder_helper_funcs.mode_fixup callback + * to make sure invalid modes passed in from userspace are rejected. * * RETURNS: * @@ -204,7 +206,7 @@ struct drm_crtc_helper_funcs { * optimized fast-path instead of a full mode set operation with all the * resulting flickering. If it is not present * drm_crtc_helper_set_config() will fall back to a full modeset, using - * the ->mode_set() callback. Since it can't update other planes it's + * the @mode_set callback. Since it can't update other planes it's * incompatible with atomic modeset support. * * This callback is only used by the CRTC helpers and deprecated. @@ -237,8 +239,7 @@ struct drm_crtc_helper_funcs { /** * @load_lut: * - * Load a LUT prepared with the @gamma_set functions from - * &drm_fb_helper_funcs. + * Load a LUT prepared with the &drm_fb_helper_funcs.gamma_set vfunc. * * This callback is optional and is only used by the fbdev emulation * helpers. @@ -256,10 +257,11 @@ struct drm_crtc_helper_funcs { * * This callback should be used to disable the CRTC. With the atomic * drivers it is called after all encoders connected to this CRTC have - * been shut off already using their own ->disable hook. If that - * sequence is too simple drivers can just add their own hooks and call - * it from this CRTC callback here by looping over all encoders - * connected to it using for_each_encoder_on_crtc(). + * been shut off already using their own + * &drm_encoder_helper_funcs.disable hook. If that sequence is too + * simple drivers can just add their own hooks and call it from this + * CRTC callback here by looping over all encoders connected to it using + * for_each_encoder_on_crtc(). * * This hook is used both by legacy CRTC helpers and atomic helpers. * Atomic drivers don't need to implement it if there's no need to @@ -288,10 +290,10 @@ struct drm_crtc_helper_funcs { * * This callback should be used to enable the CRTC. With the atomic * drivers it is called before all encoders connected to this CRTC are - * enabled through the encoder's own ->enable hook. If that sequence is - * too simple drivers can just add their own hooks and call it from this - * CRTC callback here by looping over all encoders connected to it using - * for_each_encoder_on_crtc(). + * enabled through the encoder's own &drm_encoder_helper_funcs.enable + * hook. If that sequence is too simple drivers can just add their own + * hooks and call it from this CRTC callback here by looping over all + * encoders connected to it using for_each_encoder_on_crtc(). * * This hook is used only by atomic helpers, for symmetry with @disable. * Atomic drivers don't need to implement it if there's no need to @@ -315,16 +317,16 @@ struct drm_crtc_helper_funcs { * beforehand. This is calling order used by the default helper * implementation in drm_atomic_helper_check(). 
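To make the check ordering described here concrete: a CRTC atomic_check typically validates the requested configuration against hardware limits, including any shared resources the planes requested earlier. A minimal sketch, in which my_crtc_state, to_my_crtc_state() and the bandwidth limit are hypothetical driver constructs, not part of this patch:

static int my_crtc_atomic_check(struct drm_crtc *crtc,
				struct drm_crtc_state *state)
{
	struct my_crtc_state *my_state = to_my_crtc_state(state);

	if (!state->enable)
		return 0;	/* nothing to validate while the CRTC is off */

	/* The planes ran their atomic_check first and requested bandwidth. */
	if (my_state->requested_bandwidth > MY_MAX_BANDWIDTH)
		return -EINVAL;

	return 0;
}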
 	 *
-	 * When using drm_atomic_helper_check_planes() CRTCs' ->atomic_check()
-	 * hooks are called after the ones for planes, which allows drivers to
-	 * assign shared resources requested by planes in the CRTC callback
-	 * here. For more complicated dependencies the driver can call the provided
-	 * check helpers multiple times until the computed state has a final
-	 * configuration and everything has been checked.
+	 * When using drm_atomic_helper_check_planes() this hook is called
+	 * after the &drm_plane_helper_funcs.atomic_check hook for planes, which
+	 * allows drivers to assign shared resources requested by planes in this
+	 * callback. For more complicated dependencies the driver can call
+	 * the provided check helpers multiple times until the computed state
+	 * has a final configuration and everything has been checked.
 	 *
 	 * This function is also allowed to inspect any other object's state and
 	 * can add more state objects to the atomic commit if needed. Care must
-	 * be taken though to ensure that state check&compute functions for
+	 * be taken though to ensure that state check and compute functions for
 	 * these added states are all called, and derived state in other objects
 	 * all updated. Again the recommendation is to just call check helpers
 	 * until a maximal configuration is reached.
@@ -399,10 +401,11 @@ struct drm_crtc_helper_funcs {
 	 *
 	 * This callback should be used to disable the CRTC. With the atomic
 	 * drivers it is called after all encoders connected to this CRTC have
-	 * been shut off already using their own ->disable hook. If that
-	 * sequence is too simple drivers can just add their own hooks and call
-	 * it from this CRTC callback here by looping over all encoders
-	 * connected to it using for_each_encoder_on_crtc().
+	 * been shut off already using their own
+	 * &drm_encoder_helper_funcs.disable hook. If that sequence is too
+	 * simple drivers can just add their own hooks and call it from this
+	 * CRTC callback here by looping over all encoders connected to it using
+	 * for_each_encoder_on_crtc().
 	 *
 	 * This hook is used only by atomic helpers. Atomic drivers don't
 	 * need to implement it if there's no need to disable anything at the
@@ -482,16 +485,18 @@ struct drm_encoder_helper_funcs {
 	 * Also beware that neither core nor helpers filter modes before
 	 * passing them to the driver: While the list of modes that is
 	 * advertised to userspace is filtered using the connector's
-	 * ->mode_valid() callback, neither the core nor the helpers do any
-	 * filtering on modes passed in from userspace when setting a mode. It
-	 * is therefore possible for userspace to pass in a mode that was
-	 * previously filtered out using ->mode_valid() or add a custom mode
-	 * that wasn't probed from EDID or similar to begin with. Even though
-	 * this is an advanced feature and rarely used nowadays, some users rely
-	 * on being able to specify modes manually so drivers must be prepared
-	 * to deal with it. Specifically this means that all drivers need not
-	 * only validate modes in ->mode_valid() but also in ->mode_fixup() to
-	 * make sure invalid modes passed in from userspace are rejected.
+	 * &drm_connector_helper_funcs.mode_valid callback, neither the core nor
+	 * the helpers do any filtering on modes passed in from userspace when
+	 * setting a mode. It is therefore possible for userspace to pass in a
+	 * mode that was previously filtered out using
+	 * &drm_connector_helper_funcs.mode_valid or add a custom mode that
+	 * wasn't probed from EDID or similar to begin with.
Even though this + * is an advanced feature and rarely used nowadays, some users rely on + * being able to specify modes manually so drivers must be prepared to + * deal with it. Specifically this means that all drivers need not only + * validate modes in &drm_connector.mode_valid but also in this or in + * the &drm_crtc_helper_funcs.mode_fixup callback to make sure + * invalid modes passed in from userspace are rejected. * * RETURNS: * @@ -543,7 +548,7 @@ struct drm_encoder_helper_funcs { * use this hook, because the helper library calls it only once and not * every time the display pipeline is suspend using either DPMS or the * new "ACTIVE" property. Such drivers should instead move all their - * encoder setup into the ->enable() callback. + * encoder setup into the @enable callback. * * This callback is used both by the legacy CRTC helpers and the atomic * modeset helpers. It is optional in the atomic helpers. @@ -569,7 +574,7 @@ struct drm_encoder_helper_funcs { * use this hook, because the helper library calls it only once and not * every time the display pipeline is suspended using either DPMS or the * new "ACTIVE" property. Such drivers should instead move all their - * encoder setup into the ->enable() callback. + * encoder setup into the @enable callback. * * This callback is used by the atomic modeset helpers in place of the * @mode_set callback, if set by the driver. It is optional and should @@ -620,10 +625,10 @@ struct drm_encoder_helper_funcs { * * This callback should be used to disable the encoder. With the atomic * drivers it is called before this encoder's CRTC has been shut off - * using the CRTC's own ->disable hook. If that sequence is too simple - * drivers can just add their own driver private encoder hooks and call - * them from CRTC's callback by looping over all encoders connected to - * it using for_each_encoder_on_crtc(). + * using their own &drm_crtc_helper_funcs.disable hook. If that + * sequence is too simple drivers can just add their own driver private + * encoder hooks and call them from CRTC's callback by looping over all + * encoders connected to it using for_each_encoder_on_crtc(). * * This hook is used both by legacy CRTC helpers and atomic helpers. * Atomic drivers don't need to implement it if there's no need to @@ -650,10 +655,10 @@ struct drm_encoder_helper_funcs { * * This callback should be used to enable the encoder. With the atomic * drivers it is called after this encoder's CRTC has been enabled using - * the CRTC's own ->enable hook. If that sequence is too simple drivers - * can just add their own driver private encoder hooks and call them - * from CRTC's callback by looping over all encoders connected to it - * using for_each_encoder_on_crtc(). + * their own &drm_crtc_helper_funcs.enable hook. If that sequence is + * too simple drivers can just add their own driver private encoder + * hooks and call them from CRTC's callback by looping over all encoders + * connected to it using for_each_encoder_on_crtc(). * * This hook is used only by atomic helpers, for symmetry with @disable. * Atomic drivers don't need to implement it if there's no need to @@ -715,7 +720,7 @@ struct drm_connector_helper_funcs { * @get_modes: * * This function should fill in all modes currently valid for the sink - * into the connector->probed_modes list. It should also update the + * into the &drm_connector.probed_modes list. It should also update the * EDID property by calling drm_mode_connector_update_edid_property(). 
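A bare-bones sketch of the get_modes contract just stated (the following paragraphs describe the usual, fuller implementation); the cached edid pointer and to_my_connector() are assumed driver state, not part of this patch:

static int my_connector_get_modes(struct drm_connector *connector)
{
	struct my_connector *my = to_my_connector(connector);

	/* Keep the EDID property in sync, even when no sink EDID exists. */
	drm_mode_connector_update_edid_property(connector, my->edid);
	if (!my->edid)
		return 0;

	/* Parses the cached EDID and fills connector->probed_modes. */
	return drm_add_edid_modes(connector, my->edid);
}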
* * The usual way to implement this is to cache the EDID retrieved in the @@ -724,8 +729,9 @@ struct drm_connector_helper_funcs { * them by calling drm_add_edid_modes(). But connectors that driver a * fixed panel can also manually add specific modes using * drm_mode_probed_add(). Drivers which manually add modes should also - * make sure that the @display_info, @width_mm and @height_mm fields of the - * struct &drm_connector are filled in. + * make sure that the &drm_connector.display_info, + * &drm_connector.width_mm and &drm_connector.height_mm fields are + * filled in. * * Virtual drivers that just want some standard VESA mode with a given * resolution can call drm_add_modes_noedid(), and mark the preferred @@ -734,7 +740,7 @@ struct drm_connector_helper_funcs { * Finally drivers that support audio probably want to update the ELD * data, too, using drm_edid_to_eld(). * - * This function is only called after the ->detect() hook has indicated + * This function is only called after the @detect hook has indicated * that a sink is connected and when the EDID isn't overridden through * sysfs or the kernel commandline. * @@ -767,8 +773,8 @@ struct drm_connector_helper_funcs { * * RETURNS: * - * Either MODE_OK or one of the failure reasons in enum - * &drm_mode_status. + * Either &drm_mode_status.MODE_OK or one of the failure reasons in &enum + * drm_mode_status. */ enum drm_mode_status (*mode_valid)(struct drm_connector *connector, struct drm_display_mode *mode); @@ -874,7 +880,7 @@ struct drm_plane_helper_funcs { * RETURNS: * * 0 on success or one of the following negative error codes allowed by - * the atomic_commit hook in &drm_mode_config_funcs. When using helpers + * the &drm_mode_config_funcs.atomic_commit vfunc. When using helpers * this callback is the only one which can fail an atomic commit, * everything else must complete successfully. */ @@ -897,7 +903,7 @@ struct drm_plane_helper_funcs { * * Drivers should check plane specific constraints in this hook. * - * When using drm_atomic_helper_check_planes() plane's ->atomic_check() + * When using drm_atomic_helper_check_planes() plane's @atomic_check * hooks are called before the ones for CRTCs, which allows drivers to * request shared resources that the CRTC controls here. For more * complicated dependencies the driver can call the provided check helpers @@ -906,7 +912,7 @@ struct drm_plane_helper_funcs { * * This function is also allowed to inspect any other object's state and * can add more state objects to the atomic commit if needed. Care must - * be taken though to ensure that state check&compute functions for + * be taken though to ensure that state check and compute functions for * these added states are all called, and derived state in other objects * all updated. Again the recommendation is to just call check helpers * until a maximal configuration is reached. @@ -935,8 +941,8 @@ struct drm_plane_helper_funcs { * @atomic_update: * * Drivers should use this function to update the plane state. This - * hook is called in-between the ->atomic_begin() and - * ->atomic_flush() of &drm_crtc_helper_funcs. + * hook is called in-between the &drm_crtc_helper_funcs.atomic_begin and + * drm_crtc_helper_funcs.atomic_flush callbacks. * * Note that the power state of the display pipe when this function is * called depends upon the exact helpers and calling sequence the driver @@ -952,14 +958,15 @@ struct drm_plane_helper_funcs { * @atomic_disable: * * Drivers should use this function to unconditionally disable a plane. 
-	 * This hook is called in-between the ->atomic_begin() and
-	 * ->atomic_flush() of &drm_crtc_helper_funcs. It is an alternative to
+	 * This hook is called in-between the
+	 * &drm_crtc_helper_funcs.atomic_begin and
+	 * &drm_crtc_helper_funcs.atomic_flush callbacks. It is an alternative to
 	 * @atomic_update, which will be called for disabling planes, too, if
 	 * the @atomic_disable hook isn't implemented.
 	 *
 	 * This hook is also useful to disable planes in preparation of a modeset,
 	 * by calling drm_atomic_helper_disable_planes_on_crtc() from the
-	 * ->disable() hook in &drm_crtc_helper_funcs.
+	 * &drm_crtc_helper_funcs.disable hook.
 	 *
 	 * Note that the power state of the display pipe when this function is
 	 * called depends upon the exact helpers and calling sequence the driver
diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h
index d918ce45ec2c..96d39fbd12ca 100644
--- a/include/drm/drm_modeset_lock.h
+++ b/include/drm/drm_modeset_lock.h
@@ -64,7 +64,7 @@ struct drm_modeset_acquire_ctx {
 /**
  * struct drm_modeset_lock - used for locking modeset resources.
  * @mutex: resource locking
- * @head: used to hold it's place on state->locked list when
+ * @head: used to hold its place on &drm_atomic_state.locked list when
  *	part of an atomic update
  *
  * Used for locking CRTCs and other modeset resources.
diff --git a/include/drm/drm_os_linux.h b/include/drm/drm_os_linux.h
index 86ab99bc0ac5..35e1482ba8a1 100644
--- a/include/drm/drm_os_linux.h
+++ b/include/drm/drm_os_linux.h
@@ -4,6 +4,7 @@
  */
 
 #include <linux/interrupt.h>	/* For task queue support */
+#include <linux/sched/signal.h>
 #include <linux/delay.h>
 
 #ifndef readq
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
index 220d1e2b3db1..4b76cf2d5a7b 100644
--- a/include/drm/drm_panel.h
+++ b/include/drm/drm_panel.h
@@ -193,9 +193,9 @@ int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector);
 int drm_panel_detach(struct drm_panel *panel);
 
 #ifdef CONFIG_OF
-struct drm_panel *of_drm_find_panel(struct device_node *np);
+struct drm_panel *of_drm_find_panel(const struct device_node *np);
 #else
-static inline struct drm_panel *of_drm_find_panel(struct device_node *np)
+static inline struct drm_panel *of_drm_find_panel(const struct device_node *np)
 {
 	return NULL;
 }
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index db3bbdeb36d5..20867b4371ab 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -247,19 +247,19 @@ struct drm_plane_funcs {
 	 * @atomic_duplicate_state:
 	 *
 	 * Duplicate the current atomic state for this plane and return it.
-	 * The core and helpers gurantee that any atomic state duplicated with
+	 * The core and helpers guarantee that any atomic state duplicated with
 	 * this hook and still owned by the caller (i.e. not transferred to the
-	 * driver by calling ->atomic_commit() from struct
-	 * &drm_mode_config_funcs) will be cleaned up by calling the
-	 * @atomic_destroy_state hook in this structure.
+	 * driver by calling &drm_mode_config_funcs.atomic_commit) will be
+	 * cleaned up by calling the @atomic_destroy_state hook in this
+	 * structure.
 	 *
-	 * Atomic drivers which don't subclass struct &drm_plane_state should use
+	 * Atomic drivers which don't subclass &struct drm_plane_state should use
 	 * drm_atomic_helper_plane_duplicate_state(). Drivers that subclass the
 	 * state structure to extend it with driver-private state should use
 	 * __drm_atomic_helper_plane_duplicate_state() to make sure shared state is
 	 * duplicated in a consistent fashion across drivers.
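The subclassing pattern mentioned just above, sketched with a hypothetical my_plane_state that extends drm_plane_state by one driver-private field (the struct, its field, and the to_my_plane_state() accessor are illustrative, not from this patch):

struct my_plane_state {
	struct drm_plane_state base;
	u32 sharpness;	/* driver-private example field */
};

#define to_my_plane_state(s) container_of(s, struct my_plane_state, base)

static struct drm_plane_state *
my_plane_duplicate_state(struct drm_plane *plane)
{
	struct my_plane_state *state;

	if (WARN_ON(!plane->state))
		return NULL;

	state = kmemdup(to_my_plane_state(plane->state), sizeof(*state),
			GFP_KERNEL);
	if (!state)
		return NULL;

	/* Re-initialize the shared base fields (fb references and so on). */
	__drm_atomic_helper_plane_duplicate_state(plane, &state->base);

	return &state->base;
}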
* - * It is an error to call this hook before plane->state has been + * It is an error to call this hook before &drm_plane.state has been * initialized correctly. * * NOTE: @@ -372,7 +372,7 @@ struct drm_plane_funcs { * * This optional hook should be used to unregister the additional * userspace interfaces attached to the plane from - * late_unregister(). It is called from drm_dev_unregister(), + * @late_register. It is called from drm_dev_unregister(), * early in the driver unload sequence to disable userspace access * before data structures are torndown. */ @@ -381,7 +381,7 @@ struct drm_plane_funcs { /** * @atomic_print_state: * - * If driver subclasses struct &drm_plane_state, it should implement + * If driver subclasses &struct drm_plane_state, it should implement * this optional hook for printing additional driver specific state. * * Do not call this directly, use drm_atomic_plane_print_state() @@ -423,8 +423,8 @@ enum drm_plane_type { * * Primary planes represent a "main" plane for a CRTC. Primary planes * are the planes operated upon by CRTC modesetting and flipping - * operations described in the page_flip and set_config hooks in struct - * &drm_crtc_funcs. + * operations described in the &drm_crtc_funcs.page_flip and + * &drm_crtc_funcs.set_config hooks. */ DRM_PLANE_TYPE_PRIMARY, @@ -470,9 +470,9 @@ struct drm_plane { /** * @mutex: * - * Protects modeset plane state, together with the mutex of &drm_crtc - * this plane is linked to (when active, getting actived or getting - * disabled). + * Protects modeset plane state, together with the &drm_crtc.mutex of + * CRTC this plane is linked to (when active, getting activated or + * getting disabled). */ struct drm_modeset_lock mutex; @@ -580,7 +580,7 @@ static inline struct drm_plane *drm_plane_find(struct drm_device *dev, * * Iterate over all legacy planes of @dev, excluding primary and cursor planes. * This is useful for implementing userspace apis when userspace is not - * universal plane aware. See also enum &drm_plane_type. + * universal plane aware. See also &enum drm_plane_type. */ #define drm_for_each_legacy_plane(plane, dev) \ list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) \ diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h index 1adf84aea622..7d98763c0444 100644 --- a/include/drm/drm_print.h +++ b/include/drm/drm_print.h @@ -60,26 +60,27 @@ /** * struct drm_printer - drm output "stream" - * @printfn: actual output fxn - * @arg: output fxn specific data * * Do not use struct members directly. Use drm_printer_seq_file(), * drm_printer_info(), etc to initialize. And drm_printf() for output. 
*/ struct drm_printer { + /* private: */ void (*printfn)(struct drm_printer *p, struct va_format *vaf); void *arg; + const char *prefix; }; void __drm_printfn_seq_file(struct drm_printer *p, struct va_format *vaf); void __drm_printfn_info(struct drm_printer *p, struct va_format *vaf); +void __drm_printfn_debug(struct drm_printer *p, struct va_format *vaf); void drm_printf(struct drm_printer *p, const char *f, ...); /** * drm_seq_file_printer - construct a &drm_printer that outputs to &seq_file - * @f: the struct &seq_file to output to + * @f: the &struct seq_file to output to * * RETURNS: * The &drm_printer object @@ -95,7 +96,7 @@ static inline struct drm_printer drm_seq_file_printer(struct seq_file *f) /** * drm_info_printer - construct a &drm_printer that outputs to dev_printk() - * @dev: the struct &device pointer + * @dev: the &struct device pointer * * RETURNS: * The &drm_printer object @@ -109,4 +110,19 @@ static inline struct drm_printer drm_info_printer(struct device *dev) return p; } +/** + * drm_debug_printer - construct a &drm_printer that outputs to pr_debug() + * @prefix: debug output prefix + * + * RETURNS: + * The &drm_printer object + */ +static inline struct drm_printer drm_debug_printer(const char *prefix) +{ + struct drm_printer p = { + .printfn = __drm_printfn_debug, + .prefix = prefix + }; + return p; +} #endif /* DRM_PRINT_H_ */ diff --git a/include/drm/drm_property.h b/include/drm/drm_property.h index 43c4b6a2046d..f66fdb47551c 100644 --- a/include/drm/drm_property.h +++ b/include/drm/drm_property.h @@ -30,7 +30,7 @@ /** * struct drm_property_enum - symbolic values for enumerations * @value: numeric property value for this enum entry - * @head: list of enum values, linked to enum_list in &drm_property + * @head: list of enum values, linked to &drm_property.enum_list * @name: symbolic name for the enum * * For enumeration and bitmask properties this structure stores the symbolic @@ -191,9 +191,9 @@ struct drm_property { * struct drm_property_blob - Blob data for &drm_property * @base: base KMS object * @dev: DRM device - * @head_global: entry on the global blob list in &drm_mode_config - * property_blob_list. - * @head_file: entry on the per-file blob list in &drm_file blobs list. + * @head_global: entry on the global blob list in + * &drm_mode_config.property_blob_list. + * @head_file: entry on the per-file blob list in &drm_file.blobs list. * @length: size of the blob in bytes, invariant over the lifetime of the object * @data: actual data, embedded at the end of this structure * diff --git a/include/drm/drm_simple_kms_helper.h b/include/drm/drm_simple_kms_helper.h index 01a8436ccb0a..fffbb95a0915 100644 --- a/include/drm/drm_simple_kms_helper.h +++ b/include/drm/drm_simple_kms_helper.h @@ -10,6 +10,10 @@ #ifndef __LINUX_DRM_SIMPLE_KMS_HELPER_H #define __LINUX_DRM_SIMPLE_KMS_HELPER_H +#include <drm/drm_crtc.h> +#include <drm/drm_encoder.h> +#include <drm/drm_plane.h> + struct drm_simple_display_pipe; /** @@ -73,9 +77,9 @@ struct drm_simple_display_pipe_funcs { /** * @prepare_fb: * - * Optional, called by struct &drm_plane_helper_funcs ->prepare_fb . - * Please read the documentation for the ->prepare_fb hook in - * struct &drm_plane_helper_funcs for more details. + * Optional, called by &drm_plane_helper_funcs.prepare_fb. Please read + * the documentation for the &drm_plane_helper_funcs.prepare_fb hook for + * more details. 
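Tying the drm_printer constructors above to the drm_mm_print() declaration earlier in this diff, output plumbing reduces to a few lines; dev and mm here stand in for whatever objects the driver has at hand:

static void my_dump_mm(struct drm_device *dev, struct drm_mm *mm)
{
	struct drm_printer p = drm_info_printer(dev->dev);

	drm_printf(&p, "drm_mm allocations:\n");
	drm_mm_print(mm, &p);
}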
*/ int (*prepare_fb)(struct drm_simple_display_pipe *pipe, struct drm_plane_state *plane_state); @@ -83,9 +87,9 @@ struct drm_simple_display_pipe_funcs { /** * @cleanup_fb: * - * Optional, called by struct &drm_plane_helper_funcs ->cleanup_fb . - * Please read the documentation for the ->cleanup_fb hook in - * struct &drm_plane_helper_funcs for more details. + * Optional, called by &drm_plane_helper_funcs.cleanup_fb. Please read + * the documentation for the &drm_plane_helper_funcs.cleanup_fb hook for + * more details. */ void (*cleanup_fb)(struct drm_simple_display_pipe *pipe, struct drm_plane_state *plane_state); @@ -114,8 +118,6 @@ struct drm_simple_display_pipe { int drm_simple_display_pipe_attach_bridge(struct drm_simple_display_pipe *pipe, struct drm_bridge *bridge); -void drm_simple_display_pipe_detach_bridge(struct drm_simple_display_pipe *pipe); - int drm_simple_display_pipe_init(struct drm_device *dev, struct drm_simple_display_pipe *pipe, const struct drm_simple_display_pipe_funcs *funcs, diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h index 0d5f4268d75f..a1dd21d6b723 100644 --- a/include/drm/i915_pciids.h +++ b/include/drm/i915_pciids.h @@ -226,23 +226,18 @@ INTEL_VGA_DEVICE(0x162A, info), /* Server */ \ INTEL_VGA_DEVICE(0x162D, info) /* Workstation */ -#define INTEL_BDW_RSVDM_IDS(info) \ +#define INTEL_BDW_RSVD_IDS(info) \ INTEL_VGA_DEVICE(0x1632, info), /* ULT */ \ INTEL_VGA_DEVICE(0x1636, info), /* ULT */ \ INTEL_VGA_DEVICE(0x163B, info), /* Iris */ \ - INTEL_VGA_DEVICE(0x163E, info) /* ULX */ - -#define INTEL_BDW_RSVDD_IDS(info) \ + INTEL_VGA_DEVICE(0x163E, info), /* ULX */ \ INTEL_VGA_DEVICE(0x163A, info), /* Server */ \ INTEL_VGA_DEVICE(0x163D, info) /* Workstation */ #define INTEL_BDW_IDS(info) \ INTEL_BDW_GT12_IDS(info), \ INTEL_BDW_GT3_IDS(info), \ - INTEL_BDW_RSVDM_IDS(info), \ - INTEL_BDW_GT12_IDS(info), \ - INTEL_BDW_GT3_IDS(info), \ - INTEL_BDW_RSVDD_IDS(info) + INTEL_BDW_RSVD_IDS(info) #define INTEL_CHV_IDS(info) \ INTEL_VGA_DEVICE(0x22b0, info), \ @@ -270,14 +265,14 @@ INTEL_VGA_DEVICE(0x1923, info), /* ULT GT3 */ \ INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \ INTEL_VGA_DEVICE(0x1927, info), /* ULT GT3 */ \ - INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \ - INTEL_VGA_DEVICE(0x192A, info) /* SRV GT3 */ + INTEL_VGA_DEVICE(0x192B, info) /* Halo GT3 */ \ #define INTEL_SKL_GT4_IDS(info) \ INTEL_VGA_DEVICE(0x1932, info), /* DT GT4 */ \ INTEL_VGA_DEVICE(0x193B, info), /* Halo GT4 */ \ INTEL_VGA_DEVICE(0x193D, info), /* WKS GT4 */ \ - INTEL_VGA_DEVICE(0x193A, info) /* SRV GT4 */ + INTEL_VGA_DEVICE(0x192A, info), /* SRV GT4 */ \ + INTEL_VGA_DEVICE(0x193A, info) /* SRV GT4e */ #define INTEL_SKL_IDS(info) \ INTEL_SKL_GT1_IDS(info), \ @@ -292,6 +287,10 @@ INTEL_VGA_DEVICE(0x5A84, info), /* APL HD Graphics 505 */ \ INTEL_VGA_DEVICE(0x5A85, info) /* APL HD Graphics 500 */ +#define INTEL_GLK_IDS(info) \ + INTEL_VGA_DEVICE(0x3184, info), \ + INTEL_VGA_DEVICE(0x3185, info) + #define INTEL_KBL_GT1_IDS(info) \ INTEL_VGA_DEVICE(0x5913, info), /* ULT GT1.5 */ \ INTEL_VGA_DEVICE(0x5915, info), /* ULX GT1.5 */ \ diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h index f49edecd66a3..b3bf717cfc45 100644 --- a/include/drm/intel-gtt.h +++ b/include/drm/intel-gtt.h @@ -3,8 +3,10 @@ #ifndef _DRM_INTEL_GTT_H #define _DRM_INTEL_GTT_H -void intel_gtt_get(u64 *gtt_total, size_t *stolen_size, - phys_addr_t *mappable_base, u64 *mappable_end); +void intel_gtt_get(u64 *gtt_total, + u32 *stolen_size, + phys_addr_t *mappable_base, + u64 *mappable_end); int 
intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev, struct agp_bridge_data *bridge); diff --git a/include/drm/intel_lpe_audio.h b/include/drm/intel_lpe_audio.h new file mode 100644 index 000000000000..e9892b4c3af1 --- /dev/null +++ b/include/drm/intel_lpe_audio.h @@ -0,0 +1,51 @@ +/* + * Copyright © 2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#ifndef _INTEL_LPE_AUDIO_H_ +#define _INTEL_LPE_AUDIO_H_ + +#include <linux/types.h> +#include <linux/spinlock_types.h> + +struct platform_device; + +#define HDMI_MAX_ELD_BYTES 128 + +struct intel_hdmi_lpe_audio_eld { + int port_id; + int pipe_id; + unsigned char eld_data[HDMI_MAX_ELD_BYTES]; +}; + +struct intel_hdmi_lpe_audio_pdata { + bool notify_pending; + int tmds_clock_speed; + bool hdmi_connected; + bool dp_output; + int link_rate; + struct intel_hdmi_lpe_audio_eld eld; + void (*notify_audio_lpe)(struct platform_device *pdev); + spinlock_t lpe_audio_slock; +}; + +#endif /* _I915_LPE_AUDIO_H_ */ diff --git a/include/drm/tinydrm/ili9341.h b/include/drm/tinydrm/ili9341.h new file mode 100644 index 000000000000..807a09f43cad --- /dev/null +++ b/include/drm/tinydrm/ili9341.h @@ -0,0 +1,54 @@ +/* + * ILI9341 LCD controller + * + * Copyright 2016 Noralf Trønnes + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef __LINUX_ILI9341_H +#define __LINUX_ILI9341_H + +#define ILI9341_FRMCTR1 0xb1 +#define ILI9341_FRMCTR2 0xb2 +#define ILI9341_FRMCTR3 0xb3 +#define ILI9341_INVTR 0xb4 +#define ILI9341_PRCTR 0xb5 +#define ILI9341_DISCTRL 0xb6 +#define ILI9341_ETMOD 0xb7 + +#define ILI9341_PWCTRL1 0xc0 +#define ILI9341_PWCTRL2 0xc1 +#define ILI9341_VMCTRL1 0xc5 +#define ILI9341_VMCTRL2 0xc7 +#define ILI9341_PWCTRLA 0xcb +#define ILI9341_PWCTRLB 0xcf + +#define ILI9341_RDID1 0xda +#define ILI9341_RDID2 0xdb +#define ILI9341_RDID3 0xdc +#define ILI9341_RDID4 0xd3 + +#define ILI9341_PGAMCTRL 0xe0 +#define ILI9341_NGAMCTRL 0xe1 +#define ILI9341_DGAMCTRL1 0xe2 +#define ILI9341_DGAMCTRL2 0xe3 +#define ILI9341_DTCTRLA 0xe8 +#define ILI9341_DTCTRLB 0xea +#define ILI9341_PWRSEQ 0xed + +#define ILI9341_EN3GAM 0xf2 +#define ILI9341_IFCTRL 0xf6 +#define ILI9341_PUMPCTRL 0xf7 + +#define ILI9341_MADCTL_MH BIT(2) +#define ILI9341_MADCTL_BGR BIT(3) +#define ILI9341_MADCTL_ML BIT(4) +#define ILI9341_MADCTL_MV BIT(5) +#define ILI9341_MADCTL_MX BIT(6) +#define ILI9341_MADCTL_MY BIT(7) + +#endif /* __LINUX_ILI9341_H */ diff --git a/include/drm/tinydrm/mipi-dbi.h b/include/drm/tinydrm/mipi-dbi.h new file mode 100644 index 000000000000..d137b16ee873 --- /dev/null +++ b/include/drm/tinydrm/mipi-dbi.h @@ -0,0 +1,107 @@ +/* + * MIPI Display Bus Interface (DBI) LCD controller support + * + * Copyright 2016 Noralf Trønnes + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __LINUX_MIPI_DBI_H +#define __LINUX_MIPI_DBI_H + +#include <drm/tinydrm/tinydrm.h> + +struct spi_device; +struct gpio_desc; +struct regulator; + +/** + * struct mipi_dbi - MIPI DBI controller + * @tinydrm: tinydrm base + * @spi: SPI device + * @enabled: Pipeline is enabled + * @cmdlock: Command lock + * @command: Bus specific callback executing commands. + * @read_commands: Array of read commands terminated by a zero entry. + * Reading is disabled if this is NULL. + * @dc: Optional D/C gpio. + * @tx_buf: Buffer used for transfer (copy clip rect area) + * @tx_buf9: Buffer used for Option 1 9-bit conversion + * @tx_buf9_len: Size of tx_buf9. 
+ * @swap_bytes: Swap bytes in buffer before transfer + * @reset: Optional reset gpio + * @rotation: initial rotation in degrees Counter Clock Wise + * @backlight: backlight device (optional) + * @regulator: power regulator (optional) + */ +struct mipi_dbi { + struct tinydrm_device tinydrm; + struct spi_device *spi; + bool enabled; + struct mutex cmdlock; + int (*command)(struct mipi_dbi *mipi, u8 cmd, u8 *param, size_t num); + const u8 *read_commands; + struct gpio_desc *dc; + u16 *tx_buf; + void *tx_buf9; + size_t tx_buf9_len; + bool swap_bytes; + struct gpio_desc *reset; + unsigned int rotation; + struct backlight_device *backlight; + struct regulator *regulator; +}; + +static inline struct mipi_dbi * +mipi_dbi_from_tinydrm(struct tinydrm_device *tdev) +{ + return container_of(tdev, struct mipi_dbi, tinydrm); +} + +int mipi_dbi_spi_init(struct spi_device *spi, struct mipi_dbi *mipi, + struct gpio_desc *dc, + const struct drm_simple_display_pipe_funcs *pipe_funcs, + struct drm_driver *driver, + const struct drm_display_mode *mode, + unsigned int rotation); +int mipi_dbi_init(struct device *dev, struct mipi_dbi *mipi, + const struct drm_simple_display_pipe_funcs *pipe_funcs, + struct drm_driver *driver, + const struct drm_display_mode *mode, unsigned int rotation); +void mipi_dbi_pipe_enable(struct drm_simple_display_pipe *pipe, + struct drm_crtc_state *crtc_state); +void mipi_dbi_pipe_disable(struct drm_simple_display_pipe *pipe); +void mipi_dbi_hw_reset(struct mipi_dbi *mipi); +bool mipi_dbi_display_is_on(struct mipi_dbi *mipi); + +int mipi_dbi_command_read(struct mipi_dbi *mipi, u8 cmd, u8 *val); +int mipi_dbi_command_buf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len); + +/** + * mipi_dbi_command - MIPI DCS command with optional parameter(s) + * @mipi: MIPI structure + * @cmd: Command + * @seq...: Optional parameter(s) + * + * Send MIPI DCS command to the controller. Use mipi_dbi_command_read() for + * get/read. + * + * Returns: + * Zero on success, negative error code on failure. + */ +#define mipi_dbi_command(mipi, cmd, seq...) \ +({ \ + u8 d[] = { seq }; \ + mipi_dbi_command_buf(mipi, cmd, d, ARRAY_SIZE(d)); \ +}) + +#ifdef CONFIG_DEBUG_FS +int mipi_dbi_debugfs_init(struct drm_minor *minor); +#else +#define mipi_dbi_debugfs_init NULL +#endif + +#endif /* __LINUX_MIPI_DBI_H */ diff --git a/include/drm/tinydrm/tinydrm-helpers.h b/include/drm/tinydrm/tinydrm-helpers.h new file mode 100644 index 000000000000..9b9b6cfe3ba5 --- /dev/null +++ b/include/drm/tinydrm/tinydrm-helpers.h @@ -0,0 +1,81 @@ +/* + * Copyright (C) 2016 Noralf Trønnes + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef __LINUX_TINYDRM_HELPERS_H +#define __LINUX_TINYDRM_HELPERS_H + +struct backlight_device; +struct tinydrm_device; +struct drm_clip_rect; +struct spi_transfer; +struct spi_message; +struct spi_device; +struct device; + +/** + * tinydrm_machine_little_endian - Machine is little endian + * + * Returns: + * true if *defined(__LITTLE_ENDIAN)*, false otherwise + */ +static inline bool tinydrm_machine_little_endian(void) +{ +#if defined(__LITTLE_ENDIAN) + return true; +#else + return false; +#endif +} + +bool tinydrm_merge_clips(struct drm_clip_rect *dst, + struct drm_clip_rect *src, unsigned int num_clips, + unsigned int flags, u32 max_width, u32 max_height); +void tinydrm_memcpy(void *dst, void *vaddr, struct drm_framebuffer *fb, + struct drm_clip_rect *clip); +void tinydrm_swab16(u16 *dst, void *vaddr, struct drm_framebuffer *fb, + struct drm_clip_rect *clip); +void tinydrm_xrgb8888_to_rgb565(u16 *dst, void *vaddr, + struct drm_framebuffer *fb, + struct drm_clip_rect *clip, bool swap); + +struct backlight_device *tinydrm_of_find_backlight(struct device *dev); +int tinydrm_enable_backlight(struct backlight_device *backlight); +int tinydrm_disable_backlight(struct backlight_device *backlight); + +size_t tinydrm_spi_max_transfer_size(struct spi_device *spi, size_t max_len); +bool tinydrm_spi_bpw_supported(struct spi_device *spi, u8 bpw); +int tinydrm_spi_transfer(struct spi_device *spi, u32 speed_hz, + struct spi_transfer *header, u8 bpw, const void *buf, + size_t len); +void _tinydrm_dbg_spi_message(struct spi_device *spi, struct spi_message *m); + +#ifdef DEBUG +/** + * tinydrm_dbg_spi_message - Dump SPI message + * @spi: SPI device + * @m: SPI message + * + * Dumps info about the transfers in a SPI message including buffer content. + * DEBUG has to be defined for this function to be enabled alongside setting + * the DRM_UT_DRIVER bit of &drm_debug. + */ +static inline void tinydrm_dbg_spi_message(struct spi_device *spi, + struct spi_message *m) +{ + if (drm_debug & DRM_UT_DRIVER) + _tinydrm_dbg_spi_message(spi, m); +} +#else +static inline void tinydrm_dbg_spi_message(struct spi_device *spi, + struct spi_message *m) +{ +} +#endif /* DEBUG */ + +#endif /* __LINUX_TINYDRM_HELPERS_H */ diff --git a/include/drm/tinydrm/tinydrm.h b/include/drm/tinydrm/tinydrm.h new file mode 100644 index 000000000000..cf9ca207b8b1 --- /dev/null +++ b/include/drm/tinydrm/tinydrm.h @@ -0,0 +1,115 @@ +/* + * Copyright (C) 2016 Noralf Trønnes + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef __LINUX_TINYDRM_H +#define __LINUX_TINYDRM_H + +#include <drm/drm_gem_cma_helper.h> +#include <drm/drm_fb_cma_helper.h> +#include <drm/drm_simple_kms_helper.h> + +/** + * struct tinydrm_device - tinydrm device + * @drm: DRM device + * @pipe: Display pipe structure + * @dirty_lock: Serializes framebuffer flushing + * @fbdev_cma: CMA fbdev structure + * @suspend_state: Atomic state when suspended + * @fb_funcs: Framebuffer functions used when creating framebuffers + */ +struct tinydrm_device { + struct drm_device *drm; + struct drm_simple_display_pipe pipe; + struct mutex dirty_lock; + struct drm_fbdev_cma *fbdev_cma; + struct drm_atomic_state *suspend_state; + const struct drm_framebuffer_funcs *fb_funcs; +}; + +static inline struct tinydrm_device * +pipe_to_tinydrm(struct drm_simple_display_pipe *pipe) +{ + return container_of(pipe, struct tinydrm_device, pipe); +} + +/** + * TINYDRM_GEM_DRIVER_OPS - default tinydrm gem operations + * + * This macro provides a shortcut for setting the tinydrm GEM operations in + * the &drm_driver structure. + */ +#define TINYDRM_GEM_DRIVER_OPS \ + .gem_free_object = tinydrm_gem_cma_free_object, \ + .gem_vm_ops = &drm_gem_cma_vm_ops, \ + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \ + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \ + .gem_prime_import = drm_gem_prime_import, \ + .gem_prime_export = drm_gem_prime_export, \ + .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table, \ + .gem_prime_import_sg_table = tinydrm_gem_cma_prime_import_sg_table, \ + .gem_prime_vmap = drm_gem_cma_prime_vmap, \ + .gem_prime_vunmap = drm_gem_cma_prime_vunmap, \ + .gem_prime_mmap = drm_gem_cma_prime_mmap, \ + .dumb_create = drm_gem_cma_dumb_create, \ + .dumb_map_offset = drm_gem_cma_dumb_map_offset, \ + .dumb_destroy = drm_gem_dumb_destroy, \ + .fops = &tinydrm_fops + +/** + * TINYDRM_MODE - tinydrm display mode + * @hd: Horizontal resolution, width + * @vd: Vertical resolution, height + * @hd_mm: Display width in millimeters + * @vd_mm: Display height in millimeters + * + * This macro creates a &drm_display_mode for use with tinydrm. 
+ */ +#define TINYDRM_MODE(hd, vd, hd_mm, vd_mm) \ + .hdisplay = (hd), \ + .hsync_start = (hd), \ + .hsync_end = (hd), \ + .htotal = (hd), \ + .vdisplay = (vd), \ + .vsync_start = (vd), \ + .vsync_end = (vd), \ + .vtotal = (vd), \ + .width_mm = (hd_mm), \ + .height_mm = (vd_mm), \ + .type = DRM_MODE_TYPE_DRIVER, \ + .clock = 1 /* pass validation */ + +extern const struct file_operations tinydrm_fops; +void tinydrm_lastclose(struct drm_device *drm); +void tinydrm_gem_cma_free_object(struct drm_gem_object *gem_obj); +struct drm_gem_object * +tinydrm_gem_cma_prime_import_sg_table(struct drm_device *drm, + struct dma_buf_attachment *attach, + struct sg_table *sgt); +int devm_tinydrm_init(struct device *parent, struct tinydrm_device *tdev, + const struct drm_framebuffer_funcs *fb_funcs, + struct drm_driver *driver); +int devm_tinydrm_register(struct tinydrm_device *tdev); +void tinydrm_shutdown(struct tinydrm_device *tdev); +int tinydrm_suspend(struct tinydrm_device *tdev); +int tinydrm_resume(struct tinydrm_device *tdev); + +void tinydrm_display_pipe_update(struct drm_simple_display_pipe *pipe, + struct drm_plane_state *old_state); +int tinydrm_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe, + struct drm_plane_state *plane_state); +int +tinydrm_display_pipe_init(struct tinydrm_device *tdev, + const struct drm_simple_display_pipe_funcs *funcs, + int connector_type, + const uint32_t *formats, + unsigned int format_count, + const struct drm_display_mode *mode, + unsigned int rotation); + +#endif /* __LINUX_TINYDRM_H */ diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index 9a465314572c..8f619f499e55 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -215,6 +215,8 @@ struct ttm_buffer_object { struct drm_vma_offset_node vma_node; + unsigned priority; + /** * Special members that are protected by the reserve lock * and the bo::lock when written to. Can be read with diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index feecf33a1212..8145773c582c 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -42,6 +42,8 @@ #include <linux/spinlock.h> #include <linux/reservation.h> +#define TTM_MAX_BO_PRIORITY 16U + struct ttm_backend_func { /** * struct ttm_backend_func member bind @@ -298,7 +300,7 @@ struct ttm_mem_type_manager { * Protected by the global->lru_lock. */ - struct list_head lru; + struct list_head lru[TTM_MAX_BO_PRIORITY]; /* * Protected by @move_lock. @@ -431,9 +433,15 @@ struct ttm_bo_driver { int (*verify_access)(struct ttm_buffer_object *bo, struct file *filp); - /* hook to notify driver about a driver move so it - * can do tiling things */ + /** + * Hook to notify driver about a driver move so it + * can do tiling things and book-keeping. + * + * @evict: whether this move is evicting the buffer from the graphics + * address space + */ void (*move_notify)(struct ttm_buffer_object *bo, + bool evict, struct ttm_mem_reg *new_mem); /* notify the driver we are taking a fault on this BO * and have reserved it */ @@ -454,18 +462,6 @@ struct ttm_bo_driver { struct ttm_mem_reg *mem); void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem); - - /** - * Optional driver callback for when BO is removed from the LRU. - * Called with LRU lock held immediately before the removal. - */ - void (*lru_removal)(struct ttm_buffer_object *bo); - - /** - * Return the list_head after which a BO should be inserted in the LRU. 
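Jumping back to the TINYDRM_MODE macro defined earlier in this hunk: it is meant to be dropped into a static mode definition, with the fixed clock of 1 existing only to pass mode validation. A usage sketch with an illustrative 320x240 panel measuring 58x43 mm (values not taken from this patch):

static const struct drm_display_mode my_panel_mode = {
	TINYDRM_MODE(320, 240, 58, 43),
};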
- */ - struct list_head *(*lru_tail)(struct ttm_buffer_object *bo); - struct list_head *(*swap_lru_tail)(struct ttm_buffer_object *bo); }; /** @@ -512,7 +508,7 @@ struct ttm_bo_global { /** * Protected by the lru_lock. */ - struct list_head swap_lru; + struct list_head swap_lru[TTM_MAX_BO_PRIORITY]; /** * Internal protection. @@ -780,9 +776,6 @@ extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man); extern void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo); extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo); -struct list_head *ttm_bo_default_lru_tail(struct ttm_buffer_object *bo); -struct list_head *ttm_bo_default_swap_lru_tail(struct ttm_buffer_object *bo); - /** * __ttm_bo_reserve: * diff --git a/include/dt-bindings/clock/bcm2835.h b/include/dt-bindings/clock/bcm2835.h index 360e00cefd35..a0c812b0fa39 100644 --- a/include/dt-bindings/clock/bcm2835.h +++ b/include/dt-bindings/clock/bcm2835.h @@ -64,3 +64,5 @@ #define BCM2835_CLOCK_CAM1 46 #define BCM2835_CLOCK_DSI0E 47 #define BCM2835_CLOCK_DSI1E 48 +#define BCM2835_CLOCK_DSI0P 49 +#define BCM2835_CLOCK_DSI1P 50 diff --git a/include/dt-bindings/clock/exynos4415.h b/include/dt-bindings/clock/exynos4415.h deleted file mode 100644 index 7eed55100721..000000000000 --- a/include/dt-bindings/clock/exynos4415.h +++ /dev/null @@ -1,360 +0,0 @@ -/* - * Copyright (c) 2014 Samsung Electronics Co., Ltd. - * Author: Chanwoo Choi <cw00.choi@samsung.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * Device Tree binding constants for Samsung Exynos4415 clock controllers. - */ - -#ifndef _DT_BINDINGS_CLOCK_SAMSUNG_EXYNOS4415_CLOCK_H -#define _DT_BINDINGS_CLOCK_SAMSUNG_EXYNOS4415_CLOCK_H - -/* - * Let each exported clock get a unique index, which is used on DT-enabled - * platforms to lookup the clock from a clock specifier. These indices are - * therefore considered an ABI and so must not be changed. This implies - * that new clocks should be added either in free spaces between clock groups - * or at the end. 
- */ - -/* - * Main CMU - */ - -#define CLK_OSCSEL 1 -#define CLK_FIN_PLL 2 -#define CLK_FOUT_APLL 3 -#define CLK_FOUT_MPLL 4 -#define CLK_FOUT_EPLL 5 -#define CLK_FOUT_G3D_PLL 6 -#define CLK_FOUT_ISP_PLL 7 -#define CLK_FOUT_DISP_PLL 8 - -/* Muxes */ -#define CLK_MOUT_MPLL_USER_L 16 -#define CLK_MOUT_GDL 17 -#define CLK_MOUT_MPLL_USER_R 18 -#define CLK_MOUT_GDR 19 -#define CLK_MOUT_EBI 20 -#define CLK_MOUT_ACLK_200 21 -#define CLK_MOUT_ACLK_160 22 -#define CLK_MOUT_ACLK_100 23 -#define CLK_MOUT_ACLK_266 24 -#define CLK_MOUT_G3D_PLL 25 -#define CLK_MOUT_EPLL 26 -#define CLK_MOUT_EBI_1 27 -#define CLK_MOUT_ISP_PLL 28 -#define CLK_MOUT_DISP_PLL 29 -#define CLK_MOUT_MPLL_USER_T 30 -#define CLK_MOUT_ACLK_400_MCUISP 31 -#define CLK_MOUT_G3D_PLLSRC 32 -#define CLK_MOUT_CSIS1 33 -#define CLK_MOUT_CSIS0 34 -#define CLK_MOUT_CAM1 35 -#define CLK_MOUT_FIMC3_LCLK 36 -#define CLK_MOUT_FIMC2_LCLK 37 -#define CLK_MOUT_FIMC1_LCLK 38 -#define CLK_MOUT_FIMC0_LCLK 39 -#define CLK_MOUT_MFC 40 -#define CLK_MOUT_MFC_1 41 -#define CLK_MOUT_MFC_0 42 -#define CLK_MOUT_G3D 43 -#define CLK_MOUT_G3D_1 44 -#define CLK_MOUT_G3D_0 45 -#define CLK_MOUT_MIPI0 46 -#define CLK_MOUT_FIMD0 47 -#define CLK_MOUT_TSADC_ISP 48 -#define CLK_MOUT_UART_ISP 49 -#define CLK_MOUT_SPI1_ISP 50 -#define CLK_MOUT_SPI0_ISP 51 -#define CLK_MOUT_PWM_ISP 52 -#define CLK_MOUT_AUDIO0 53 -#define CLK_MOUT_TSADC 54 -#define CLK_MOUT_MMC2 55 -#define CLK_MOUT_MMC1 56 -#define CLK_MOUT_MMC0 57 -#define CLK_MOUT_UART3 58 -#define CLK_MOUT_UART2 59 -#define CLK_MOUT_UART1 60 -#define CLK_MOUT_UART0 61 -#define CLK_MOUT_SPI2 62 -#define CLK_MOUT_SPI1 63 -#define CLK_MOUT_SPI0 64 -#define CLK_MOUT_SPDIF 65 -#define CLK_MOUT_AUDIO2 66 -#define CLK_MOUT_AUDIO1 67 -#define CLK_MOUT_MPLL_USER_C 68 -#define CLK_MOUT_HPM 69 -#define CLK_MOUT_CORE 70 -#define CLK_MOUT_APLL 71 -#define CLK_MOUT_PXLASYNC_CSIS1_FIMC 72 -#define CLK_MOUT_PXLASYNC_CSIS0_FIMC 73 -#define CLK_MOUT_JPEG 74 -#define CLK_MOUT_JPEG1 75 -#define CLK_MOUT_JPEG0 76 -#define CLK_MOUT_ACLK_ISP0_300 77 -#define CLK_MOUT_ACLK_ISP0_400 78 -#define CLK_MOUT_ACLK_ISP0_300_USER 79 -#define CLK_MOUT_ACLK_ISP1_300 80 -#define CLK_MOUT_ACLK_ISP1_300_USER 81 -#define CLK_MOUT_HDMI 82 - -/* Dividers */ -#define CLK_DIV_GPL 90 -#define CLK_DIV_GDL 91 -#define CLK_DIV_GPR 92 -#define CLK_DIV_GDR 93 -#define CLK_DIV_ACLK_400_MCUISP 94 -#define CLK_DIV_EBI 95 -#define CLK_DIV_ACLK_200 96 -#define CLK_DIV_ACLK_160 97 -#define CLK_DIV_ACLK_100 98 -#define CLK_DIV_ACLK_266 99 -#define CLK_DIV_CSIS1 100 -#define CLK_DIV_CSIS0 101 -#define CLK_DIV_CAM1 102 -#define CLK_DIV_FIMC3_LCLK 103 -#define CLK_DIV_FIMC2_LCLK 104 -#define CLK_DIV_FIMC1_LCLK 105 -#define CLK_DIV_FIMC0_LCLK 106 -#define CLK_DIV_TV_BLK 107 -#define CLK_DIV_MFC 108 -#define CLK_DIV_G3D 109 -#define CLK_DIV_MIPI0_PRE 110 -#define CLK_DIV_MIPI0 111 -#define CLK_DIV_FIMD0 112 -#define CLK_DIV_UART_ISP 113 -#define CLK_DIV_SPI1_ISP_PRE 114 -#define CLK_DIV_SPI1_ISP 115 -#define CLK_DIV_SPI0_ISP_PRE 116 -#define CLK_DIV_SPI0_ISP 117 -#define CLK_DIV_PWM_ISP 118 -#define CLK_DIV_PCM0 119 -#define CLK_DIV_AUDIO0 120 -#define CLK_DIV_TSADC_PRE 121 -#define CLK_DIV_TSADC 122 -#define CLK_DIV_MMC1_PRE 123 -#define CLK_DIV_MMC1 124 -#define CLK_DIV_MMC0_PRE 125 -#define CLK_DIV_MMC0 126 -#define CLK_DIV_MMC2_PRE 127 -#define CLK_DIV_MMC2 128 -#define CLK_DIV_UART3 129 -#define CLK_DIV_UART2 130 -#define CLK_DIV_UART1 131 -#define CLK_DIV_UART0 132 -#define CLK_DIV_SPI1_PRE 133 -#define CLK_DIV_SPI1 134 -#define CLK_DIV_SPI0_PRE 135 -#define CLK_DIV_SPI0 
136 -#define CLK_DIV_SPI2_PRE 137 -#define CLK_DIV_SPI2 138 -#define CLK_DIV_PCM2 139 -#define CLK_DIV_AUDIO2 140 -#define CLK_DIV_PCM1 141 -#define CLK_DIV_AUDIO1 142 -#define CLK_DIV_I2S1 143 -#define CLK_DIV_PXLASYNC_CSIS1_FIMC 144 -#define CLK_DIV_PXLASYNC_CSIS0_FIMC 145 -#define CLK_DIV_JPEG 146 -#define CLK_DIV_CORE2 147 -#define CLK_DIV_APLL 148 -#define CLK_DIV_PCLK_DBG 149 -#define CLK_DIV_ATB 150 -#define CLK_DIV_PERIPH 151 -#define CLK_DIV_COREM1 152 -#define CLK_DIV_COREM0 153 -#define CLK_DIV_CORE 154 -#define CLK_DIV_HPM 155 -#define CLK_DIV_COPY 156 - -/* Gates */ -#define CLK_ASYNC_G3D 180 -#define CLK_ASYNC_MFCL 181 -#define CLK_ASYNC_TVX 182 -#define CLK_PPMULEFT 183 -#define CLK_GPIO_LEFT 184 -#define CLK_PPMUIMAGE 185 -#define CLK_QEMDMA2 186 -#define CLK_QEROTATOR 187 -#define CLK_SMMUMDMA2 188 -#define CLK_SMMUROTATOR 189 -#define CLK_MDMA2 190 -#define CLK_ROTATOR 191 -#define CLK_ASYNC_ISPMX 192 -#define CLK_ASYNC_MAUDIOX 193 -#define CLK_ASYNC_MFCR 194 -#define CLK_ASYNC_FSYSD 195 -#define CLK_ASYNC_LCD0X 196 -#define CLK_ASYNC_CAMX 197 -#define CLK_PPMURIGHT 198 -#define CLK_GPIO_RIGHT 199 -#define CLK_ANTIRBK_APBIF 200 -#define CLK_EFUSE_WRITER_APBIF 201 -#define CLK_MONOCNT 202 -#define CLK_TZPC6 203 -#define CLK_PROVISIONKEY1 204 -#define CLK_PROVISIONKEY0 205 -#define CLK_CMU_ISPPART 206 -#define CLK_TMU_APBIF 207 -#define CLK_KEYIF 208 -#define CLK_RTC 209 -#define CLK_WDT 210 -#define CLK_MCT 211 -#define CLK_SECKEY 212 -#define CLK_HDMI_CEC 213 -#define CLK_TZPC5 214 -#define CLK_TZPC4 215 -#define CLK_TZPC3 216 -#define CLK_TZPC2 217 -#define CLK_TZPC1 218 -#define CLK_TZPC0 219 -#define CLK_CMU_COREPART 220 -#define CLK_CMU_TOPPART 221 -#define CLK_PMU_APBIF 222 -#define CLK_SYSREG 223 -#define CLK_CHIP_ID 224 -#define CLK_SMMUFIMC_LITE2 225 -#define CLK_FIMC_LITE2 226 -#define CLK_PIXELASYNCM1 227 -#define CLK_PIXELASYNCM0 228 -#define CLK_PPMUCAMIF 229 -#define CLK_SMMUJPEG 230 -#define CLK_SMMUFIMC3 231 -#define CLK_SMMUFIMC2 232 -#define CLK_SMMUFIMC1 233 -#define CLK_SMMUFIMC0 234 -#define CLK_JPEG 235 -#define CLK_CSIS1 236 -#define CLK_CSIS0 237 -#define CLK_FIMC3 238 -#define CLK_FIMC2 239 -#define CLK_FIMC1 240 -#define CLK_FIMC0 241 -#define CLK_PPMUTV 242 -#define CLK_SMMUTV 243 -#define CLK_HDMI 244 -#define CLK_MIXER 245 -#define CLK_VP 246 -#define CLK_PPMUMFC_R 247 -#define CLK_PPMUMFC_L 248 -#define CLK_SMMUMFC_R 249 -#define CLK_SMMUMFC_L 250 -#define CLK_MFC 251 -#define CLK_PPMUG3D 252 -#define CLK_G3D 253 -#define CLK_PPMULCD0 254 -#define CLK_SMMUFIMD0 255 -#define CLK_DSIM0 256 -#define CLK_SMIES 257 -#define CLK_MIE0 258 -#define CLK_FIMD0 259 -#define CLK_TSADC 260 -#define CLK_PPMUFILE 261 -#define CLK_NFCON 262 -#define CLK_USBDEVICE 263 -#define CLK_USBHOST 264 -#define CLK_SROMC 265 -#define CLK_SDMMC2 266 -#define CLK_SDMMC1 267 -#define CLK_SDMMC0 268 -#define CLK_PDMA1 269 -#define CLK_PDMA0 270 -#define CLK_SPDIF 271 -#define CLK_PWM 272 -#define CLK_PCM2 273 -#define CLK_PCM1 274 -#define CLK_I2S1 275 -#define CLK_SPI2 276 -#define CLK_SPI1 277 -#define CLK_SPI0 278 -#define CLK_I2CHDMI 279 -#define CLK_I2C7 280 -#define CLK_I2C6 281 -#define CLK_I2C5 282 -#define CLK_I2C4 283 -#define CLK_I2C3 284 -#define CLK_I2C2 285 -#define CLK_I2C1 286 -#define CLK_I2C0 287 -#define CLK_UART3 288 -#define CLK_UART2 289 -#define CLK_UART1 290 -#define CLK_UART0 291 - -/* Special clocks */ -#define CLK_SCLK_PXLAYSNC_CSIS1_FIMC 330 -#define CLK_SCLK_PXLAYSNC_CSIS0_FIMC 331 -#define CLK_SCLK_JPEG 332 -#define CLK_SCLK_CSIS1 333 -#define 
CLK_SCLK_CSIS0 334 -#define CLK_SCLK_CAM1 335 -#define CLK_SCLK_FIMC3_LCLK 336 -#define CLK_SCLK_FIMC2_LCLK 337 -#define CLK_SCLK_FIMC1_LCLK 338 -#define CLK_SCLK_FIMC0_LCLK 339 -#define CLK_SCLK_PIXEL 340 -#define CLK_SCLK_HDMI 341 -#define CLK_SCLK_MIXER 342 -#define CLK_SCLK_MFC 343 -#define CLK_SCLK_G3D 344 -#define CLK_SCLK_MIPIDPHY4L 345 -#define CLK_SCLK_MIPI0 346 -#define CLK_SCLK_MDNIE0 347 -#define CLK_SCLK_FIMD0 348 -#define CLK_SCLK_PCM0 349 -#define CLK_SCLK_AUDIO0 350 -#define CLK_SCLK_TSADC 351 -#define CLK_SCLK_EBI 352 -#define CLK_SCLK_MMC2 353 -#define CLK_SCLK_MMC1 354 -#define CLK_SCLK_MMC0 355 -#define CLK_SCLK_I2S 356 -#define CLK_SCLK_PCM2 357 -#define CLK_SCLK_PCM1 358 -#define CLK_SCLK_AUDIO2 359 -#define CLK_SCLK_AUDIO1 360 -#define CLK_SCLK_SPDIF 361 -#define CLK_SCLK_SPI2 362 -#define CLK_SCLK_SPI1 363 -#define CLK_SCLK_SPI0 364 -#define CLK_SCLK_UART3 365 -#define CLK_SCLK_UART2 366 -#define CLK_SCLK_UART1 367 -#define CLK_SCLK_UART0 368 -#define CLK_SCLK_HDMIPHY 369 - -/* - * Total number of clocks of main CMU. - * NOTE: Must be equal to last clock ID increased by one. - */ -#define CLK_NR_CLKS 370 - -/* - * CMU DMC - */ -#define CLK_DMC_FOUT_MPLL 1 -#define CLK_DMC_FOUT_BPLL 2 - -#define CLK_DMC_MOUT_MPLL 3 -#define CLK_DMC_MOUT_BPLL 4 -#define CLK_DMC_MOUT_DPHY 5 -#define CLK_DMC_MOUT_DMC_BUS 6 - -#define CLK_DMC_DIV_DMC 7 -#define CLK_DMC_DIV_DPHY 8 -#define CLK_DMC_DIV_DMC_PRE 9 -#define CLK_DMC_DIV_DMCP 10 -#define CLK_DMC_DIV_DMCD 11 -#define CLK_DMC_DIV_MPLL_PRE 12 - -/* - * Total number of clocks of CMU_DMC. - * NOTE: Must be equal to highest clock ID increased by one. - */ -#define NR_CLKS_DMC 13 - -#endif /* _DT_BINDINGS_CLOCK_SAMSUNG_EXYNOS4415_CLOCK_H */ diff --git a/include/dt-bindings/clock/exynos5433.h b/include/dt-bindings/clock/exynos5433.h index 4fa6bb2136e3..be39d23e6a32 100644 --- a/include/dt-bindings/clock/exynos5433.h +++ b/include/dt-bindings/clock/exynos5433.h @@ -771,7 +771,10 @@ #define CLK_PCLK_DECON 113 -#define DISP_NR_CLK 114 +#define CLK_PHYCLK_MIPIDPHY0_BITCLKDIV8_PHY 114 +#define CLK_PHYCLK_MIPIDPHY0_RXCLKESC0_PHY 115 + +#define DISP_NR_CLK 116 /* CMU_AUD */ #define CLK_MOUT_AUD_PLL_USER 1 diff --git a/include/dt-bindings/clock/gxbb-clkc.h b/include/dt-bindings/clock/gxbb-clkc.h index baade6f429d0..692846c7941b 100644 --- a/include/dt-bindings/clock/gxbb-clkc.h +++ b/include/dt-bindings/clock/gxbb-clkc.h @@ -14,15 +14,21 @@ #define CLKID_MPLL2 15 #define CLKID_SPI 34 #define CLKID_I2C 22 +#define CLKID_SAR_ADC 23 #define CLKID_ETH 36 #define CLKID_USB0 50 #define CLKID_USB1 51 #define CLKID_USB 55 +#define CLKID_HDMI_PCLK 63 #define CLKID_USB1_DDR_BRIDGE 64 #define CLKID_USB0_DDR_BRIDGE 65 +#define CLKID_SANA 69 +#define CLKID_GCLK_VENCI_INT0 77 #define CLKID_AO_I2C 93 #define CLKID_SD_EMMC_A 94 #define CLKID_SD_EMMC_B 95 #define CLKID_SD_EMMC_C 96 +#define CLKID_SAR_ADC_CLK 97 +#define CLKID_SAR_ADC_SEL 98 #endif /* __GXBB_CLKC_H */ diff --git a/include/dt-bindings/clock/hi3660-clock.h b/include/dt-bindings/clock/hi3660-clock.h new file mode 100644 index 000000000000..1c00b7fe296f --- /dev/null +++ b/include/dt-bindings/clock/hi3660-clock.h @@ -0,0 +1,194 @@ +/* + * Copyright (c) 2016-2017 Linaro Ltd. + * Copyright (c) 2016-2017 HiSilicon Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef __DTS_HI3660_CLOCK_H +#define __DTS_HI3660_CLOCK_H + +/* fixed rate clocks */ +#define HI3660_CLKIN_SYS 0 +#define HI3660_CLKIN_REF 1 +#define HI3660_CLK_FLL_SRC 2 +#define HI3660_CLK_PPLL0 3 +#define HI3660_CLK_PPLL1 4 +#define HI3660_CLK_PPLL2 5 +#define HI3660_CLK_PPLL3 6 +#define HI3660_CLK_SCPLL 7 +#define HI3660_PCLK 8 +#define HI3660_CLK_UART0_DBG 9 +#define HI3660_CLK_UART6 10 +#define HI3660_OSC32K 11 +#define HI3660_OSC19M 12 +#define HI3660_CLK_480M 13 +#define HI3660_CLK_INV 14 + +/* clk in crgctrl */ +#define HI3660_FACTOR_UART3 15 +#define HI3660_CLK_FACTOR_MMC 16 +#define HI3660_CLK_GATE_I2C0 17 +#define HI3660_CLK_GATE_I2C1 18 +#define HI3660_CLK_GATE_I2C2 19 +#define HI3660_CLK_GATE_I2C6 20 +#define HI3660_CLK_DIV_SYSBUS 21 +#define HI3660_CLK_DIV_320M 22 +#define HI3660_CLK_DIV_A53 23 +#define HI3660_CLK_GATE_SPI0 24 +#define HI3660_CLK_GATE_SPI2 25 +#define HI3660_PCIEPHY_REF 26 +#define HI3660_CLK_ABB_USB 27 +#define HI3660_HCLK_GATE_SDIO0 28 +#define HI3660_HCLK_GATE_SD 29 +#define HI3660_CLK_GATE_AOMM 30 +#define HI3660_PCLK_GPIO0 31 +#define HI3660_PCLK_GPIO1 32 +#define HI3660_PCLK_GPIO2 33 +#define HI3660_PCLK_GPIO3 34 +#define HI3660_PCLK_GPIO4 35 +#define HI3660_PCLK_GPIO5 36 +#define HI3660_PCLK_GPIO6 37 +#define HI3660_PCLK_GPIO7 38 +#define HI3660_PCLK_GPIO8 39 +#define HI3660_PCLK_GPIO9 40 +#define HI3660_PCLK_GPIO10 41 +#define HI3660_PCLK_GPIO11 42 +#define HI3660_PCLK_GPIO12 43 +#define HI3660_PCLK_GPIO13 44 +#define HI3660_PCLK_GPIO14 45 +#define HI3660_PCLK_GPIO15 46 +#define HI3660_PCLK_GPIO16 47 +#define HI3660_PCLK_GPIO17 48 +#define HI3660_PCLK_GPIO18 49 +#define HI3660_PCLK_GPIO19 50 +#define HI3660_PCLK_GPIO20 51 +#define HI3660_PCLK_GPIO21 52 +#define HI3660_CLK_GATE_SPI3 53 +#define HI3660_CLK_GATE_I2C7 54 +#define HI3660_CLK_GATE_I2C3 55 +#define HI3660_CLK_GATE_SPI1 56 +#define HI3660_CLK_GATE_UART1 57 +#define HI3660_CLK_GATE_UART2 58 +#define HI3660_CLK_GATE_UART4 59 +#define HI3660_CLK_GATE_UART5 60 +#define HI3660_CLK_GATE_I2C4 61 +#define HI3660_CLK_GATE_DMAC 62 +#define HI3660_PCLK_GATE_DSS 63 +#define HI3660_ACLK_GATE_DSS 64 +#define HI3660_CLK_GATE_LDI1 65 +#define HI3660_CLK_GATE_LDI0 66 +#define HI3660_CLK_GATE_VIVOBUS 67 +#define HI3660_CLK_GATE_EDC0 68 +#define HI3660_CLK_GATE_TXDPHY0_CFG 69 +#define HI3660_CLK_GATE_TXDPHY0_REF 70 +#define HI3660_CLK_GATE_TXDPHY1_CFG 71 +#define HI3660_CLK_GATE_TXDPHY1_REF 72 +#define HI3660_ACLK_GATE_USB3OTG 73 +#define HI3660_CLK_GATE_SPI4 74 +#define HI3660_CLK_GATE_SD 75 +#define HI3660_CLK_GATE_SDIO0 76 +#define HI3660_CLK_GATE_UFS_SUBSYS 77 +#define HI3660_PCLK_GATE_DSI0 78 +#define HI3660_PCLK_GATE_DSI1 79 +#define HI3660_ACLK_GATE_PCIE 80 +#define HI3660_PCLK_GATE_PCIE_SYS 81 +#define HI3660_CLK_GATE_PCIEAUX 82 +#define HI3660_PCLK_GATE_PCIE_PHY 83 +#define HI3660_CLK_ANDGT_LDI0 84 +#define HI3660_CLK_ANDGT_LDI1 85 +#define HI3660_CLK_ANDGT_EDC0 86 +#define HI3660_CLK_GATE_UFSPHY_GT 87 +#define HI3660_CLK_ANDGT_MMC 88 +#define HI3660_CLK_ANDGT_SD 89 +#define HI3660_CLK_A53HPM_ANDGT 90 +#define HI3660_CLK_ANDGT_SDIO 91 +#define HI3660_CLK_ANDGT_UART0 92 +#define HI3660_CLK_ANDGT_UART1 93 +#define HI3660_CLK_ANDGT_UARTH 94 +#define HI3660_CLK_ANDGT_SPI 95 +#define HI3660_CLK_VIVOBUS_ANDGT 96 +#define HI3660_CLK_AOMM_ANDGT 97 +#define HI3660_CLK_320M_PLL_GT 98 +#define HI3660_AUTODIV_EMMC0BUS 99 +#define HI3660_AUTODIV_SYSBUS 100 +#define HI3660_CLK_GATE_UFSPHY_CFG 101 +#define HI3660_CLK_GATE_UFSIO_REF 102 +#define HI3660_CLK_MUX_SYSBUS 103 +#define HI3660_CLK_MUX_UART0 104 
+#define HI3660_CLK_MUX_UART1 105 +#define HI3660_CLK_MUX_UARTH 106 +#define HI3660_CLK_MUX_SPI 107 +#define HI3660_CLK_MUX_I2C 108 +#define HI3660_CLK_MUX_MMC_PLL 109 +#define HI3660_CLK_MUX_LDI1 110 +#define HI3660_CLK_MUX_LDI0 111 +#define HI3660_CLK_MUX_SD_PLL 112 +#define HI3660_CLK_MUX_SD_SYS 113 +#define HI3660_CLK_MUX_EDC0 114 +#define HI3660_CLK_MUX_SDIO_SYS 115 +#define HI3660_CLK_MUX_SDIO_PLL 116 +#define HI3660_CLK_MUX_VIVOBUS 117 +#define HI3660_CLK_MUX_A53HPM 118 +#define HI3660_CLK_MUX_320M 119 +#define HI3660_CLK_MUX_IOPERI 120 +#define HI3660_CLK_DIV_UART0 121 +#define HI3660_CLK_DIV_UART1 122 +#define HI3660_CLK_DIV_UARTH 123 +#define HI3660_CLK_DIV_MMC 124 +#define HI3660_CLK_DIV_SD 125 +#define HI3660_CLK_DIV_EDC0 126 +#define HI3660_CLK_DIV_LDI0 127 +#define HI3660_CLK_DIV_SDIO 128 +#define HI3660_CLK_DIV_LDI1 129 +#define HI3660_CLK_DIV_SPI 130 +#define HI3660_CLK_DIV_VIVOBUS 131 +#define HI3660_CLK_DIV_I2C 132 +#define HI3660_CLK_DIV_UFSPHY 133 +#define HI3660_CLK_DIV_CFGBUS 134 +#define HI3660_CLK_DIV_MMC0BUS 135 +#define HI3660_CLK_DIV_MMC1BUS 136 +#define HI3660_CLK_DIV_UFSPERI 137 +#define HI3660_CLK_DIV_AOMM 138 +#define HI3660_CLK_DIV_IOPERI 139 + +/* clk in pmuctrl */ +#define HI3660_GATE_ABB_192 0 + +/* clk in pctrl */ +#define HI3660_GATE_UFS_TCXO_EN 0 +#define HI3660_GATE_USB_TCXO_EN 1 + +/* clk in sctrl */ +#define HI3660_PCLK_AO_GPIO0 0 +#define HI3660_PCLK_AO_GPIO1 1 +#define HI3660_PCLK_AO_GPIO2 2 +#define HI3660_PCLK_AO_GPIO3 3 +#define HI3660_PCLK_AO_GPIO4 4 +#define HI3660_PCLK_AO_GPIO5 5 +#define HI3660_PCLK_AO_GPIO6 6 +#define HI3660_PCLK_GATE_MMBUF 7 +#define HI3660_CLK_GATE_DSS_AXI_MM 8 +#define HI3660_PCLK_MMBUF_ANDGT 9 +#define HI3660_CLK_MMBUF_PLL_ANDGT 10 +#define HI3660_CLK_FLL_MMBUF_ANDGT 11 +#define HI3660_CLK_SYS_MMBUF_ANDGT 12 +#define HI3660_CLK_GATE_PCIEPHY_GT 13 +#define HI3660_ACLK_MUX_MMBUF 14 +#define HI3660_CLK_SW_MMBUF 15 +#define HI3660_CLK_DIV_AOBUS 16 +#define HI3660_PCLK_DIV_MMBUF 17 +#define HI3660_ACLK_DIV_MMBUF 18 +#define HI3660_CLK_DIV_PCIEPHY 19 + +/* clk in iomcu */ +#define HI3660_CLK_I2C0_IOMCU 0 +#define HI3660_CLK_I2C1_IOMCU 1 +#define HI3660_CLK_I2C2_IOMCU 2 +#define HI3660_CLK_I2C6_IOMCU 3 +#define HI3660_CLK_IOMCU_PERI0 4 + +#endif /* __DTS_HI3660_CLOCK_H */ diff --git a/include/dt-bindings/clock/imx7d-clock.h b/include/dt-bindings/clock/imx7d-clock.h index 1183347c383f..a7a1a50f33ef 100644 --- a/include/dt-bindings/clock/imx7d-clock.h +++ b/include/dt-bindings/clock/imx7d-clock.h @@ -449,5 +449,6 @@ #define IMX7D_ADC_ROOT_CLK 436 #define IMX7D_CLK_ARM 437 #define IMX7D_CKIL 438 -#define IMX7D_CLK_END 439 +#define IMX7D_OCOTP_CLK 439 +#define IMX7D_CLK_END 440 #endif /* __DT_BINDINGS_CLOCK_IMX7D_H */ diff --git a/include/dt-bindings/clock/qcom,gcc-ipq4019.h b/include/dt-bindings/clock/qcom,gcc-ipq4019.h index 6240e5b0e900..7e8a7be6dcda 100644 --- a/include/dt-bindings/clock/qcom,gcc-ipq4019.h +++ b/include/dt-bindings/clock/qcom,gcc-ipq4019.h @@ -81,6 +81,17 @@ #define GCC_WCSS5G_CLK 62 #define GCC_WCSS5G_REF_CLK 63 #define GCC_WCSS5G_RTC_CLK 64 +#define GCC_APSS_DDRPLL_VCO 65 +#define GCC_SDCC_PLLDIV_CLK 66 +#define GCC_FEPLL_VCO 67 +#define GCC_FEPLL125_CLK 68 +#define GCC_FEPLL125DLY_CLK 69 +#define GCC_FEPLL200_CLK 70 +#define GCC_FEPLL500_CLK 71 +#define GCC_FEPLL_WCSS2G_CLK 72 +#define GCC_FEPLL_WCSS5G_CLK 73 +#define GCC_APSS_CPU_PLLDIV_CLK 74 +#define GCC_PCNOC_AHB_CLK_SRC 75 #define WIFI0_CPU_INIT_RESET 0 #define WIFI0_RADIO_SRIF_RESET 1 diff --git a/include/dt-bindings/clock/qcom,gcc-mdm9615.h 
b/include/dt-bindings/clock/qcom,gcc-mdm9615.h index 9ab2c4087120..787e448958bd 100644 --- a/include/dt-bindings/clock/qcom,gcc-mdm9615.h +++ b/include/dt-bindings/clock/qcom,gcc-mdm9615.h @@ -323,5 +323,7 @@ #define CE3_H_CLK 305 #define USB_HS1_SYSTEM_CLK_SRC 306 #define USB_HS1_SYSTEM_CLK 307 +#define EBI2_CLK 308 +#define EBI2_AON_CLK 309 #endif diff --git a/include/dt-bindings/clock/qcom,gcc-msm8994.h b/include/dt-bindings/clock/qcom,gcc-msm8994.h index 8fa535be2ebc..df47da0860f7 100644 --- a/include/dt-bindings/clock/qcom,gcc-msm8994.h +++ b/include/dt-bindings/clock/qcom,gcc-msm8994.h @@ -133,5 +133,6 @@ #define GCC_USB30_MOCK_UTMI_CLK 115 #define GCC_USB3_PHY_AUX_CLK 116 #define GCC_USB_HS_SYSTEM_CLK 117 +#define GCC_SDCC1_AHB_CLK 118 #endif diff --git a/include/dt-bindings/clock/qcom,gcc-msm8996.h b/include/dt-bindings/clock/qcom,gcc-msm8996.h index 1828723eb621..1f5c42254798 100644 --- a/include/dt-bindings/clock/qcom,gcc-msm8996.h +++ b/include/dt-bindings/clock/qcom,gcc-msm8996.h @@ -339,6 +339,7 @@ #define GCC_PCIE_PHY_COM_NOCSR_BCR 102 #define GCC_USB3_PHY_BCR 103 #define GCC_USB3PHY_PHY_BCR 104 +#define GCC_MSS_RESTART 105 /* Indexes for GDSCs */ diff --git a/include/dt-bindings/clock/qcom,rpmcc.h b/include/dt-bindings/clock/qcom,rpmcc.h index 5924cdb71336..96b63c00249e 100644 --- a/include/dt-bindings/clock/qcom,rpmcc.h +++ b/include/dt-bindings/clock/qcom,rpmcc.h @@ -14,7 +14,7 @@ #ifndef _DT_BINDINGS_CLK_MSM_RPMCC_H #define _DT_BINDINGS_CLK_MSM_RPMCC_H -/* apq8064 */ +/* RPM clocks */ #define RPM_PXO_CLK 0 #define RPM_PXO_A_CLK 1 #define RPM_CXO_CLK 2 @@ -38,7 +38,7 @@ #define RPM_SFPB_CLK 20 #define RPM_SFPB_A_CLK 21 -/* msm8916 */ +/* SMD RPM clocks */ #define RPM_SMD_XO_CLK_SRC 0 #define RPM_SMD_XO_A_CLK_SRC 1 #define RPM_SMD_PCNOC_CLK 2 @@ -65,5 +65,41 @@ #define RPM_SMD_RF_CLK1_A_PIN 23 #define RPM_SMD_RF_CLK2_PIN 24 #define RPM_SMD_RF_CLK2_A_PIN 25 +#define RPM_SMD_PNOC_CLK 26 +#define RPM_SMD_PNOC_A_CLK 27 +#define RPM_SMD_CNOC_CLK 28 +#define RPM_SMD_CNOC_A_CLK 29 +#define RPM_SMD_MMSSNOC_AHB_CLK 30 +#define RPM_SMD_MMSSNOC_AHB_A_CLK 31 +#define RPM_SMD_GFX3D_CLK_SRC 32 +#define RPM_SMD_GFX3D_A_CLK_SRC 33 +#define RPM_SMD_OCMEMGX_CLK 34 +#define RPM_SMD_OCMEMGX_A_CLK 35 +#define RPM_SMD_CXO_D0 36 +#define RPM_SMD_CXO_D0_A 37 +#define RPM_SMD_CXO_D1 38 +#define RPM_SMD_CXO_D1_A 39 +#define RPM_SMD_CXO_A0 40 +#define RPM_SMD_CXO_A0_A 41 +#define RPM_SMD_CXO_A1 42 +#define RPM_SMD_CXO_A1_A 43 +#define RPM_SMD_CXO_A2 44 +#define RPM_SMD_CXO_A2_A 45 +#define RPM_SMD_DIV_CLK1 46 +#define RPM_SMD_DIV_A_CLK1 47 +#define RPM_SMD_DIV_CLK2 48 +#define RPM_SMD_DIV_A_CLK2 49 +#define RPM_SMD_DIFF_CLK 50 +#define RPM_SMD_DIFF_A_CLK 51 +#define RPM_SMD_CXO_D0_PIN 52 +#define RPM_SMD_CXO_D0_A_PIN 53 +#define RPM_SMD_CXO_D1_PIN 54 +#define RPM_SMD_CXO_D1_A_PIN 55 +#define RPM_SMD_CXO_A0_PIN 56 +#define RPM_SMD_CXO_A0_A_PIN 57 +#define RPM_SMD_CXO_A1_PIN 58 +#define RPM_SMD_CXO_A1_A_PIN 59 +#define RPM_SMD_CXO_A2_PIN 60 +#define RPM_SMD_CXO_A2_A_PIN 61 #endif diff --git a/include/dt-bindings/clock/r7s72100-clock.h b/include/dt-bindings/clock/r7s72100-clock.h index 29e01ed10e74..ce09915c298f 100644 --- a/include/dt-bindings/clock/r7s72100-clock.h +++ b/include/dt-bindings/clock/r7s72100-clock.h @@ -25,6 +25,10 @@ #define R7S72100_CLK_SCIF6 1 #define R7S72100_CLK_SCIF7 0 +/* MSTP5 */ +#define R7S72100_CLK_OSTM0 1 +#define R7S72100_CLK_OSTM1 0 + /* MSTP7 */ #define R7S72100_CLK_ETHER 4 diff --git a/include/dt-bindings/clock/rk3188-cru-common.h 
b/include/dt-bindings/clock/rk3188-cru-common.h index d141c1f0c778..eff4319d008b 100644 --- a/include/dt-bindings/clock/rk3188-cru-common.h +++ b/include/dt-bindings/clock/rk3188-cru-common.h @@ -108,6 +108,8 @@ #define PCLK_TSADC 349 #define PCLK_CPU 350 #define PCLK_PERI 351 +#define PCLK_DDRUPCTL 352 +#define PCLK_PUBL 353 /* hclk gates */ #define HCLK_SDMMC 448 diff --git a/include/dt-bindings/clock/rk3288-cru.h b/include/dt-bindings/clock/rk3288-cru.h index 9a586e2d9c91..d7b6c83ea63f 100644 --- a/include/dt-bindings/clock/rk3288-cru.h +++ b/include/dt-bindings/clock/rk3288-cru.h @@ -88,6 +88,7 @@ #define SCLK_PVTM_GPU 124 #define SCLK_CRYPTO 125 #define SCLK_MIPIDSI_24M 126 +#define SCLK_VIP_OUT 127 #define SCLK_MAC 151 #define SCLK_MACREF_OUT 152 @@ -168,6 +169,7 @@ #define PCLK_WDT 368 #define PCLK_EFUSE256 369 #define PCLK_EFUSE1024 370 +#define PCLK_ISP_IN 371 /* hclk gates */ #define HCLK_GPS 448 diff --git a/include/dt-bindings/clock/rk3328-cru.h b/include/dt-bindings/clock/rk3328-cru.h new file mode 100644 index 000000000000..ee702c8e4c09 --- /dev/null +++ b/include/dt-bindings/clock/rk3328-cru.h @@ -0,0 +1,400 @@ +/* + * Copyright (c) 2016 Rockchip Electronics Co. Ltd. + * Author: Elaine <zhangqing@rock-chips.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3328_H +#define _DT_BINDINGS_CLK_ROCKCHIP_RK3328_H + +/* core clocks */ +#define PLL_APLL 1 +#define PLL_DPLL 2 +#define PLL_CPLL 3 +#define PLL_GPLL 4 +#define PLL_NPLL 5 +#define ARMCLK 6 + +/* sclk gates (special clocks) */ +#define SCLK_RTC32K 30 +#define SCLK_SDMMC_EXT 31 +#define SCLK_SPI 32 +#define SCLK_SDMMC 33 +#define SCLK_SDIO 34 +#define SCLK_EMMC 35 +#define SCLK_TSADC 36 +#define SCLK_SARADC 37 +#define SCLK_UART0 38 +#define SCLK_UART1 39 +#define SCLK_UART2 40 +#define SCLK_I2S0 41 +#define SCLK_I2S1 42 +#define SCLK_I2S2 43 +#define SCLK_I2S1_OUT 44 +#define SCLK_I2S2_OUT 45 +#define SCLK_SPDIF 46 +#define SCLK_TIMER0 47 +#define SCLK_TIMER1 48 +#define SCLK_TIMER2 49 +#define SCLK_TIMER3 50 +#define SCLK_TIMER4 51 +#define SCLK_TIMER5 52 +#define SCLK_WIFI 53 +#define SCLK_CIF_OUT 54 +#define SCLK_I2C0 55 +#define SCLK_I2C1 56 +#define SCLK_I2C2 57 +#define SCLK_I2C3 58 +#define SCLK_CRYPTO 59 +#define SCLK_PWM 60 +#define SCLK_PDM 61 +#define SCLK_EFUSE 62 +#define SCLK_OTP 63 +#define SCLK_DDRCLK 64 +#define SCLK_VDEC_CABAC 65 +#define SCLK_VDEC_CORE 66 +#define SCLK_VENC_DSP 67 +#define SCLK_VENC_CORE 68 +#define SCLK_RGA 69 +#define SCLK_HDMI_SFC 70 +#define SCLK_HDMI_CEC 71 +#define SCLK_USB3_REF 72 +#define SCLK_USB3_SUSPEND 73 +#define SCLK_SDMMC_DRV 74 +#define SCLK_SDIO_DRV 75 +#define SCLK_EMMC_DRV 76 +#define SCLK_SDMMC_EXT_DRV 77 +#define SCLK_SDMMC_SAMPLE 78 +#define SCLK_SDIO_SAMPLE 79 +#define SCLK_EMMC_SAMPLE 80 +#define SCLK_SDMMC_EXT_SAMPLE 81 +#define SCLK_VOP 82 +#define SCLK_MAC2PHY_RXTX 83 +#define SCLK_MAC2PHY_SRC 84 +#define SCLK_MAC2PHY_REF 85 +#define SCLK_MAC2PHY_OUT 86 +#define SCLK_MAC2IO_RX 87 +#define SCLK_MAC2IO_TX 88 +#define SCLK_MAC2IO_REFOUT 89 +#define 
SCLK_MAC2IO_REF 90 +#define SCLK_MAC2IO_OUT 91 +#define SCLK_TSP 92 +#define SCLK_HSADC_TSP 93 +#define SCLK_USB3PHY_REF 94 +#define SCLK_REF_USB3OTG 95 +#define SCLK_USB3OTG_REF 96 +#define SCLK_USB3OTG_SUSPEND 97 +#define SCLK_REF_USB3OTG_SRC 98 +#define SCLK_MAC2IO_SRC 99 +#define SCLK_MAC2IO 100 +#define SCLK_MAC2PHY 101 + +/* dclk gates */ +#define DCLK_LCDC 120 +#define DCLK_HDMIPHY 121 +#define HDMIPHY 122 +#define USB480M 123 +#define DCLK_LCDC_SRC 124 + +/* aclk gates */ +#define ACLK_AXISRAM 130 +#define ACLK_VOP_PRE 131 +#define ACLK_USB3OTG 132 +#define ACLK_RGA_PRE 133 +#define ACLK_DMAC 134 +#define ACLK_GPU 135 +#define ACLK_BUS_PRE 136 +#define ACLK_PERI_PRE 137 +#define ACLK_RKVDEC_PRE 138 +#define ACLK_RKVDEC 139 +#define ACLK_RKVENC 140 +#define ACLK_VPU_PRE 141 +#define ACLK_VIO_PRE 142 +#define ACLK_VPU 143 +#define ACLK_VIO 144 +#define ACLK_VOP 145 +#define ACLK_GMAC 146 +#define ACLK_H265 147 +#define ACLK_H264 148 +#define ACLK_MAC2PHY 149 +#define ACLK_MAC2IO 150 +#define ACLK_DCF 151 +#define ACLK_TSP 152 +#define ACLK_PERI 153 +#define ACLK_RGA 154 +#define ACLK_IEP 155 +#define ACLK_CIF 156 +#define ACLK_HDCP 157 + +/* pclk gates */ +#define PCLK_GPIO0 200 +#define PCLK_GPIO1 201 +#define PCLK_GPIO2 202 +#define PCLK_GPIO3 203 +#define PCLK_GRF 204 +#define PCLK_I2C0 205 +#define PCLK_I2C1 206 +#define PCLK_I2C2 207 +#define PCLK_I2C3 208 +#define PCLK_SPI 209 +#define PCLK_UART0 210 +#define PCLK_UART1 211 +#define PCLK_UART2 212 +#define PCLK_TSADC 213 +#define PCLK_PWM 214 +#define PCLK_TIMER 215 +#define PCLK_BUS_PRE 216 +#define PCLK_PERI_PRE 217 +#define PCLK_HDMI_CTRL 218 +#define PCLK_HDMI_PHY 219 +#define PCLK_GMAC 220 +#define PCLK_H265 221 +#define PCLK_MAC2PHY 222 +#define PCLK_MAC2IO 223 +#define PCLK_USB3PHY_OTG 224 +#define PCLK_USB3PHY_PIPE 225 +#define PCLK_USB3_GRF 226 +#define PCLK_USB2_GRF 227 +#define PCLK_HDMIPHY 228 +#define PCLK_DDR 229 +#define PCLK_PERI 230 +#define PCLK_HDMI 231 +#define PCLK_HDCP 232 +#define PCLK_DCF 233 +#define PCLK_SARADC 234 + +/* hclk gates */ +#define HCLK_PERI 308 +#define HCLK_TSP 309 +#define HCLK_GMAC 310 +#define HCLK_I2S0_8CH 311 +#define HCLK_I2S1_8CH 312 +#define HCLK_I2S2_2CH 313 +#define HCLK_SPDIF_8CH 314 +#define HCLK_VOP 315 +#define HCLK_NANDC 316 +#define HCLK_SDMMC 317 +#define HCLK_SDIO 318 +#define HCLK_EMMC 319 +#define HCLK_SDMMC_EXT 320 +#define HCLK_RKVDEC_PRE 321 +#define HCLK_RKVDEC 322 +#define HCLK_RKVENC 323 +#define HCLK_VPU_PRE 324 +#define HCLK_VIO_PRE 325 +#define HCLK_VPU 326 +#define HCLK_VIO 327 +#define HCLK_BUS_PRE 328 +#define HCLK_PERI_PRE 329 +#define HCLK_H264 330 +#define HCLK_CIF 331 +#define HCLK_OTG_PMU 332 +#define HCLK_OTG 333 +#define HCLK_HOST0 334 +#define HCLK_HOST0_ARB 335 +#define HCLK_CRYPTO_MST 336 +#define HCLK_CRYPTO_SLV 337 +#define HCLK_PDM 338 +#define HCLK_IEP 339 +#define HCLK_RGA 340 +#define HCLK_HDCP 341 + +#define CLK_NR_CLKS (HCLK_HDCP + 1) + +/* soft-reset indices */ +#define SRST_CORE0_PO 0 +#define SRST_CORE1_PO 1 +#define SRST_CORE2_PO 2 +#define SRST_CORE3_PO 3 +#define SRST_CORE0 4 +#define SRST_CORE1 5 +#define SRST_CORE2 6 +#define SRST_CORE3 7 +#define SRST_CORE0_DBG 8 +#define SRST_CORE1_DBG 9 +#define SRST_CORE2_DBG 10 +#define SRST_CORE3_DBG 11 +#define SRST_TOPDBG 12 +#define SRST_CORE_NIU 13 +#define SRST_STRC_A 14 +#define SRST_L2C 15 + +#define SRST_A53_GIC 18 +#define SRST_DAP 19 +#define SRST_PMU_P 21 +#define SRST_EFUSE 22 +#define SRST_BUSSYS_H 23 +#define SRST_BUSSYS_P 24 +#define SRST_SPDIF 25 +#define SRST_INTMEM 26
+#define SRST_ROM 27 +#define SRST_GPIO0 28 +#define SRST_GPIO1 29 +#define SRST_GPIO2 30 +#define SRST_GPIO3 31 + +#define SRST_I2S0 32 +#define SRST_I2S1 33 +#define SRST_I2S2 34 +#define SRST_I2S0_H 35 +#define SRST_I2S1_H 36 +#define SRST_I2S2_H 37 +#define SRST_UART0 38 +#define SRST_UART1 39 +#define SRST_UART2 40 +#define SRST_UART0_P 41 +#define SRST_UART1_P 42 +#define SRST_UART2_P 43 +#define SRST_I2C0 44 +#define SRST_I2C1 45 +#define SRST_I2C2 46 +#define SRST_I2C3 47 + +#define SRST_I2C0_P 48 +#define SRST_I2C1_P 49 +#define SRST_I2C2_P 50 +#define SRST_I2C3_P 51 +#define SRST_EFUSE_SE_P 52 +#define SRST_EFUSE_NS_P 53 +#define SRST_PWM0 54 +#define SRST_PWM0_P 55 +#define SRST_DMA 56 +#define SRST_TSP_A 57 +#define SRST_TSP_H 58 +#define SRST_TSP 59 +#define SRST_TSP_HSADC 60 +#define SRST_DCF_A 61 +#define SRST_DCF_P 62 + +#define SRST_SCR 64 +#define SRST_SPI 65 +#define SRST_TSADC 66 +#define SRST_TSADC_P 67 +#define SRST_CRYPTO 68 +#define SRST_SGRF 69 +#define SRST_GRF 70 +#define SRST_USB_GRF 71 +#define SRST_TIMER_6CH_P 72 +#define SRST_TIMER0 73 +#define SRST_TIMER1 74 +#define SRST_TIMER2 75 +#define SRST_TIMER3 76 +#define SRST_TIMER4 77 +#define SRST_TIMER5 78 +#define SRST_USB3GRF 79 + +#define SRST_PHYNIU 80 +#define SRST_HDMIPHY 81 +#define SRST_VDAC 82 +#define SRST_ACODEC_p 83 +#define SRST_SARADC 85 +#define SRST_SARADC_P 86 +#define SRST_GRF_DDR 87 +#define SRST_DFIMON 88 +#define SRST_MSCH 89 +#define SRST_DDRMSCH 91 +#define SRST_DDRCTRL 92 +#define SRST_DDRCTRL_P 93 +#define SRST_DDRPHY 94 +#define SRST_DDRPHY_P 95 + +#define SRST_GMAC_NIU_A 96 +#define SRST_GMAC_NIU_P 97 +#define SRST_GMAC2PHY_A 98 +#define SRST_GMAC2IO_A 99 +#define SRST_MACPHY 100 +#define SRST_OTP_PHY 101 +#define SRST_GPU_A 102 +#define SRST_GPU_NIU_A 103 +#define SRST_SDMMCEXT 104 +#define SRST_PERIPH_NIU_A 105 +#define SRST_PERIHP_NIU_H 106 +#define SRST_PERIHP_P 107 +#define SRST_PERIPHSYS_H 108 +#define SRST_MMC0 109 +#define SRST_SDIO 110 +#define SRST_EMMC 111 + +#define SRST_USB2OTG_H 112 +#define SRST_USB2OTG 113 +#define SRST_USB2OTG_ADP 114 +#define SRST_USB2HOST_H 115 +#define SRST_USB2HOST_ARB 116 +#define SRST_USB2HOST_AUX 117 +#define SRST_USB2HOST_EHCIPHY 118 +#define SRST_USB2HOST_UTMI 119 +#define SRST_USB3OTG 120 +#define SRST_USBPOR 121 +#define SRST_USB2OTG_UTMI 122 +#define SRST_USB2HOST_PHY_UTMI 123 +#define SRST_USB3OTG_UTMI 124 +#define SRST_USB3PHY_U2 125 +#define SRST_USB3PHY_U3 126 +#define SRST_USB3PHY_PIPE 127 + +#define SRST_VIO_A 128 +#define SRST_VIO_BUS_H 129 +#define SRST_VIO_H2P_H 130 +#define SRST_VIO_ARBI_H 131 +#define SRST_VOP_NIU_A 132 +#define SRST_VOP_A 133 +#define SRST_VOP_H 134 +#define SRST_VOP_D 135 +#define SRST_RGA 136 +#define SRST_RGA_NIU_A 137 +#define SRST_RGA_A 138 +#define SRST_RGA_H 139 +#define SRST_IEP_A 140 +#define SRST_IEP_H 141 +#define SRST_HDMI 142 +#define SRST_HDMI_P 143 + +#define SRST_HDCP_A 144 +#define SRST_HDCP 145 +#define SRST_HDCP_H 146 +#define SRST_CIF_A 147 +#define SRST_CIF_H 148 +#define SRST_CIF_P 149 +#define SRST_OTP_P 150 +#define SRST_OTP_SBPI 151 +#define SRST_OTP_USER 152 +#define SRST_DDRCTRL_A 153 +#define SRST_DDRSTDY_P 154 +#define SRST_DDRSTDY 155 +#define SRST_PDM_H 156 +#define SRST_PDM 157 +#define SRST_USB3PHY_OTG_P 158 +#define SRST_USB3PHY_PIPE_P 159 + +#define SRST_VCODEC_A 160 +#define SRST_VCODEC_NIU_A 161 +#define SRST_VCODEC_H 162 +#define SRST_VCODEC_NIU_H 163 +#define SRST_VDEC_A 164 +#define SRST_VDEC_NIU_A 165 +#define SRST_VDEC_H 166 +#define SRST_VDEC_NIU_H 167 +#define 
SRST_VDEC_CORE 168 +#define SRST_VDEC_CABAC 169 +#define SRST_DDRPHYDIV 175 + +#define SRST_RKVENC_NIU_A 176 +#define SRST_RKVENC_NIU_H 177 +#define SRST_RKVENC_H265_A 178 +#define SRST_RKVENC_H265_P 179 +#define SRST_RKVENC_H265_CORE 180 +#define SRST_RKVENC_H265_DSP 181 +#define SRST_RKVENC_H264_A 182 +#define SRST_RKVENC_H264_H 183 +#define SRST_RKVENC_INTMEM 184 + +#endif diff --git a/include/dt-bindings/clock/ste-ab8500.h b/include/dt-bindings/clock/ste-ab8500.h new file mode 100644 index 000000000000..6731f1f00a84 --- /dev/null +++ b/include/dt-bindings/clock/ste-ab8500.h @@ -0,0 +1,11 @@ +#ifndef __STE_CLK_AB8500_H__ +#define __STE_CLK_AB8500_H__ + +#define AB8500_SYSCLK_BUF2 0 +#define AB8500_SYSCLK_BUF3 1 +#define AB8500_SYSCLK_BUF4 2 +#define AB8500_SYSCLK_ULP 3 +#define AB8500_SYSCLK_INT 4 +#define AB8500_SYSCLK_AUDIO 5 + +#endif diff --git a/include/dt-bindings/clock/stm32fx-clock.h b/include/dt-bindings/clock/stm32fx-clock.h new file mode 100644 index 000000000000..49bb3c203e5c --- /dev/null +++ b/include/dt-bindings/clock/stm32fx-clock.h @@ -0,0 +1,59 @@ +/* + * stm32fx-clock.h + * + * Copyright (C) 2016 STMicroelectronics + * Author: Gabriel Fernandez for STMicroelectronics. + * License terms: GNU General Public License (GPL), version 2 + */ + +/* + * List of clocks which are not derived from system clock (SYSCLOCK) + * + * The index of these clocks is the secondary index of DT bindings + * (see Documentation/devicetree/bindings/clock/st,stm32-rcc.txt) + * + * e.g: + <assigned-clocks = <&rcc 1 CLK_LSE>; +*/ + +#ifndef _DT_BINDINGS_CLK_STMFX_H +#define _DT_BINDINGS_CLK_STMFX_H + +#define SYSTICK 0 +#define FCLK 1 +#define CLK_LSI 2 +#define CLK_LSE 3 +#define CLK_HSE_RTC 4 +#define CLK_RTC 5 +#define PLL_VCO_I2S 6 +#define PLL_VCO_SAI 7 +#define CLK_LCD 8 +#define CLK_I2S 9 +#define CLK_SAI1 10 +#define CLK_SAI2 11 +#define CLK_I2SQ_PDIV 12 +#define CLK_SAIQ_PDIV 13 + +#define END_PRIMARY_CLK 14 + +#define CLK_HSI 14 +#define CLK_SYSCLK 15 +#define CLK_HDMI_CEC 16 +#define CLK_SPDIF 17 +#define CLK_USART1 18 +#define CLK_USART2 19 +#define CLK_USART3 20 +#define CLK_UART4 21 +#define CLK_UART5 22 +#define CLK_USART6 23 +#define CLK_UART7 24 +#define CLK_UART8 25 +#define CLK_I2C1 26 +#define CLK_I2C2 27 +#define CLK_I2C3 28 +#define CLK_I2C4 29 +#define CLK_LPTIMER 30 + +#define END_PRIMARY_CLK_F7 31 + +#endif diff --git a/include/dt-bindings/clock/sun5i-ccu.h b/include/dt-bindings/clock/sun5i-ccu.h new file mode 100644 index 000000000000..aeb2e2f781fb --- /dev/null +++ b/include/dt-bindings/clock/sun5i-ccu.h @@ -0,0 +1,103 @@ +/* + * Copyright 2016 Maxime Ripard + * + * Maxime Ripard <maxime.ripard@free-electrons.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _DT_BINDINGS_CLK_SUN5I_H_ +#define _DT_BINDINGS_CLK_SUN5I_H_ + +#define CLK_HOSC 1 + +#define CLK_CPU 17 + +#define CLK_AHB_OTG 23 +#define CLK_AHB_EHCI 24 +#define CLK_AHB_OHCI 25 +#define CLK_AHB_SS 26 +#define CLK_AHB_DMA 27 +#define CLK_AHB_BIST 28 +#define CLK_AHB_MMC0 29 +#define CLK_AHB_MMC1 30 +#define CLK_AHB_MMC2 31 +#define CLK_AHB_NAND 32 +#define CLK_AHB_SDRAM 33 +#define CLK_AHB_EMAC 34 +#define CLK_AHB_TS 35 +#define CLK_AHB_SPI0 36 +#define CLK_AHB_SPI1 37 +#define CLK_AHB_SPI2 38 +#define CLK_AHB_GPS 39 +#define CLK_AHB_HSTIMER 40 +#define CLK_AHB_VE 41 +#define CLK_AHB_TVE 42 +#define CLK_AHB_LCD 43 +#define CLK_AHB_CSI 44 +#define CLK_AHB_HDMI 45 +#define CLK_AHB_DE_BE 46 +#define CLK_AHB_DE_FE 47 +#define CLK_AHB_IEP 48 +#define CLK_AHB_GPU 49 +#define CLK_APB0_CODEC 50 +#define CLK_APB0_SPDIF 51 +#define CLK_APB0_I2S 52 +#define CLK_APB0_PIO 53 +#define CLK_APB0_IR 54 +#define CLK_APB0_KEYPAD 55 +#define CLK_APB1_I2C0 56 +#define CLK_APB1_I2C1 57 +#define CLK_APB1_I2C2 58 +#define CLK_APB1_UART0 59 +#define CLK_APB1_UART1 60 +#define CLK_APB1_UART2 61 +#define CLK_APB1_UART3 62 +#define CLK_NAND 63 +#define CLK_MMC0 64 +#define CLK_MMC1 65 +#define CLK_MMC2 66 +#define CLK_TS 67 +#define CLK_SS 68 +#define CLK_SPI0 69 +#define CLK_SPI1 70 +#define CLK_SPI2 71 +#define CLK_IR 72 +#define CLK_I2S 73 +#define CLK_SPDIF 74 +#define CLK_KEYPAD 75 +#define CLK_USB_OHCI 76 +#define CLK_USB_PHY0 77 +#define CLK_USB_PHY1 78 +#define CLK_GPS 79 +#define CLK_DRAM_VE 80 +#define CLK_DRAM_CSI 81 +#define CLK_DRAM_TS 82 +#define CLK_DRAM_TVE 83 +#define CLK_DRAM_DE_FE 84 +#define CLK_DRAM_DE_BE 85 +#define CLK_DRAM_ACE 86 +#define CLK_DRAM_IEP 87 +#define CLK_DE_BE 88 +#define CLK_DE_FE 89 +#define CLK_TCON_CH0 90 + +#define CLK_TCON_CH1 92 +#define CLK_CSI 93 +#define CLK_VE 94 +#define CLK_CODEC 95 +#define CLK_AVS 96 +#define CLK_HDMI 97 +#define CLK_GPU 98 + +#define CLK_IEP 100 + +#endif /* _DT_BINDINGS_CLK_SUN5I_H_ */ diff --git a/include/dt-bindings/clock/sun8i-v3s-ccu.h b/include/dt-bindings/clock/sun8i-v3s-ccu.h new file mode 100644 index 000000000000..c0d5d5599c87 --- /dev/null +++ b/include/dt-bindings/clock/sun8i-v3s-ccu.h @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2016 Icenowy Zheng <icenowy@aosc.xyz> + * + * Based on sun8i-h3-ccu.h, which is: + * Copyright (C) 2016 Maxime Ripard <maxime.ripard@free-electrons.com> + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_CLK_SUN8I_V3S_H_ +#define _DT_BINDINGS_CLK_SUN8I_V3S_H_ + +#define CLK_CPU 14 + +#define CLK_BUS_CE 20 +#define CLK_BUS_DMA 21 +#define CLK_BUS_MMC0 22 +#define CLK_BUS_MMC1 23 +#define CLK_BUS_MMC2 24 +#define CLK_BUS_DRAM 25 +#define CLK_BUS_EMAC 26 +#define CLK_BUS_HSTIMER 27 +#define CLK_BUS_SPI0 28 +#define CLK_BUS_OTG 29 +#define CLK_BUS_EHCI0 30 +#define CLK_BUS_OHCI0 31 +#define CLK_BUS_VE 32 +#define CLK_BUS_TCON0 33 +#define CLK_BUS_CSI 34 +#define CLK_BUS_DE 35 +#define CLK_BUS_CODEC 36 +#define CLK_BUS_PIO 37 +#define CLK_BUS_I2C0 38 +#define CLK_BUS_I2C1 39 +#define CLK_BUS_UART0 40 +#define CLK_BUS_UART1 41 +#define CLK_BUS_UART2 42 +#define CLK_BUS_EPHY 43 +#define CLK_BUS_DBG 44 + +#define CLK_MMC0 45 +#define CLK_MMC0_SAMPLE 46 +#define CLK_MMC0_OUTPUT 47 +#define CLK_MMC1 48 +#define CLK_MMC1_SAMPLE 49 +#define CLK_MMC1_OUTPUT 50 +#define CLK_MMC2 51 +#define CLK_MMC2_SAMPLE 52 +#define CLK_MMC2_OUTPUT 53 +#define CLK_CE 54 +#define CLK_SPI0 55 +#define CLK_USB_PHY0 56 +#define CLK_USB_OHCI0 57 + +#define CLK_DRAM_VE 59 +#define CLK_DRAM_CSI 60 +#define CLK_DRAM_EHCI 61 +#define CLK_DRAM_OHCI 62 +#define CLK_DE 63 +#define CLK_TCON0 64 +#define CLK_CSI_MISC 65 +#define CLK_CSI0_MCLK 66 +#define CLK_CSI1_SCLK 67 +#define CLK_CSI1_MCLK 68 +#define CLK_VE 69 +#define CLK_AC_DIG 70 +#define CLK_AVS 71 + +#define CLK_MIPI_CSI 73 + +#endif /* _DT_BINDINGS_CLK_SUN8I_V3S_H_ */ diff --git a/include/dt-bindings/clock/sun9i-a80-ccu.h b/include/dt-bindings/clock/sun9i-a80-ccu.h new file mode 100644 index 000000000000..6ea1492a73a6 --- /dev/null +++ b/include/dt-bindings/clock/sun9i-a80-ccu.h @@ -0,0 +1,162 @@ +/* + * Copyright (C) 2016 Chen-Yu Tsai <wens@csie.org> + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_CLOCK_SUN9I_A80_CCU_H_ +#define _DT_BINDINGS_CLOCK_SUN9I_A80_CCU_H_ + +#define CLK_PLL_AUDIO 2 +#define CLK_PLL_PERIPH0 3 + +#define CLK_C0CPUX 12 +#define CLK_C1CPUX 13 + +#define CLK_OUT_A 27 +#define CLK_OUT_B 28 + +#define CLK_NAND0_0 29 +#define CLK_NAND0_1 30 +#define CLK_NAND1_0 31 +#define CLK_NAND1_1 32 +#define CLK_MMC0 33 +#define CLK_MMC0_SAMPLE 34 +#define CLK_MMC0_OUTPUT 35 +#define CLK_MMC1 36 +#define CLK_MMC1_SAMPLE 37 +#define CLK_MMC1_OUTPUT 38 +#define CLK_MMC2 39 +#define CLK_MMC2_SAMPLE 40 +#define CLK_MMC2_OUTPUT 41 +#define CLK_MMC3 42 +#define CLK_MMC3_SAMPLE 43 +#define CLK_MMC3_OUTPUT 44 +#define CLK_TS 45 +#define CLK_SS 46 +#define CLK_SPI0 47 +#define CLK_SPI1 48 +#define CLK_SPI2 49 +#define CLK_SPI3 50 +#define CLK_I2S0 51 +#define CLK_I2S1 52 +#define CLK_SPDIF 53 +#define CLK_SDRAM 54 +#define CLK_DE 55 +#define CLK_EDP 56 +#define CLK_MP 57 +#define CLK_LCD0 58 +#define CLK_LCD1 59 +#define CLK_MIPI_DSI0 60 +#define CLK_MIPI_DSI1 61 +#define CLK_HDMI 62 +#define CLK_HDMI_SLOW 63 +#define CLK_MIPI_CSI 64 +#define CLK_CSI_ISP 65 +#define CLK_CSI_MISC 66 +#define CLK_CSI0_MCLK 67 +#define CLK_CSI1_MCLK 68 +#define CLK_FD 69 +#define CLK_VE 70 +#define CLK_AVS 71 +#define CLK_GPU_CORE 72 +#define CLK_GPU_MEMORY 73 +#define CLK_GPU_AXI 74 +#define CLK_SATA 75 +#define CLK_AC97 76 +#define CLK_MIPI_HSI 77 +#define CLK_GPADC 78 +#define CLK_CIR_TX 79 + +#define CLK_BUS_FD 80 +#define CLK_BUS_VE 81 +#define CLK_BUS_GPU_CTRL 82 +#define CLK_BUS_SS 83 +#define CLK_BUS_MMC 84 +#define CLK_BUS_NAND0 85 +#define CLK_BUS_NAND1 86 +#define CLK_BUS_SDRAM 87 +#define CLK_BUS_MIPI_HSI 88 +#define CLK_BUS_SATA 89 +#define CLK_BUS_TS 90 +#define CLK_BUS_SPI0 91 +#define CLK_BUS_SPI1 92 +#define CLK_BUS_SPI2 93 +#define CLK_BUS_SPI3 94 + +#define CLK_BUS_OTG 95 +#define CLK_BUS_USB 96 +#define CLK_BUS_GMAC 97 +#define CLK_BUS_MSGBOX 98 +#define CLK_BUS_SPINLOCK 99 +#define CLK_BUS_HSTIMER 100 +#define CLK_BUS_DMA 101 + +#define CLK_BUS_LCD0 102 +#define CLK_BUS_LCD1 103 +#define CLK_BUS_EDP 104 +#define CLK_BUS_CSI 105 +#define CLK_BUS_HDMI 106 +#define CLK_BUS_DE 107 +#define CLK_BUS_MP 108 +#define CLK_BUS_MIPI_DSI 109 + +#define CLK_BUS_SPDIF 110 +#define CLK_BUS_PIO 111 +#define CLK_BUS_AC97 112 +#define CLK_BUS_I2S0 113 +#define CLK_BUS_I2S1 114 +#define CLK_BUS_LRADC 115 
+#define CLK_BUS_GPADC 116 +#define CLK_BUS_TWD 117 +#define CLK_BUS_CIR_TX 118 + +#define CLK_BUS_I2C0 119 +#define CLK_BUS_I2C1 120 +#define CLK_BUS_I2C2 121 +#define CLK_BUS_I2C3 122 +#define CLK_BUS_I2C4 123 +#define CLK_BUS_UART0 124 +#define CLK_BUS_UART1 125 +#define CLK_BUS_UART2 126 +#define CLK_BUS_UART3 127 +#define CLK_BUS_UART4 128 +#define CLK_BUS_UART5 129 + +#endif /* _DT_BINDINGS_CLOCK_SUN9I_A80_CCU_H_ */ diff --git a/include/dt-bindings/clock/sun9i-a80-de.h b/include/dt-bindings/clock/sun9i-a80-de.h new file mode 100644 index 000000000000..3dad6c3cd131 --- /dev/null +++ b/include/dt-bindings/clock/sun9i-a80-de.h @@ -0,0 +1,80 @@ +/* + * Copyright (C) 2016 Chen-Yu Tsai <wens@csie.org> + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _DT_BINDINGS_CLOCK_SUN9I_A80_DE_H_ +#define _DT_BINDINGS_CLOCK_SUN9I_A80_DE_H_ + +#define CLK_FE0 0 +#define CLK_FE1 1 +#define CLK_FE2 2 +#define CLK_IEP_DEU0 3 +#define CLK_IEP_DEU1 4 +#define CLK_BE0 5 +#define CLK_BE1 6 +#define CLK_BE2 7 +#define CLK_IEP_DRC0 8 +#define CLK_IEP_DRC1 9 +#define CLK_MERGE 10 + +#define CLK_DRAM_FE0 11 +#define CLK_DRAM_FE1 12 +#define CLK_DRAM_FE2 13 +#define CLK_DRAM_DEU0 14 +#define CLK_DRAM_DEU1 15 +#define CLK_DRAM_BE0 16 +#define CLK_DRAM_BE1 17 +#define CLK_DRAM_BE2 18 +#define CLK_DRAM_DRC0 19 +#define CLK_DRAM_DRC1 20 + +#define CLK_BUS_FE0 21 +#define CLK_BUS_FE1 22 +#define CLK_BUS_FE2 23 +#define CLK_BUS_DEU0 24 +#define CLK_BUS_DEU1 25 +#define CLK_BUS_BE0 26 +#define CLK_BUS_BE1 27 +#define CLK_BUS_BE2 28 +#define CLK_BUS_DRC0 29 +#define CLK_BUS_DRC1 30 + +#endif /* _DT_BINDINGS_CLOCK_SUN9I_A80_DE_H_ */ diff --git a/include/dt-bindings/clock/sun9i-a80-usb.h b/include/dt-bindings/clock/sun9i-a80-usb.h new file mode 100644 index 000000000000..783a60d2ccea --- /dev/null +++ b/include/dt-bindings/clock/sun9i-a80-usb.h @@ -0,0 +1,59 @@ +/* + * Copyright (C) 2016 Chen-Yu Tsai <wens@csie.org> + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _DT_BINDINGS_CLOCK_SUN9I_A80_USB_H_ +#define _DT_BINDINGS_CLOCK_SUN9I_A80_USB_H_ + +#define CLK_BUS_HCI0 0 +#define CLK_USB_OHCI0 1 +#define CLK_BUS_HCI1 2 +#define CLK_BUS_HCI2 3 +#define CLK_USB_OHCI2 4 + +#define CLK_USB0_PHY 5 +#define CLK_USB1_HSIC 6 +#define CLK_USB1_PHY 7 +#define CLK_USB2_HSIC 8 +#define CLK_USB2_PHY 9 +#define CLK_USB_HSIC 10 + +#endif /* _DT_BINDINGS_CLOCK_SUN9I_A80_USB_H_ */ diff --git a/include/dt-bindings/mfd/stm32f4-rcc.h b/include/dt-bindings/mfd/stm32f4-rcc.h index e98942dc0d44..082a81c94298 100644 --- a/include/dt-bindings/mfd/stm32f4-rcc.h +++ b/include/dt-bindings/mfd/stm32f4-rcc.h @@ -18,14 +18,20 @@ #define STM32F4_RCC_AHB1_GPIOJ 9 #define STM32F4_RCC_AHB1_GPIOK 10 #define STM32F4_RCC_AHB1_CRC 12 +#define STM32F4_RCC_AHB1_BKPSRAM 18 +#define STM32F4_RCC_AHB1_CCMDATARAM 20 #define STM32F4_RCC_AHB1_DMA1 21 #define STM32F4_RCC_AHB1_DMA2 22 #define STM32F4_RCC_AHB1_DMA2D 23 #define STM32F4_RCC_AHB1_ETHMAC 25 -#define STM32F4_RCC_AHB1_OTGHS 29 +#define STM32F4_RCC_AHB1_ETHMACTX 26 +#define STM32F4_RCC_AHB1_ETHMACRX 27 +#define STM32F4_RCC_AHB1_ETHMACPTP 28 +#define STM32F4_RCC_AHB1_OTGHS 29 +#define STM32F4_RCC_AHB1_OTGHSULPI 30 #define STM32F4_AHB1_RESET(bit) (STM32F4_RCC_AHB1_##bit + (0x10 * 8)) -#define STM32F4_AHB1_CLOCK(bit) (STM32F4_RCC_AHB1_##bit + (0x30 * 8)) +#define STM32F4_AHB1_CLOCK(bit) (STM32F4_RCC_AHB1_##bit) /* AHB2 */ @@ -36,13 +42,14 @@ #define STM32F4_RCC_AHB2_OTGFS 7 #define STM32F4_AHB2_RESET(bit) (STM32F4_RCC_AHB2_##bit + (0x14 * 8)) -#define STM32F4_AHB2_CLOCK(bit) (STM32F4_RCC_AHB2_##bit + (0x34 * 8)) +#define STM32F4_AHB2_CLOCK(bit) (STM32F4_RCC_AHB2_##bit + 0x20) /* AHB3 */ #define STM32F4_RCC_AHB3_FMC 0 +#define STM32F4_RCC_AHB3_QSPI 1 #define STM32F4_AHB3_RESET(bit) (STM32F4_RCC_AHB3_##bit + (0x18 * 8)) -#define STM32F4_AHB3_CLOCK(bit) (STM32F4_RCC_AHB3_##bit + (0x38 * 8)) +#define STM32F4_AHB3_CLOCK(bit) (STM32F4_RCC_AHB3_##bit + 0x40) /* APB1 */ #define STM32F4_RCC_APB1_TIM2 0 @@ -72,14 +79,16 @@ #define STM32F4_RCC_APB1_UART8 31 #define STM32F4_APB1_RESET(bit) (STM32F4_RCC_APB1_##bit + (0x20 * 8)) -#define STM32F4_APB1_CLOCK(bit) (STM32F4_RCC_APB1_##bit + (0x40 * 8)) +#define STM32F4_APB1_CLOCK(bit) (STM32F4_RCC_APB1_##bit + 0x80) /* APB2 */ #define STM32F4_RCC_APB2_TIM1 0 #define STM32F4_RCC_APB2_TIM8 1 #define STM32F4_RCC_APB2_USART1 4 #define STM32F4_RCC_APB2_USART6 5 -#define STM32F4_RCC_APB2_ADC 8 +#define STM32F4_RCC_APB2_ADC1 8 +#define STM32F4_RCC_APB2_ADC2 9 +#define STM32F4_RCC_APB2_ADC3 10 #define STM32F4_RCC_APB2_SDIO 11 #define STM32F4_RCC_APB2_SPI1 12 #define STM32F4_RCC_APB2_SPI4 13 @@ -91,8 +100,9 @@ #define STM32F4_RCC_APB2_SPI6 21 #define STM32F4_RCC_APB2_SAI1 22 #define STM32F4_RCC_APB2_LTDC 26 +#define STM32F4_RCC_APB2_DSI 27 #define STM32F4_APB2_RESET(bit) (STM32F4_RCC_APB2_##bit + (0x24 * 8)) -#define STM32F4_APB2_CLOCK(bit) (STM32F4_RCC_APB2_##bit + (0x44 * 8)) +#define STM32F4_APB2_CLOCK(bit) (STM32F4_RCC_APB2_##bit + 0xA0) #endif /* _DT_BINDINGS_MFD_STM32F4_RCC_H */ diff --git a/include/dt-bindings/net/mscc-phy-vsc8531.h b/include/dt-bindings/net/mscc-phy-vsc8531.h new file mode 100644 index 000000000000..697161f80eb5 --- /dev/null +++ b/include/dt-bindings/net/mscc-phy-vsc8531.h @@ -0,0 +1,29 @@ +/* + * Device Tree constants for Microsemi VSC8531 PHY + * + * Author: Nagaraju Lakkaraju + * + * License: Dual MIT/GPL + * Copyright (c) 2017 Microsemi Corporation + */ + +#ifndef _DT_BINDINGS_MSCC_VSC8531_H +#define _DT_BINDINGS_MSCC_VSC8531_H + +/* PHY LED Modes */ +#define 
VSC8531_LINK_ACTIVITY 0 +#define VSC8531_LINK_1000_ACTIVITY 1 +#define VSC8531_LINK_100_ACTIVITY 2 +#define VSC8531_LINK_10_ACTIVITY 3 +#define VSC8531_LINK_100_1000_ACTIVITY 4 +#define VSC8531_LINK_10_1000_ACTIVITY 5 +#define VSC8531_LINK_10_100_ACTIVITY 6 +#define VSC8531_DUPLEX_COLLISION 8 +#define VSC8531_COLLISION 9 +#define VSC8531_ACTIVITY 10 +#define VSC8531_AUTONEG_FAULT 12 +#define VSC8531_SERIAL_MODE 13 +#define VSC8531_FORCE_LED_OFF 14 +#define VSC8531_FORCE_LED_ON 15 + +#endif diff --git a/include/dt-bindings/pinctrl/omap.h b/include/dt-bindings/pinctrl/omap.h index effadd05695b..fbd6f7202476 100644 --- a/include/dt-bindings/pinctrl/omap.h +++ b/include/dt-bindings/pinctrl/omap.h @@ -45,8 +45,8 @@ #define PIN_OFF_NONE 0 #define PIN_OFF_OUTPUT_HIGH (OFF_EN | OFFOUT_EN | OFFOUT_VAL) #define PIN_OFF_OUTPUT_LOW (OFF_EN | OFFOUT_EN) -#define PIN_OFF_INPUT_PULLUP (OFF_EN | OFF_PULL_EN | OFF_PULL_UP) -#define PIN_OFF_INPUT_PULLDOWN (OFF_EN | OFF_PULL_EN) +#define PIN_OFF_INPUT_PULLUP (OFF_EN | OFFOUT_EN | OFF_PULL_EN | OFF_PULL_UP) +#define PIN_OFF_INPUT_PULLDOWN (OFF_EN | OFFOUT_EN | OFF_PULL_EN) #define PIN_OFF_WAKEUPENABLE WAKEUP_EN /* diff --git a/include/dt-bindings/pinctrl/samsung.h b/include/dt-bindings/pinctrl/samsung.h index 6276eb785e2b..b7aa3646208b 100644 --- a/include/dt-bindings/pinctrl/samsung.h +++ b/include/dt-bindings/pinctrl/samsung.h @@ -45,6 +45,20 @@ #define EXYNOS5420_PIN_DRV_LV3 2 #define EXYNOS5420_PIN_DRV_LV4 3 +/* Drive strengths for Exynos5433 */ +#define EXYNOS5433_PIN_DRV_FAST_SR1 0 +#define EXYNOS5433_PIN_DRV_FAST_SR2 1 +#define EXYNOS5433_PIN_DRV_FAST_SR3 2 +#define EXYNOS5433_PIN_DRV_FAST_SR4 3 +#define EXYNOS5433_PIN_DRV_FAST_SR5 4 +#define EXYNOS5433_PIN_DRV_FAST_SR6 5 +#define EXYNOS5433_PIN_DRV_SLOW_SR1 8 +#define EXYNOS5433_PIN_DRV_SLOW_SR2 9 +#define EXYNOS5433_PIN_DRV_SLOW_SR3 0xa +#define EXYNOS5433_PIN_DRV_SLOW_SR4 0xb +#define EXYNOS5433_PIN_DRV_SLOW_SR5 0xc +#define EXYNOS5433_PIN_DRV_SLOW_SR6 0xf + #define EXYNOS_PIN_FUNC_INPUT 0 #define EXYNOS_PIN_FUNC_OUTPUT 1 #define EXYNOS_PIN_FUNC_2 2 @@ -54,4 +68,12 @@ #define EXYNOS_PIN_FUNC_6 6 #define EXYNOS_PIN_FUNC_F 0xf +/* Drive strengths for Exynos7 FSYS1 block */ +#define EXYNOS7_FSYS1_PIN_DRV_LV1 0 +#define EXYNOS7_FSYS1_PIN_DRV_LV2 4 +#define EXYNOS7_FSYS1_PIN_DRV_LV3 2 +#define EXYNOS7_FSYS1_PIN_DRV_LV4 6 +#define EXYNOS7_FSYS1_PIN_DRV_LV5 1 +#define EXYNOS7_FSYS1_PIN_DRV_LV6 5 + #endif /* __DT_BINDINGS_PINCTRL_SAMSUNG_H__ */ diff --git a/include/dt-bindings/power/rk3328-power.h b/include/dt-bindings/power/rk3328-power.h new file mode 100644 index 000000000000..10c3c3715334 --- /dev/null +++ b/include/dt-bindings/power/rk3328-power.h @@ -0,0 +1,18 @@ +#ifndef __DT_BINDINGS_POWER_RK3328_POWER_H__ +#define __DT_BINDINGS_POWER_RK3328_POWER_H__ + +/** + * RK3328 idle id Summary. 
+ */ +#define RK3328_PD_CORE 0 +#define RK3328_PD_GPU 1 +#define RK3328_PD_BUS 2 +#define RK3328_PD_MSCH 3 +#define RK3328_PD_PERI 4 +#define RK3328_PD_VIDEO 5 +#define RK3328_PD_HEVC 6 +#define RK3328_PD_SYS 7 +#define RK3328_PD_VPU 8 +#define RK3328_PD_VIO 9 + +#endif diff --git a/include/dt-bindings/reset/sun5i-ccu.h b/include/dt-bindings/reset/sun5i-ccu.h new file mode 100644 index 000000000000..c2b9726b5026 --- /dev/null +++ b/include/dt-bindings/reset/sun5i-ccu.h @@ -0,0 +1,32 @@ +/* + * Copyright 2016 Maxime Ripard + * + * Maxime Ripard <maxime.ripard@free-electrons.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _RST_SUN5I_H_ +#define _RST_SUN5I_H_ + +#define RST_USB_PHY0 0 +#define RST_USB_PHY1 1 +#define RST_GPS 2 +#define RST_DE_BE 3 +#define RST_DE_FE 4 +#define RST_TVE 5 +#define RST_LCD 6 +#define RST_CSI 7 +#define RST_VE 8 +#define RST_GPU 9 +#define RST_IEP 10 + +#endif /* _RST_SUN5I_H_ */ diff --git a/include/dt-bindings/reset/sun8i-v3s-ccu.h b/include/dt-bindings/reset/sun8i-v3s-ccu.h new file mode 100644 index 000000000000..b58ef21a2e18 --- /dev/null +++ b/include/dt-bindings/reset/sun8i-v3s-ccu.h @@ -0,0 +1,78 @@ +/* + * Copyright (C) 2016 Icenowy Zheng <icenowy@aosc.xyz> + * + * Based on sun8i-h3-ccu.h, which is + * Copyright (C) 2016 Maxime Ripard <maxime.ripard@free-electrons.com> + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_RST_SUN8I_V3S_H_ +#define _DT_BINDINGS_RST_SUN8I_V3S_H_ + +#define RST_USB_PHY0 0 + +#define RST_MBUS 1 + +#define RST_BUS_CE 5 +#define RST_BUS_DMA 6 +#define RST_BUS_MMC0 7 +#define RST_BUS_MMC1 8 +#define RST_BUS_MMC2 9 +#define RST_BUS_DRAM 11 +#define RST_BUS_EMAC 12 +#define RST_BUS_HSTIMER 14 +#define RST_BUS_SPI0 15 +#define RST_BUS_OTG 17 +#define RST_BUS_EHCI0 18 +#define RST_BUS_OHCI0 22 +#define RST_BUS_VE 26 +#define RST_BUS_TCON0 27 +#define RST_BUS_CSI 30 +#define RST_BUS_DE 34 +#define RST_BUS_DBG 38 +#define RST_BUS_EPHY 39 +#define RST_BUS_CODEC 40 +#define RST_BUS_I2C0 46 +#define RST_BUS_I2C1 47 +#define RST_BUS_UART0 49 +#define RST_BUS_UART1 50 +#define RST_BUS_UART2 51 + +#endif /* _DT_BINDINGS_RST_SUN8I_V3S_H_ */ diff --git a/include/dt-bindings/reset/sun9i-a80-ccu.h b/include/dt-bindings/reset/sun9i-a80-ccu.h new file mode 100644 index 000000000000..4b8df4b36788 --- /dev/null +++ b/include/dt-bindings/reset/sun9i-a80-ccu.h @@ -0,0 +1,102 @@ +/* + * Copyright (C) 2016 Chen-Yu Tsai <wens@csie.org> + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _DT_BINDINGS_RESET_SUN9I_A80_CCU_H_ +#define _DT_BINDINGS_RESET_SUN9I_A80_CCU_H_ + +#define RST_BUS_FD 0 +#define RST_BUS_VE 1 +#define RST_BUS_GPU_CTRL 2 +#define RST_BUS_SS 3 +#define RST_BUS_MMC 4 +#define RST_BUS_NAND0 5 +#define RST_BUS_NAND1 6 +#define RST_BUS_SDRAM 7 +#define RST_BUS_SATA 8 +#define RST_BUS_TS 9 +#define RST_BUS_SPI0 10 +#define RST_BUS_SPI1 11 +#define RST_BUS_SPI2 12 +#define RST_BUS_SPI3 13 + +#define RST_BUS_OTG 14 +#define RST_BUS_OTG_PHY 15 +#define RST_BUS_MIPI_HSI 16 +#define RST_BUS_GMAC 17 +#define RST_BUS_MSGBOX 18 +#define RST_BUS_SPINLOCK 19 +#define RST_BUS_HSTIMER 20 +#define RST_BUS_DMA 21 + +#define RST_BUS_LCD0 22 +#define RST_BUS_LCD1 23 +#define RST_BUS_EDP 24 +#define RST_BUS_LVDS 25 +#define RST_BUS_CSI 26 +#define RST_BUS_HDMI0 27 +#define RST_BUS_HDMI1 28 +#define RST_BUS_DE 29 +#define RST_BUS_MP 30 +#define RST_BUS_GPU 31 +#define RST_BUS_MIPI_DSI 32 + +#define RST_BUS_SPDIF 33 +#define RST_BUS_AC97 34 +#define RST_BUS_I2S0 35 +#define RST_BUS_I2S1 36 +#define RST_BUS_LRADC 37 +#define RST_BUS_GPADC 38 +#define RST_BUS_CIR_TX 39 + +#define RST_BUS_I2C0 40 +#define RST_BUS_I2C1 41 +#define RST_BUS_I2C2 42 +#define RST_BUS_I2C3 43 +#define RST_BUS_I2C4 44 +#define RST_BUS_UART0 45 +#define RST_BUS_UART1 46 +#define RST_BUS_UART2 47 +#define RST_BUS_UART3 48 +#define RST_BUS_UART4 49 +#define RST_BUS_UART5 50 + +#endif /* _DT_BINDINGS_RESET_SUN9I_A80_CCU_H_ */ diff --git a/include/dt-bindings/reset/sun9i-a80-de.h b/include/dt-bindings/reset/sun9i-a80-de.h new file mode 100644 index 000000000000..205072770171 --- /dev/null +++ b/include/dt-bindings/reset/sun9i-a80-de.h @@ -0,0 +1,58 @@ +/* + * Copyright (C) 2016 Chen-Yu Tsai <wens@csie.org> + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_RESET_SUN9I_A80_DE_H_ +#define _DT_BINDINGS_RESET_SUN9I_A80_DE_H_ + +#define RST_FE0 0 +#define RST_FE1 1 +#define RST_FE2 2 +#define RST_DEU0 3 +#define RST_DEU1 4 +#define RST_BE0 5 +#define RST_BE1 6 +#define RST_BE2 7 +#define RST_DRC0 8 +#define RST_DRC1 9 +#define RST_MERGE 10 + +#endif /* _DT_BINDINGS_RESET_SUN9I_A80_DE_H_ */ diff --git a/include/dt-bindings/reset/sun9i-a80-usb.h b/include/dt-bindings/reset/sun9i-a80-usb.h new file mode 100644 index 000000000000..ee492864c2aa --- /dev/null +++ b/include/dt-bindings/reset/sun9i-a80-usb.h @@ -0,0 +1,56 @@ +/* + * Copyright (C) 2016 Chen-Yu Tsai <wens@csie.org> + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_RESET_SUN9I_A80_USB_H_ +#define _DT_BINDINGS_RESET_SUN9I_A80_USB_H_ + +#define RST_USB0_HCI 0 +#define RST_USB1_HCI 1 +#define RST_USB2_HCI 2 + +#define RST_USB0_PHY 3 +#define RST_USB1_HSIC 4 +#define RST_USB1_PHY 5 +#define RST_USB2_HSIC 6 +#define RST_USB2_PHY 7 + +#endif /* _DT_BINDINGS_RESET_SUN9I_A80_USB_H_ */ diff --git a/include/dt-bindings/soc/zte,pm_domains.h b/include/dt-bindings/soc/zte,pm_domains.h new file mode 100644 index 000000000000..a0b4019c8e01 --- /dev/null +++ b/include/dt-bindings/soc/zte,pm_domains.h @@ -0,0 +1,24 @@ +/* + * Copyright (C) 2017 Linaro Ltd. 
+ * + * Author: Baoyou Xie <baoyou.xie@linaro.org> + * License terms: GNU General Public License (GPL) version 2 + */ + +#ifndef _DT_BINDINGS_SOC_ZTE_PM_DOMAINS_H +#define _DT_BINDINGS_SOC_ZTE_PM_DOMAINS_H + +#define DM_ZX296718_SAPPU 0 +#define DM_ZX296718_VDE 1 /* g1v6 */ +#define DM_ZX296718_VCE 2 /* h1v6 */ +#define DM_ZX296718_HDE 3 /* g2v2 */ +#define DM_ZX296718_VIU 4 +#define DM_ZX296718_USB20 5 +#define DM_ZX296718_USB21 6 +#define DM_ZX296718_USB30 7 +#define DM_ZX296718_HSIC 8 +#define DM_ZX296718_GMAC 9 +#define DM_ZX296718_TS 10 +#define DM_ZX296718_VOU 11 + +#endif /* _DT_BINDINGS_SOC_ZTE_PM_DOMAINS_H */ diff --git a/include/dt-bindings/sound/cs42l42.h b/include/dt-bindings/sound/cs42l42.h index 399a123aed58..db69d84ed7d1 100644 --- a/include/dt-bindings/sound/cs42l42.h +++ b/include/dt-bindings/sound/cs42l42.h @@ -20,7 +20,7 @@ #define CS42L42_HPOUT_LOAD_1NF 0 #define CS42L42_HPOUT_LOAD_10NF 1 -/* HPOUT Clamp to GND Overide */ +/* HPOUT Clamp to GND Override */ #define CS42L42_HPOUT_CLAMP_EN 0 #define CS42L42_HPOUT_CLAMP_DIS 1 diff --git a/include/keys/user-type.h b/include/keys/user-type.h index c56fef40f53e..e098cbe27db5 100644 --- a/include/keys/user-type.h +++ b/include/keys/user-type.h @@ -48,9 +48,14 @@ extern void user_describe(const struct key *user, struct seq_file *m); extern long user_read(const struct key *key, char __user *buffer, size_t buflen); -static inline const struct user_key_payload *user_key_payload(const struct key *key) +static inline const struct user_key_payload *user_key_payload_rcu(const struct key *key) { - return (struct user_key_payload *)rcu_dereference_key(key); + return (struct user_key_payload *)dereference_key_rcu(key); +} + +static inline struct user_key_payload *user_key_payload_locked(const struct key *key) +{ + return (struct user_key_payload *)dereference_key_locked((struct key *)key); } #endif /* CONFIG_KEYS */ diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h index 5c970ce67949..fe797d6ef89d 100644 --- a/include/kvm/arm_arch_timer.h +++ b/include/kvm/arm_arch_timer.h @@ -23,20 +23,24 @@ #include <linux/hrtimer.h> #include <linux/workqueue.h> -struct arch_timer_kvm { +struct arch_timer_context { + /* Registers: control register, timer value */ + u32 cnt_ctl; + u64 cnt_cval; + + /* Timer IRQ */ + struct kvm_irq_level irq; + + /* Active IRQ state caching */ + bool active_cleared_last; + /* Virtual offset */ u64 cntvoff; }; struct arch_timer_cpu { - /* Registers: control register, timer value */ - u32 cntv_ctl; /* Saved/restored */ - u64 cntv_cval; /* Saved/restored */ - - /* - * Anything that is not used directly from assembly code goes - * here. 
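(Illustrative sketch, not part of the patch: the two split accessors above correspond to the two locking rules — user_key_payload_rcu() for RCU read-side critical sections, user_key_payload_locked() when the key semaphore is held. `key` is a placeholder for a valid struct key pointer.)

	const struct user_key_payload *payload;

	rcu_read_lock();
	payload = user_key_payload_rcu(key);	/* caller holds the RCU read lock */
	/* ... use payload under the lock ... */
	rcu_read_unlock();

	down_read(&key->sem);
	payload = user_key_payload_locked(key);	/* caller holds key->sem */
	up_read(&key->sem);
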
- */ + struct arch_timer_context vtimer; + struct arch_timer_context ptimer; /* Background timer used when the guest is not running */ struct hrtimer timer; @@ -47,21 +51,15 @@ struct arch_timer_cpu { /* Background timer active */ bool armed; - /* Timer IRQ */ - struct kvm_irq_level irq; - - /* Active IRQ state caching */ - bool active_cleared_last; - /* Is the timer enabled */ bool enabled; }; int kvm_timer_hyp_init(void); int kvm_timer_enable(struct kvm_vcpu *vcpu); -void kvm_timer_init(struct kvm *kvm); int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu, - const struct kvm_irq_level *irq); + const struct kvm_irq_level *virt_irq, + const struct kvm_irq_level *phys_irq); void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu); void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu); void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu); @@ -70,11 +68,16 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu); u64 kvm_arm_timer_get_reg(struct kvm_vcpu *, u64 regid); int kvm_arm_timer_set_reg(struct kvm_vcpu *, u64 regid, u64 value); -bool kvm_timer_should_fire(struct kvm_vcpu *vcpu); +bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx); void kvm_timer_schedule(struct kvm_vcpu *vcpu); void kvm_timer_unschedule(struct kvm_vcpu *vcpu); +u64 kvm_phys_timer_read(void); + void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu); void kvm_timer_init_vhe(void); + +#define vcpu_vtimer(v) (&(v)->arch.timer_cpu.vtimer) +#define vcpu_ptimer(v) (&(v)->arch.timer_cpu.ptimer) #endif diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index 002f0922cd92..b72dd2ad5f44 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -71,6 +71,8 @@ struct vgic_global { /* GIC system register CPU interface */ struct static_key_false gicv3_cpuif; + + u32 ich_vtr_el2; }; extern struct vgic_global kvm_vgic_global_state; @@ -101,9 +103,10 @@ struct vgic_irq { */ u32 intid; /* Guest visible INTID */ - bool pending; bool line_level; /* Level only */ - bool soft_pending; /* Level only */ + bool pending_latch; /* The pending latch state used to calculate + * the pending state for both level + * and edge triggered IRQs. 
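(Editor's sketch, not from the patch: with the timer state split into per-context structures, callers address the virtual and physical timers explicitly through the new vcpu_vtimer()/vcpu_ptimer() accessors; the surrounding policy shown here is hypothetical.)

	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);

	/* kvm_timer_should_fire() now takes a context, not the vcpu */
	if (kvm_timer_should_fire(vtimer) || kvm_timer_should_fire(ptimer))
		kvm_timer_schedule(vcpu);
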
*/ bool active; /* not used for LPIs */ bool enabled; bool hw; /* Tied to HW IRQ */ @@ -165,6 +168,8 @@ struct vgic_its { struct list_head collection_list; }; +struct vgic_state_iter; + struct vgic_dist { bool in_kernel; bool ready; @@ -212,6 +217,9 @@ struct vgic_dist { spinlock_t lpi_list_lock; struct list_head lpi_list_head; int lpi_list_count; + + /* used by vgic-debug */ + struct vgic_state_iter *iter; }; struct vgic_v2_cpu_if { @@ -269,6 +277,12 @@ struct vgic_cpu { u64 pendbaser; bool lpis_enabled; + + /* Cache guest priority bits */ + u32 num_pri_bits; + + /* Cache guest interrupt ID bits */ + u32 num_id_bits; }; extern struct static_key_false vgic_v2_cpuif_trap; diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 673acda012af..9b05886f9773 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -287,18 +287,15 @@ static inline bool invalid_phys_cpuid(phys_cpuid_t phys_id) } /* Validate the processor object's proc_id */ -bool acpi_processor_validate_proc_id(int proc_id); +bool acpi_duplicate_processor_id(int proc_id); #ifdef CONFIG_ACPI_HOTPLUG_CPU /* Arch dependent functions for cpu hotplug support */ int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id, int *pcpu); int acpi_unmap_cpu(int cpu); -int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid); #endif /* CONFIG_ACPI_HOTPLUG_CPU */ -void acpi_set_processor_mapping(void); - #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC int acpi_get_ioapic_id(acpi_handle handle, u32 gsi_base, u64 *phys_addr); #endif diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h index b5abfda80465..4c5bca38c653 100644 --- a/include/linux/arm-smccc.h +++ b/include/linux/arm-smccc.h @@ -14,9 +14,6 @@ #ifndef __LINUX_ARM_SMCCC_H #define __LINUX_ARM_SMCCC_H -#include <linux/linkage.h> -#include <linux/types.h> - /* * This file provides common defines for ARM SMC Calling Convention as * specified in @@ -60,6 +57,13 @@ #define ARM_SMCCC_OWNER_TRUSTED_OS 50 #define ARM_SMCCC_OWNER_TRUSTED_OS_END 63 +#define ARM_SMCCC_QUIRK_NONE 0 +#define ARM_SMCCC_QUIRK_QCOM_A6 1 /* Save/restore register a6 */ + +#ifndef __ASSEMBLY__ + +#include <linux/linkage.h> +#include <linux/types.h> /** * struct arm_smccc_res - Result from SMC/HVC call * @a0-a3 result values from registers 0 to 3 @@ -72,33 +76,59 @@ struct arm_smccc_res { }; /** - * arm_smccc_smc() - make SMC calls + * struct arm_smccc_quirk - Contains quirk information + * @id: quirk identification + * @state: quirk specific information + * @a6: Qualcomm quirk entry for returning post-smc call contents of a6 + */ +struct arm_smccc_quirk { + int id; + union { + unsigned long a6; + } state; +}; + +/** + * __arm_smccc_smc() - make SMC calls * @a0-a7: arguments passed in registers 0 to 7 * @res: result values from registers 0 to 3 + * @quirk: points to an arm_smccc_quirk, or NULL when no quirks are required. * * This function is used to make SMC calls following SMC Calling Convention. * The content of the supplied param are copied to registers 0 to 7 prior * to the SMC instruction. The return values are updated with the content - * from register 0 to 3 on return from the SMC instruction. + * from register 0 to 3 on return from the SMC instruction. An optional + * quirk structure provides vendor specific behavior. 
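(A caller sketch, illustrative only — SMCCC_FN_ID is a made-up function ID: the arm_smccc_smc()/arm_smccc_hvc() wrapper macros append a NULL quirk, so existing callers compile unchanged, while the *_quirk variants pass one explicitly.)

	struct arm_smccc_res res;
	struct arm_smccc_quirk quirk = { .id = ARM_SMCCC_QUIRK_QCOM_A6 };

	arm_smccc_smc(SMCCC_FN_ID, 0, 0, 0, 0, 0, 0, 0, &res);
	arm_smccc_smc_quirk(SMCCC_FN_ID, 0, 0, 0, 0, 0, 0, 0, &res, &quirk);
	/* res.a0 holds the SMCCC return value */
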
 */
-asmlinkage void arm_smccc_smc(unsigned long a0, unsigned long a1,
+asmlinkage void __arm_smccc_smc(unsigned long a0, unsigned long a1,
 			unsigned long a2, unsigned long a3, unsigned long a4,
 			unsigned long a5, unsigned long a6, unsigned long a7,
-			struct arm_smccc_res *res);
+			struct arm_smccc_res *res, struct arm_smccc_quirk *quirk);
 
 /**
- * arm_smccc_hvc() - make HVC calls
+ * __arm_smccc_hvc() - make HVC calls
  * @a0-a7: arguments passed in registers 0 to 7
  * @res: result values from registers 0 to 3
+ * @quirk: points to an arm_smccc_quirk, or NULL when no quirks are required.
  *
  * This function is used to make HVC calls following SMC Calling
  * Convention. The content of the supplied param are copied to registers 0
  * to 7 prior to the HVC instruction. The return values are updated with
- * the content from register 0 to 3 on return from the HVC instruction.
+ * the content from register 0 to 3 on return from the HVC instruction. An
+ * optional quirk structure provides vendor specific behavior.
  */
-asmlinkage void arm_smccc_hvc(unsigned long a0, unsigned long a1,
+asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
 			unsigned long a2, unsigned long a3, unsigned long a4,
 			unsigned long a5, unsigned long a6, unsigned long a7,
-			struct arm_smccc_res *res);
+			struct arm_smccc_res *res, struct arm_smccc_quirk *quirk);
+
+#define arm_smccc_smc(...) __arm_smccc_smc(__VA_ARGS__, NULL)
+
+#define arm_smccc_smc_quirk(...) __arm_smccc_smc(__VA_ARGS__)
+
+#define arm_smccc_hvc(...) __arm_smccc_hvc(__VA_ARGS__, NULL)
+
+#define arm_smccc_hvc_quirk(...) __arm_smccc_hvc(__VA_ARGS__)
+#endif /*__ASSEMBLY__*/
 #endif /*__LINUX_ARM_SMCCC_H*/
diff --git a/include/linux/atmel-ssc.h b/include/linux/atmel-ssc.h
index 7c0f6549898b..fdb545101ede 100644
--- a/include/linux/atmel-ssc.h
+++ b/include/linux/atmel-ssc.h
@@ -20,6 +20,7 @@ struct ssc_device {
 	int		user;
 	int		irq;
 	bool		clk_from_rk_pin;
+	bool		sound_dai;
 };
 
 struct ssc_device * __must_check ssc_request(unsigned int ssc_num);
diff --git a/include/linux/average.h b/include/linux/average.h
index d04aa58280de..7ddaf340d2ac 100644
--- a/include/linux/average.h
+++ b/include/linux/average.h
@@ -1,45 +1,66 @@
 #ifndef _LINUX_AVERAGE_H
 #define _LINUX_AVERAGE_H
 
-/* Exponentially weighted moving average (EWMA) */
+/*
+ * Exponentially weighted moving average (EWMA)
+ *
+ * This implements a fixed-precision EWMA algorithm, with both the
+ * precision and fall-off coefficient determined at compile-time
+ * and built into the generated helper functions.
+ *
+ * The first argument to the macro is the name that will be used
+ * for the struct and helper functions.
+ *
+ * The second argument, the precision, expresses how many bits are
+ * used for the fractional part of the fixed-precision values.
+ *
+ * The third argument, the weight reciprocal, determines how the
+ * new values will be weighted against the old state: new values
+ * get weight 1/weight_rcp and old values 1-1/weight_rcp. Note
+ * that this parameter must be a power of two for efficiency.
+ */ -#define DECLARE_EWMA(name, _factor, _weight) \ +#define DECLARE_EWMA(name, _precision, _weight_rcp) \ struct ewma_##name { \ unsigned long internal; \ }; \ static inline void ewma_##name##_init(struct ewma_##name *e) \ { \ - BUILD_BUG_ON(!__builtin_constant_p(_factor)); \ - BUILD_BUG_ON(!__builtin_constant_p(_weight)); \ - BUILD_BUG_ON_NOT_POWER_OF_2(_factor); \ - BUILD_BUG_ON_NOT_POWER_OF_2(_weight); \ + BUILD_BUG_ON(!__builtin_constant_p(_precision)); \ + BUILD_BUG_ON(!__builtin_constant_p(_weight_rcp)); \ + /* \ + * Even if you want to feed it just 0/1 you should have \ + * some bits for the non-fractional part... \ + */ \ + BUILD_BUG_ON((_precision) > 30); \ + BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \ e->internal = 0; \ } \ static inline unsigned long \ ewma_##name##_read(struct ewma_##name *e) \ { \ - BUILD_BUG_ON(!__builtin_constant_p(_factor)); \ - BUILD_BUG_ON(!__builtin_constant_p(_weight)); \ - BUILD_BUG_ON_NOT_POWER_OF_2(_factor); \ - BUILD_BUG_ON_NOT_POWER_OF_2(_weight); \ - return e->internal >> ilog2(_factor); \ + BUILD_BUG_ON(!__builtin_constant_p(_precision)); \ + BUILD_BUG_ON(!__builtin_constant_p(_weight_rcp)); \ + BUILD_BUG_ON((_precision) > 30); \ + BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \ + return e->internal >> (_precision); \ } \ static inline void ewma_##name##_add(struct ewma_##name *e, \ unsigned long val) \ { \ unsigned long internal = ACCESS_ONCE(e->internal); \ - unsigned long weight = ilog2(_weight); \ - unsigned long factor = ilog2(_factor); \ + unsigned long weight_rcp = ilog2(_weight_rcp); \ + unsigned long precision = _precision; \ \ - BUILD_BUG_ON(!__builtin_constant_p(_factor)); \ - BUILD_BUG_ON(!__builtin_constant_p(_weight)); \ - BUILD_BUG_ON_NOT_POWER_OF_2(_factor); \ - BUILD_BUG_ON_NOT_POWER_OF_2(_weight); \ + BUILD_BUG_ON(!__builtin_constant_p(_precision)); \ + BUILD_BUG_ON(!__builtin_constant_p(_weight_rcp)); \ + BUILD_BUG_ON((_precision) > 30); \ + BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \ \ ACCESS_ONCE(e->internal) = internal ? 
\ - (((internal << weight) - internal) + \ - (val << factor)) >> weight : \ - (val << factor); \ + (((internal << weight_rcp) - internal) + \ + (val << precision)) >> weight_rcp : \ + (val << precision); \ } #endif /* _LINUX_AVERAGE_H */ diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index 1303b570b18c..05488da3aee9 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -6,6 +6,8 @@ #include <asm/exec.h> #include <uapi/linux/binfmts.h> +struct filename; + #define CORENAME_MAX_SIZE 128 /* @@ -123,4 +125,12 @@ extern void install_exec_creds(struct linux_binprm *bprm); extern void set_binfmt(struct linux_binfmt *new); extern ssize_t read_code(struct file *, unsigned long, loff_t, size_t); +extern int do_execve(struct filename *, + const char __user * const __user *, + const char __user * const __user *); +extern int do_execveat(int, struct filename *, + const char __user * const __user *, + const char __user * const __user *, + int); + #endif /* _LINUX_BINFMTS_H */ diff --git a/include/linux/bio.h b/include/linux/bio.h index 7cf8a6c70a3f..8e521194f6fc 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -183,7 +183,7 @@ static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter, #define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len) -static inline unsigned bio_segments(struct bio *bio) +static inline unsigned __bio_segments(struct bio *bio, struct bvec_iter *bvec) { unsigned segs = 0; struct bio_vec bv; @@ -205,12 +205,17 @@ static inline unsigned bio_segments(struct bio *bio) break; } - bio_for_each_segment(bv, bio, iter) + __bio_for_each_segment(bv, bio, iter, *bvec) segs++; return segs; } +static inline unsigned bio_segments(struct bio *bio) +{ + return __bio_segments(bio, &bio->bi_iter); +} + /* * get a reference to a bio, so it won't disappear. the intended use is * something like: @@ -384,6 +389,8 @@ extern void bio_put(struct bio *); extern void __bio_clone_fast(struct bio *, struct bio *); extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *); extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs); +extern struct bio *bio_clone_bioset_partial(struct bio *, gfp_t, + struct bio_set *, int, int); extern struct bio_set *fs_bio_set; diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h index f6505d83069d..8b9d6fff002d 100644 --- a/include/linux/bitfield.h +++ b/include/linux/bitfield.h @@ -63,6 +63,19 @@ }) /** + * FIELD_FIT() - check if value fits in the field + * @_mask: shifted mask defining the field's length and position + * @_val: value to test against the field + * + * Return: true if @_val can fit inside @_mask, false if @_val is too big. 
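(Usage sketch for the renamed EWMA macro parameters, not part of the patch: 10 bits of fractional precision and a weight reciprocal of 8, so each new sample contributes 1/8 to the average; the "signal" name is illustrative.)

DECLARE_EWMA(signal, 10, 8)

static struct ewma_signal avg;	/* ewma_signal_init(&avg) must run first */

static void signal_track(unsigned long sample)
{
	ewma_signal_add(&avg, sample);
	pr_debug("avg signal: %lu\n", ewma_signal_read(&avg));
}
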
+ */ +#define FIELD_FIT(_mask, _val) \ + ({ \ + __BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_FIT: "); \ + !((((typeof(_mask))_val) << __bf_shf(_mask)) & ~(_mask)); \ + }) + +/** * FIELD_PREP() - prepare a bitfield element * @_mask: shifted mask defining the field's length and position * @_val: value to put in the field diff --git a/include/linux/blk-mq-virtio.h b/include/linux/blk-mq-virtio.h new file mode 100644 index 000000000000..b1ef6e14744f --- /dev/null +++ b/include/linux/blk-mq-virtio.h @@ -0,0 +1,10 @@ +#ifndef _LINUX_BLK_MQ_VIRTIO_H +#define _LINUX_BLK_MQ_VIRTIO_H + +struct blk_mq_tag_set; +struct virtio_device; + +int blk_mq_virtio_map_queues(struct blk_mq_tag_set *set, + struct virtio_device *vdev, int first_vec); + +#endif /* _LINUX_BLK_MQ_VIRTIO_H */ diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 8e4df3d6c8cd..b296a9006117 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -33,6 +33,7 @@ struct blk_mq_hw_ctx { struct blk_mq_ctx **ctxs; unsigned int nr_ctx; + wait_queue_t dispatch_wait; atomic_t wait_index; struct blk_mq_tags *tags; @@ -160,6 +161,7 @@ enum { BLK_MQ_S_STOPPED = 0, BLK_MQ_S_TAG_ACTIVE = 1, BLK_MQ_S_SCHED_RESTART = 2, + BLK_MQ_S_TAG_WAITING = 3, BLK_MQ_MAX_DEPTH = 10240, @@ -243,6 +245,9 @@ void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset, void blk_mq_freeze_queue(struct request_queue *q); void blk_mq_unfreeze_queue(struct request_queue *q); void blk_mq_freeze_queue_start(struct request_queue *q); +void blk_mq_freeze_queue_wait(struct request_queue *q); +int blk_mq_freeze_queue_wait_timeout(struct request_queue *q, + unsigned long timeout); int blk_mq_reinit_tagset(struct blk_mq_tag_set *set); int blk_mq_map_queues(struct blk_mq_tag_set *set); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index aecca0e7d9ca..5a7da607ca04 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -2,6 +2,7 @@ #define _LINUX_BLKDEV_H #include <linux/sched.h> +#include <linux/sched/clock.h> #ifdef CONFIG_BLOCK @@ -434,7 +435,6 @@ struct request_queue { struct delayed_work delay_work; struct backing_dev_info *backing_dev_info; - struct disk_devt *disk_devt; /* * The queue owner gets to use this for whatever they like. diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 3ed1f3b1d594..909fc033173a 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -8,10 +8,12 @@ #define _LINUX_BPF_H 1 #include <uapi/linux/bpf.h> + #include <linux/workqueue.h> #include <linux/file.h> #include <linux/percpu.h> #include <linux/err.h> +#include <linux/rbtree_latch.h> struct perf_event; struct bpf_map; @@ -69,14 +71,14 @@ enum bpf_arg_type { /* the following constraints used to prototype bpf_memcmp() and other * functions that access data on eBPF program stack */ - ARG_PTR_TO_STACK, /* any pointer to eBPF program stack */ - ARG_PTR_TO_RAW_STACK, /* any pointer to eBPF program stack, area does not - * need to be initialized, helper function must fill - * all bytes or clear them in error case. + ARG_PTR_TO_MEM, /* pointer to valid memory (stack, packet, map value) */ + ARG_PTR_TO_UNINIT_MEM, /* pointer to memory does not need to be initialized, + * helper function must fill all bytes or clear + * them in error case. 
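(Illustrative sketch: validating a value with the new FIELD_FIT() before packing it with the pre-existing FIELD_PREP(); REG_MODE is a hypothetical two-bit register field.)

#define REG_MODE	GENMASK(5, 4)

static int pack_mode(u32 *reg, u32 mode)
{
	if (!FIELD_FIT(REG_MODE, mode))
		return -EINVAL;		/* value too big for the field */
	*reg = (*reg & ~REG_MODE) | FIELD_PREP(REG_MODE, mode);
	return 0;
}
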
*/ - ARG_CONST_STACK_SIZE, /* number of bytes accessed from stack */ - ARG_CONST_STACK_SIZE_OR_ZERO, /* number of bytes accessed from stack or 0 */ + ARG_CONST_SIZE, /* number of bytes accessed from memory */ + ARG_CONST_SIZE_OR_ZERO, /* number of bytes accessed from memory or 0 */ ARG_PTR_TO_CTX, /* pointer to context */ ARG_ANYTHING, /* any (initialized) argument is ok */ @@ -161,9 +163,10 @@ struct bpf_verifier_ops { enum bpf_reg_type *reg_type); int (*gen_prologue)(struct bpf_insn *insn, bool direct_write, const struct bpf_prog *prog); - u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg, - int src_reg, int ctx_off, - struct bpf_insn *insn, struct bpf_prog *prog); + u32 (*convert_ctx_access)(enum bpf_access_type type, + const struct bpf_insn *src, + struct bpf_insn *dst, + struct bpf_prog *prog); }; struct bpf_prog_type_list { @@ -176,6 +179,8 @@ struct bpf_prog_aux { atomic_t refcnt; u32 used_map_cnt; u32 max_ctx_offset; + struct latch_tree_node ksym_tnode; + struct list_head ksym_lnode; const struct bpf_verifier_ops *ops; struct bpf_map **used_maps; struct bpf_prog *prog; diff --git a/include/linux/bpf_trace.h b/include/linux/bpf_trace.h new file mode 100644 index 000000000000..b22efbdd2eb4 --- /dev/null +++ b/include/linux/bpf_trace.h @@ -0,0 +1,7 @@ +#ifndef __LINUX_BPF_TRACE_H__ +#define __LINUX_BPF_TRACE_H__ + +#include <trace/events/bpf.h> +#include <trace/events/xdp.h> + +#endif /* __LINUX_BPF_TRACE_H__ */ diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index 4f7d8be9ddbf..55e517130311 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h @@ -17,6 +17,7 @@ #define PHY_ID_BCM5482 0x0143bcb0 #define PHY_ID_BCM5411 0x00206070 #define PHY_ID_BCM5421 0x002060e0 +#define PHY_ID_BCM54210E 0x600d84a0 #define PHY_ID_BCM5464 0x002060b0 #define PHY_ID_BCM5461 0x002060c0 #define PHY_ID_BCM54612E 0x03625e60 @@ -24,6 +25,7 @@ #define PHY_ID_BCM57780 0x03625d90 #define PHY_ID_BCM7250 0xae025280 +#define PHY_ID_BCM7278 0xae0251a0 #define PHY_ID_BCM7364 0xae025260 #define PHY_ID_BCM7366 0x600d8490 #define PHY_ID_BCM7346 0x600d8650 @@ -31,6 +33,7 @@ #define PHY_ID_BCM7425 0x600d86b0 #define PHY_ID_BCM7429 0x600d8730 #define PHY_ID_BCM7435 0x600d8750 +#define PHY_ID_BCM74371 0xae0252e0 #define PHY_ID_BCM7439 0x600d8480 #define PHY_ID_BCM7439_2 0xae025080 #define PHY_ID_BCM7445 0x600d8510 @@ -103,19 +106,17 @@ /* * AUXILIARY CONTROL SHADOW ACCESS REGISTERS. 
(PHY REG 0x18) */ -#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x0000 +#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x00 #define MII_BCM54XX_AUXCTL_ACTL_TX_6DB 0x0400 #define MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA 0x0800 -#define MII_BCM54XX_AUXCTL_MISC_WREN 0x8000 -#define MII_BCM54XX_AUXCTL_MISC_RXD_RXC_SKEW 0x0100 -#define MII_BCM54XX_AUXCTL_MISC_FORCE_AMDIX 0x0200 -#define MII_BCM54XX_AUXCTL_MISC_RDSEL_MISC 0x7000 -#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC 0x0007 -#define MII_BCM54XX_AUXCTL_SHDWSEL_READ_SHIFT 12 -#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN (1 << 8) -#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_WIRESPEED_EN (1 << 4) +#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC 0x07 +#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_WIRESPEED_EN 0x0010 +#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN 0x0100 +#define MII_BCM54XX_AUXCTL_MISC_FORCE_AMDIX 0x0200 +#define MII_BCM54XX_AUXCTL_MISC_WREN 0x8000 +#define MII_BCM54XX_AUXCTL_SHDWSEL_READ_SHIFT 12 #define MII_BCM54XX_AUXCTL_SHDWSEL_MASK 0x0007 /* diff --git a/include/linux/bug.h b/include/linux/bug.h index baff2e8fc8a8..5828489309bb 100644 --- a/include/linux/bug.h +++ b/include/linux/bug.h @@ -124,18 +124,20 @@ static inline enum bug_trap_type report_bug(unsigned long bug_addr, /* * Since detected data corruption should stop operation on the affected - * structures, this returns false if the corruption condition is found. + * structures. Return value must be checked and sanely acted on by caller. */ +static inline __must_check bool check_data_corruption(bool v) { return v; } #define CHECK_DATA_CORRUPTION(condition, fmt, ...) \ - do { \ - if (unlikely(condition)) { \ + check_data_corruption(({ \ + bool corruption = unlikely(condition); \ + if (corruption) { \ if (IS_ENABLED(CONFIG_BUG_ON_DATA_CORRUPTION)) { \ pr_err(fmt, ##__VA_ARGS__); \ BUG(); \ } else \ WARN(1, fmt, ##__VA_ARGS__); \ - return false; \ } \ - } while (0) + corruption; \ + })) #endif /* _LINUX_BUG_H */ diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index 5f5270941ba0..141b05aade81 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h @@ -38,6 +38,13 @@ struct can_priv { struct can_bittiming bittiming, data_bittiming; const struct can_bittiming_const *bittiming_const, *data_bittiming_const; + const u16 *termination_const; + unsigned int termination_const_cnt; + u16 termination; + const u32 *bitrate_const; + unsigned int bitrate_const_cnt; + const u32 *data_bitrate_const; + unsigned int data_bitrate_const_cnt; struct can_clock clock; enum can_state state; @@ -53,6 +60,7 @@ struct can_priv { int (*do_set_bittiming)(struct net_device *dev); int (*do_set_data_bittiming)(struct net_device *dev); int (*do_set_mode)(struct net_device *dev, enum can_mode mode); + int (*do_set_termination)(struct net_device *dev, u16 term); int (*do_get_state)(const struct net_device *dev, enum can_state *state); int (*do_get_berr_counter)(const struct net_device *dev, diff --git a/include/linux/can/rx-offload.h b/include/linux/can/rx-offload.h new file mode 100644 index 000000000000..cb31683bbe15 --- /dev/null +++ b/include/linux/can/rx-offload.h @@ -0,0 +1,59 @@ +/* + * linux/can/rx-offload.h + * + * Copyright (c) 2014 David Jander, Protonic Holland + * Copyright (c) 2014-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the version 2 of the GNU General Public License + * as published by the Free Software Foundation + * + * This program is distributed in the hope 
that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _CAN_RX_OFFLOAD_H +#define _CAN_RX_OFFLOAD_H + +#include <linux/netdevice.h> +#include <linux/can.h> + +struct can_rx_offload { + struct net_device *dev; + + unsigned int (*mailbox_read)(struct can_rx_offload *offload, struct can_frame *cf, + u32 *timestamp, unsigned int mb); + + struct sk_buff_head skb_queue; + u32 skb_queue_len_max; + + unsigned int mb_first; + unsigned int mb_last; + + struct napi_struct napi; + + bool inc; +}; + +int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *offload); +int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight); +int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 reg); +int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload); +int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_buff *skb); +void can_rx_offload_reset(struct can_rx_offload *offload); +void can_rx_offload_del(struct can_rx_offload *offload); +void can_rx_offload_enable(struct can_rx_offload *offload); + +static inline void can_rx_offload_schedule(struct can_rx_offload *offload) +{ + napi_schedule(&offload->napi); +} + +static inline void can_rx_offload_disable(struct can_rx_offload *offload) +{ + napi_disable(&offload->napi); +} + +#endif /* !_CAN_RX_OFFLOAD_H */ diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h index 1816c5e26581..88cd5dc8e238 100644 --- a/include/linux/ceph/libceph.h +++ b/include/linux/ceph/libceph.h @@ -48,6 +48,7 @@ struct ceph_options { unsigned long mount_timeout; /* jiffies */ unsigned long osd_idle_ttl; /* jiffies */ unsigned long osd_keepalive_timeout; /* jiffies */ + unsigned long osd_request_timeout; /* jiffies */ /* * any type that can't be simply compared or doesn't need need @@ -68,6 +69,7 @@ struct ceph_options { #define CEPH_MOUNT_TIMEOUT_DEFAULT msecs_to_jiffies(60 * 1000) #define CEPH_OSD_KEEPALIVE_DEFAULT msecs_to_jiffies(5 * 1000) #define CEPH_OSD_IDLE_TTL_DEFAULT msecs_to_jiffies(60 * 1000) +#define CEPH_OSD_REQUEST_TIMEOUT_DEFAULT 0 /* no timeout */ #define CEPH_MONC_HUNT_INTERVAL msecs_to_jiffies(3 * 1000) #define CEPH_MONC_PING_INTERVAL msecs_to_jiffies(10 * 1000) diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 03a6653d329a..c125b5d9e13c 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -22,7 +22,6 @@ struct ceph_osd_client; * completion callback for async writepages */ typedef void (*ceph_osdc_callback_t)(struct ceph_osd_request *); -typedef void (*ceph_osdc_unsafe_callback_t)(struct ceph_osd_request *, bool); #define CEPH_HOMELESS_OSD -1 @@ -170,15 +169,12 @@ struct ceph_osd_request { unsigned int r_num_ops; int r_result; - bool r_got_reply; struct ceph_osd_client *r_osdc; struct kref r_kref; bool r_mempool; - struct completion r_completion; - struct completion r_done_completion; /* fsync waiter */ + struct completion r_completion; /* private to osd_client.c */ ceph_osdc_callback_t r_callback; - ceph_osdc_unsafe_callback_t r_unsafe_callback; struct list_head r_unsafe_item; struct inode *r_inode; /* for use by callbacks */ @@ -193,6 +189,7 @@ struct ceph_osd_request { /* internal */ unsigned long r_stamp; /* jiffies, send or check time */ + unsigned long r_start_stamp; /* jiffies */ int 
r_attempts;
 	struct ceph_eversion r_replay_version; /* aka reassert_version */
 	u32 r_last_force_resend;
diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h
index 9a9041784dcf..938656f70807 100644
--- a/include/linux/ceph/osdmap.h
+++ b/include/linux/ceph/osdmap.h
@@ -57,7 +57,7 @@ static inline bool ceph_can_shift_osds(struct ceph_pg_pool_info *pool)
 	case CEPH_POOL_TYPE_EC:
 		return false;
 	default:
-		BUG_ON(1);
+		BUG();
 	}
 }
 
@@ -82,13 +82,6 @@ void ceph_oloc_copy(struct ceph_object_locator *dest,
 void ceph_oloc_destroy(struct ceph_object_locator *oloc);
 
 /*
- * Maximum supported by kernel client object name length
- *
- * (probably outdated: must be >= RBD_MAX_MD_NAME_LEN -- currently 100)
- */
-#define CEPH_MAX_OID_NAME_LEN 100
-
-/*
  * 51-char inline_name is long enough for all cephfs and all but one
  * rbd requests: <imgname> in "<imgname>.rbd"/"rbd_id.<imgname>" can be
  * arbitrarily long (~PAGE_SIZE). It's done once during rbd map; all
@@ -173,8 +166,8 @@ struct ceph_osdmap {
 	 * the list of osds that store+replicate them. */
 	struct crush_map *crush;
 
-	struct mutex crush_scratch_mutex;
-	int crush_scratch_ary[CEPH_PG_MAX_SIZE * 3];
+	struct mutex crush_workspace_mutex;
+	void *crush_workspace;
 };
 
 static inline bool ceph_osd_exists(struct ceph_osdmap *map, int osd)
diff --git a/include/linux/ceph/rados.h b/include/linux/ceph/rados.h
index 5c0da61cb763..5d0018782d50 100644
--- a/include/linux/ceph/rados.h
+++ b/include/linux/ceph/rados.h
@@ -50,7 +50,7 @@ struct ceph_timespec {
 #define CEPH_PG_LAYOUT_LINEAR 2
 #define CEPH_PG_LAYOUT_HYBRID 3
 
-#define CEPH_PG_MAX_SIZE      16  /* max # osds in a single pg */
+#define CEPH_PG_MAX_SIZE      32  /* max # osds in a single pg */
 
 /*
  * placement group.
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 861b4677fc5b..6a3f850cabab 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -148,14 +148,18 @@ struct cgroup_subsys_state {
  * set for a task.
  */
 struct css_set {
-	/* Reference count */
-	atomic_t refcount;
-
 	/*
-	 * List running through all cgroup groups in the same hash
-	 * slot. Protected by css_set_lock
+	 * Set of subsystem states, one for each subsystem. This array is
+	 * immutable after creation apart from the init_css_set during
+	 * subsystem registration (at boot time).
 	 */
-	struct hlist_node hlist;
+	struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];
+
+	/* reference count */
+	atomic_t refcount;
+
+	/* the default cgroup associated with this css_set */
+	struct cgroup *dfl_cgrp;
 
 	/*
 	 * Lists running through all tasks using this cgroup group.
@@ -167,21 +171,29 @@ struct css_set {
 	struct list_head tasks;
 	struct list_head mg_tasks;
 
+	/* all css_task_iters currently walking this cset */
+	struct list_head task_iters;
+
 	/*
-	 * List of cgrp_cset_links pointing at cgroups referenced from this
-	 * css_set. Protected by css_set_lock.
+	 * On the default hierarchy, ->subsys[ssid] may point to a css
+	 * attached to an ancestor instead of the cgroup this css_set is
+	 * associated with. The following node is anchored at
+	 * ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to
+	 * iterate through all css's attached to a given cgroup.
 	 */
-	struct list_head cgrp_links;
+	struct list_head e_cset_node[CGROUP_SUBSYS_COUNT];
 
-	/* the default cgroup associated with this css_set */
-	struct cgroup *dfl_cgrp;
+	/*
+	 * List running through all cgroup groups in the same hash
+	 * slot. Protected by css_set_lock
+	 */
+	struct hlist_node hlist;
 
 	/*
-	 * Set of subsystem states, one for each subsystem. This array is
-	 * immutable after creation apart from the init_css_set during
-	 * subsystem registration (at boot time).
+	 * List of cgrp_cset_links pointing at cgroups referenced from this
+	 * css_set. Protected by css_set_lock.
 	 */
-	struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];
+	struct list_head cgrp_links;
 
 	/*
 	 * List of csets participating in the on-going migration either as
@@ -201,18 +213,6 @@ struct css_set {
 	struct cgroup *mg_dst_cgrp;
 	struct css_set *mg_dst_cset;
 
-	/*
-	 * On the default hierarhcy, ->subsys[ssid] may point to a css
-	 * attached to an ancestor instead of the cgroup this css_set is
-	 * associated with. The following node is anchored at
-	 * ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to
-	 * iterate through all css's attached to a given cgroup.
-	 */
-	struct list_head e_cset_node[CGROUP_SUBSYS_COUNT];
-
-	/* all css_task_iters currently walking this cset */
-	struct list_head task_iters;
-
 	/* dead and being drained, ignore for migration */
 	bool dead;
 
@@ -388,6 +388,9 @@ struct cftype {
 	struct list_head node;		/* anchored at ss->cfts */
 	struct kernfs_ops *kf_ops;
 
+	int (*open)(struct kernfs_open_file *of);
+	void (*release)(struct kernfs_open_file *of);
+
 	/*
 	 * read_u64() is a shortcut for the common case of returning a
 	 * single integer. Use it in place of read()
@@ -528,8 +531,8 @@ extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
 * cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups
 * @tsk: target task
 *
- * Called from threadgroup_change_begin() and allows cgroup operations to
- * synchronize against threadgroup changes using a percpu_rw_semaphore.
+ * Allows cgroup operations to synchronize against threadgroup changes
+ * using a percpu_rw_semaphore.
 */
 static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
 {
@@ -540,8 +543,7 @@ static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
 * cgroup_threadgroup_change_end - threadgroup exclusion for cgroups
 * @tsk: target task
 *
- * Called from threadgroup_change_end(). Counterpart of
- * cgroup_threadcgroup_change_begin().
+ * Counterpart of cgroup_threadgroup_change_begin().
 */
 static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
 {
@@ -552,7 +554,11 @@ static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
 
 #define CGROUP_SUBSYS_COUNT 0
 
-static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) {}
+static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
+{
+	might_sleep();
+}
+
 static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {}
 
 #endif	/* CONFIG_CGROUPS */
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index c83c23f0577b..f6b43fbb141c 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -266,7 +266,7 @@ void css_task_iter_end(struct css_task_iter *it);
 * cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset
 * @leader: the loop cursor
 * @dst_css: the destination css
- * @tset: takset to iterate
+ * @tset: taskset to iterate
 *
 * Iterate threadgroup leaders of @tset. For single-task migrations, @tset
 * may not contain any.
diff --git a/include/linux/cgroup_rdma.h b/include/linux/cgroup_rdma.h
new file mode 100644
index 000000000000..e94290b29e99
--- /dev/null
+++ b/include/linux/cgroup_rdma.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2016 Parav Pandit <pandit.parav@gmail.com>
+ *
+ * This file is subject to the terms and conditions of version 2 of the GNU
+ * General Public License.
See the file COPYING in the main directory of the + * Linux distribution for more details. + */ + +#ifndef _CGROUP_RDMA_H +#define _CGROUP_RDMA_H + +#include <linux/cgroup.h> + +enum rdmacg_resource_type { + RDMACG_RESOURCE_HCA_HANDLE, + RDMACG_RESOURCE_HCA_OBJECT, + RDMACG_RESOURCE_MAX, +}; + +#ifdef CONFIG_CGROUP_RDMA + +struct rdma_cgroup { + struct cgroup_subsys_state css; + + /* + * head to keep track of all resource pools + * that belongs to this cgroup. + */ + struct list_head rpools; +}; + +struct rdmacg_device { + struct list_head dev_node; + struct list_head rpools; + char *name; +}; + +/* + * APIs for RDMA/IB stack to publish when a device wants to + * participate in resource accounting + */ +int rdmacg_register_device(struct rdmacg_device *device); +void rdmacg_unregister_device(struct rdmacg_device *device); + +/* APIs for RDMA/IB stack to charge/uncharge pool specific resources */ +int rdmacg_try_charge(struct rdma_cgroup **rdmacg, + struct rdmacg_device *device, + enum rdmacg_resource_type index); +void rdmacg_uncharge(struct rdma_cgroup *cg, + struct rdmacg_device *device, + enum rdmacg_resource_type index); +#endif /* CONFIG_CGROUP_RDMA */ +#endif /* _CGROUP_RDMA_H */ diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h index 0df0336acee9..d0e597c44585 100644 --- a/include/linux/cgroup_subsys.h +++ b/include/linux/cgroup_subsys.h @@ -56,6 +56,10 @@ SUBSYS(hugetlb) SUBSYS(pids) #endif +#if IS_ENABLED(CONFIG_CGROUP_RDMA) +SUBSYS(rdma) +#endif + /* * The following subsystems are not supported on the default hierarchy. */ diff --git a/include/linux/cma.h b/include/linux/cma.h index 6f0a91b37f68..03f32d0bd1d8 100644 --- a/include/linux/cma.h +++ b/include/linux/cma.h @@ -29,6 +29,7 @@ extern int __init cma_declare_contiguous(phys_addr_t base, extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, unsigned int order_per_bit, struct cma **res_cma); -extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align); +extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align, + gfp_t gfp_mask); extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count); #endif diff --git a/include/linux/compat.h b/include/linux/compat.h index 9e40be522793..aef47be2a5c1 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -711,8 +711,10 @@ int __compat_save_altstack(compat_stack_t __user *, unsigned long); compat_stack_t __user *__uss = uss; \ struct task_struct *t = current; \ put_user_ex(ptr_to_compat((void __user *)t->sas_ss_sp), &__uss->ss_sp); \ - put_user_ex(sas_ss_flags(sp), &__uss->ss_flags); \ + put_user_ex(t->sas_ss_flags, &__uss->ss_flags); \ put_user_ex(t->sas_ss_size, &__uss->ss_size); \ + if (t->sas_ss_flags & SS_AUTODISARM) \ + sas_ss_reset(t); \ } while (0); asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid, diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 0444b1336268..0efef9cf014f 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -116,11 +116,13 @@ */ #define __pure __attribute__((pure)) #define __aligned(x) __attribute__((aligned(x))) +#define __aligned_largest __attribute__((aligned)) #define __printf(a, b) __attribute__((format(printf, a, b))) #define __scanf(a, b) __attribute__((format(scanf, a, b))) #define __attribute_const__ __attribute__((__const__)) #define __maybe_unused __attribute__((unused)) #define __always_unused __attribute__((unused)) +#define __mode(x) 
__attribute__((mode(x))) /* gcc version specific checks */ @@ -195,6 +197,17 @@ #endif #endif +#ifdef CONFIG_STACK_VALIDATION +#define annotate_unreachable() ({ \ + asm("%c0:\t\n" \ + ".pushsection .discard.unreachable\t\n" \ + ".long %c0b - .\t\n" \ + ".popsection\t\n" : : "i" (__LINE__)); \ +}) +#else +#define annotate_unreachable() +#endif + /* * Mark a position in code as unreachable. This can be used to * suppress control flow warnings after asm blocks that transfer @@ -204,7 +217,8 @@ * this in the preprocessor, but we can live with this because they're * unreleased. Really, we need to have autoconf for the kernel. */ -#define unreachable() __builtin_unreachable() +#define unreachable() \ + do { annotate_unreachable(); __builtin_unreachable(); } while (0) /* Mark a function definition as prohibited from being cloned. */ #define __noclone __attribute__((__noclone__, __optimize__("no-tracer"))) diff --git a/include/linux/compiler.h b/include/linux/compiler.h index cf0fa5d86059..f8110051188f 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -27,7 +27,11 @@ extern void __chk_user_ptr(const volatile void __user *); extern void __chk_io_ptr(const volatile void __iomem *); # define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member)) #else /* __CHECKER__ */ -# define __user +# ifdef STRUCTLEAK_PLUGIN +# define __user __attribute__((user)) +# else +# define __user +# endif # define __kernel # define __safe # define __force @@ -101,29 +105,36 @@ struct ftrace_branch_data { }; }; +struct ftrace_likely_data { + struct ftrace_branch_data data; + unsigned long constant; +}; + /* * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code * to disable branch tracing on a per file basis. */ #if defined(CONFIG_TRACE_BRANCH_PROFILING) \ && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__) -void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); +void ftrace_likely_update(struct ftrace_likely_data *f, int val, + int expect, int is_constant); #define likely_notrace(x) __builtin_expect(!!(x), 1) #define unlikely_notrace(x) __builtin_expect(!!(x), 0) -#define __branch_check__(x, expect) ({ \ +#define __branch_check__(x, expect, is_constant) ({ \ int ______r; \ - static struct ftrace_branch_data \ + static struct ftrace_likely_data \ __attribute__((__aligned__(4))) \ __attribute__((section("_ftrace_annotated_branch"))) \ ______f = { \ - .func = __func__, \ - .file = __FILE__, \ - .line = __LINE__, \ + .data.func = __func__, \ + .data.file = __FILE__, \ + .data.line = __LINE__, \ }; \ - ______r = likely_notrace(x); \ - ftrace_likely_update(&______f, ______r, expect); \ + ______r = __builtin_expect(!!(x), expect); \ + ftrace_likely_update(&______f, ______r, \ + expect, is_constant); \ ______r; \ }) @@ -133,10 +144,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); * written by Daniel Walker. */ # ifndef likely -# define likely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 1)) +# define likely(x) (__branch_check__(x, 1, __builtin_constant_p(x))) # endif # ifndef unlikely -# define unlikely(x) (__builtin_constant_p(x) ? 
!!(x) : __branch_check__(x, 0)) +# define unlikely(x) (__branch_check__(x, 0, __builtin_constant_p(x))) # endif #ifdef CONFIG_PROFILE_ALL_BRANCHES @@ -566,12 +577,4 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s (_________p1); \ }) -/* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */ -#ifdef CONFIG_KPROBES -# define __kprobes __attribute__((__section__(".kprobes.text"))) -# define nokprobe_inline __always_inline -#else -# define __kprobes -# define nokprobe_inline inline -#endif #endif /* __LINUX_COMPILER_H */ diff --git a/include/linux/console.h b/include/linux/console.h index 9c26c6685587..5949d1855589 100644 --- a/include/linux/console.h +++ b/include/linux/console.h @@ -73,6 +73,10 @@ struct consw { u16 *(*con_screen_pos)(struct vc_data *, int); unsigned long (*con_getxy)(struct vc_data *, unsigned long, int *, int *); /* + * Flush the video console driver's scrollback buffer + */ + void (*con_flush_scrollback)(struct vc_data *); + /* * Prepare the console for the debugger. This includes, but is not * limited to, unblanking the console, loading an appropriate * palette, and allowing debugger generated output. diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 21f9c74496e7..f92081234afd 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -30,6 +30,8 @@ struct cpu { extern void boot_cpu_init(void); extern void boot_cpu_state_init(void); +extern void cpu_init(void); +extern void trap_init(void); extern int register_cpu(struct cpu *cpu, int num); extern struct device *get_cpu_device(unsigned cpu); diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 921acaaa1601..62d240e962f0 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -26,7 +26,6 @@ enum cpuhp_state { CPUHP_ARM_OMAP_WAKE_DEAD, CPUHP_IRQ_POLL_DEAD, CPUHP_BLOCK_SOFTIRQ_DEAD, - CPUHP_VIRT_SCSI_DEAD, CPUHP_ACPI_CPUDRV_DEAD, CPUHP_S390_PFAULT_DEAD, CPUHP_BLK_MQ_DEAD, @@ -137,6 +136,7 @@ enum cpuhp_state { CPUHP_AP_PERF_ARM_CCI_ONLINE, CPUHP_AP_PERF_ARM_CCN_ONLINE, CPUHP_AP_PERF_ARM_L2X0_ONLINE, + CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE, CPUHP_AP_WORKQUEUE_ONLINE, CPUHP_AP_RCUTREE_ONLINE, CPUHP_AP_ONLINE_DYN, diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index da346f2817a8..fc1e5d7fc1c7 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -62,6 +62,7 @@ struct cpuidle_state { }; /* Idle State Flags */ +#define CPUIDLE_FLAG_NONE (0x00) #define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */ #define CPUIDLE_FLAG_TIMER_STOP (0x04) /* timer is stopped on this state */ diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index bfc204e70338..611fce58d670 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -9,6 +9,8 @@ */ #include <linux/sched.h> +#include <linux/sched/topology.h> +#include <linux/sched/task.h> #include <linux/cpumask.h> #include <linux/nodemask.h> #include <linux/mm.h> diff --git a/include/linux/cputime.h b/include/linux/cputime.h deleted file mode 100644 index a691dc4ddc13..000000000000 --- a/include/linux/cputime.h +++ /dev/null @@ -1,13 +0,0 @@ -#ifndef __LINUX_CPUTIME_H -#define __LINUX_CPUTIME_H - -#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE -#include <asm/cputime.h> - -#ifndef cputime_to_nsecs -# define cputime_to_nsecs(__ct) \ - (cputime_to_usecs(__ct) * NSEC_PER_USEC) -#endif - -#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ -#endif /* __LINUX_CPUTIME_H */ diff --git a/include/linux/cred.h b/include/linux/cred.h 
index f0e70a1bb3ac..b03e7d049a64 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -18,8 +18,9 @@ #include <linux/selinux.h> #include <linux/atomic.h> #include <linux/uidgid.h> +#include <linux/sched.h> +#include <linux/sched/user.h> -struct user_struct; struct cred; struct inode; diff --git a/include/linux/crush/crush.h b/include/linux/crush/crush.h index be8f12b8f195..fbecbd089d75 100644 --- a/include/linux/crush/crush.h +++ b/include/linux/crush/crush.h @@ -135,13 +135,6 @@ struct crush_bucket { __u32 size; /* num items */ __s32 *items; - /* - * cached random permutation: used for uniform bucket and for - * the linear search fallback for the other bucket types. - */ - __u32 perm_x; /* @x for which *perm is defined */ - __u32 perm_n; /* num elements of *perm that are permuted/defined */ - __u32 *perm; }; struct crush_bucket_uniform { @@ -211,6 +204,21 @@ struct crush_map { * device fails. */ __u8 chooseleaf_stable; + /* + * This value is calculated after decode or construction by + * the builder. It is exposed here (rather than having a + * 'build CRUSH working space' function) so that callers can + * reserve a static buffer, allocate space on the stack, or + * otherwise avoid calling into the heap allocator if they + * want to. The size of the working space depends on the map, + * while the size of the scratch vector passed to the mapper + * depends on the size of the desired result set. + * + * Nothing stops the caller from allocating both in one swell + * foop and passing in two points, though. + */ + size_t working_size; + #ifndef __KERNEL__ /* * version 0 (original) of straw_calc has various flaws. version 1 @@ -248,4 +256,23 @@ static inline int crush_calc_tree_node(int i) return ((i+1) << 1)-1; } +/* + * These data structures are private to the CRUSH implementation. They + * are exposed in this header file because builder needs their + * definitions to calculate the total working size. + * + * Moving this out of the crush map allow us to treat the CRUSH map as + * immutable within the mapper and removes the requirement for a CRUSH + * map lock. + */ +struct crush_work_bucket { + __u32 perm_x; /* @x for which *perm is defined */ + __u32 perm_n; /* num elements of *perm that are permuted/defined */ + __u32 *perm; /* Permutation of the bucket's items */ +}; + +struct crush_work { + struct crush_work_bucket **work; /* Per-bucket working store */ +}; + #endif diff --git a/include/linux/crush/mapper.h b/include/linux/crush/mapper.h index 5dfd5b1125d2..c95e19e1ff11 100644 --- a/include/linux/crush/mapper.h +++ b/include/linux/crush/mapper.h @@ -15,6 +15,20 @@ extern int crush_do_rule(const struct crush_map *map, int ruleno, int x, int *result, int result_max, const __u32 *weights, int weight_max, - int *scratch); + void *cwin); + +/* + * Returns the exact amount of workspace that will need to be used + * for a given combination of crush_map and result_max. The caller can + * then allocate this much on its own, either on the stack, in a + * per-thread long-lived buffer, or however it likes. 
+ */ +static inline size_t crush_work_size(const struct crush_map *map, + int result_max) +{ + return map->working_size + result_max * 3 * sizeof(__u32); +} + +void crush_init_workspace(const struct crush_map *map, void *v); #endif diff --git a/include/linux/dax.h b/include/linux/dax.h index 24ad71173995..d8a3dc042e1c 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -37,9 +37,9 @@ static inline void *dax_radix_locked_entry(sector_t sector, unsigned long flags) } ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, - struct iomap_ops *ops); -int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf, - struct iomap_ops *ops); + const struct iomap_ops *ops); +int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size, + const struct iomap_ops *ops); int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index); int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index); int dax_invalidate_mapping_entry_sync(struct address_space *mapping, @@ -71,21 +71,13 @@ static inline unsigned int dax_radix_order(void *entry) return PMD_SHIFT - PAGE_SHIFT; return 0; } -int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address, - pmd_t *pmd, unsigned int flags, struct iomap_ops *ops); #else static inline unsigned int dax_radix_order(void *entry) { return 0; } -static inline int dax_iomap_pmd_fault(struct vm_area_struct *vma, - unsigned long address, pmd_t *pmd, unsigned int flags, - struct iomap_ops *ops) -{ - return VM_FAULT_FALLBACK; -} #endif -int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *); +int dax_pfn_mkwrite(struct vm_fault *vmf); static inline bool vma_is_dax(struct vm_area_struct *vma) { diff --git a/include/linux/dcache.h b/include/linux/dcache.h index c965e4469499..d2e38dc6172c 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -11,6 +11,7 @@ #include <linux/rcupdate.h> #include <linux/lockref.h> #include <linux/stringhash.h> +#include <linux/wait.h> struct path; struct vfsmount; @@ -562,7 +563,7 @@ static inline struct dentry *d_backing_dentry(struct dentry *upper) * @inode: inode to select the dentry from multiple layers (can be NULL) * @flags: open flags to control copy-up behavior * - * If dentry is on an union/overlay, then return the underlying, real dentry. + * If dentry is on a union/overlay, then return the underlying, real dentry. * Otherwise return the dentry itself. * * See also: Documentation/filesystems/vfs.txt @@ -581,7 +582,7 @@ static inline struct dentry *d_real(struct dentry *dentry, * d_real_inode - Return the real inode * @dentry: The dentry to query * - * If dentry is on an union/overlay, then return the underlying, real inode. + * If dentry is on a union/overlay, then return the underlying, real inode. * Otherwise return d_inode(). 
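(A caller sketch for the new workspace API — everything except crush_work_size(), crush_init_workspace() and crush_do_rule() is a placeholder: allocate crush_work_size() bytes once, initialize them, then pass the buffer as the mapper's cwin argument.)

	size_t wsize = crush_work_size(map, result_max);
	void *work = kmalloc(wsize, GFP_NOIO);	/* or a per-thread buffer */

	if (work) {
		crush_init_workspace(map, work);
		n = crush_do_rule(map, ruleno, x, result, result_max,
				  weights, weight_max, work);
		kfree(work);
	}
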
*/ static inline struct inode *d_real_inode(const struct dentry *dentry) diff --git a/include/linux/dccp.h b/include/linux/dccp.h index 61d042bbbf60..68449293c4b6 100644 --- a/include/linux/dccp.h +++ b/include/linux/dccp.h @@ -163,6 +163,7 @@ struct dccp_request_sock { __u64 dreq_isr; __u64 dreq_gsr; __be32 dreq_service; + spinlock_t dreq_lock; struct list_head dreq_featneg; __u32 dreq_timestamp_echo; __u32 dreq_timestamp_time; diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index c0befcf41b58..7dff776e6d16 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h @@ -52,8 +52,7 @@ extern struct srcu_struct debugfs_srcu; * Must only be called under the protection established by * debugfs_use_file_start(). */ -static inline const struct file_operations * -debugfs_real_fops(const struct file *filp) +static inline const struct file_operations *debugfs_real_fops(const struct file *filp) __must_hold(&debugfs_srcu) { /* @@ -99,9 +98,10 @@ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent); struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent, const char *dest); +typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *); struct dentry *debugfs_create_automount(const char *name, struct dentry *parent, - struct vfsmount *(*f)(void *), + debugfs_automount_t f, void *data); void debugfs_remove(struct dentry *dentry); diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h index 00e60f79a9cc..4178d2493547 100644 --- a/include/linux/delayacct.h +++ b/include/linux/delayacct.h @@ -18,8 +18,6 @@ #define _LINUX_DELAYACCT_H #include <uapi/linux/taskstats.h> -#include <linux/sched.h> -#include <linux/slab.h> /* * Per-task flags relevant to delay accounting @@ -30,7 +28,43 @@ #define DELAYACCT_PF_BLKIO 0x00000002 /* I am waiting on IO */ #ifdef CONFIG_TASK_DELAY_ACCT +struct task_delay_info { + spinlock_t lock; + unsigned int flags; /* Private per-task flags */ + + /* For each stat XXX, add following, aligned appropriately + * + * struct timespec XXX_start, XXX_end; + * u64 XXX_delay; + * u32 XXX_count; + * + * Atomicity of updates to XXX_delay, XXX_count protected by + * single lock above (split into XXX_lock if contention is an issue). + */ + + /* + * XXX_count is incremented on every XXX operation, the delay + * associated with the operation is added to XXX_delay. + * XXX_delay contains the accumulated delay time in nanoseconds. + */ + u64 blkio_start; /* Shared by blkio, swapin */ + u64 blkio_delay; /* wait for sync block io completion */ + u64 swapin_delay; /* wait for swapin block io completion */ + u32 blkio_count; /* total count of the number of sync block */ + /* io operations performed */ + u32 swapin_count; /* total count of the number of swapin block */ + /* io operations performed */ + + u64 freepages_start; + u64 freepages_delay; /* wait for memory reclaim */ + u32 freepages_count; /* total count of memory reclaim */ +}; +#endif +#include <linux/sched.h> +#include <linux/slab.h> + +#ifdef CONFIG_TASK_DELAY_ACCT extern int delayacct_on; /* Delay accounting turned on/off */ extern struct kmem_cache *delayacct_cache; extern void delayacct_init(void); diff --git a/include/linux/device.h b/include/linux/device.h index 491b4c0ca633..9ef518af5515 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -88,6 +88,8 @@ extern void bus_remove_file(struct bus_type *, struct bus_attribute *); * * @suspend: Called when a device on this bus wants to go to sleep mode. 
* @resume: Called to bring a device on this bus out of sleep mode. + * @num_vf: Called to find out how many virtual functions a device on this + * bus supports. * @pm: Power management operations of this bus, callback the specific * device driver's pm-ops. * @iommu_ops: IOMMU specific operations for this bus, used to attach IOMMU @@ -127,6 +129,8 @@ struct bus_type { int (*suspend)(struct device *dev, pm_message_t state); int (*resume)(struct device *dev); + int (*num_vf)(struct device *dev); + const struct dev_pm_ops *pm; const struct iommu_ops *iommu_ops; @@ -921,6 +925,7 @@ struct device { #ifdef CONFIG_NUMA int numa_node; /* NUMA node this device is close to */ #endif + const struct dma_map_ops *dma_ops; u64 *dma_mask; /* dma mask (if dma'able device) */ u64 coherent_dma_mask;/* Like dma_mask, but for alloc_coherent mappings as @@ -1140,6 +1145,13 @@ extern int device_online(struct device *dev); extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode); extern void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode); +static inline int dev_num_vf(struct device *dev) +{ + if (dev->bus && dev->bus->num_vf) + return dev->bus->num_vf(dev); + return 0; +} + /* * Root device objects for grouping under /sys/devices */ diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index 8daeb3ce0016..bfb3704fc6fc 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -39,23 +39,6 @@ struct dma_buf_attachment; /** * struct dma_buf_ops - operations possible on struct dma_buf - * @attach: [optional] allows different devices to 'attach' themselves to the - * given buffer. It might return -EBUSY to signal that backing storage - * is already allocated and incompatible with the requirements - * of requesting device. - * @detach: [optional] detach a given device from this buffer. - * @map_dma_buf: returns list of scatter pages allocated, increases usecount - * of the buffer. Requires atleast one attach to be called - * before. Returned sg list should already be mapped into - * _device_ address space. This call may sleep. May also return - * -EINTR. Should return -EINVAL if attach hasn't been called yet. - * @unmap_dma_buf: decreases usecount of buffer, might deallocate scatter - * pages. - * @release: release this buffer; to be called after the last dma_buf_put. - * @begin_cpu_access: [optional] called before cpu access to invalidate cpu - * caches and allocate backing storage (if not yet done) - * respectively pin the object into memory. - * @end_cpu_access: [optional] called after cpu access to flush caches. * @kmap_atomic: maps a page from the buffer into kernel address * space, users may not block until the subsequent unmap call. * This callback must not sleep. @@ -63,43 +46,206 @@ struct dma_buf_attachment; * This Callback must not sleep. * @kmap: maps a page from the buffer into kernel address space. * @kunmap: [optional] unmaps a page from the buffer. - * @mmap: used to expose the backing storage to userspace. Note that the - * mapping needs to be coherent - if the exporter doesn't directly - * support this, it needs to fake coherency by shooting down any ptes - * when transitioning away from the cpu domain. * @vmap: [optional] creates a virtual mapping for the buffer into kernel * address space. Same restrictions as for vmap and friends apply. 
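(Importer-side sketch of the entry points that drive these callbacks, illustrative and with error handling elided: dma_buf_attach() invokes @attach, dma_buf_map_attachment() invokes @map_dma_buf, and the unmap/detach calls undo them in reverse order.)

	struct dma_buf_attachment *att;
	struct sg_table *sgt;

	att = dma_buf_attach(buf, dev);
	sgt = dma_buf_map_attachment(att, DMA_BIDIRECTIONAL);
	/* ... perform DMA using sgt ... */
	dma_buf_unmap_attachment(att, sgt, DMA_BIDIRECTIONAL);
	dma_buf_detach(buf, att);
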
* @vunmap: [optional] unmaps a vmap from the buffer */ struct dma_buf_ops { + /** + * @attach: + * + * This is called from dma_buf_attach() to make sure that a given + * &device can access the provided &dma_buf. Exporters which support + * buffer objects in special locations like VRAM or device-specific + * carveout areas should check whether the buffer could be moved to + * system memory (or directly accessed by the provided device), and + * otherwise need to fail the attach operation. + * + * The exporter should also in general check whether the current + * allocation fulfills the DMA constraints of the new device. If this + * is not the case, and the allocation cannot be moved, it should also + * fail the attach operation. + * + * Any exporter-private housekeeping data can be stored in the + * &dma_buf_attachment.priv pointer. + * + * This callback is optional. + * + * Returns: + * + * 0 on success, negative error code on failure. It might return -EBUSY + * to signal that backing storage is already allocated and incompatible + * with the requirements of the requesting device. + */ int (*attach)(struct dma_buf *, struct device *, - struct dma_buf_attachment *); + struct dma_buf_attachment *); + /** + * @detach: + * + * This is called by dma_buf_detach() to release a &dma_buf_attachment. + * Provided so that exporters can clean up any housekeeping for an + * &dma_buf_attachment. + * + * This callback is optional. + */ void (*detach)(struct dma_buf *, struct dma_buf_attachment *); - /* For {map,unmap}_dma_buf below, any specific buffer attributes - * required should get added to device_dma_parameters accessible - * via dev->dma_params. + /** + * @map_dma_buf: + * + * This is called by dma_buf_map_attachment() and is used to map a + * shared &dma_buf into device address space, and it is mandatory. It + * can only be called if @attach has been called successfully. This + * essentially pins the DMA buffer into place, and it cannot be moved + * any more. + * + * This call may sleep, e.g. when the backing storage first needs to be + * allocated, or moved to a location suitable for all currently attached + * devices. + * + * Note that any specific buffer attributes required for this function + * should get added to device_dma_parameters accessible via + * &device.dma_params from the &dma_buf_attachment. The @attach callback + * should also check these constraints. + * + * If this is being called for the first time, the exporter can now + * choose to scan through the list of attachments for this buffer, + * collate the requirements of the attached devices, and choose an + * appropriate backing storage for the buffer. + * + * Based on enum dma_data_direction, it might be possible to have + * multiple users accessing at the same time (for reading, maybe), or + * any other kind of sharing that the exporter might wish to make + * available to buffer-users. + * + * Returns: + * + * A &sg_table scatter list of the backing storage of the DMA buffer, + * already mapped into the device address space of the &device attached + * with the provided &dma_buf_attachment. + * + * On failure, returns a negative error value wrapped into a pointer. + * May also return -EINTR when a signal was received while being + * blocked. */ struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *, - enum dma_data_direction); + enum dma_data_direction); + /** + * @unmap_dma_buf: + * + * This is called by dma_buf_unmap_attachment() and should unmap and + * release the &sg_table allocated in @map_dma_buf, and it is mandatory.
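/*
 * The importer-side call sequence implied by the @attach and
 * @map_dma_buf documentation above, as a hedged sketch (error paths
 * abbreviated, foo_import is illustrative): attaching pins nothing yet;
 * the mapping is what pins the backing storage in place.
 */
#include <linux/dma-buf.h>
#include <linux/dma-direction.h>
#include <linux/err.h>

static int foo_import(struct dma_buf *buf, struct device *dev)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	attach = dma_buf_attach(buf, dev);
	if (IS_ERR(attach))
		return PTR_ERR(attach);

	/* may sleep; on success the buffer cannot be moved any more */
	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		dma_buf_detach(buf, attach);
		return PTR_ERR(sgt);
	}

	/* ... program the device using the sgt entries ... */

	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
	dma_buf_detach(buf, attach);
	return 0;
}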
+ * It should also unpin the backing storage if this is the last mapping + * of the DMA buffer, if the exporter supports backing storage + * migration. + */ void (*unmap_dma_buf)(struct dma_buf_attachment *, - struct sg_table *, - enum dma_data_direction); + struct sg_table *, + enum dma_data_direction); + /* TODO: Add try_map_dma_buf version, to return immed with -EBUSY * if the call would block. */ - /* after final dma_buf_put() */ + /** + * @release: + * + * Called after the last dma_buf_put to release the &dma_buf, and it is + * mandatory. + */ void (*release)(struct dma_buf *); + /** + * @begin_cpu_access: + * + * This is called from dma_buf_begin_cpu_access() and allows the + * exporter to ensure that the memory is actually available for cpu + * access - the exporter might need to allocate or swap-in and pin the + * backing storage. The exporter also needs to ensure that cpu access is + * coherent for the access direction. The direction can be used by the + * exporter to optimize the cache flushing, i.e. access with a different + * direction (read instead of write) might return stale or even bogus + * data (e.g. when the exporter needs to copy the data to temporary + * storage). + * + * This callback is optional. + * + * FIXME: This is both called through the DMA_BUF_IOCTL_SYNC command + * from userspace (where storage shouldn't be pinned to avoid handing + * de-facto mlock rights to userspace) and for the kernel-internal + * users of the various kmap interfaces, where the backing storage must + * be pinned to guarantee that the atomic kmap calls can succeed. Since + * there are no in-kernel users of the kmap interfaces yet this isn't a + * real problem. + * + * Returns: + * + * 0 on success or a negative error code on failure. This can for + * example fail when the backing storage can't be allocated. Can also + * return -ERESTARTSYS or -EINTR when the call has been interrupted and + * needs to be restarted. + */ int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction); + + /** + * @end_cpu_access: + * + * This is called from dma_buf_end_cpu_access() when the importer is + * done accessing the buffer from the CPU. The exporter can use this to + * flush caches and unpin any resources pinned in @begin_cpu_access. + * The result of any dma_buf kmap calls after end_cpu_access is + * undefined. + * + * This callback is optional. + * + * Returns: + * + * 0 on success or a negative error code on failure. Can return + * -ERESTARTSYS or -EINTR when the call has been interrupted and needs + * to be restarted. + */ int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction); void *(*kmap_atomic)(struct dma_buf *, unsigned long); void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *); void *(*kmap)(struct dma_buf *, unsigned long); void (*kunmap)(struct dma_buf *, unsigned long, void *); + /** + * @mmap: + * + * This callback is used by the dma_buf_mmap() function. + * + * Note that the mapping needs to be incoherent, userspace is expected + * to bracket CPU access using the DMA_BUF_IOCTL_SYNC interface. + * + * Because dma-buf buffers have invariant size over their lifetime, the + * dma-buf core checks whether a vma is too large and rejects such + * mappings. The exporter hence does not need to duplicate this check. + * + * If an exporter needs to manually flush caches and hence needs to fake + * coherency for mmap support, it needs to be able to zap all the ptes + * pointing at the backing storage.
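/*
 * Userspace view of @begin_cpu_access/@end_cpu_access: CPU access to an
 * mmap()ed dma-buf is bracketed with DMA_BUF_IOCTL_SYNC, matching the
 * FIXME above. A minimal sketch; error checking is omitted.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/dma-buf.h>

static void fill_dmabuf(int fd, void *map, size_t len)
{
	struct dma_buf_sync sync;

	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
	ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);	/* -> begin_cpu_access */

	memset(map, 0, len);			/* coherent CPU access */

	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
	ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);	/* -> end_cpu_access */
}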
Now linux mm needs a struct + * address_space associated with the struct file stored in vma->vm_file + * to do that with the function unmap_mapping_range. But the dma_buf + * framework only backs every dma_buf fd with the anon_file struct file, + * i.e. all dma_bufs share the same file. + * + * Hence exporters need to setup their own file (and address_space) + * association by setting vma->vm_file and adjusting vma->vm_pgoff in + * the dma_buf mmap callback. In the specific case of a gem driver the + * exporter could use the shmem file already provided by gem (and set + * vm_pgoff = 0). Exporters can then zap ptes by unmapping the + * corresponding range of the struct address_space associated with their + * own file. + * + * This callback is optional. + * + * Returns: + * + * 0 on success or a negative error code on failure. + */ int (*mmap)(struct dma_buf *, struct vm_area_struct *vma); void *(*vmap)(struct dma_buf *); @@ -124,6 +270,15 @@ struct dma_buf_ops { * @poll: for userspace poll support * @cb_excl: for userspace poll support * @cb_shared: for userspace poll support + * + * This represents a shared buffer, created by calling dma_buf_export(). The + * userspace representation is a normal file descriptor, which can be created by + * calling dma_buf_fd(). + * + * Shared dma buffers are reference counted using dma_buf_put() and + * get_dma_buf(). + * + * Device DMA access is handled by the separate &struct dma_buf_attachment. */ struct dma_buf { size_t size; @@ -160,6 +315,11 @@ struct dma_buf { * This structure holds the attachment information between the dma_buf buffer * and its user device(s). The list contains one attachment struct per device * attached to the buffer. + * + * An attachment is created by calling dma_buf_attach(), and released again by + * calling dma_buf_detach(). The DMA mapping itself needed to initiate a + * transfer is created by dma_buf_map_attachment() and freed again by calling + * dma_buf_unmap_attachment(). */ struct dma_buf_attachment { struct dma_buf *dmabuf; @@ -192,9 +352,11 @@ struct dma_buf_export_info { }; /** - * helper macro for exporters; zeros and fills in most common values - * + * DEFINE_DMA_BUF_EXPORT_INFO - helper macro for exporters * @name: export-info name + * + * DEFINE_DMA_BUF_EXPORT_INFO macro defines the &struct dma_buf_export_info, + * zeroes it out and pre-populates exp_name in it. */ #define DEFINE_DMA_BUF_EXPORT_INFO(name) \ struct dma_buf_export_info name = { .exp_name = KBUILD_MODNAME, \ diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h index fec734df1524..b67bf6ac907d 100644 --- a/include/linux/dma-contiguous.h +++ b/include/linux/dma-contiguous.h @@ -112,7 +112,7 @@ static inline int dma_declare_contiguous(struct device *dev, phys_addr_t size, } struct page *dma_alloc_from_contiguous(struct device *dev, size_t count, - unsigned int order); + unsigned int order, gfp_t gfp_mask); bool dma_release_from_contiguous(struct device *dev, struct page *pages, int count); @@ -145,7 +145,7 @@ int dma_declare_contiguous(struct device *dev, phys_addr_t size, static inline struct page *dma_alloc_from_contiguous(struct device *dev, size_t count, - unsigned int order) + unsigned int order, gfp_t gfp_mask) { return NULL; } diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h index d51a7d23c358..6048fa404e57 100644 --- a/include/linux/dma-fence.h +++ b/include/linux/dma-fence.h @@ -47,7 +47,7 @@ struct dma_fence_cb; * can be compared to decide which fence would be signaled later. 
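/*
 * Exporter-side sketch matching the &dma_buf and
 * DEFINE_DMA_BUF_EXPORT_INFO documentation above. foo_dmabuf_ops stands
 * in for a real implementation of the callbacks documented earlier; the
 * function name is hypothetical.
 */
#include <linux/dma-buf.h>
#include <linux/fcntl.h>

static const struct dma_buf_ops foo_dmabuf_ops /* = { ... } */;

static struct dma_buf *foo_export(void *priv, size_t size)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);	/* pre-fills exp_name */

	exp_info.ops   = &foo_dmabuf_ops;
	exp_info.size  = size;
	exp_info.flags = O_RDWR;
	exp_info.priv  = priv;

	/* ERR_PTR() on failure; a user-visible fd comes from dma_buf_fd() */
	return dma_buf_export(&exp_info);
}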
* @flags: A mask of DMA_FENCE_FLAG_* defined below * @timestamp: Timestamp when the fence was signaled. - * @status: Optional, only valid if < 0, must be set before calling + * @error: Optional, only valid if < 0, must be set before calling * dma_fence_signal, indicates that the fence has completed with an error. * * the flags member must be manipulated and read using the appropriate @@ -79,7 +79,7 @@ struct dma_fence { unsigned seqno; unsigned long flags; ktime_t timestamp; - int status; + int error; }; enum dma_fence_flag_bits { @@ -133,7 +133,7 @@ struct dma_fence_cb { * or some failure occurred that made it impossible to enable * signaling. True indicates successful enabling. * - * fence->status may be set in enable_signaling, but only when false is + * fence->error may be set in enable_signaling, but only when false is * returned. * * Calling dma_fence_signal before enable_signaling is called allows @@ -145,7 +145,7 @@ struct dma_fence_cb { * the second time will be a noop since it was already signaled. * * Notes on signaled: - * May set fence->status if returning true. + * May set fence->error if returning true. * * Notes on wait: * Must not be NULL, set to dma_fence_default_wait for default implementation. @@ -378,6 +378,50 @@ static inline struct dma_fence *dma_fence_later(struct dma_fence *f1, return dma_fence_is_signaled(f2) ? NULL : f2; } +/** + * dma_fence_get_status_locked - returns the status upon completion + * @fence: [in] the dma_fence to query + * + * Drivers can supply an optional error status condition before they signal + * the fence (to indicate whether the fence was completed due to an error + * rather than success). The value of the status condition is only valid + * if the fence has been signaled, so dma_fence_get_status_locked() first + * checks the signal state before reporting the error status. + * + * Returns 0 if the fence has not yet been signaled, 1 if the fence has + * been signaled without an error condition, or a negative error code + * if the fence has been completed in error. + */ +static inline int dma_fence_get_status_locked(struct dma_fence *fence) +{ + if (dma_fence_is_signaled_locked(fence)) + return fence->error ?: 1; + else + return 0; +} + +int dma_fence_get_status(struct dma_fence *fence); + +/** + * dma_fence_set_error - flag an error condition on the fence + * @fence: [in] the dma_fence + * @error: [in] the error to store + * + * Drivers can supply an optional error status condition before they signal + * the fence, to indicate that the fence was completed due to an error + * rather than success. This must be set before signaling (so that the value + * is visible before any waiters on the signal callback are woken). This + * helper exists to help catch erroneous setting of #dma_fence.error.
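/*
 * A sketch of the renamed error plumbing introduced here: a driver flags
 * the error with dma_fence_set_error() strictly before signaling, and a
 * consumer reads it back through dma_fence_get_status(). The foo_*
 * names are illustrative only.
 */
#include <linux/dma-fence.h>

static void foo_complete_fence(struct dma_fence *fence, int err)
{
	if (err)
		dma_fence_set_error(fence, err);	/* before signaling */
	dma_fence_signal(fence);
}

static bool foo_fence_failed(struct dma_fence *fence)
{
	/* <0: completed in error, 0: still pending, 1: signaled cleanly */
	return dma_fence_get_status(fence) < 0;
}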
+ */ +static inline void dma_fence_set_error(struct dma_fence *fence, + int error) +{ + BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)); + BUG_ON(error >= 0 || error < -MAX_ERRNO); + + fence->error = error; +} + signed long dma_fence_wait_timeout(struct dma_fence *, bool intr, signed long timeout); signed long dma_fence_wait_any_timeout(struct dma_fence **fences, diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index c24721a33b4c..0977317c6835 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -134,7 +134,8 @@ struct dma_map_ops { int is_phys; }; -extern struct dma_map_ops dma_noop_ops; +extern const struct dma_map_ops dma_noop_ops; +extern const struct dma_map_ops dma_virt_ops; #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1)) @@ -171,14 +172,26 @@ int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma, #ifdef CONFIG_HAS_DMA #include <asm/dma-mapping.h> +static inline const struct dma_map_ops *get_dma_ops(struct device *dev) +{ + if (dev && dev->dma_ops) + return dev->dma_ops; + return get_arch_dma_ops(dev ? dev->bus : NULL); +} + +static inline void set_dma_ops(struct device *dev, + const struct dma_map_ops *dma_ops) +{ + dev->dma_ops = dma_ops; +} #else /* * Define the dma api to allow compilation but not linking of * dma dependent code. Code that depends on the dma-mapping * API needs to set 'depends on HAS_DMA' in its Kconfig */ -extern struct dma_map_ops bad_dma_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +extern const struct dma_map_ops bad_dma_ops; +static inline const struct dma_map_ops *get_dma_ops(struct device *dev) { return &bad_dma_ops; } @@ -189,7 +202,7 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, enum dma_data_direction dir, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); dma_addr_t addr; kmemcheck_mark_initialized(ptr, size); @@ -208,7 +221,7 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, enum dma_data_direction dir, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); if (ops->unmap_page) @@ -224,7 +237,7 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); int i, ents; struct scatterlist *s; @@ -242,7 +255,7 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg int nents, enum dma_data_direction dir, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); debug_dma_unmap_sg(dev, sg, nents, dir); @@ -256,7 +269,7 @@ static inline dma_addr_t dma_map_page_attrs(struct device *dev, enum dma_data_direction dir, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); dma_addr_t addr; kmemcheck_mark_initialized(page_address(page) + offset, size); @@ -272,7 +285,7 @@ static inline void dma_unmap_page_attrs(struct device *dev, enum dma_data_direction dir, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); if (ops->unmap_page) @@ -286,7 
+299,7 @@ static inline dma_addr_t dma_map_resource(struct device *dev, enum dma_data_direction dir, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); dma_addr_t addr; BUG_ON(!valid_dma_direction(dir)); @@ -307,7 +320,7 @@ static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); if (ops->unmap_resource) @@ -319,7 +332,7 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); if (ops->sync_single_for_cpu) @@ -331,7 +344,7 @@ static inline void dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); if (ops->sync_single_for_device) @@ -371,7 +384,7 @@ static inline void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction dir) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); if (ops->sync_sg_for_cpu) @@ -383,7 +396,7 @@ static inline void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction dir) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); if (ops->sync_sg_for_device) @@ -428,7 +441,7 @@ static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!ops); if (ops->mmap) return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs); @@ -446,7 +459,7 @@ dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr, size_t size, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!ops); if (ops->get_sgtable) return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, @@ -464,7 +477,7 @@ static inline void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); void *cpu_addr; BUG_ON(!ops); @@ -486,7 +499,7 @@ static inline void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!ops); WARN_ON(irqs_disabled()); @@ -544,7 +557,7 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) #ifndef HAVE_ARCH_DMA_SUPPORTED static inline int dma_supported(struct device *dev, u64 mask) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); if (!ops) return 0; @@ -557,7 +570,7 @@ static inline int dma_supported(struct device *dev, u64 mask) #ifndef HAVE_ARCH_DMA_SET_MASK static inline int 
dma_set_mask(struct device *dev, u64 mask) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); if (ops->set_dma_mask) return ops->set_dma_mask(dev, mask); diff --git a/include/linux/dmar.h b/include/linux/dmar.h index e9bc9292bd3a..e8ffba1052d3 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h @@ -26,7 +26,7 @@ #include <linux/msi.h> #include <linux/irqreturn.h> #include <linux/rwsem.h> -#include <linux/rcupdate.h> +#include <linux/rculist.h> struct acpi_dmar_header; diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h index 698d51a0eea3..c8240a12c42d 100644 --- a/include/linux/elfcore.h +++ b/include/linux/elfcore.h @@ -3,6 +3,8 @@ #include <linux/user.h> #include <linux/bug.h> +#include <linux/sched/task_stack.h> + #include <asm/elf.h> #include <uapi/linux/elfcore.h> diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index 6fec9e81bd70..c62b709b1ce0 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -54,6 +54,11 @@ struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs, #define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1) #define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count) +struct net_device *devm_alloc_etherdev_mqs(struct device *dev, int sizeof_priv, + unsigned int txqs, + unsigned int rxqs); +#define devm_alloc_etherdev(dev, sizeof_priv) devm_alloc_etherdev_mqs(dev, sizeof_priv, 1, 1) + struct sk_buff **eth_gro_receive(struct sk_buff **head, struct sk_buff *skb); int eth_gro_complete(struct sk_buff *skb, int nhoff); @@ -397,6 +402,66 @@ static inline bool ether_addr_equal_masked(const u8 *addr1, const u8 *addr2, } /** + * ether_addr_to_u64 - Convert an Ethernet address into a u64 value. + * @addr: Pointer to a six-byte array containing the Ethernet address + * + * Return a u64 value of the address + */ +static inline u64 ether_addr_to_u64(const u8 *addr) +{ + u64 u = 0; + int i; + + for (i = 0; i < ETH_ALEN; i++) + u = u << 8 | addr[i]; + + return u; +} + +/** + * u64_to_ether_addr - Convert a u64 to an Ethernet address. + * @u: u64 to convert to an Ethernet MAC address + * @addr: Pointer to a six-byte array to contain the Ethernet address + */ +static inline void u64_to_ether_addr(u64 u, u8 *addr) +{ + int i; + + for (i = ETH_ALEN - 1; i >= 0; i--) { + addr[i] = u & 0xff; + u = u >> 8; + } +} + +/** + * eth_addr_dec - Decrement the given MAC address + * + * @addr: Pointer to a six-byte array containing Ethernet address to decrement + */ +static inline void eth_addr_dec(u8 *addr) +{ + u64 u = ether_addr_to_u64(addr); + + u--; + u64_to_ether_addr(u, addr); +} + +/** + * ether_addr_greater - Compare two Ethernet addresses + * @addr1: Pointer to a six-byte array containing the Ethernet address + * @addr2: Pointer to another six-byte array containing the Ethernet address + * + * Compare two Ethernet addresses, returns true if addr1 is greater than addr2 + */ +static inline bool ether_addr_greater(const u8 *addr1, const u8 *addr2) +{ + u64 u1 = ether_addr_to_u64(addr1); + u64 u2 = ether_addr_to_u64(addr2); + + return u1 > u2; +} + +/** * is_etherdev_addr - Tell if given Ethernet address belongs to the device.
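/*
 * The u64 conversion helpers above make MAC address arithmetic simple;
 * a small sketch computing an address range, with illustrative foo_*
 * names. Assumes base + count does not overflow the 48-bit space.
 */
#include <linux/etherdevice.h>

static void foo_last_addr(const u8 *base, unsigned int count, u8 *last)
{
	u64 u = ether_addr_to_u64(base);

	u64_to_ether_addr(u + count - 1, last);
}

static bool foo_addr_in_range(const u8 *base, unsigned int count,
			      const u8 *addr)
{
	u64 lo = ether_addr_to_u64(base);
	u64 a = ether_addr_to_u64(addr);

	return a >= lo && a < lo + count;
}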
* @dev: Pointer to a device structure * @addr: Pointer to a six-byte array containing the Ethernet address diff --git a/include/linux/extcon.h b/include/linux/extcon.h index b871c0cb1f02..7010fb01a81a 100644 --- a/include/linux/extcon.h +++ b/include/linux/extcon.h @@ -46,7 +46,18 @@ #define EXTCON_USB 1 #define EXTCON_USB_HOST 2 -/* Charging external connector */ +/* + * Charging external connector + * + * When an SDP charger connector is reported, we should also report + * the USB connector, which means EXTCON_CHG_USB_SDP should always + * appear together with EXTCON_USB. Likewise, the ACA charger connector + * EXTCON_CHG_USB_ACA would normally appear together with EXTCON_USB_HOST. + * + * The EXTCON_CHG_USB_SLOW connector can provide at least 500mA of + * current at 5V. The EXTCON_CHG_USB_FAST connector can provide at + * least 1A of current at 5V. + */ #define EXTCON_CHG_USB_SDP 5 /* Standard Downstream Port */ #define EXTCON_CHG_USB_DCP 6 /* Dedicated Charging Port */ #define EXTCON_CHG_USB_CDP 7 /* Charging Downstream Port */ @@ -54,6 +65,7 @@ #define EXTCON_CHG_USB_FAST 9 #define EXTCON_CHG_USB_SLOW 10 #define EXTCON_CHG_WPT 11 /* Wireless Power Transfer */ +#define EXTCON_CHG_USB_PD 12 /* USB Power Delivery */ /* Jack external connector */ #define EXTCON_JACK_MICROPHONE 20 @@ -160,62 +172,7 @@ union extcon_property_value { }; struct extcon_cable; - -/** - * struct extcon_dev - An extcon device represents one external connector. - * @name: The name of this extcon device. Parent device name is - * used if NULL. - * @supported_cable: Array of supported cable names ending with EXTCON_NONE. - * If supported_cable is NULL, cable name related APIs - * are disabled. - * @mutually_exclusive: Array of mutually exclusive set of cables that cannot - * be attached simultaneously. The array should be - * ending with NULL or be NULL (no mutually exclusive - * cables). For example, if it is { 0x7, 0x30, 0}, then, - * {0, 1}, {0, 1, 2}, {0, 2}, {1, 2}, or {4, 5} cannot - * be attached simulataneously. {0x7, 0} is equivalent to - * {0x3, 0x6, 0x5, 0}. If it is {0xFFFFFFFF, 0}, there - * can be no simultaneous connections. - * @dev: Device of this extcon. - * @state: Attach/detach state of this extcon. Do not provide at - * register-time. - * @nh: Notifier for the state change events from this extcon - * @entry: To support list of extcon devices so that users can - * search for extcon devices based on the extcon name. - * @lock: - * @max_supported: Internal value to store the number of cables. - * @extcon_dev_type: Device_type struct to provide attribute_groups - * customized for each extcon device. - * @cables: Sysfs subdirectories. Each represents one cable. - * - * In most cases, users only need to provide "User initializing data" of - * this struct when registering an extcon. In some exceptional cases, - * optional callbacks may be needed. However, the values in "internal data" - * are overwritten by register function. - */ -struct extcon_dev { - /* Optional user initializing data */ - const char *name; - const unsigned int *supported_cable; - const u32 *mutually_exclusive; - - /* Internal data. Please do not set. */ - struct device dev; - struct raw_notifier_head *nh; - struct list_head entry; - int max_supported; - spinlock_t lock; /* could be called by irq handler */ - u32 state; - - /* /sys/class/extcon/.../cable.n/... */ - struct device_type extcon_dev_type; - struct extcon_cable *cables; - - /* /sys/class/extcon/.../mutually_exclusive/...
*/ - struct attribute_group attr_g_muex; - struct attribute **attrs_muex; - struct device_attribute *d_attrs_muex; -}; +struct extcon_dev; #if IS_ENABLED(CONFIG_EXTCON) diff --git a/include/linux/extcon/extcon-adc-jack.h b/include/linux/extcon/extcon-adc-jack.h index a0e03b13b449..2aa32075bca1 100644 --- a/include/linux/extcon/extcon-adc-jack.h +++ b/include/linux/extcon/extcon-adc-jack.h @@ -59,7 +59,7 @@ struct adc_jack_pdata { const char *name; const char *consumer_channel; - const enum extcon *cable_names; + const unsigned int *cable_names; /* The last entry's state should be 0 */ struct adc_jack_cond *adc_conditions; diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index cea41a124a80..e2d239ed4c60 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -36,6 +36,12 @@ #define F2FS_NODE_INO(sbi) (sbi->node_ino_num) #define F2FS_META_INO(sbi) (sbi->meta_ino_num) +#define F2FS_IO_SIZE(sbi) (1 << (sbi)->write_io_size_bits) /* Blocks */ +#define F2FS_IO_SIZE_KB(sbi) (1 << ((sbi)->write_io_size_bits + 2)) /* KB */ +#define F2FS_IO_SIZE_BYTES(sbi) (1 << ((sbi)->write_io_size_bits + 12)) /* B */ +#define F2FS_IO_SIZE_BITS(sbi) ((sbi)->write_io_size_bits) /* power of 2 */ +#define F2FS_IO_SIZE_MASK(sbi) (F2FS_IO_SIZE(sbi) - 1) + /* This flag is used by node and meta inodes, and by recovery */ #define GFP_F2FS_ZERO (GFP_NOFS | __GFP_ZERO) #define GFP_F2FS_HIGH_ZERO (GFP_NOFS | __GFP_ZERO | __GFP_HIGHMEM) @@ -108,6 +114,7 @@ struct f2fs_super_block { /* * For checkpoint */ +#define CP_NAT_BITS_FLAG 0x00000080 #define CP_CRC_RECOVERY_FLAG 0x00000040 #define CP_FASTBOOT_FLAG 0x00000020 #define CP_FSCK_FLAG 0x00000010 @@ -272,6 +279,7 @@ struct f2fs_node { * For NAT entries */ #define NAT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_nat_entry)) +#define NAT_ENTRY_BITMAP_SIZE ((NAT_ENTRY_PER_BLOCK + 7) / 8) struct f2fs_nat_entry { __u8 version; /* latest version of cached nat entry */ diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h index 9f4956d8601c..728d4e0292aa 100644 --- a/include/linux/fault-inject.h +++ b/include/linux/fault-inject.h @@ -61,6 +61,8 @@ static inline struct dentry *fault_create_debugfs_attr(const char *name, #endif /* CONFIG_FAULT_INJECTION */ +struct kmem_cache; + #ifdef CONFIG_FAILSLAB extern bool should_failslab(struct kmem_cache *s, gfp_t gfpflags); #else diff --git a/include/linux/filter.h b/include/linux/filter.h index e4eb2546339a..fbf7b39e8103 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -54,6 +54,12 @@ struct bpf_prog_aux; #define BPF_REG_AX MAX_BPF_REG #define MAX_BPF_JIT_REG (MAX_BPF_REG + 1) +/* As per nm, we expose JITed images as text (code) section for + * kallsyms. That way, tools like perf can find it to match + * addresses. + */ +#define BPF_SYM_ELF_TYPE 't' + /* BPF program can access up to 512 bytes of stack space. */ #define MAX_BPF_STACK 512 @@ -403,6 +409,7 @@ struct bpf_prog { u16 pages; /* Number of allocated pages */ kmemcheck_bitfield_begin(meta); u16 jited:1, /* Is our filter JIT'ed? */ + locked:1, /* Program image locked? */ gpl_compatible:1, /* Is filter GPL compatible? */ cb_access:1, /* Is control block accessed? */ dst_needed:1, /* Do we need dst entry? 
*/ @@ -545,15 +552,32 @@ static inline bool bpf_prog_was_classic(const struct bpf_prog *prog) #define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0])) -#ifdef CONFIG_DEBUG_SET_MODULE_RONX +#ifdef CONFIG_ARCH_HAS_SET_MEMORY static inline void bpf_prog_lock_ro(struct bpf_prog *fp) { - set_memory_ro((unsigned long)fp, fp->pages); + fp->locked = 1; + WARN_ON_ONCE(set_memory_ro((unsigned long)fp, fp->pages)); } static inline void bpf_prog_unlock_ro(struct bpf_prog *fp) { - set_memory_rw((unsigned long)fp, fp->pages); + if (fp->locked) { + WARN_ON_ONCE(set_memory_rw((unsigned long)fp, fp->pages)); + /* In case set_memory_rw() fails, we want to be the first + * to crash here instead of some random place later on. + */ + fp->locked = 0; + } +} + +static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr) +{ + WARN_ON_ONCE(set_memory_ro((unsigned long)hdr, hdr->pages)); +} + +static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr) +{ + WARN_ON_ONCE(set_memory_rw((unsigned long)hdr, hdr->pages)); } #else static inline void bpf_prog_lock_ro(struct bpf_prog *fp) @@ -563,7 +587,24 @@ static inline void bpf_prog_lock_ro(struct bpf_prog *fp) static inline void bpf_prog_unlock_ro(struct bpf_prog *fp) { } -#endif /* CONFIG_DEBUG_SET_MODULE_RONX */ + +static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr) +{ +} + +static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr) +{ +} +#endif /* CONFIG_ARCH_HAS_SET_MEMORY */ + +static inline struct bpf_binary_header * +bpf_jit_binary_hdr(const struct bpf_prog *fp) +{ + unsigned long real_start = (unsigned long)fp->bpf_func; + unsigned long addr = real_start & PAGE_MASK; + + return (void *)addr; +} int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap); static inline int sk_filter(struct sock *sk, struct sk_buff *skb) @@ -607,6 +648,7 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp); u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog); +void bpf_jit_compile(struct bpf_prog *prog); bool bpf_helper_changes_pkt_data(void *func); struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, @@ -616,6 +658,7 @@ void bpf_warn_invalid_xdp_action(u32 act); #ifdef CONFIG_BPF_JIT extern int bpf_jit_enable; extern int bpf_jit_harden; +extern int bpf_jit_kallsyms; typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size); @@ -625,7 +668,6 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr, bpf_jit_fill_hole_t bpf_fill_ill_insns); void bpf_jit_binary_free(struct bpf_binary_header *hdr); -void bpf_jit_compile(struct bpf_prog *fp); void bpf_jit_free(struct bpf_prog *fp); struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp); @@ -651,6 +693,11 @@ static inline bool bpf_jit_is_ebpf(void) # endif } +static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp) +{ + return fp->jited && bpf_jit_is_ebpf(); +} + static inline bool bpf_jit_blinding_enabled(void) { /* These are the prerequisites, should someone ever have the @@ -668,15 +715,91 @@ static inline bool bpf_jit_blinding_enabled(void) return true; } -#else -static inline void bpf_jit_compile(struct bpf_prog *fp) + +static inline bool bpf_jit_kallsyms_enabled(void) +{ + /* There are a couple of corner cases where kallsyms should + * not be enabled f.e. on hardening. 
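/*
 * What bpf_jit_binary_hdr() above enables, as a hedged sketch of a JIT
 * free path: the header is recovered from fp->bpf_func alone because the
 * JITed image always starts within the first page of its allocation.
 * This mirrors the shape of the core teardown, not the verbatim code.
 */
#include <linux/filter.h>

static void foo_jit_free(struct bpf_prog *fp)
{
	if (fp->jited) {
		struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);

		bpf_jit_binary_unlock_ro(hdr);	/* pages writable again */
		bpf_jit_binary_free(hdr);
	}
	bpf_prog_unlock_free(fp);
}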
+ */ + if (bpf_jit_harden) + return false; + if (!bpf_jit_kallsyms) + return false; + if (bpf_jit_kallsyms == 1) + return true; + + return false; +} + +const char *__bpf_address_lookup(unsigned long addr, unsigned long *size, + unsigned long *off, char *sym); +bool is_bpf_text_address(unsigned long addr); +int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type, + char *sym); + +static inline const char * +bpf_address_lookup(unsigned long addr, unsigned long *size, + unsigned long *off, char **modname, char *sym) +{ + const char *ret = __bpf_address_lookup(addr, size, off, sym); + + if (ret && modname) + *modname = NULL; + return ret; +} + +void bpf_prog_kallsyms_add(struct bpf_prog *fp); +void bpf_prog_kallsyms_del(struct bpf_prog *fp); + +#else /* CONFIG_BPF_JIT */ + +static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp) { + return false; } static inline void bpf_jit_free(struct bpf_prog *fp) { bpf_prog_unlock_free(fp); } + +static inline bool bpf_jit_kallsyms_enabled(void) +{ + return false; +} + +static inline const char * +__bpf_address_lookup(unsigned long addr, unsigned long *size, + unsigned long *off, char *sym) +{ + return NULL; +} + +static inline bool is_bpf_text_address(unsigned long addr) +{ + return false; +} + +static inline int bpf_get_kallsym(unsigned int symnum, unsigned long *value, + char *type, char *sym) +{ + return -ERANGE; +} + +static inline const char * +bpf_address_lookup(unsigned long addr, unsigned long *size, + unsigned long *off, char **modname, char *sym) +{ + return NULL; +} + +static inline void bpf_prog_kallsyms_add(struct bpf_prog *fp) +{ +} + +static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp) +{ +} #endif /* CONFIG_BPF_JIT */ #define BPF_ANC BIT(15) diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h index 16551d5eac36..57beb5d09bfc 100644 --- a/include/linux/fpga/fpga-mgr.h +++ b/include/linux/fpga/fpga-mgr.h @@ -22,6 +22,7 @@ #define _LINUX_FPGA_MGR_H struct fpga_manager; +struct sg_table; /** * enum fpga_mgr_states - fpga framework states @@ -88,6 +89,7 @@ struct fpga_image_info { * @state: returns an enum value of the FPGA's state * @write_init: prepare the FPGA to receive confuration data * @write: write count bytes of configuration data to the FPGA + * @write_sg: write the scatter list of configuration data to the FPGA * @write_complete: set FPGA to operating state after writing is done * @fpga_remove: optional: Set FPGA into a specific state during driver remove * @@ -102,6 +104,7 @@ struct fpga_manager_ops { struct fpga_image_info *info, const char *buf, size_t count); int (*write)(struct fpga_manager *mgr, const char *buf, size_t count); + int (*write_sg)(struct fpga_manager *mgr, struct sg_table *sgt); int (*write_complete)(struct fpga_manager *mgr, struct fpga_image_info *info); void (*fpga_remove)(struct fpga_manager *mgr); @@ -129,6 +132,8 @@ struct fpga_manager { int fpga_mgr_buf_load(struct fpga_manager *mgr, struct fpga_image_info *info, const char *buf, size_t count); +int fpga_mgr_buf_load_sg(struct fpga_manager *mgr, struct fpga_image_info *info, + struct sg_table *sgt); int fpga_mgr_firmware_load(struct fpga_manager *mgr, struct fpga_image_info *info, diff --git a/include/linux/frame.h b/include/linux/frame.h index e6baaba3f1ae..d772c61c31da 100644 --- a/include/linux/frame.h +++ b/include/linux/frame.h @@ -11,7 +11,7 @@ * For more information, see tools/objtool/Documentation/stack-validation.txt. 
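/*
 * Sketch of a low-level FPGA driver opting into the new @write_sg path
 * above: the core can then hand over the whole image as a scatter list
 * instead of requiring a contiguous buffer. foo_mgr_* is hypothetical
 * and the other mandatory ops are elided for brevity.
 */
#include <linux/fpga/fpga-mgr.h>
#include <linux/scatterlist.h>

static int foo_mgr_write_sg(struct fpga_manager *mgr, struct sg_table *sgt)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		/* push sg_virt(sg), sg->length to the configuration port */
	}
	return 0;
}

static const struct fpga_manager_ops foo_mgr_ops = {
	.write_sg = foo_mgr_write_sg,
};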
*/ #define STACK_FRAME_NON_STANDARD(func) \ - static void __used __section(__func_stack_frame_non_standard) \ + static void __used __section(.discard.func_stack_frame_non_standard) \ *__func_stack_frame_non_standard_##func = func #else /* !CONFIG_STACK_VALIDATION */ diff --git a/include/linux/fs.h b/include/linux/fs.h index c930cbc19342..7251f7bb45e8 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -655,6 +655,11 @@ struct inode { void *i_private; /* fs or device private pointer */ }; +static inline unsigned int i_blocksize(const struct inode *node) +{ + return (1 << node->i_blkbits); +} + static inline int inode_unhashed(struct inode *inode) { return hlist_unhashed(&inode->i_hash); @@ -1562,6 +1567,9 @@ extern int vfs_unlink(struct inode *, struct dentry *, struct inode **); extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *, struct inode **, unsigned int); extern int vfs_whiteout(struct inode *, struct dentry *); +extern struct dentry *vfs_tmpfile(struct dentry *dentry, umode_t mode, + int open_flag); + /* * VFS file helper functions. */ @@ -1701,7 +1709,7 @@ struct inode_operations { int (*rename) (struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int); int (*setattr) (struct dentry *, struct iattr *); - int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *); + int (*getattr) (const struct path *, struct kstat *, u32, unsigned int); ssize_t (*listxattr) (struct dentry *, char *, size_t); int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len); @@ -1713,6 +1721,29 @@ struct inode_operations { int (*set_acl)(struct inode *, struct posix_acl *, int); } ____cacheline_aligned; +static inline ssize_t call_read_iter(struct file *file, struct kiocb *kio, + struct iov_iter *iter) +{ + return file->f_op->read_iter(kio, iter); +} + +static inline ssize_t call_write_iter(struct file *file, struct kiocb *kio, + struct iov_iter *iter) +{ + return file->f_op->write_iter(kio, iter); +} + +static inline int call_mmap(struct file *file, struct vm_area_struct *vma) +{ + return file->f_op->mmap(file, vma); +} + +static inline int call_fsync(struct file *file, loff_t start, loff_t end, + int datasync) +{ + return file->f_op->fsync(file, start, end, datasync); +} + ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector, unsigned long nr_segs, unsigned long fast_segs, struct iovec *fast_pointer, @@ -1739,19 +1770,6 @@ extern int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff, extern int vfs_dedupe_file_range(struct file *file, struct file_dedupe_range *same); -static inline int do_clone_file_range(struct file *file_in, loff_t pos_in, - struct file *file_out, loff_t pos_out, - u64 len) -{ - int ret; - - sb_start_write(file_inode(file_out)->i_sb); - ret = vfs_clone_file_range(file_in, pos_in, file_out, pos_out, len); - sb_end_write(file_inode(file_out)->i_sb); - - return ret; -} - struct super_operations { struct inode *(*alloc_inode)(struct super_block *sb); void (*destroy_inode)(struct inode *); @@ -2563,6 +2581,19 @@ static inline void file_end_write(struct file *file) __sb_end_write(file_inode(file)->i_sb, SB_FREEZE_WRITE); } +static inline int do_clone_file_range(struct file *file_in, loff_t pos_in, + struct file *file_out, loff_t pos_out, + u64 len) +{ + int ret; + + file_start_write(file_out); + ret = vfs_clone_file_range(file_in, pos_in, file_out, pos_out, len); + file_end_write(file_out); + + return ret; +} + /* * get_write_access() gets write permission 
for a file. * put_write_access() releases this write permission. @@ -2647,7 +2678,7 @@ static const char * const kernel_read_file_str[] = { static inline const char *kernel_read_file_id_str(enum kernel_read_file_id id) { - if (id < 0 || id >= READING_MAX_ID) + if ((unsigned)id >= READING_MAX_ID) return kernel_read_file_str[READING_UNKNOWN]; return kernel_read_file_str[id]; @@ -2871,8 +2902,8 @@ extern int page_symlink(struct inode *inode, const char *symname, int len); extern const struct inode_operations page_symlink_inode_operations; extern void kfree_link(void *); extern void generic_fillattr(struct inode *, struct kstat *); -int vfs_getattr_nosec(struct path *path, struct kstat *stat); -extern int vfs_getattr(struct path *, struct kstat *); +extern int vfs_getattr_nosec(const struct path *, struct kstat *, u32, unsigned int); +extern int vfs_getattr(const struct path *, struct kstat *, u32, unsigned int); void __inode_add_bytes(struct inode *inode, loff_t bytes); void inode_add_bytes(struct inode *inode, loff_t bytes); void __inode_sub_bytes(struct inode *inode, loff_t bytes); @@ -2885,10 +2916,29 @@ extern const struct inode_operations simple_symlink_inode_operations; extern int iterate_dir(struct file *, struct dir_context *); -extern int vfs_stat(const char __user *, struct kstat *); -extern int vfs_lstat(const char __user *, struct kstat *); -extern int vfs_fstat(unsigned int, struct kstat *); -extern int vfs_fstatat(int , const char __user *, struct kstat *, int); +extern int vfs_statx(int, const char __user *, int, struct kstat *, u32); +extern int vfs_statx_fd(unsigned int, struct kstat *, u32, unsigned int); + +static inline int vfs_stat(const char __user *filename, struct kstat *stat) +{ + return vfs_statx(AT_FDCWD, filename, 0, stat, STATX_BASIC_STATS); +} +static inline int vfs_lstat(const char __user *name, struct kstat *stat) +{ + return vfs_statx(AT_FDCWD, name, AT_SYMLINK_NOFOLLOW, + stat, STATX_BASIC_STATS); +} +static inline int vfs_fstatat(int dfd, const char __user *filename, + struct kstat *stat, int flags) +{ + return vfs_statx(dfd, filename, flags, stat, STATX_BASIC_STATS); +} +static inline int vfs_fstat(int fd, struct kstat *stat) +{ + return vfs_statx_fd(fd, stat, STATX_BASIC_STATS, 0); +} + + extern const char *vfs_get_link(struct dentry *, struct delayed_call *); extern int vfs_readlink(struct dentry *, char __user *, int); @@ -2918,7 +2968,7 @@ extern int dcache_dir_close(struct inode *, struct file *); extern loff_t dcache_dir_lseek(struct file *, loff_t, int); extern int dcache_readdir(struct file *, struct dir_context *); extern int simple_setattr(struct dentry *, struct iattr *); -extern int simple_getattr(struct vfsmount *, struct dentry *, struct kstat *); +extern int simple_getattr(const struct path *, struct kstat *, u32, unsigned int); extern int simple_statfs(struct dentry *, struct kstatfs *); extern int simple_open(struct inode *inode, struct file *file); extern int simple_link(struct dentry *, struct inode *, struct dentry *); diff --git a/include/linux/fsi.h b/include/linux/fsi.h new file mode 100644 index 000000000000..273cbf6400ea --- /dev/null +++ b/include/linux/fsi.h @@ -0,0 +1,50 @@ +/* FSI device & driver interfaces + * + * Copyright (C) IBM Corporation 2016 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef LINUX_FSI_H +#define LINUX_FSI_H + +#include <linux/device.h> + +struct fsi_device { + struct device dev; + u8 engine_type; + u8 version; +}; + +struct fsi_device_id { + u8 engine_type; + u8 version; +}; + +#define FSI_VERSION_ANY 0 + +#define FSI_DEVICE(t) \ + .engine_type = (t), .version = FSI_VERSION_ANY, + +#define FSI_DEVICE_VERSIONED(t, v) \ + .engine_type = (t), .version = (v), + + +struct fsi_driver { + struct device_driver drv; + const struct fsi_device_id *id_table; +}; + +#define to_fsi_dev(devp) container_of(devp, struct fsi_device, dev) +#define to_fsi_drv(drvp) container_of(drvp, struct fsi_driver, drv) + +extern struct bus_type fsi_bus_type; + +#endif /* LINUX_FSI_H */ diff --git a/include/linux/fsl-diu-fb.h b/include/linux/fsl-diu-fb.h index a1e8277120c7..c46eab5bc893 100644 --- a/include/linux/fsl-diu-fb.h +++ b/include/linux/fsl-diu-fb.h @@ -73,7 +73,7 @@ struct diu_ad { /* Word 0(32-bit) in DDR memory */ /* __u16 comp; */ /* __u16 pixel_s:2; */ -/* __u16 pallete:1; */ +/* __u16 palette:1; */ /* __u16 red_c:2; */ /* __u16 green_c:2; */ /* __u16 blue_c:2; */ @@ -142,7 +142,7 @@ struct diu_ad { struct diu { __be32 desc[3]; __be32 gamma; - __be32 pallete; + __be32 palette; __be32 cursor; __be32 curs_pos; __be32 diu_mode; diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 487246546ebe..e6e689b5569e 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -16,6 +16,7 @@ #include <linux/spinlock.h> #include <linux/types.h> #include <linux/atomic.h> +#include <linux/user_namespace.h> /* * IN_* from inotfy.h lines up EXACTLY with FS_*, this is so we can easily @@ -170,7 +171,7 @@ struct fsnotify_group { struct inotify_group_private_data { spinlock_t idr_lock; struct idr idr; - struct user_struct *user; + struct ucounts *ucounts; } inotify_data; #endif #ifdef CONFIG_FANOTIFY diff --git a/include/linux/genhd.h b/include/linux/genhd.h index a999d281a2f1..76f39754e7b0 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -167,13 +167,6 @@ struct blk_integrity { }; #endif /* CONFIG_BLK_DEV_INTEGRITY */ -struct disk_devt { - atomic_t count; - void (*release)(struct disk_devt *disk_devt); -}; - -void put_disk_devt(struct disk_devt *disk_devt); -void get_disk_devt(struct disk_devt *disk_devt); struct gendisk { /* major, first_minor and minors are input parameters only, @@ -183,7 +176,6 @@ struct gendisk { int first_minor; int minors; /* maximum number of minors, =1 for * disks that can't be partitioned. */ - struct disk_devt *disk_devt; char disk_name[DISK_NAME_LEN]; /* name of major driver */ char *(*devnode)(struct gendisk *gd, umode_t *mode); diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 0fe0b6295ab5..db373b9d3223 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -541,7 +541,7 @@ static inline bool pm_suspended_storage(void) #if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA) /* The below functions must be run on a range from a single zone. 
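/*
 * Sketch of a client driver for the new FSI bus interfaces above: match
 * on engine type through the id table and hang a device_driver off
 * fsi_bus_type. The engine type value and foo_* names are hypothetical;
 * registration would go through the generic driver core, e.g.
 * driver_register(&foo_fsi_driver.drv).
 */
#include <linux/fsi.h>

#define FSI_ENGID_FOO	0x05	/* hypothetical engine type */

static const struct fsi_device_id foo_ids[] = {
	{ FSI_DEVICE(FSI_ENGID_FOO) },
	{ 0 },
};

static int foo_fsi_probe(struct device *dev)
{
	struct fsi_device *fsi = to_fsi_dev(dev);

	dev_info(dev, "FSI engine type %u, version %u\n",
		 fsi->engine_type, fsi->version);
	return 0;
}

static struct fsi_driver foo_fsi_driver = {
	.id_table = foo_ids,
	.drv = {
		.name  = "foo-fsi",
		.bus   = &fsi_bus_type,
		.probe = foo_fsi_probe,
	},
};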
*/ extern int alloc_contig_range(unsigned long start, unsigned long end, - unsigned migratetype); + unsigned migratetype, gfp_t gfp_mask); extern void free_contig_range(unsigned long pfn, unsigned nr_pages); #endif diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h index fb0fde686cb1..933d93656605 100644 --- a/include/linux/gpio/consumer.h +++ b/include/linux/gpio/consumer.h @@ -135,10 +135,15 @@ int desc_to_gpio(const struct gpio_desc *desc); struct fwnode_handle; struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode, - const char *propname); -struct gpio_desc *devm_get_gpiod_from_child(struct device *dev, - const char *con_id, - struct fwnode_handle *child); + const char *propname, int index, + enum gpiod_flags dflags, + const char *label); +struct gpio_desc *devm_fwnode_get_index_gpiod_from_child(struct device *dev, + const char *con_id, int index, + struct fwnode_handle *child, + enum gpiod_flags flags, + const char *label); + #else /* CONFIG_GPIOLIB */ static inline int gpiod_count(struct device *dev, const char *con_id) @@ -411,20 +416,38 @@ static inline int desc_to_gpio(const struct gpio_desc *desc) /* Child properties interface */ struct fwnode_handle; -static inline struct gpio_desc *fwnode_get_named_gpiod( - struct fwnode_handle *fwnode, const char *propname) +static inline +struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode, + const char *propname, int index, + enum gpiod_flags dflags, + const char *label) { return ERR_PTR(-ENOSYS); } -static inline struct gpio_desc *devm_get_gpiod_from_child( - struct device *dev, const char *con_id, struct fwnode_handle *child) +static inline +struct gpio_desc *devm_fwnode_get_index_gpiod_from_child(struct device *dev, + const char *con_id, int index, + struct fwnode_handle *child, + enum gpiod_flags flags, + const char *label) { return ERR_PTR(-ENOSYS); } #endif /* CONFIG_GPIOLIB */ +static inline +struct gpio_desc *devm_fwnode_get_gpiod_from_child(struct device *dev, + const char *con_id, + struct fwnode_handle *child, + enum gpiod_flags flags, + const char *label) +{ + return devm_fwnode_get_index_gpiod_from_child(dev, con_id, 0, child, + flags, label); +} + #if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_GPIO_SYSFS) int gpiod_export(struct gpio_desc *desc, bool direction_may_change); diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h index dd85f3503410..7ef111d3ecc5 100644 --- a/include/linux/hid-sensor-hub.h +++ b/include/linux/hid-sensor-hub.h @@ -232,6 +232,7 @@ struct hid_sensor_common { atomic_t data_ready; atomic_t user_requested_state; struct iio_trigger *trigger; + int timestamp_ns_scale; struct hid_sensor_hub_attribute_info poll; struct hid_sensor_hub_attribute_info report_state; struct hid_sensor_hub_attribute_info power_state; @@ -271,4 +272,7 @@ int hid_sensor_format_scale(u32 usage_id, s32 hid_sensor_read_poll_value(struct hid_sensor_common *st); +int64_t hid_sensor_convert_timestamp(struct hid_sensor_common *st, + int64_t raw_value); + #endif diff --git a/include/linux/hid-sensor-ids.h b/include/linux/hid-sensor-ids.h index f2ee90aed0c2..30c7dc45e45f 100644 --- a/include/linux/hid-sensor-ids.h +++ b/include/linux/hid-sensor-ids.h @@ -52,6 +52,9 @@ #define HID_USAGE_SENSOR_ANGL_VELOCITY_Y_AXIS 0x200458 #define HID_USAGE_SENSOR_ANGL_VELOCITY_Z_AXIS 0x200459 +/* Gravity vector */ +#define HID_USAGE_SENSOR_GRAVITY_VECTOR 0x20007B + /* ORIENTATION: Compass 3D: (200083) */ #define HID_USAGE_SENSOR_COMPASS_3D 0x200083 #define 
HID_USAGE_SENSOR_DATA_ORIENTATION 0x200470 @@ -95,6 +98,7 @@ #define HID_USAGE_SENSOR_TIME_HOUR 0x200525 #define HID_USAGE_SENSOR_TIME_MINUTE 0x200526 #define HID_USAGE_SENSOR_TIME_SECOND 0x200527 +#define HID_USAGE_SENSOR_TIME_TIMESTAMP 0x200529 /* Units */ #define HID_USAGE_SENSOR_UNITS_NOT_SPECIFIED 0x00 diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index e52b427223ba..249e579ecd4c 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -19,7 +19,6 @@ #include <linux/ktime.h> #include <linux/init.h> #include <linux/list.h> -#include <linux/wait.h> #include <linux/percpu.h> #include <linux/timer.h> #include <linux/timerqueue.h> diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 97e478d6b690..a3762d49ba39 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -6,6 +6,18 @@ extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, struct vm_area_struct *vma); extern void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd); +extern int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm, + pud_t *dst_pud, pud_t *src_pud, unsigned long addr, + struct vm_area_struct *vma); + +#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD +extern void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud); +#else +static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud) +{ +} +#endif + extern int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd); extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, unsigned long addr, @@ -17,6 +29,9 @@ extern bool madvise_free_huge_pmd(struct mmu_gather *tlb, extern int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr); +extern int zap_huge_pud(struct mmu_gather *tlb, + struct vm_area_struct *vma, + pud_t *pud, unsigned long addr); extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, unsigned char *vec); @@ -26,13 +41,16 @@ extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, pgprot_t newprot, int prot_numa); -int vmf_insert_pfn_pmd(struct vm_area_struct *, unsigned long addr, pmd_t *, - pfn_t pfn, bool write); +int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, + pmd_t *pmd, pfn_t pfn, bool write); +int vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr, + pud_t *pud, pfn_t pfn, bool write); enum transparent_hugepage_flag { TRANSPARENT_HUGEPAGE_FLAG, TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, + TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG, TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG, @@ -57,13 +75,14 @@ extern struct kobj_attribute shmem_enabled_attr; #define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER) #ifdef CONFIG_TRANSPARENT_HUGEPAGE -struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, - pmd_t *pmd, int flags); - #define HPAGE_PMD_SHIFT PMD_SHIFT #define HPAGE_PMD_SIZE ((1UL) << HPAGE_PMD_SHIFT) #define HPAGE_PMD_MASK (~(HPAGE_PMD_SIZE - 1)) +#define HPAGE_PUD_SHIFT PUD_SHIFT +#define HPAGE_PUD_SIZE ((1UL) << HPAGE_PUD_SHIFT) +#define HPAGE_PUD_MASK (~(HPAGE_PUD_SIZE - 1)) + extern bool is_vma_temporary_stack(struct vm_area_struct *vma); #define 
transparent_hugepage_enabled(__vma) \ @@ -117,6 +136,17 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address, bool freeze, struct page *page); +void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud, + unsigned long address); + +#define split_huge_pud(__vma, __pud, __address) \ + do { \ + pud_t *____pud = (__pud); \ + if (pud_trans_huge(*____pud) \ + || pud_devmap(*____pud)) \ + __split_huge_pud(__vma, __pud, __address); \ + } while (0) + extern int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags, int advice); extern void vma_adjust_trans_huge(struct vm_area_struct *vma, @@ -125,6 +155,8 @@ extern void vma_adjust_trans_huge(struct vm_area_struct *vma, long adjust_next); extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma); +extern spinlock_t *__pud_trans_huge_lock(pud_t *pud, + struct vm_area_struct *vma); /* mmap_sem must be held on entry */ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) @@ -135,6 +167,15 @@ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd, else return NULL; } +static inline spinlock_t *pud_trans_huge_lock(pud_t *pud, + struct vm_area_struct *vma) +{ + VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma); + if (pud_trans_huge(*pud) || pud_devmap(*pud)) + return __pud_trans_huge_lock(pud, vma); + else + return NULL; +} static inline int hpage_nr_pages(struct page *page) { if (unlikely(PageTransHuge(page))) @@ -142,6 +183,11 @@ static inline int hpage_nr_pages(struct page *page) return 1; } +struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, + pmd_t *pmd, int flags); +struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr, + pud_t *pud, int flags); + extern int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd); extern struct page *huge_zero_page; @@ -156,6 +202,11 @@ static inline bool is_huge_zero_pmd(pmd_t pmd) return is_huge_zero_page(pmd_page(pmd)); } +static inline bool is_huge_zero_pud(pud_t pud) +{ + return false; +} + struct page *mm_get_huge_zero_page(struct mm_struct *mm); void mm_put_huge_zero_page(struct mm_struct *mm); @@ -166,6 +217,10 @@ void mm_put_huge_zero_page(struct mm_struct *mm); #define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; }) #define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; }) +#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; }) +#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; }) +#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; }) + #define hpage_nr_pages(x) 1 #define transparent_hugepage_enabled(__vma) 0 @@ -194,6 +249,9 @@ static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, static inline void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address, bool freeze, struct page *page) {} +#define split_huge_pud(__vma, __pmd, __address) \ + do { } while (0) + static inline int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags, int advice) { @@ -211,6 +269,11 @@ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd, { return NULL; } +static inline spinlock_t *pud_trans_huge_lock(pud_t *pud, + struct vm_area_struct *vma) +{ + return NULL; +} static inline int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd) { @@ -222,6 +285,11 @@ static inline bool is_huge_zero_page(struct page *page) return false; } +static inline bool is_huge_zero_pud(pud_t pud) +{ + return false; +} + static inline void mm_put_huge_zero_page(struct mm_struct *mm) { return; @@ -232,6 
+300,12 @@ static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma, { return NULL; } + +static inline struct page *follow_devmap_pud(struct vm_area_struct *vma, + unsigned long addr, pud_t *pud, int flags) +{ + return NULL; +} #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif /* _LINUX_HUGE_MM_H */ diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 48c76d612d40..b857fc8cc2ec 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -65,7 +65,8 @@ int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *); long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, struct page **, struct vm_area_struct **, - unsigned long *, unsigned long *, long, unsigned int); + unsigned long *, unsigned long *, long, unsigned int, + int *); void unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long, struct page *); void __unmap_hugepage_range_final(struct mmu_gather *tlb, @@ -81,6 +82,11 @@ void hugetlb_show_meminfo(void); unsigned long hugetlb_total_pages(void); int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, unsigned int flags); +int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte, + struct vm_area_struct *dst_vma, + unsigned long dst_addr, + unsigned long src_addr, + struct page **pagep); int hugetlb_reserve_pages(struct inode *inode, long from, long to, struct vm_area_struct *vma, vm_flags_t vm_flags); @@ -116,7 +122,7 @@ struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address, pud_t *pud, int flags); int pmd_huge(pmd_t pmd); -int pud_huge(pud_t pmd); +int pud_huge(pud_t pud); unsigned long hugetlb_change_protection(struct vm_area_struct *vma, unsigned long address, unsigned long end, pgprot_t newprot); @@ -131,7 +137,7 @@ static inline unsigned long hugetlb_total_pages(void) return 0; } -#define follow_hugetlb_page(m,v,p,vs,a,b,i,w) ({ BUG(); 0; }) +#define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n) ({ BUG(); 0; }) #define follow_huge_addr(mm, addr, write) ERR_PTR(-EINVAL) #define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; }) static inline void hugetlb_report_meminfo(struct seq_file *m) @@ -149,6 +155,8 @@ static inline void hugetlb_show_meminfo(void) #define is_hugepage_only_range(mm, addr, len) 0 #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; }) #define hugetlb_fault(mm, vma, addr, flags) ({ BUG(); 0; }) +#define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \ + src_addr, pagep) ({ BUG(); 0; }) #define huge_pte_offset(mm, address) 0 static inline int dequeue_hwpoisoned_huge_page(struct page *page) { @@ -189,6 +197,9 @@ static inline void __unmap_hugepage_range(struct mmu_gather *tlb, #ifndef pgd_huge #define pgd_huge(x) 0 #endif +#ifndef p4d_huge +#define p4d_huge(x) 0 +#endif #ifndef pgd_write static inline int pgd_write(pgd_t pgd) diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 183efde54269..62bbf3c1aa4a 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -32,11 +32,10 @@ #include <linux/scatterlist.h> #include <linux/list.h> #include <linux/timer.h> -#include <linux/workqueue.h> #include <linux/completion.h> #include <linux/device.h> #include <linux/mod_devicetable.h> - +#include <linux/interrupt.h> #define MAX_PAGE_BUFFER_COUNT 32 #define MAX_MULTIPAGE_BUFFER_COUNT 32 /* 128K */ @@ -139,8 
+138,8 @@ struct hv_ring_buffer_info { * for the specified ring buffer */ static inline void -hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi, - u32 *read, u32 *write) +hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi, + u32 *read, u32 *write) { u32 read_loc, write_loc, dsize; @@ -154,7 +153,7 @@ hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi, *read = dsize - *write; } -static inline u32 hv_get_bytes_to_read(struct hv_ring_buffer_info *rbi) +static inline u32 hv_get_bytes_to_read(const struct hv_ring_buffer_info *rbi) { u32 read_loc, write_loc, dsize, read; @@ -168,7 +167,7 @@ static inline u32 hv_get_bytes_to_read(struct hv_ring_buffer_info *rbi) return read; } -static inline u32 hv_get_bytes_to_write(struct hv_ring_buffer_info *rbi) +static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi) { u32 read_loc, write_loc, dsize, write; @@ -641,6 +640,7 @@ struct vmbus_channel_msginfo { /* Synchronize the request/response if needed */ struct completion waitevent; + struct vmbus_channel *waiting_channel; union { struct vmbus_channel_version_supported version_supported; struct vmbus_channel_open_result open_result; @@ -683,11 +683,6 @@ struct hv_input_signal_event_buffer { struct hv_input_signal_event event; }; -enum hv_signal_policy { - HV_SIGNAL_POLICY_DEFAULT = 0, - HV_SIGNAL_POLICY_EXPLICIT, -}; - enum hv_numa_policy { HV_BALANCED = 0, HV_LOCALIZED, @@ -747,26 +742,27 @@ struct vmbus_channel { struct vmbus_close_msg close_msg; - /* Channel callback are invoked in this workqueue context */ - /* HANDLE dataWorkQueue; */ - + /* Channel callbacks are invoked in softirq context */ + struct tasklet_struct callback_event; void (*onchannel_callback)(void *context); void *channel_callback_context; /* - * A channel can be marked for efficient (batched) - * reading: - * If batched_reading is set to "true", we read until the - * channel is empty and hold off interrupts from the host - * during the entire read process. - * If batched_reading is set to "false", the client is not - * going to perform batched reading. - * - * By default we will enable batched reading; specific - * drivers that don't want this behavior can turn it off. + * A channel can be marked for one of three modes of reading: + * BATCHED - callback called from tasklet and should read + * channel until empty. Interrupts from the host + * are masked while read is in process (default). + * DIRECT - callback called from tasklet (softirq). + * ISR - callback called in interrupt context and must + * invoke its own deferred processing. + * Host interrupts are disabled and must be re-enabled + * when ring is empty. */ - - bool batched_reading; + enum hv_callback_mode { + HV_CALL_BATCHED, + HV_CALL_DIRECT, + HV_CALL_ISR + } callback_mode; bool is_dedicated_interrupt; struct hv_input_signal_event_buffer sig_buf; @@ -850,23 +846,6 @@ struct vmbus_channel { */ struct list_head percpu_list; /* - * Host signaling policy: The default policy will be - * based on the ring buffer state. We will also support - * a policy where the client driver can have explicit - * signaling control. - */ - enum hv_signal_policy signal_policy; - /* - * On the channel send side, many of the VMBUS - * device drivers explicity serialize access to the - * outgoing ring buffer. Give more control to the - * VMBUS device drivers in terms how to serialize - * accesss to the outgoing ring buffer. - * The default behavior will be to aquire the - * ring lock to preserve the current behavior.
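Taken together, the enum above collapses the old batched_reading flag and the signal/lock policies into one per-channel field, selected through the set_channel_read_mode() helper shown a little further below. A minimal sketch of a latency-sensitive driver opting into direct callbacks at probe time; the my_* names are illustrative, not part of this patch:

/*
 * Hypothetical probe: run the channel callback straight from the
 * tasklet instead of the default batched read loop.
 */
static int my_vmbus_probe(struct hv_device *dev,
                          const struct hv_vmbus_device_id *dev_id)
{
        /* pick the mode before vmbus_open() wires up the callback */
        set_channel_read_mode(dev->channel, HV_CALL_DIRECT);

        return vmbus_open(dev->channel, 4 * PAGE_SIZE, 4 * PAGE_SIZE,
                          NULL, 0, my_onchannel_callback, dev);
}

In the DIRECT and ISR modes the assumed my_onchannel_callback() is responsible for draining the ring itself.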
- */ - bool acquire_ring_lock; - /* * For performance critical channels (storage, networking * etc,), Hyper-V has a mechanism to enhance the throughput * at the expense of latency: @@ -906,32 +885,22 @@ struct vmbus_channel { }; -static inline void set_channel_lock_state(struct vmbus_channel *c, bool state) -{ - c->acquire_ring_lock = state; -} - static inline bool is_hvsock_channel(const struct vmbus_channel *c) { return !!(c->offermsg.offer.chn_flags & VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER); } -static inline void set_channel_signal_state(struct vmbus_channel *c, - enum hv_signal_policy policy) -{ - c->signal_policy = policy; -} - static inline void set_channel_affinity_state(struct vmbus_channel *c, enum hv_numa_policy policy) { c->affinity_policy = policy; } -static inline void set_channel_read_state(struct vmbus_channel *c, bool state) +static inline void set_channel_read_mode(struct vmbus_channel *c, + enum hv_callback_mode mode) { - c->batched_reading = state; + c->callback_mode = mode; } static inline void set_per_channel_state(struct vmbus_channel *c, void *s) @@ -1054,8 +1023,7 @@ extern int vmbus_sendpacket_ctl(struct vmbus_channel *channel, u32 bufferLen, u64 requestid, enum vmbus_packet_type type, - u32 flags, - bool kick_q); + u32 flags); extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel, struct hv_page_buffer pagebuffers[], @@ -1070,8 +1038,7 @@ extern int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel, void *buffer, u32 bufferlen, u64 requestid, - u32 flags, - bool kick_q); + u32 flags); extern int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel, struct hv_multipage_buffer *mpb, @@ -1458,9 +1425,10 @@ struct hyperv_service_callback { }; #define MAX_SRV_VER 0x7ffffff -extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *, - struct icmsg_negotiate *, u8 *, int, - int); +extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf, + const int *fw_version, int fw_vercnt, + const int *srv_version, int srv_vercnt, + int *nego_fw_version, int *nego_srv_version); void hv_event_tasklet_disable(struct vmbus_channel *channel); void hv_event_tasklet_enable(struct vmbus_channel *channel); @@ -1480,9 +1448,9 @@ void vmbus_set_event(struct vmbus_channel *channel); /* Get the start of the ring buffer. */ static inline void * -hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info) +hv_get_ring_buffer(const struct hv_ring_buffer_info *ring_info) { - return (void *)ring_info->ring_buffer->buffer; + return ring_info->ring_buffer->buffer; } /* @@ -1545,6 +1513,36 @@ init_cached_read_index(struct vmbus_channel *channel) } /* + * Mask off host interrupt callback notifications + */ +static inline void hv_begin_read(struct hv_ring_buffer_info *rbi) +{ + rbi->ring_buffer->interrupt_mask = 1; + + /* make sure mask update is not reordered */ + virt_mb(); +} + +/* + * Re-enable host callback and return number of outstanding bytes + */ +static inline u32 hv_end_read(struct hv_ring_buffer_info *rbi) +{ + + rbi->ring_buffer->interrupt_mask = 0; + + /* make sure mask update is not reordered */ + virt_mb(); + + /* + * Now check to see if the ring buffer is still empty. + * If it is not, we raced and we need to process new + * incoming messages. + */ + return hv_get_bytes_to_read(rbi); +} + +/* * An API to support in-place processing of incoming VMBUS packets. 
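Read as a pair, hv_begin_read()/hv_end_read() above give a driver a race-free drain loop: mask, consume, unmask, then re-check for bytes that arrived before the unmask became visible. A sketch, assuming the receive ring is chan->inbound and my_process_packet() is a driver-supplied consumer:

static void my_drain_channel(struct vmbus_channel *chan)
{
        struct hv_ring_buffer_info *rbi = &chan->inbound;

        hv_begin_read(rbi);     /* host notifications masked while draining */
        for (;;) {
                while (hv_get_bytes_to_read(rbi))
                        my_process_packet(chan);        /* illustrative */

                /* unmask; a non-zero return means we raced, drain again */
                if (!hv_end_read(rbi))
                        break;
                hv_begin_read(rbi);
        }
}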
*/ #define VMBUS_PKT_TRAILER 8 diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 7b23a3316dcb..6b183521c616 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -30,6 +30,7 @@ #include <linux/device.h> /* for struct device */ #include <linux/sched.h> /* for completion */ #include <linux/mutex.h> +#include <linux/rtmutex.h> #include <linux/irqdomain.h> /* for Host Notify IRQ */ #include <linux/of.h> /* for struct device_node */ #include <linux/swab.h> /* for swab16 */ @@ -283,6 +284,7 @@ enum i2c_slave_event { extern int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb); extern int i2c_slave_unregister(struct i2c_client *client); +extern bool i2c_detect_slave_mode(struct device *dev); static inline int i2c_slave_event(struct i2c_client *client, enum i2c_slave_event event, u8 *val) diff --git a/include/linux/i2c/mpr121_touchkey.h b/include/linux/i2c/mpr121_touchkey.h deleted file mode 100644 index f0bcc38bbb97..000000000000 --- a/include/linux/i2c/mpr121_touchkey.h +++ /dev/null @@ -1,20 +0,0 @@ -/* Header file for Freescale MPR121 Capacitive Touch Sensor */ - -#ifndef _MPR121_TOUCHKEY_H -#define _MPR121_TOUCHKEY_H - -/** - * struct mpr121_platform_data - platform data for mpr121 sensor - * @keymap: pointer to array of KEY_* values representing keymap - * @keymap_size: size of the keymap - * @wakeup: configure the button as a wake-up source - * @vdd_uv: VDD voltage in uV - */ -struct mpr121_platform_data { - const unsigned short *keymap; - unsigned int keymap_size; - bool wakeup; - int vdd_uv; -}; - -#endif /* _MPR121_TOUCHKEY_H */ diff --git a/include/linux/idr.h b/include/linux/idr.h index 3c01b89aed67..bf70b3ef0a07 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h @@ -12,47 +12,29 @@ #ifndef __IDR_H__ #define __IDR_H__ -#include <linux/types.h> -#include <linux/bitops.h> -#include <linux/init.h> -#include <linux/rcupdate.h> +#include <linux/radix-tree.h> +#include <linux/gfp.h> +#include <linux/percpu.h> + +struct idr { + struct radix_tree_root idr_rt; + unsigned int idr_next; +}; /* - * Using 6 bits at each layer allows us to allocate 7 layers out of each page. - * 8 bits only gave us 3 layers out of every pair of pages, which is less - * efficient except for trees with a largest element between 192-255 inclusive. + * The IDR API does not expose the tagging functionality of the radix tree + * to users. Use tag 0 to track whether a node has free space below it. 
*/ -#define IDR_BITS 6 -#define IDR_SIZE (1 << IDR_BITS) -#define IDR_MASK ((1 << IDR_BITS)-1) - -struct idr_layer { - int prefix; /* the ID prefix of this idr_layer */ - int layer; /* distance from leaf */ - struct idr_layer __rcu *ary[1<<IDR_BITS]; - int count; /* When zero, we can release it */ - union { - /* A zero bit means "space here" */ - DECLARE_BITMAP(bitmap, IDR_SIZE); - struct rcu_head rcu_head; - }; -}; +#define IDR_FREE 0 -struct idr { - struct idr_layer __rcu *hint; /* the last layer allocated from */ - struct idr_layer __rcu *top; - int layers; /* only valid w/o concurrent changes */ - int cur; /* current pos for cyclic allocation */ - spinlock_t lock; - int id_free_cnt; - struct idr_layer *id_free; -}; +/* Set the IDR flag and the IDR_FREE tag */ +#define IDR_RT_MARKER ((__force gfp_t)(3 << __GFP_BITS_SHIFT)) -#define IDR_INIT(name) \ +#define IDR_INIT \ { \ - .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ + .idr_rt = RADIX_TREE_INIT(IDR_RT_MARKER) \ } -#define DEFINE_IDR(name) struct idr name = IDR_INIT(name) +#define DEFINE_IDR(name) struct idr name = IDR_INIT /** * idr_get_cursor - Return the current position of the cyclic allocator @@ -62,9 +44,9 @@ struct idr { * idr_alloc_cyclic() if it is free (otherwise the search will start from * this position). */ -static inline unsigned int idr_get_cursor(struct idr *idr) +static inline unsigned int idr_get_cursor(const struct idr *idr) { - return READ_ONCE(idr->cur); + return READ_ONCE(idr->idr_next); } /** @@ -77,7 +59,7 @@ static inline unsigned int idr_get_cursor(struct idr *idr) */ static inline void idr_set_cursor(struct idr *idr, unsigned int val) { - WRITE_ONCE(idr->cur, val); + WRITE_ONCE(idr->idr_next, val); } /** @@ -97,22 +79,31 @@ static inline void idr_set_cursor(struct idr *idr, unsigned int val) * period). */ -/* - * This is what we export. - */ - -void *idr_find_slowpath(struct idr *idp, int id); void idr_preload(gfp_t gfp_mask); -int idr_alloc(struct idr *idp, void *ptr, int start, int end, gfp_t gfp_mask); -int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask); -int idr_for_each(struct idr *idp, +int idr_alloc(struct idr *, void *entry, int start, int end, gfp_t); +int idr_alloc_cyclic(struct idr *, void *entry, int start, int end, gfp_t); +int idr_for_each(const struct idr *, int (*fn)(int id, void *p, void *data), void *data); -void *idr_get_next(struct idr *idp, int *nextid); -void *idr_replace(struct idr *idp, void *ptr, int id); -void idr_remove(struct idr *idp, int id); -void idr_destroy(struct idr *idp); -void idr_init(struct idr *idp); -bool idr_is_empty(struct idr *idp); +void *idr_get_next(struct idr *, int *nextid); +void *idr_replace(struct idr *, void *, int id); +void idr_destroy(struct idr *); + +static inline void *idr_remove(struct idr *idr, int id) +{ + return radix_tree_delete_item(&idr->idr_rt, id, NULL); +} + +static inline void idr_init(struct idr *idr) +{ + INIT_RADIX_TREE(&idr->idr_rt, IDR_RT_MARKER); + idr->idr_next = 0; +} + +static inline bool idr_is_empty(const struct idr *idr) +{ + return radix_tree_empty(&idr->idr_rt) && + radix_tree_tagged(&idr->idr_rt, IDR_FREE); +} /** * idr_preload_end - end preload section started with idr_preload() @@ -137,19 +128,14 @@ static inline void idr_preload_end(void) * This function can be called under rcu_read_lock(), given that the leaf * pointers lifetimes are correctly managed. 
*/ -static inline void *idr_find(struct idr *idr, int id) +static inline void *idr_find(const struct idr *idr, int id) { - struct idr_layer *hint = rcu_dereference_raw(idr->hint); - - if (hint && (id & ~IDR_MASK) == hint->prefix) - return rcu_dereference_raw(hint->ary[id & IDR_MASK]); - - return idr_find_slowpath(idr, id); + return radix_tree_lookup(&idr->idr_rt, id); } /** * idr_for_each_entry - iterate over an idr's elements of a given type - * @idp: idr handle + * @idr: idr handle * @entry: the type * to use as cursor * @id: id entry's key * * after normal termination @entry is left with the value NULL. This * is convenient for a "not found" value. */ -#define idr_for_each_entry(idp, entry, id) \ - for (id = 0; ((entry) = idr_get_next(idp, &(id))) != NULL; ++id) +#define idr_for_each_entry(idr, entry, id) \ + for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; ++id) /** - * idr_for_each_entry - continue iteration over an idr's elements of a given type - * @idp: idr handle + * idr_for_each_entry_continue - continue iteration over an idr's elements of a given type + * @idr: idr handle * @entry: the type * to use as cursor * @id: id entry's key * * Continue to iterate over list of given type, continuing after * the current position. */ -#define idr_for_each_entry_continue(idp, entry, id) \ - for ((entry) = idr_get_next((idp), &(id)); \ +#define idr_for_each_entry_continue(idr, entry, id) \ + for ((entry) = idr_get_next((idr), &(id)); \ entry; \ - ++id, (entry) = idr_get_next((idp), &(id))) + ++id, (entry) = idr_get_next((idr), &(id))) /* * IDA - IDR based id allocator, use when translation from id to * pointer isn't necessary. - * - * IDA_BITMAP_LONGS is calculated to be one less to accommodate - * ida_bitmap->nr_busy so that the whole struct fits in 128 bytes.
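With the radix tree underneath, the usual allocate/lookup/free cycle reduces to the calls declared above; idr_preload() keeps idr_alloc() usable under a spinlock. A short usage sketch of the reworked API, where my_idr, my_lock and the helpers are illustrative:

static DEFINE_SPINLOCK(my_lock);
static DEFINE_IDR(my_idr);

/* Allocate an ID >= 1 for ptr; returns the new ID or a negative errno. */
static int my_store(void *ptr)
{
        int id;

        idr_preload(GFP_KERNEL);                /* preallocate tree nodes */
        spin_lock(&my_lock);
        id = idr_alloc(&my_idr, ptr, 1, 0, GFP_NOWAIT);
        spin_unlock(&my_lock);
        idr_preload_end();

        return id;
}

static void *my_lookup(int id)
{
        return idr_find(&my_idr, id);   /* now a plain radix_tree_lookup() */
}

static void my_release(int id)
{
        spin_lock(&my_lock);
        idr_remove(&my_idr, id);
        spin_unlock(&my_lock);
}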
*/ #define IDA_CHUNK_SIZE 128 /* 128 bytes per chunk */ -#define IDA_BITMAP_LONGS (IDA_CHUNK_SIZE / sizeof(long) - 1) +#define IDA_BITMAP_LONGS (IDA_CHUNK_SIZE / sizeof(long)) #define IDA_BITMAP_BITS (IDA_BITMAP_LONGS * sizeof(long) * 8) struct ida_bitmap { - long nr_busy; unsigned long bitmap[IDA_BITMAP_LONGS]; }; +DECLARE_PER_CPU(struct ida_bitmap *, ida_bitmap); + struct ida { - struct idr idr; - struct ida_bitmap *free_bitmap; + struct radix_tree_root ida_rt; }; -#define IDA_INIT(name) { .idr = IDR_INIT((name).idr), .free_bitmap = NULL, } -#define DEFINE_IDA(name) struct ida name = IDA_INIT(name) +#define IDA_INIT { \ + .ida_rt = RADIX_TREE_INIT(IDR_RT_MARKER | GFP_NOWAIT), \ +} +#define DEFINE_IDA(name) struct ida name = IDA_INIT int ida_pre_get(struct ida *ida, gfp_t gfp_mask); int ida_get_new_above(struct ida *ida, int starting_id, int *p_id); void ida_remove(struct ida *ida, int id); void ida_destroy(struct ida *ida); -void ida_init(struct ida *ida); int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end, gfp_t gfp_mask); void ida_simple_remove(struct ida *ida, unsigned int id); +static inline void ida_init(struct ida *ida) +{ + INIT_RADIX_TREE(&ida->ida_rt, IDR_RT_MARKER | GFP_NOWAIT); +} + /** * ida_get_new - allocate new ID * @ida: idr handle @@ -220,11 +209,8 @@ static inline int ida_get_new(struct ida *ida, int *p_id) return ida_get_new_above(ida, 0, p_id); } -static inline bool ida_is_empty(struct ida *ida) +static inline bool ida_is_empty(const struct ida *ida) { - return idr_is_empty(&ida->idr); + return radix_tree_empty(&ida->ida_rt); } - -void __init idr_init_cache(void); - #endif /* __IDR_H__ */ diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index fe849329511a..0dd9498c694f 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -185,6 +185,8 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2) /* number of user priorities 802.11 uses */ #define IEEE80211_NUM_UPS 8 +/* number of ACs */ +#define IEEE80211_NUM_ACS 4 #define IEEE80211_QOS_CTL_LEN 2 /* 1d tag mask */ @@ -1041,8 +1043,9 @@ struct ieee80211_mgmt { } u; } __packed __aligned(2); -/* Supported Rates value encodings in 802.11n-2009 7.3.2.2 */ +/* Supported rates membership selectors */ #define BSS_MEMBERSHIP_SELECTOR_HT_PHY 127 +#define BSS_MEMBERSHIP_SELECTOR_VHT_PHY 126 /* mgmt header + 1 byte category code */ #define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u) @@ -2322,31 +2325,33 @@ enum ieee80211_sa_query_action { }; +#define SUITE(oui, id) (((oui) << 8) | (id)) + /* cipher suite selectors */ -#define WLAN_CIPHER_SUITE_USE_GROUP 0x000FAC00 -#define WLAN_CIPHER_SUITE_WEP40 0x000FAC01 -#define WLAN_CIPHER_SUITE_TKIP 0x000FAC02 -/* reserved: 0x000FAC03 */ -#define WLAN_CIPHER_SUITE_CCMP 0x000FAC04 -#define WLAN_CIPHER_SUITE_WEP104 0x000FAC05 -#define WLAN_CIPHER_SUITE_AES_CMAC 0x000FAC06 -#define WLAN_CIPHER_SUITE_GCMP 0x000FAC08 -#define WLAN_CIPHER_SUITE_GCMP_256 0x000FAC09 -#define WLAN_CIPHER_SUITE_CCMP_256 0x000FAC0A -#define WLAN_CIPHER_SUITE_BIP_GMAC_128 0x000FAC0B -#define WLAN_CIPHER_SUITE_BIP_GMAC_256 0x000FAC0C -#define WLAN_CIPHER_SUITE_BIP_CMAC_256 0x000FAC0D - -#define WLAN_CIPHER_SUITE_SMS4 0x00147201 +#define WLAN_CIPHER_SUITE_USE_GROUP SUITE(0x000FAC, 0) +#define WLAN_CIPHER_SUITE_WEP40 SUITE(0x000FAC, 1) +#define WLAN_CIPHER_SUITE_TKIP SUITE(0x000FAC, 2) +/* reserved: SUITE(0x000FAC, 3) */ +#define WLAN_CIPHER_SUITE_CCMP SUITE(0x000FAC, 4) +#define WLAN_CIPHER_SUITE_WEP104 SUITE(0x000FAC, 5) +#define 
WLAN_CIPHER_SUITE_AES_CMAC SUITE(0x000FAC, 6) +#define WLAN_CIPHER_SUITE_GCMP SUITE(0x000FAC, 8) +#define WLAN_CIPHER_SUITE_GCMP_256 SUITE(0x000FAC, 9) +#define WLAN_CIPHER_SUITE_CCMP_256 SUITE(0x000FAC, 10) +#define WLAN_CIPHER_SUITE_BIP_GMAC_128 SUITE(0x000FAC, 11) +#define WLAN_CIPHER_SUITE_BIP_GMAC_256 SUITE(0x000FAC, 12) +#define WLAN_CIPHER_SUITE_BIP_CMAC_256 SUITE(0x000FAC, 13) + +#define WLAN_CIPHER_SUITE_SMS4 SUITE(0x001472, 1) /* AKM suite selectors */ -#define WLAN_AKM_SUITE_8021X 0x000FAC01 -#define WLAN_AKM_SUITE_PSK 0x000FAC02 -#define WLAN_AKM_SUITE_8021X_SHA256 0x000FAC05 -#define WLAN_AKM_SUITE_PSK_SHA256 0x000FAC06 -#define WLAN_AKM_SUITE_TDLS 0x000FAC07 -#define WLAN_AKM_SUITE_SAE 0x000FAC08 -#define WLAN_AKM_SUITE_FT_OVER_SAE 0x000FAC09 +#define WLAN_AKM_SUITE_8021X SUITE(0x000FAC, 1) +#define WLAN_AKM_SUITE_PSK SUITE(0x000FAC, 2) +#define WLAN_AKM_SUITE_8021X_SHA256 SUITE(0x000FAC, 5) +#define WLAN_AKM_SUITE_PSK_SHA256 SUITE(0x000FAC, 6) +#define WLAN_AKM_SUITE_TDLS SUITE(0x000FAC, 7) +#define WLAN_AKM_SUITE_SAE SUITE(0x000FAC, 8) +#define WLAN_AKM_SUITE_FT_OVER_SAE SUITE(0x000FAC, 9) #define WLAN_MAX_KEY_LEN 32 diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index c6587c01d951..c5847dc75a93 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -46,6 +46,8 @@ struct br_ip_list { #define BR_LEARNING_SYNC BIT(9) #define BR_PROXYARP_WIFI BIT(10) #define BR_MCAST_FLOOD BIT(11) +#define BR_MULTICAST_TO_UNICAST BIT(12) +#define BR_VLAN_TUNNEL BIT(13) #define BR_DEFAULT_AGEING_TIME (300 * HZ) diff --git a/include/linux/if_frad.h b/include/linux/if_frad.h index 4316aa173dde..46df7e565d6f 100644 --- a/include/linux/if_frad.h +++ b/include/linux/if_frad.h @@ -66,8 +66,6 @@ struct dlci_local struct frad_local { - struct net_device_stats stats; - /* devices which this FRAD is slaved to */ struct net_device *master[CONFIG_DLCI_MAX]; short dlci[CONFIG_DLCI_MAX]; diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h index a4ccc3122f93..c9ec1343d187 100644 --- a/include/linux/if_macvlan.h +++ b/include/linux/if_macvlan.h @@ -9,19 +9,6 @@ #include <net/netlink.h> #include <linux/u64_stats_sync.h> -#if IS_ENABLED(CONFIG_MACVTAP) -struct socket *macvtap_get_socket(struct file *); -#else -#include <linux/err.h> -#include <linux/errno.h> -struct file; -struct socket; -static inline struct socket *macvtap_get_socket(struct file *f) -{ - return ERR_PTR(-EINVAL); -} -#endif /* CONFIG_MACVTAP */ - struct macvlan_port; struct macvtap_queue; @@ -29,7 +16,7 @@ struct macvtap_queue; * Maximum times a macvtap device can be opened. This can be used to * configure the number of receive queue, e.g. for multiqueue virtio. */ -#define MAX_MACVTAP_QUEUES 256 +#define MAX_TAP_QUEUES 256 #define MACVLAN_MC_FILTER_BITS 8 #define MACVLAN_MC_FILTER_SZ (1 << MACVLAN_MC_FILTER_BITS) @@ -49,7 +36,7 @@ struct macvlan_dev { enum macvlan_mode mode; u16 flags; /* This array tracks active taps. 
*/ - struct macvtap_queue __rcu *taps[MAX_MACVTAP_QUEUES]; + struct tap_queue __rcu *taps[MAX_TAP_QUEUES]; /* This list tracks all taps (both enabled and disabled) */ struct list_head queue_list; int numvtaps; diff --git a/include/linux/if_tap.h b/include/linux/if_tap.h new file mode 100644 index 000000000000..3482c3c2037d --- /dev/null +++ b/include/linux/if_tap.h @@ -0,0 +1,75 @@ +#ifndef _LINUX_IF_TAP_H_ +#define _LINUX_IF_TAP_H_ + +#if IS_ENABLED(CONFIG_TAP) +struct socket *tap_get_socket(struct file *); +#else +#include <linux/err.h> +#include <linux/errno.h> +struct file; +struct socket; +static inline struct socket *tap_get_socket(struct file *f) +{ + return ERR_PTR(-EINVAL); +} +#endif /* CONFIG_TAP */ + +#include <net/sock.h> +#include <linux/skb_array.h> + +#define MAX_TAP_QUEUES 256 + +struct tap_queue; + +struct tap_dev { + struct net_device *dev; + u16 flags; + /* This array tracks active taps. */ + struct tap_queue __rcu *taps[MAX_TAP_QUEUES]; + /* This list tracks all taps (both enabled and disabled) */ + struct list_head queue_list; + int numvtaps; + int numqueues; + netdev_features_t tap_features; + int minor; + + void (*update_features)(struct tap_dev *tap, netdev_features_t features); + void (*count_tx_dropped)(struct tap_dev *tap); + void (*count_rx_dropped)(struct tap_dev *tap); +}; + +/* + * A tap queue is the central object of the tap module; it connects + * an open character device to a virtual interface. There can be + * multiple queues on one interface, which map back to queues + * implemented in hardware on the underlying device. + * + * tap_proto is used to allocate queues through the sock allocation + * mechanism. + * + */ + +struct tap_queue { + struct sock sk; + struct socket sock; + struct socket_wq wq; + int vnet_hdr_sz; + struct tap_dev __rcu *tap; + struct file *file; + unsigned int flags; + u16 queue_index; + bool enabled; + struct list_head next; + struct skb_array skb_array; +}; + +rx_handler_result_t tap_handle_frame(struct sk_buff **pskb); +void tap_del_queues(struct tap_dev *tap); +int tap_get_minor(dev_t major, struct tap_dev *tap); +void tap_free_minor(dev_t major, struct tap_dev *tap); +int tap_queue_resize(struct tap_dev *tap); +int tap_create_cdev(struct cdev *tap_cdev, + dev_t *tap_major, const char *device_name); +void tap_destroy_cdev(dev_t major, struct cdev *tap_cdev); + +#endif /*_LINUX_IF_TAP_H_*/ diff --git a/include/linux/iio/buffer.h b/include/linux/iio/buffer.h index 70a5164f4728..48767c776119 100644 --- a/include/linux/iio/buffer.h +++ b/include/linux/iio/buffer.h @@ -11,139 +11,15 @@ #define _IIO_BUFFER_GENERIC_H_ #include <linux/sysfs.h> #include <linux/iio/iio.h> -#include <linux/kref.h> - -#ifdef CONFIG_IIO_BUFFER struct iio_buffer; -/** - * INDIO_BUFFER_FLAG_FIXED_WATERMARK - Watermark level of the buffer can not be - * configured. It has a fixed value which will be buffer specific. - */ -#define INDIO_BUFFER_FLAG_FIXED_WATERMARK BIT(0) - -/** - * struct iio_buffer_access_funcs - access functions for buffers. - * @store_to: actually store stuff to the buffer - * @read_first_n: try to get a specified number of bytes (must exist) - * @data_available: indicates how much data is available for reading from - * the buffer. - * @request_update: if a parameter change has been marked, update underlying - * storage. - * @set_bytes_per_datum:set number of bytes per datum - * @set_length: set number of datums in buffer - * @enable: called if the buffer is attached to a device and the - * device starts sampling.
Calls are balanced with - * @disable. - * @disable: called if the buffer is attached to a device and the - * device stops sampling. Calles are balanced with @enable. - * @release: called when the last reference to the buffer is dropped, - * should free all resources allocated by the buffer. - * @modes: Supported operating modes by this buffer type - * @flags: A bitmask combination of INDIO_BUFFER_FLAG_* - * - * The purpose of this structure is to make the buffer element - * modular as event for a given driver, different usecases may require - * different buffer designs (space efficiency vs speed for example). - * - * It is worth noting that a given buffer implementation may only support a - * small proportion of these functions. The core code 'should' cope fine with - * any of them not existing. - **/ -struct iio_buffer_access_funcs { - int (*store_to)(struct iio_buffer *buffer, const void *data); - int (*read_first_n)(struct iio_buffer *buffer, - size_t n, - char __user *buf); - size_t (*data_available)(struct iio_buffer *buffer); - - int (*request_update)(struct iio_buffer *buffer); - - int (*set_bytes_per_datum)(struct iio_buffer *buffer, size_t bpd); - int (*set_length)(struct iio_buffer *buffer, int length); - - int (*enable)(struct iio_buffer *buffer, struct iio_dev *indio_dev); - int (*disable)(struct iio_buffer *buffer, struct iio_dev *indio_dev); - - void (*release)(struct iio_buffer *buffer); - - unsigned int modes; - unsigned int flags; -}; - -/** - * struct iio_buffer - general buffer structure - * @length: [DEVICE] number of datums in buffer - * @bytes_per_datum: [DEVICE] size of individual datum including timestamp - * @scan_el_attrs: [DRIVER] control of scan elements if that scan mode - * control method is used - * @scan_mask: [INTERN] bitmask used in masking scan mode elements - * @scan_timestamp: [INTERN] does the scan mode include a timestamp - * @access: [DRIVER] buffer access functions associated with the - * implementation. - * @scan_el_dev_attr_list:[INTERN] list of scan element related attributes. - * @buffer_group: [INTERN] attributes of the buffer group - * @scan_el_group: [DRIVER] attribute group for those attributes not - * created from the iio_chan_info array. - * @pollq: [INTERN] wait queue to allow for polling on the buffer. - * @stufftoread: [INTERN] flag to indicate new data. - * @attrs: [INTERN] standard attributes of the buffer - * @demux_list: [INTERN] list of operations required to demux the scan. - * @demux_bounce: [INTERN] buffer for doing gather from incoming scan. - * @buffer_list: [INTERN] entry in the devices list of current buffers. - * @ref: [INTERN] reference count of the buffer. - * @watermark: [INTERN] number of datums to wait for poll/read. 
- */ -struct iio_buffer { - int length; - int bytes_per_datum; - struct attribute_group *scan_el_attrs; - long *scan_mask; - bool scan_timestamp; - const struct iio_buffer_access_funcs *access; - struct list_head scan_el_dev_attr_list; - struct attribute_group buffer_group; - struct attribute_group scan_el_group; - wait_queue_head_t pollq; - bool stufftoread; - const struct attribute **attrs; - struct list_head demux_list; - void *demux_bounce; - struct list_head buffer_list; - struct kref ref; - unsigned int watermark; -}; - -/** - * iio_update_buffers() - add or remove buffer from active list - * @indio_dev: device to add buffer to - * @insert_buffer: buffer to insert - * @remove_buffer: buffer_to_remove - * - * Note this will tear down the all buffering and build it up again - */ -int iio_update_buffers(struct iio_dev *indio_dev, - struct iio_buffer *insert_buffer, - struct iio_buffer *remove_buffer); - -/** - * iio_buffer_init() - Initialize the buffer structure - * @buffer: buffer to be initialized - **/ -void iio_buffer_init(struct iio_buffer *buffer); +void iio_buffer_set_attrs(struct iio_buffer *buffer, + const struct attribute **attrs); -int iio_scan_mask_query(struct iio_dev *indio_dev, - struct iio_buffer *buffer, int bit); - -/** - * iio_push_to_buffers() - push to a registered buffer. - * @indio_dev: iio_dev structure for device. - * @data: Full scan. - */ int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data); -/* +/** * iio_push_to_buffers_with_timestamp() - push data and timestamp to buffers * @indio_dev: iio_dev structure for device. * @data: sample data @@ -168,34 +44,10 @@ static inline int iio_push_to_buffers_with_timestamp(struct iio_dev *indio_dev, return iio_push_to_buffers(indio_dev, data); } -int iio_update_demux(struct iio_dev *indio_dev); - bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev, - const unsigned long *mask); - -struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer); -void iio_buffer_put(struct iio_buffer *buffer); - -/** - * iio_device_attach_buffer - Attach a buffer to a IIO device - * @indio_dev: The device the buffer should be attached to - * @buffer: The buffer to attach to the device - * - * This function attaches a buffer to a IIO device. The buffer stays attached to - * the device until the device is freed. The function should only be called at - * most once per device. - */ -static inline void iio_device_attach_buffer(struct iio_dev *indio_dev, - struct iio_buffer *buffer) -{ - indio_dev->buffer = iio_buffer_get(buffer); -} - -#else /* CONFIG_IIO_BUFFER */ - -static inline void iio_buffer_get(struct iio_buffer *buffer) {} -static inline void iio_buffer_put(struct iio_buffer *buffer) {} + const unsigned long *mask); -#endif /* CONFIG_IIO_BUFFER */ +void iio_device_attach_buffer(struct iio_dev *indio_dev, + struct iio_buffer *buffer); #endif /* _IIO_BUFFER_GENERIC_H_ */ diff --git a/include/linux/iio/buffer_impl.h b/include/linux/iio/buffer_impl.h new file mode 100644 index 000000000000..8daba198fafa --- /dev/null +++ b/include/linux/iio/buffer_impl.h @@ -0,0 +1,162 @@ +#ifndef _IIO_BUFFER_GENERIC_IMPL_H_ +#define _IIO_BUFFER_GENERIC_IMPL_H_ +#include <linux/sysfs.h> +#include <linux/kref.h> + +#ifdef CONFIG_IIO_BUFFER + +struct iio_dev; +struct iio_buffer; + +/** + * INDIO_BUFFER_FLAG_FIXED_WATERMARK - Watermark level of the buffer can not be + * configured. It has a fixed value which will be buffer specific. 
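The slimmed-down buffer.h above is everything an ordinary device driver needs; pushing a scan is still a one-liner from the trigger handler. A minimal sketch, in which the my_* names and the 16-byte scan layout are assumptions for illustration:

static irqreturn_t my_trigger_handler(int irq, void *p)
{
        struct iio_poll_func *pf = p;
        struct iio_dev *indio_dev = pf->indio_dev;
        /* scan data followed by room for an aligned s64 timestamp */
        u8 buf[16] __aligned(8) = { 0 };

        my_read_sample(indio_dev, buf);         /* illustrative */
        iio_push_to_buffers_with_timestamp(indio_dev, buf,
                                           iio_get_time_ns(indio_dev));

        iio_trigger_notify_done(indio_dev->trig);
        return IRQ_HANDLED;
}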
+ */ +#define INDIO_BUFFER_FLAG_FIXED_WATERMARK BIT(0) + +/** + * struct iio_buffer_access_funcs - access functions for buffers. + * @store_to: actually store stuff to the buffer + * @read_first_n: try to get a specified number of bytes (must exist) + * @data_available: indicates how much data is available for reading from + * the buffer. + * @request_update: if a parameter change has been marked, update underlying + * storage. + * @set_bytes_per_datum: set number of bytes per datum + * @set_length: set number of datums in buffer + * @enable: called if the buffer is attached to a device and the + * device starts sampling. Calls are balanced with + * @disable. + * @disable: called if the buffer is attached to a device and the + * device stops sampling. Calls are balanced with @enable. + * @release: called when the last reference to the buffer is dropped, + * should free all resources allocated by the buffer. + * @modes: Supported operating modes by this buffer type + * @flags: A bitmask combination of INDIO_BUFFER_FLAG_* + * + * The purpose of this structure is to make the buffer element + * modular, since even for a given driver different use cases may require + * different buffer designs (space efficiency vs speed for example). + * + * It is worth noting that a given buffer implementation may only support a + * small proportion of these functions. The core code 'should' cope fine with + * any of them not existing. + **/ +struct iio_buffer_access_funcs { + int (*store_to)(struct iio_buffer *buffer, const void *data); + int (*read_first_n)(struct iio_buffer *buffer, + size_t n, + char __user *buf); + size_t (*data_available)(struct iio_buffer *buffer); + + int (*request_update)(struct iio_buffer *buffer); + + int (*set_bytes_per_datum)(struct iio_buffer *buffer, size_t bpd); + int (*set_length)(struct iio_buffer *buffer, int length); + + int (*enable)(struct iio_buffer *buffer, struct iio_dev *indio_dev); + int (*disable)(struct iio_buffer *buffer, struct iio_dev *indio_dev); + + void (*release)(struct iio_buffer *buffer); + + unsigned int modes; + unsigned int flags; +}; + +/** + * struct iio_buffer - general buffer structure + * + * Note that the internals of this structure should only be of interest to + * those writing new buffer implementations. + */ +struct iio_buffer { + /** @length: Number of datums in buffer. */ + int length; + + /** @bytes_per_datum: Size of individual datum including timestamp. */ + int bytes_per_datum; + + /** + * @access: Buffer access functions associated with the + * implementation. + */ + const struct iio_buffer_access_funcs *access; + + /** @scan_mask: Bitmask used in masking scan mode elements. */ + long *scan_mask; + + /** @demux_list: List of operations required to demux the scan. */ + struct list_head demux_list; + + /** @pollq: Wait queue to allow for polling on the buffer. */ + wait_queue_head_t pollq; + + /** @watermark: Number of datums to wait for poll/read. */ + unsigned int watermark; + + /* private: */ + /* + * @scan_el_attrs: Control of scan elements if that scan mode + * control method is used. + */ + struct attribute_group *scan_el_attrs; + + /* @scan_timestamp: Does the scan mode include a timestamp. */ + bool scan_timestamp; + + /* @scan_el_dev_attr_list: List of scan element related attributes. */ + struct list_head scan_el_dev_attr_list; + + /* @buffer_group: Attributes of the buffer group. */ + struct attribute_group buffer_group; + + /* + * @scan_el_group: Attribute group for those attributes not + * created from the iio_chan_info array.
+ */ + struct attribute_group scan_el_group; + + /* @stufftoread: Flag to indicate new data. */ + bool stufftoread; + + /* @attrs: Standard attributes of the buffer. */ + const struct attribute **attrs; + + /* @demux_bounce: Buffer for doing gather from incoming scan. */ + void *demux_bounce; + + /* @buffer_list: Entry in the devices list of current buffers. */ + struct list_head buffer_list; + + /* @ref: Reference count of the buffer. */ + struct kref ref; +}; + +/** + * iio_update_buffers() - add or remove buffer from active list + * @indio_dev: device to add buffer to + * @insert_buffer: buffer to insert + * @remove_buffer: buffer_to_remove + * + * Note this will tear down the all buffering and build it up again + */ +int iio_update_buffers(struct iio_dev *indio_dev, + struct iio_buffer *insert_buffer, + struct iio_buffer *remove_buffer); + +/** + * iio_buffer_init() - Initialize the buffer structure + * @buffer: buffer to be initialized + **/ +void iio_buffer_init(struct iio_buffer *buffer); + +struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer); +void iio_buffer_put(struct iio_buffer *buffer); + +#else /* CONFIG_IIO_BUFFER */ + +static inline void iio_buffer_get(struct iio_buffer *buffer) {} +static inline void iio_buffer_put(struct iio_buffer *buffer) {} + +#endif /* CONFIG_IIO_BUFFER */ +#endif /* _IIO_BUFFER_GENERIC_IMPL_H_ */ diff --git a/include/linux/iio/common/st_sensors_i2c.h b/include/linux/iio/common/st_sensors_i2c.h index 1796af093368..254de3c7dde8 100644 --- a/include/linux/iio/common/st_sensors_i2c.h +++ b/include/linux/iio/common/st_sensors_i2c.h @@ -28,4 +28,13 @@ static inline void st_sensors_of_i2c_probe(struct i2c_client *client, } #endif +#ifdef CONFIG_ACPI +int st_sensors_match_acpi_device(struct device *dev); +#else +static inline int st_sensors_match_acpi_device(struct device *dev) +{ + return -ENODEV; +} +#endif + #endif /* ST_SENSORS_I2C_H */ diff --git a/include/linux/iio/kfifo_buf.h b/include/linux/iio/kfifo_buf.h index 1683bc710d14..027cfa9c3703 100644 --- a/include/linux/iio/kfifo_buf.h +++ b/include/linux/iio/kfifo_buf.h @@ -1,9 +1,8 @@ #ifndef __LINUX_IIO_KFIFO_BUF_H__ #define __LINUX_IIO_KFIFO_BUF_H__ -#include <linux/kfifo.h> -#include <linux/iio/iio.h> -#include <linux/iio/buffer.h> +struct iio_buffer; +struct device; struct iio_buffer *iio_kfifo_allocate(void); void iio_kfifo_free(struct iio_buffer *r); diff --git a/include/linux/iio/timer/stm32-timer-trigger.h b/include/linux/iio/timer/stm32-timer-trigger.h new file mode 100644 index 000000000000..55535aef2e6c --- /dev/null +++ b/include/linux/iio/timer/stm32-timer-trigger.h @@ -0,0 +1,62 @@ +/* + * Copyright (C) STMicroelectronics 2016 + * + * Author: Benjamin Gaignard <benjamin.gaignard@st.com> + * + * License terms: GNU General Public License (GPL), version 2 + */ + +#ifndef _STM32_TIMER_TRIGGER_H_ +#define _STM32_TIMER_TRIGGER_H_ + +#define TIM1_TRGO "tim1_trgo" +#define TIM1_CH1 "tim1_ch1" +#define TIM1_CH2 "tim1_ch2" +#define TIM1_CH3 "tim1_ch3" +#define TIM1_CH4 "tim1_ch4" + +#define TIM2_TRGO "tim2_trgo" +#define TIM2_CH1 "tim2_ch1" +#define TIM2_CH2 "tim2_ch2" +#define TIM2_CH3 "tim2_ch3" +#define TIM2_CH4 "tim2_ch4" + +#define TIM3_TRGO "tim3_trgo" +#define TIM3_CH1 "tim3_ch1" +#define TIM3_CH2 "tim3_ch2" +#define TIM3_CH3 "tim3_ch3" +#define TIM3_CH4 "tim3_ch4" + +#define TIM4_TRGO "tim4_trgo" +#define TIM4_CH1 "tim4_ch1" +#define TIM4_CH2 "tim4_ch2" +#define TIM4_CH3 "tim4_ch3" +#define TIM4_CH4 "tim4_ch4" + +#define TIM5_TRGO "tim5_trgo" +#define TIM5_CH1 "tim5_ch1" +#define 
TIM5_CH2 "tim5_ch2" +#define TIM5_CH3 "tim5_ch3" +#define TIM5_CH4 "tim5_ch4" + +#define TIM6_TRGO "tim6_trgo" + +#define TIM7_TRGO "tim7_trgo" + +#define TIM8_TRGO "tim8_trgo" +#define TIM8_CH1 "tim8_ch1" +#define TIM8_CH2 "tim8_ch2" +#define TIM8_CH3 "tim8_ch3" +#define TIM8_CH4 "tim8_ch4" + +#define TIM9_TRGO "tim9_trgo" +#define TIM9_CH1 "tim9_ch1" +#define TIM9_CH2 "tim9_ch2" + +#define TIM12_TRGO "tim12_trgo" +#define TIM12_CH1 "tim12_ch1" +#define TIM12_CH2 "tim12_ch2" + +bool is_stm32_timer_trigger(struct iio_trigger *trig); + +#endif diff --git a/include/linux/init.h b/include/linux/init.h index 885c3e6d0f9d..79af0962fd52 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -126,10 +126,10 @@ void prepare_namespace(void); void __init load_default_modules(void); int __init init_rootfs(void); -#if defined(CONFIG_DEBUG_RODATA) || defined(CONFIG_DEBUG_SET_MODULE_RONX) +#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_STRICT_MODULE_RWX) extern bool rodata_enabled; #endif -#ifdef CONFIG_DEBUG_RODATA +#ifdef CONFIG_STRICT_KERNEL_RWX void mark_rodata_ro(void); #endif diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 3a85d61f7614..91d9049f0039 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -12,8 +12,10 @@ #include <linux/securebits.h> #include <linux/seqlock.h> #include <linux/rbtree.h> +#include <linux/sched/autogroup.h> #include <net/net_namespace.h> #include <linux/sched/rt.h> +#include <linux/mm_types.h> #include <asm/thread_info.h> @@ -149,8 +151,6 @@ extern struct group_info init_groups; extern struct cred init_cred; -extern struct task_group root_task_group; - #ifdef CONFIG_CGROUP_SCHED # define INIT_CGROUP_SCHED(tsk) \ .sched_task_group = &root_task_group, diff --git a/include/linux/input/matrix_keypad.h b/include/linux/input/matrix_keypad.h index 27e06acc509a..37b04a0fdea4 100644 --- a/include/linux/input/matrix_keypad.h +++ b/include/linux/input/matrix_keypad.h @@ -80,24 +80,9 @@ int matrix_keypad_build_keymap(const struct matrix_keymap_data *keymap_data, unsigned int rows, unsigned int cols, unsigned short *keymap, struct input_dev *input_dev); +int matrix_keypad_parse_properties(struct device *dev, + unsigned int *rows, unsigned int *cols); -#ifdef CONFIG_OF -/** - * matrix_keypad_parse_of_params() - Read parameters from matrix-keypad node - * - * @dev: Device containing of_node - * @rows: Returns number of matrix rows - * @cols: Returns number of matrix columns - * @return 0 if OK, <0 on error - */ -int matrix_keypad_parse_of_params(struct device *dev, - unsigned int *rows, unsigned int *cols); -#else -static inline int matrix_keypad_parse_of_params(struct device *dev, - unsigned int *rows, unsigned int *cols) -{ - return -ENOSYS; -} -#endif /* CONFIG_OF */ +#define matrix_keypad_parse_of_params matrix_keypad_parse_properties #endif /* _MATRIX_KEYPAD_H */ diff --git a/include/linux/input/tca8418_keypad.h b/include/linux/input/tca8418_keypad.h deleted file mode 100644 index e71a85dc2cbd..000000000000 --- a/include/linux/input/tca8418_keypad.h +++ /dev/null @@ -1,44 +0,0 @@ -/* - * TCA8418 keypad platform support - * - * Copyright (C) 2011 Fuel7, Inc. All rights reserved. - * - * Author: Kyle Manna <kyle.manna@fuel7.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public - * License v2 as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public - * License along with this program; if not, write to the - * Free Software Foundation, Inc., 59 Temple Place - Suite 330, - * Boston, MA 021110-1307, USA. - * - * If you can't comply with GPLv2, alternative licensing terms may be - * arranged. Please contact Fuel7, Inc. (http://fuel7.com/) for proprietary - * alternative licensing inquiries. - */ - -#ifndef _TCA8418_KEYPAD_H -#define _TCA8418_KEYPAD_H - -#include <linux/types.h> -#include <linux/input/matrix_keypad.h> - -#define TCA8418_I2C_ADDR 0x34 -#define TCA8418_NAME "tca8418_keypad" - -struct tca8418_keypad_platform_data { - const struct matrix_keymap_data *keymap_data; - unsigned rows; - unsigned cols; - bool rep; - bool irq_is_gpio; -}; - -#endif diff --git a/include/linux/iomap.h b/include/linux/iomap.h index a4c94b86401e..7291810067eb 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h @@ -72,17 +72,16 @@ struct iomap_ops { }; ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from, - struct iomap_ops *ops); + const struct iomap_ops *ops); int iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len, - struct iomap_ops *ops); + const struct iomap_ops *ops); int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, - bool *did_zero, struct iomap_ops *ops); + bool *did_zero, const struct iomap_ops *ops); int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero, - struct iomap_ops *ops); -int iomap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, - struct iomap_ops *ops); + const struct iomap_ops *ops); +int iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops); int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, - loff_t start, loff_t len, struct iomap_ops *ops); + loff_t start, loff_t len, const struct iomap_ops *ops); /* * Flags for direct I/O ->end_io: @@ -92,6 +91,6 @@ int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, typedef int (iomap_dio_end_io_t)(struct kiocb *iocb, ssize_t ret, unsigned flags); ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, - struct iomap_ops *ops, iomap_dio_end_io_t end_io); + const struct iomap_ops *ops, iomap_dio_end_io_t end_io); #endif /* LINUX_IOMAP_H */ diff --git a/include/linux/iopoll.h b/include/linux/iopoll.h index 1c30014ed176..d29e1e21bf3f 100644 --- a/include/linux/iopoll.h +++ b/include/linux/iopoll.h @@ -17,7 +17,7 @@ #include <linux/kernel.h> #include <linux/types.h> -#include <linux/hrtimer.h> +#include <linux/ktime.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/io.h> diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h index 78c5d5ae3857..f1045b2c6a00 100644 --- a/include/linux/ipmi.h +++ b/include/linux/ipmi.h @@ -100,7 +100,7 @@ struct ipmi_user_hndl { /* Create a new user of the IPMI layer on the given interface number. 
*/ int ipmi_create_user(unsigned int if_num, - struct ipmi_user_hndl *handler, + const struct ipmi_user_hndl *handler, void *handler_data, ipmi_user_t *user); diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 671d014e6429..71be5b330d21 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -69,6 +69,7 @@ struct ipv6_devconf { __s32 seg6_require_hmac; #endif __u32 enhanced_dad; + __u32 addr_gen_mode; struct ctl_table_header *sysctl_header; }; diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index 725e86b506f3..97cbca19430d 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -349,8 +349,32 @@ /* * CPU interface registers */ -#define ICC_CTLR_EL1_EOImode_drop_dir (0U << 1) -#define ICC_CTLR_EL1_EOImode_drop (1U << 1) +#define ICC_CTLR_EL1_EOImode_SHIFT (1) +#define ICC_CTLR_EL1_EOImode_drop_dir (0U << ICC_CTLR_EL1_EOImode_SHIFT) +#define ICC_CTLR_EL1_EOImode_drop (1U << ICC_CTLR_EL1_EOImode_SHIFT) +#define ICC_CTLR_EL1_EOImode_MASK (1 << ICC_CTLR_EL1_EOImode_SHIFT) +#define ICC_CTLR_EL1_CBPR_SHIFT 0 +#define ICC_CTLR_EL1_CBPR_MASK (1 << ICC_CTLR_EL1_CBPR_SHIFT) +#define ICC_CTLR_EL1_PRI_BITS_SHIFT 8 +#define ICC_CTLR_EL1_PRI_BITS_MASK (0x7 << ICC_CTLR_EL1_PRI_BITS_SHIFT) +#define ICC_CTLR_EL1_ID_BITS_SHIFT 11 +#define ICC_CTLR_EL1_ID_BITS_MASK (0x7 << ICC_CTLR_EL1_ID_BITS_SHIFT) +#define ICC_CTLR_EL1_SEIS_SHIFT 14 +#define ICC_CTLR_EL1_SEIS_MASK (0x1 << ICC_CTLR_EL1_SEIS_SHIFT) +#define ICC_CTLR_EL1_A3V_SHIFT 15 +#define ICC_CTLR_EL1_A3V_MASK (0x1 << ICC_CTLR_EL1_A3V_SHIFT) +#define ICC_PMR_EL1_SHIFT 0 +#define ICC_PMR_EL1_MASK (0xff << ICC_PMR_EL1_SHIFT) +#define ICC_BPR0_EL1_SHIFT 0 +#define ICC_BPR0_EL1_MASK (0x7 << ICC_BPR0_EL1_SHIFT) +#define ICC_BPR1_EL1_SHIFT 0 +#define ICC_BPR1_EL1_MASK (0x7 << ICC_BPR1_EL1_SHIFT) +#define ICC_IGRPEN0_EL1_SHIFT 0 +#define ICC_IGRPEN0_EL1_MASK (1 << ICC_IGRPEN0_EL1_SHIFT) +#define ICC_IGRPEN1_EL1_SHIFT 0 +#define ICC_IGRPEN1_EL1_MASK (1 << ICC_IGRPEN1_EL1_SHIFT) +#define ICC_SRE_EL1_DIB (1U << 2) +#define ICC_SRE_EL1_DFB (1U << 1) #define ICC_SRE_EL1_SRE (1U << 0) /* @@ -379,14 +403,29 @@ #define ICH_HCR_EN (1 << 0) #define ICH_HCR_UIE (1 << 1) -#define ICH_VMCR_CTLR_SHIFT 0 -#define ICH_VMCR_CTLR_MASK (0x21f << ICH_VMCR_CTLR_SHIFT) +#define ICH_VMCR_CBPR_SHIFT 4 +#define ICH_VMCR_CBPR_MASK (1 << ICH_VMCR_CBPR_SHIFT) +#define ICH_VMCR_EOIM_SHIFT 9 +#define ICH_VMCR_EOIM_MASK (1 << ICH_VMCR_EOIM_SHIFT) #define ICH_VMCR_BPR1_SHIFT 18 #define ICH_VMCR_BPR1_MASK (7 << ICH_VMCR_BPR1_SHIFT) #define ICH_VMCR_BPR0_SHIFT 21 #define ICH_VMCR_BPR0_MASK (7 << ICH_VMCR_BPR0_SHIFT) #define ICH_VMCR_PMR_SHIFT 24 #define ICH_VMCR_PMR_MASK (0xffUL << ICH_VMCR_PMR_SHIFT) +#define ICH_VMCR_ENG0_SHIFT 0 +#define ICH_VMCR_ENG0_MASK (1 << ICH_VMCR_ENG0_SHIFT) +#define ICH_VMCR_ENG1_SHIFT 1 +#define ICH_VMCR_ENG1_MASK (1 << ICH_VMCR_ENG1_SHIFT) + +#define ICH_VTR_PRI_BITS_SHIFT 29 +#define ICH_VTR_PRI_BITS_MASK (7 << ICH_VTR_PRI_BITS_SHIFT) +#define ICH_VTR_ID_BITS_SHIFT 23 +#define ICH_VTR_ID_BITS_MASK (7 << ICH_VTR_ID_BITS_SHIFT) +#define ICH_VTR_SEIS_SHIFT 22 +#define ICH_VTR_SEIS_MASK (1 << ICH_VTR_SEIS_SHIFT) +#define ICH_VTR_A3V_SHIFT 21 +#define ICH_VTR_A3V_MASK (1 << ICH_VTR_A3V_SHIFT) #define ICC_IAR1_EL1_SPURIOUS 0x3ff diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index 188eced6813e..9f3616085423 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -524,6 +524,10 @@ static inline struct irq_domain *irq_find_matching_fwnode( { 
return NULL; } +static inline bool irq_domain_check_msi_remap(void) +{ + return false; +} #endif /* !CONFIG_IRQ_DOMAIN */ #endif /* _LINUX_IRQDOMAIN_H */ diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index b63d6b7b0db0..2afd74b9d844 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -89,11 +89,24 @@ extern bool static_key_initialized; struct static_key { atomic_t enabled; -/* Set lsb bit to 1 if branch is default true, 0 ot */ - struct jump_entry *entries; -#ifdef CONFIG_MODULES - struct static_key_mod *next; -#endif +/* + * Note: + * To make anonymous unions work with old compilers, the static + * initialization of them requires brackets. This creates a dependency + * on the order of the struct with the initializers. If any fields + * are added, STATIC_KEY_INIT_TRUE and STATIC_KEY_INIT_FALSE may need + * to be modified. + * + * bit 0 => 1 if key is initially true + * 0 if initially false + * bit 1 => 1 if points to struct static_key_mod + * 0 if points to struct jump_entry + */ + union { + unsigned long type; + struct jump_entry *entries; + struct static_key_mod *next; + }; }; #else @@ -118,9 +131,10 @@ struct module; #ifdef HAVE_JUMP_LABEL -#define JUMP_TYPE_FALSE 0UL -#define JUMP_TYPE_TRUE 1UL -#define JUMP_TYPE_MASK 1UL +#define JUMP_TYPE_FALSE 0UL +#define JUMP_TYPE_TRUE 1UL +#define JUMP_TYPE_LINKED 2UL +#define JUMP_TYPE_MASK 3UL static __always_inline bool static_key_false(struct static_key *key) { @@ -159,10 +173,10 @@ extern void static_key_disable(struct static_key *key); */ #define STATIC_KEY_INIT_TRUE \ { .enabled = { 1 }, \ - .entries = (void *)JUMP_TYPE_TRUE } + { .entries = (void *)JUMP_TYPE_TRUE } } #define STATIC_KEY_INIT_FALSE \ { .enabled = { 0 }, \ - .entries = (void *)JUMP_TYPE_FALSE } + { .entries = (void *)JUMP_TYPE_FALSE } } #else /* !HAVE_JUMP_LABEL */ diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 820c0ad54a01..5734480c9590 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -1,12 +1,12 @@ #ifndef _LINUX_KASAN_H #define _LINUX_KASAN_H -#include <linux/sched.h> #include <linux/types.h> struct kmem_cache; struct page; struct vm_struct; +struct task_struct; #ifdef CONFIG_KASAN @@ -19,6 +19,7 @@ extern unsigned char kasan_zero_page[PAGE_SIZE]; extern pte_t kasan_zero_pte[PTRS_PER_PTE]; extern pmd_t kasan_zero_pmd[PTRS_PER_PMD]; extern pud_t kasan_zero_pud[PTRS_PER_PUD]; +extern p4d_t kasan_zero_p4d[PTRS_PER_P4D]; void kasan_populate_zero_shadow(const void *shadow_start, const void *shadow_end); @@ -30,16 +31,10 @@ static inline void *kasan_mem_to_shadow(const void *addr) } /* Enable reporting bugs after kasan_disable_current() */ -static inline void kasan_enable_current(void) -{ - current->kasan_depth++; -} +extern void kasan_enable_current(void); /* Disable reporting bugs for current task */ -static inline void kasan_disable_current(void) -{ - current->kasan_depth--; -} +extern void kasan_disable_current(void); void kasan_unpoison_shadow(const void *address, size_t size); @@ -52,7 +47,7 @@ void kasan_free_pages(struct page *page, unsigned int order); void kasan_cache_create(struct kmem_cache *cache, size_t *size, unsigned long *flags); void kasan_cache_shrink(struct kmem_cache *cache); -void kasan_cache_destroy(struct kmem_cache *cache); +void kasan_cache_shutdown(struct kmem_cache *cache); void kasan_poison_slab(struct page *page); void kasan_unpoison_object_data(struct kmem_cache *cache, void *object); @@ -98,7 +93,7 @@ static inline void kasan_cache_create(struct kmem_cache *cache, 
size_t *size, unsigned long *flags) {} static inline void kasan_cache_shrink(struct kmem_cache *cache) {} -static inline void kasan_cache_destroy(struct kmem_cache *cache) {} +static inline void kasan_cache_shutdown(struct kmem_cache *cache) {} static inline void kasan_poison_slab(struct page *page) {} static inline void kasan_unpoison_object_data(struct kmem_cache *cache, diff --git a/include/linux/kconfig.h b/include/linux/kconfig.h index 8f2e059e4d45..4d748603e818 100644 --- a/include/linux/kconfig.h +++ b/include/linux/kconfig.h @@ -8,7 +8,7 @@ /* * The use of "&&" / "||" is limited in certain expressions. - * The followings enable to calculate "and" / "or" with macro expansion only. + * The following enables calculating "and" / "or" with macro expansion only. */ #define __and(x, y) ___and(x, y) #define ___and(x, y) ____and(__ARG_PLACEHOLDER_##x, y) diff --git a/include/linux/kernel.h b/include/linux/kernel.h index cb09238f6d32..4c26dc3a8295 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -100,16 +100,18 @@ ) /* - * Divide positive or negative dividend by positive divisor and round - * to closest integer. Result is undefined for negative divisors and - * for negative dividends if the divisor variable type is unsigned. + * Divide positive or negative dividend by positive or negative divisor + * and round to closest integer. Result is undefined for negative + * divisors if the dividend variable type is unsigned and for negative + * dividends if the divisor variable type is unsigned. */ #define DIV_ROUND_CLOSEST(x, divisor)( \ { \ typeof(x) __x = x; \ typeof(divisor) __d = divisor; \ (((typeof(x))-1) > 0 || \ - ((typeof(divisor))-1) > 0 || (__x) > 0) ? \ + ((typeof(divisor))-1) > 0 || \ + (((__x) > 0) == ((__d) > 0))) ? \ (((__x) + ((__d) / 2)) / (__d)) : \ (((__x) - ((__d) / 2)) / (__d)); \ } \ diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index 7056238fd9f5..a9b11b8d06f2 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h @@ -46,6 +46,7 @@ enum kernfs_node_flag { KERNFS_SUICIDAL = 0x0400, KERNFS_SUICIDED = 0x0800, KERNFS_EMPTY_DIR = 0x1000, + KERNFS_HAS_RELEASE = 0x2000, }; /* @flags for kernfs_create_root() */ @@ -175,6 +176,7 @@ struct kernfs_open_file { /* published fields */ struct kernfs_node *kn; struct file *file; + struct seq_file *seq_file; void *priv; /* private fields, do not use outside kernfs proper */ @@ -185,12 +187,20 @@ struct kernfs_open_file { char *prealloc_buf; size_t atomic_write_len; - bool mmapped; + bool mmapped:1; + bool released:1; const struct vm_operations_struct *vm_ops; }; struct kernfs_ops { /* + * Optional open/release methods. Both are called with + * @of->seq_file populated. + */ + int (*open)(struct kernfs_open_file *of); + void (*release)(struct kernfs_open_file *of); + + /* * Read is handled by either seq_file or raw_read(). * * If seq_show() is present, seq_file path is active.
Other seq diff --git a/include/linux/key.h b/include/linux/key.h index 722914798f37..e45212f2777e 100644 --- a/include/linux/key.h +++ b/include/linux/key.h @@ -354,7 +354,10 @@ static inline bool key_is_instantiated(const struct key *key) !test_bit(KEY_FLAG_NEGATIVE, &key->flags); } -#define rcu_dereference_key(KEY) \ +#define dereference_key_rcu(KEY) \ + (rcu_dereference((KEY)->payload.rcu_data0)) + +#define dereference_key_locked(KEY) \ (rcu_dereference_protected((KEY)->payload.rcu_data0, \ rwsem_is_locked(&((struct key *)(KEY))->sem))) diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h index 1e032a1ddb3e..5d9a400af509 100644 --- a/include/linux/khugepaged.h +++ b/include/linux/khugepaged.h @@ -1,7 +1,8 @@ #ifndef _LINUX_KHUGEPAGED_H #define _LINUX_KHUGEPAGED_H -#include <linux/sched.h> /* MMF_VM_HUGEPAGE */ +#include <linux/sched/coredump.h> /* MMF_VM_HUGEPAGE */ + #ifdef CONFIG_TRANSPARENT_HUGEPAGE extern struct attribute_group khugepaged_attr_group; diff --git a/include/linux/kmod.h b/include/linux/kmod.h index fcfd2bf14d3f..c4e441e00db5 100644 --- a/include/linux/kmod.h +++ b/include/linux/kmod.h @@ -56,7 +56,7 @@ struct file; struct subprocess_info { struct work_struct work; struct completion *complete; - char *path; + const char *path; char **argv; char **envp; int wait; @@ -67,10 +67,11 @@ struct subprocess_info { }; extern int -call_usermodehelper(char *path, char **argv, char **envp, int wait); +call_usermodehelper(const char *path, char **argv, char **envp, int wait); extern struct subprocess_info * -call_usermodehelper_setup(char *path, char **argv, char **envp, gfp_t gfp_mask, +call_usermodehelper_setup(const char *path, char **argv, char **envp, + gfp_t gfp_mask, int (*init)(struct subprocess_info *info, struct cred *new), void (*cleanup)(struct subprocess_info *), void *data); diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 16ddfb8b304a..c328e4f7dcad 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -29,7 +29,7 @@ * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi * <prasanna@in.ibm.com> added function-return probes. */ -#include <linux/compiler.h> /* for __kprobes */ +#include <linux/compiler.h> #include <linux/linkage.h> #include <linux/list.h> #include <linux/notifier.h> @@ -40,9 +40,9 @@ #include <linux/rcupdate.h> #include <linux/mutex.h> #include <linux/ftrace.h> +#include <asm/kprobes.h> #ifdef CONFIG_KPROBES -#include <asm/kprobes.h> /* kprobe_status settings */ #define KPROBE_HIT_ACTIVE 0x00000001 @@ -51,6 +51,7 @@ #define KPROBE_HIT_SSDONE 0x00000008 #else /* CONFIG_KPROBES */ +#include <asm-generic/kprobes.h> typedef int kprobe_opcode_t; struct arch_specific_insn { int dummy; @@ -509,18 +510,4 @@ static inline bool is_kprobe_optinsn_slot(unsigned long addr) } #endif -#ifdef CONFIG_KPROBES -/* - * Blacklist ganerating macro. Specify functions which is not probed - * by using this macro. 
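
The blacklist macro removed below now comes from asm-generic/kprobes.h via <asm/kprobes.h>, so call sites keep the same shape. A minimal sketch with a hypothetical handler, assuming <linux/kprobes.h>:

static int example_trap_handler(struct pt_regs *regs)
{
	/* runs in a context where a recursive kprobe trap would be fatal */
	return 0;
}
NOKPROBE_SYMBOL(example_trap_handler);	/* emit address into _kprobe_blacklist */
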
- */ -#define __NOKPROBE_SYMBOL(fname) \ -static unsigned long __used \ - __attribute__((section("_kprobe_blacklist"))) \ - _kbl_addr_##fname = (unsigned long)fname; -#define NOKPROBE_SYMBOL(fname) __NOKPROBE_SYMBOL(fname) -#else -#define NOKPROBE_SYMBOL(fname) -#endif - #endif /* _LINUX_KPROBES_H */ diff --git a/include/linux/ksm.h b/include/linux/ksm.h index 481c8c4627ca..e1cfda4bee58 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h @@ -12,6 +12,7 @@ #include <linux/pagemap.h> #include <linux/rmap.h> #include <linux/sched.h> +#include <linux/sched/coredump.h> struct stable_node; struct mem_cgroup; diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 1c5190dab2c1..2c14ad9809da 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -26,6 +26,7 @@ #include <linux/context_tracking.h> #include <linux/irqbypass.h> #include <linux/swait.h> +#include <linux/refcount.h> #include <asm/signal.h> #include <linux/kvm.h> @@ -45,7 +46,6 @@ * include/linux/kvm_h. */ #define KVM_MEMSLOT_INVALID (1UL << 16) -#define KVM_MEMSLOT_INCOHERENT (1UL << 17) /* Two fragments for cross MMIO pages. */ #define KVM_MAX_MMIO_FRAGMENTS 2 @@ -222,7 +222,6 @@ struct kvm_vcpu { struct mutex mutex; struct kvm_run *run; - int fpu_active; int guest_fpu_loaded, guest_xcr0_loaded; struct swait_queue_head wq; struct pid *pid; @@ -403,7 +402,7 @@ struct kvm { #endif struct kvm_vm_stat stat; struct kvm_arch arch; - atomic_t users_count; + refcount_t users_count; #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET struct kvm_coalesced_mmio_ring *coalesced_mmio_ring; spinlock_t ring_lock; @@ -642,18 +641,18 @@ int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len); int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len); -int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, - void *data, unsigned long len); +int kvm_vcpu_read_guest_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc, + void *data, unsigned long len); int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data, int offset, int len); int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, unsigned long len); -int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, - void *data, unsigned long len); -int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, - void *data, int offset, unsigned long len); -int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, - gpa_t gpa, unsigned long len); +int kvm_vcpu_write_guest_cached(struct kvm_vcpu *v, struct gfn_to_hva_cache *ghc, + void *data, unsigned long len); +int kvm_vcpu_write_guest_offset_cached(struct kvm_vcpu *v, struct gfn_to_hva_cache *ghc, + void *data, int offset, unsigned long len); +int kvm_vcpu_gfn_to_hva_cache_init(struct kvm_vcpu *v, struct gfn_to_hva_cache *ghc, + gpa_t gpa, unsigned long len); int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len); int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len); struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn); diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index 8458c5351e56..77e7af32543f 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h @@ -70,6 +70,8 @@ struct nd_cmd_desc { struct nd_interleave_set { u64 cookie; + /* compatibility with initial buggy Linux implementation */ + u64 altcookie; }; struct 
nd_mapping_desc { diff --git a/include/linux/list.h b/include/linux/list.h index d1039ecaf94f..ae537fa46216 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -527,6 +527,19 @@ static inline void list_splice_tail_init(struct list_head *list, pos = list_next_entry(pos, member)) /** + * list_for_each_entry_from_reverse - iterate backwards over list of given type + * from the current point + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the list_head within the struct. + * + * Iterate backwards over list of given type, continuing from current position. + */ +#define list_for_each_entry_from_reverse(pos, head, member) \ + for (; &pos->member != (head); \ + pos = list_prev_entry(pos, member)) + +/** * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry * @pos: the type * to use as a loop cursor. * @n: another type * to use as temporary storage diff --git a/include/linux/list_nulls.h b/include/linux/list_nulls.h index b01fe1009084..87ff4f58a2f0 100644 --- a/include/linux/list_nulls.h +++ b/include/linux/list_nulls.h @@ -29,6 +29,11 @@ struct hlist_nulls_node { ((ptr)->first = (struct hlist_nulls_node *) NULLS_MARKER(nulls)) #define hlist_nulls_entry(ptr, type, member) container_of(ptr,type,member) + +#define hlist_nulls_entry_safe(ptr, type, member) \ + ({ typeof(ptr) ____ptr = (ptr); \ + !is_a_nulls(____ptr) ? hlist_nulls_entry(____ptr, type, member) : NULL; \ + }) /** * ptr_is_a_nulls - Test if a ptr is a nulls * @ptr: ptr to be tested diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h index c15373894a42..b37dee3acaba 100644 --- a/include/linux/lockd/lockd.h +++ b/include/linux/lockd/lockd.h @@ -355,7 +355,8 @@ static inline int nlm_privileged_requester(const struct svc_rqst *rqstp) static inline int nlm_compare_locks(const struct file_lock *fl1, const struct file_lock *fl2) { - return fl1->fl_pid == fl2->fl_pid + return file_inode(fl1->fl_file) == file_inode(fl2->fl_file) + && fl1->fl_pid == fl2->fl_pid && fl1->fl_owner == fl2->fl_owner && fl1->fl_start == fl2->fl_start && fl1->fl_end == fl2->fl_end diff --git a/include/linux/log2.h b/include/linux/log2.h index ef3d4f67118c..c373295f359f 100644 --- a/include/linux/log2.h +++ b/include/linux/log2.h @@ -16,12 +16,6 @@ #include <linux/bitops.h> /* - * deal with unrepresentable constant logarithms - */ -extern __attribute__((const, noreturn)) -int ____ilog2_NaN(void); - -/* * non-constant log of base 2 calculators * - the arch may override these in asm/bitops.h if they can be implemented * more efficiently than using fls() and fls64() @@ -85,7 +79,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n) #define ilog2(n) \ ( \ __builtin_constant_p(n) ? ( \ - (n) < 1 ? ____ilog2_NaN() : \ + (n) < 2 ? 0 : \ (n) & (1ULL << 63) ? 63 : \ (n) & (1ULL << 62) ? 62 : \ (n) & (1ULL << 61) ? 61 : \ @@ -148,10 +142,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n) (n) & (1ULL << 4) ? 4 : \ (n) & (1ULL << 3) ? 3 : \ (n) & (1ULL << 2) ? 2 : \ - (n) & (1ULL << 1) ? 1 : \ - (n) & (1ULL << 0) ? 0 : \ - ____ilog2_NaN() \ - ) : \ + 1 ) : \ (sizeof(n) <= 4) ? 
\ __ilog2_u32(n) : \ __ilog2_u64(n) \ diff --git a/include/linux/lz4.h b/include/linux/lz4.h index 6b784c59f321..394e3d9213b8 100644 --- a/include/linux/lz4.h +++ b/include/linux/lz4.h @@ -1,87 +1,648 @@ -#ifndef __LZ4_H__ -#define __LZ4_H__ -/* - * LZ4 Kernel Interface +/* LZ4 Kernel Interface * * Copyright (C) 2013, LG Electronics, Kyungsik Lee <kyungsik.lee@lge.com> + * Copyright (C) 2016, Sven Schmidt <4sschmid@informatik.uni-hamburg.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. + * + * This file is based on the original header file + * for LZ4 - Fast LZ compression algorithm. + * + * LZ4 - Fast LZ compression algorithm + * Copyright (C) 2011-2016, Yann Collet. + * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * You can contact the author at : + * - LZ4 homepage : http://www.lz4.org + * - LZ4 source repository : https://github.com/lz4/lz4 */ -#define LZ4_MEM_COMPRESS (16384) -#define LZ4HC_MEM_COMPRESS (262144 + (2 * sizeof(unsigned char *))) +#ifndef __LZ4_H__ +#define __LZ4_H__ + +#include <linux/types.h> +#include <linux/string.h> /* memset, memcpy */ + +/*-************************************************************************ + * CONSTANTS + **************************************************************************/ /* - * lz4_compressbound() - * Provides the maximum size that LZ4 may output in a "worst case" scenario - * (input data not compressible) + * LZ4_MEMORY_USAGE : + * Memory usage formula : N->2^N Bytes + * (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) + * Increasing memory usage improves compression ratio + * Reduced memory usage can improve speed, due to cache effect + * Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ -static inline size_t lz4_compressbound(size_t isize) -{ - return isize + (isize / 255) + 16; -} +#define LZ4_MEMORY_USAGE 14 + +#define LZ4_MAX_INPUT_SIZE 0x7E000000 /* 2 113 929 216 bytes */ +#define LZ4_COMPRESSBOUND(isize) (\ + (unsigned int)(isize) > (unsigned int)LZ4_MAX_INPUT_SIZE \ + ? 
0 \ + : (isize) + ((isize)/255) + 16) + +#define LZ4_ACCELERATION_DEFAULT 1 +#define LZ4_HASHLOG (LZ4_MEMORY_USAGE-2) +#define LZ4_HASHTABLESIZE (1 << LZ4_MEMORY_USAGE) +#define LZ4_HASH_SIZE_U32 (1 << LZ4_HASHLOG) + +#define LZ4HC_MIN_CLEVEL 3 +#define LZ4HC_DEFAULT_CLEVEL 9 +#define LZ4HC_MAX_CLEVEL 16 + +#define LZ4HC_DICTIONARY_LOGSIZE 16 +#define LZ4HC_MAXD (1<<LZ4HC_DICTIONARY_LOGSIZE) +#define LZ4HC_MAXD_MASK (LZ4HC_MAXD - 1) +#define LZ4HC_HASH_LOG (LZ4HC_DICTIONARY_LOGSIZE - 1) +#define LZ4HC_HASHTABLESIZE (1 << LZ4HC_HASH_LOG) +#define LZ4HC_HASH_MASK (LZ4HC_HASHTABLESIZE - 1) + +/*-************************************************************************ + * STREAMING CONSTANTS AND STRUCTURES + **************************************************************************/ +#define LZ4_STREAMSIZE_U64 ((1 << (LZ4_MEMORY_USAGE - 3)) + 4) +#define LZ4_STREAMSIZE (LZ4_STREAMSIZE_U64 * sizeof(unsigned long long)) + +#define LZ4_STREAMHCSIZE 262192 +#define LZ4_STREAMHCSIZE_SIZET (262192 / sizeof(size_t)) + +#define LZ4_STREAMDECODESIZE_U64 4 +#define LZ4_STREAMDECODESIZE (LZ4_STREAMDECODESIZE_U64 * \ + sizeof(unsigned long long)) /* - * lz4_compress() - * src : source address of the original data - * src_len : size of the original data - * dst : output buffer address of the compressed data - * This requires 'dst' of size LZ4_COMPRESSBOUND. - * dst_len : is the output size, which is returned after compress done - * workmem : address of the working memory. - * This requires 'workmem' of size LZ4_MEM_COMPRESS. - * return : Success if return 0 - * Error if return (< 0) - * note : Destination buffer and workmem must be already allocated with - * the defined size. - */ -int lz4_compress(const unsigned char *src, size_t src_len, - unsigned char *dst, size_t *dst_len, void *wrkmem); - - /* - * lz4hc_compress() - * src : source address of the original data - * src_len : size of the original data - * dst : output buffer address of the compressed data - * This requires 'dst' of size LZ4_COMPRESSBOUND. - * dst_len : is the output size, which is returned after compress done - * workmem : address of the working memory. - * This requires 'workmem' of size LZ4HC_MEM_COMPRESS. - * return : Success if return 0 - * Error if return (< 0) - * note : Destination buffer and workmem must be already allocated with - * the defined size. - */ -int lz4hc_compress(const unsigned char *src, size_t src_len, - unsigned char *dst, size_t *dst_len, void *wrkmem); + * LZ4_stream_t - information structure to track an LZ4 stream. + */ +typedef struct { + uint32_t hashTable[LZ4_HASH_SIZE_U32]; + uint32_t currentOffset; + uint32_t initCheck; + const uint8_t *dictionary; + uint8_t *bufferStart; + uint32_t dictSize; +} LZ4_stream_t_internal; +typedef union { + unsigned long long table[LZ4_STREAMSIZE_U64]; + LZ4_stream_t_internal internal_donotuse; +} LZ4_stream_t; /* - * lz4_decompress() - * src : source address of the compressed data - * src_len : is the input size, whcih is returned after decompress done - * dest : output buffer address of the decompressed data - * actual_dest_len: is the size of uncompressed data, supposing it's known - * return : Success if return 0 - * Error if return (< 0) - * note : Destination buffer must be already allocated. - * slightly faster than lz4_decompress_unknownoutputsize() - */ -int lz4_decompress(const unsigned char *src, size_t *src_len, - unsigned char *dest, size_t actual_dest_len); + * LZ4_streamHC_t - information structure to track an LZ4HC stream. 
+ */
+typedef struct {
+	unsigned int hashTable[LZ4HC_HASHTABLESIZE];
+	unsigned short chainTable[LZ4HC_MAXD];
+	/* next block to continue on current prefix */
+	const unsigned char *end;
+	/* All index relative to this position */
+	const unsigned char *base;
+	/* alternate base for extDict */
+	const unsigned char *dictBase;
+	/* below that point, need extDict */
+	unsigned int dictLimit;
+	/* below that point, no more dict */
+	unsigned int lowLimit;
+	/* index from which to continue dict update */
+	unsigned int nextToUpdate;
+	unsigned int compressionLevel;
+} LZ4HC_CCtx_internal;
+typedef union {
+	size_t table[LZ4_STREAMHCSIZE_SIZET];
+	LZ4HC_CCtx_internal internal_donotuse;
+} LZ4_streamHC_t;
 
 /*
- * lz4_decompress_unknownoutputsize()
- *	src     : source address of the compressed data
- *	src_len : is the input size, therefore the compressed size
- *	dest	: output buffer address of the decompressed data
- *	dest_len: is the max size of the destination buffer, which is
- *		returned with actual size of decompressed data after
- *		decompress done
- *	return  : Success if return 0
- *		  Error if return (< 0)
- *	note :  Destination buffer must be already allocated.
- */
-int lz4_decompress_unknownoutputsize(const unsigned char *src, size_t src_len,
-		unsigned char *dest, size_t *dest_len);
+ * LZ4_streamDecode_t - information structure to track an
+ *	LZ4 stream during decompression.
+ *
+ * init this structure using LZ4_setStreamDecode (or memset()) before first use
+ */
+typedef struct {
+	const uint8_t *externalDict;
+	size_t extDictSize;
+	const uint8_t *prefixEnd;
+	size_t prefixSize;
+} LZ4_streamDecode_t_internal;
+typedef union {
+	unsigned long long table[LZ4_STREAMDECODESIZE_U64];
+	LZ4_streamDecode_t_internal internal_donotuse;
+} LZ4_streamDecode_t;
+
+/*-************************************************************************
+ *	SIZE OF STATE
+ **************************************************************************/
+#define LZ4_MEM_COMPRESS	LZ4_STREAMSIZE
+#define LZ4HC_MEM_COMPRESS	LZ4_STREAMHCSIZE
+
+/*-************************************************************************
+ *	Compression Functions
+ **************************************************************************/
+
+/**
+ * LZ4_compressBound() - Max. output size in worst case scenarios
+ * @isize: Size of the input data
+ *
+ * Return: Max. size LZ4 may output in a "worst case" scenario
+ * (data not compressible)
+ */
+static inline int LZ4_compressBound(size_t isize)
+{
+	return LZ4_COMPRESSBOUND(isize);
+}
+
+/**
+ * LZ4_compress_default() - Compress data from source to dest
+ * @source: source address of the original data
+ * @dest: output buffer address of the compressed data
+ * @inputSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE
+ * @maxOutputSize: full or partial size of buffer 'dest'
+ *	which must be already allocated
+ * @wrkmem: address of the working memory.
+ *	This requires 'wrkmem' of size LZ4_MEM_COMPRESS.
+ *
+ * Compresses 'inputSize' bytes from buffer 'source'
+ * into already allocated 'dest' buffer of size 'maxOutputSize'.
+ * Compression is guaranteed to succeed if
+ * 'maxOutputSize' >= LZ4_compressBound(inputSize).
+ * It also runs faster, so it's a recommended setting.
+ * If the function cannot compress 'source' into a more limited 'dest' budget,
+ * compression stops *immediately*, and the function result is zero.
+ * As a consequence, 'dest' content is not valid.
+ *
+ * Return: Number of bytes written into buffer 'dest'
+ *	(necessarily <= maxOutputSize) or 0 if compression fails
+ */
+int LZ4_compress_default(const char *source, char *dest, int inputSize,
+	int maxOutputSize, void *wrkmem);
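
A minimal sketch of a caller of the new one-shot API, sizing 'dst' via LZ4_compressBound() and supplying LZ4_MEM_COMPRESS bytes of work memory; the wrapper function is hypothetical and <linux/vmalloc.h> is assumed:

static int example_compress(const char *src, int src_len, char *dst)
{
	/* dst is assumed to hold at least LZ4_compressBound(src_len) bytes */
	void *wrkmem = vmalloc(LZ4_MEM_COMPRESS);
	int out;

	if (!wrkmem)
		return -ENOMEM;
	out = LZ4_compress_default(src, dst, src_len,
				   LZ4_compressBound(src_len), wrkmem);
	vfree(wrkmem);
	return out ? out : -E2BIG;	/* 0 means 'dst' was too small */
}
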
+
+/**
+ * LZ4_compress_fast() - As LZ4_compress_default providing an acceleration param
+ * @source: source address of the original data
+ * @dest: output buffer address of the compressed data
+ * @inputSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE
+ * @maxOutputSize: full or partial size of buffer 'dest'
+ *	which must be already allocated
+ * @acceleration: acceleration factor
+ * @wrkmem: address of the working memory.
+ *	This requires 'wrkmem' of size LZ4_MEM_COMPRESS.
+ *
+ * Same as LZ4_compress_default(), but allows selecting an "acceleration"
+ * factor. The larger the acceleration value, the faster the algorithm,
+ * but also the lesser the compression. It's a trade-off. It can be fine tuned,
+ * with each successive value providing roughly +~3% to speed.
+ * An acceleration value of "1" is the same as regular LZ4_compress_default().
+ * Values <= 0 will be replaced by LZ4_ACCELERATION_DEFAULT, which is 1.
+ *
+ * Return: Number of bytes written into buffer 'dest'
+ *	(necessarily <= maxOutputSize) or 0 if compression fails
+ */
+int LZ4_compress_fast(const char *source, char *dest, int inputSize,
+	int maxOutputSize, int acceleration, void *wrkmem);
+
+/**
+ * LZ4_compress_destSize() - Compress as much data as possible
+ *	from source to dest
+ * @source: source address of the original data
+ * @dest: output buffer address of the compressed data
+ * @sourceSizePtr: will be modified to indicate how many bytes were read
+ *	from 'source' to fill 'dest'. New value is necessarily <= old value.
+ * @targetDestSize: Size of buffer 'dest' which must be already allocated
+ * @wrkmem: address of the working memory.
+ *	This requires 'wrkmem' of size LZ4_MEM_COMPRESS.
+ *
+ * Reverses the logic by compressing as much data as possible
+ * from 'source' buffer into already allocated buffer 'dest'
+ * of size 'targetDestSize'.
+ * This function either compresses the entire 'source' content into 'dest'
+ * if it's large enough, or fills the 'dest' buffer completely with as much
+ * data as possible from 'source'.
+ *
+ * Return: Number of bytes written into 'dest' (necessarily <= targetDestSize)
+ *	or 0 if compression fails
+ */
+int LZ4_compress_destSize(const char *source, char *dest, int *sourceSizePtr,
+	int targetDestSize, void *wrkmem);
+
+/*-************************************************************************
+ *	Decompression Functions
+ **************************************************************************/
+
+/**
+ * LZ4_decompress_fast() - Decompresses data from 'source' into 'dest'
+ * @source: source address of the compressed data
+ * @dest: output buffer address of the uncompressed data
+ *	which must be already allocated with 'originalSize' bytes
+ * @originalSize: is the original and therefore uncompressed size
+ *
+ * Decompresses data from 'source' into 'dest'.
+ * This function fully respects memory boundaries for properly formed
+ * compressed data.
+ * It is a bit faster than LZ4_decompress_safe().
+ * However, it does not provide any protection against intentionally
+ * modified data streams (malicious input).
+ * Use this function in a trusted environment only
+ * (data to decode comes from a trusted source).
+ *
+ * Return: number of bytes read from the source buffer
+ *	or a negative result if decompression fails.
+ */
+int LZ4_decompress_fast(const char *source, char *dest, int originalSize);
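
The matching decompression side, sketched with hypothetical buffer bookkeeping; LZ4_decompress_safe(), declared just below, is the right call when the input is untrusted:

static int example_decompress(const char *comp, int comp_len,
			      char *out, int out_cap)
{
	int n = LZ4_decompress_safe(comp, out, comp_len, out_cap);

	return n < 0 ? -EINVAL : n;	/* n = decompressed byte count */
}
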
+
+/**
+ * LZ4_decompress_safe() - Decompression protected against buffer overflow
+ * @source: source address of the compressed data
+ * @dest: output buffer address of the uncompressed data
+ *	which must be already allocated
+ * @compressedSize: is the precise full size of the compressed block
+ * @maxDecompressedSize: is the size of 'dest' buffer
+ *
+ * Decompresses data from 'source' into 'dest'.
+ * If the source stream is detected malformed, the function will
+ * stop decoding and return a negative result.
+ * This function is protected against buffer overflow exploits,
+ * including malicious data packets. It never writes outside the output
+ * buffer, nor reads outside the input buffer.
+ *
+ * Return: number of bytes decompressed into destination buffer
+ *	(necessarily <= maxDecompressedSize)
+ *	or a negative result in case of error
+ */
+int LZ4_decompress_safe(const char *source, char *dest, int compressedSize,
+	int maxDecompressedSize);
+
+/**
+ * LZ4_decompress_safe_partial() - Decompress a block of size 'compressedSize'
+ *	at position 'source' into buffer 'dest'
+ * @source: source address of the compressed data
+ * @dest: output buffer address of the decompressed data which must be
+ *	already allocated
+ * @compressedSize: is the precise full size of the compressed block.
+ * @targetOutputSize: the decompression operation will try
+ *	to stop as soon as 'targetOutputSize' has been reached
+ * @maxDecompressedSize: is the size of destination buffer
+ *
+ * This function decompresses a compressed block of size 'compressedSize'
+ * at position 'source' into destination buffer 'dest'
+ * of size 'maxDecompressedSize'.
+ * The function tries to stop the decompression operation as soon as
+ * 'targetOutputSize' has been reached, reducing decompression time.
+ * This function never writes outside of the output buffer,
+ * and never reads outside of the input buffer.
+ * It is therefore protected against malicious data packets.
+ *
+ * Return: the number of bytes decoded in the destination buffer
+ *	(necessarily <= maxDecompressedSize)
+ *	or a negative result in case of error
+ *
+ */
+int LZ4_decompress_safe_partial(const char *source, char *dest,
+	int compressedSize, int targetOutputSize, int maxDecompressedSize);
+
+/*-************************************************************************
+ *	LZ4 HC Compression
+ **************************************************************************/
+
+/**
+ * LZ4_compress_HC() - Compress data from `src` into `dst`, using HC algorithm
+ * @src: source address of the original data
+ * @dst: output buffer address of the compressed data
+ * @srcSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE
+ * @dstCapacity: full or partial size of buffer 'dst',
+ *	which must be already allocated
+ * @compressionLevel: Recommended values are between 4 and 9, although any
+ *	value between 1 and LZ4HC_MAX_CLEVEL will work.
+ *	Values >LZ4HC_MAX_CLEVEL behave the same as 16.
+ * @wrkmem: address of the working memory.
+ *	This requires 'wrkmem' of size LZ4HC_MEM_COMPRESS.
+ *
+ * Compress data from 'src' into 'dst', using the more powerful
+ * but slower "HC" algorithm. Compression is guaranteed to succeed if
+ * 'dstCapacity' >= LZ4_compressBound(srcSize).
+ *
+ * Return : the number of bytes written into 'dst' or 0 if compression fails.
+ */
+int LZ4_compress_HC(const char *src, char *dst, int srcSize, int dstCapacity,
+	int compressionLevel, void *wrkmem);
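
The HC entry point differs from LZ4_compress_default() mainly in the explicit level and the larger LZ4HC_MEM_COMPRESS state; a hypothetical sketch:

static int example_compress_hc(const char *src, int src_len,
			       char *dst, int dst_cap)
{
	void *wrkmem = vmalloc(LZ4HC_MEM_COMPRESS);
	int out;

	if (!wrkmem)
		return -ENOMEM;
	out = LZ4_compress_HC(src, dst, src_len, dst_cap,
			      LZ4HC_DEFAULT_CLEVEL, wrkmem);
	vfree(wrkmem);
	return out ? out : -E2BIG;	/* 0 means 'dst' was too small */
}
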
+
+/**
+ * LZ4_resetStreamHC() - Init an allocated 'LZ4_streamHC_t' structure
+ * @streamHCPtr: pointer to the 'LZ4_streamHC_t' structure
+ * @compressionLevel: Recommended values are between 4 and 9, although any
+ *	value between 1 and LZ4HC_MAX_CLEVEL will work.
+ *	Values >LZ4HC_MAX_CLEVEL behave the same as 16.
+ *
+ * An LZ4_streamHC_t structure can be allocated once
+ * and re-used multiple times.
+ * Use this function to init an allocated `LZ4_streamHC_t` structure
+ * and start a new compression.
+ */
+void LZ4_resetStreamHC(LZ4_streamHC_t *streamHCPtr, int compressionLevel);
+
+/**
+ * LZ4_loadDictHC() - Load a static dictionary into LZ4_streamHC
+ * @streamHCPtr: pointer to the LZ4HC_stream_t
+ * @dictionary: dictionary to load
+ * @dictSize: size of dictionary
+ *
+ * Use this function to load a static dictionary into LZ4HC_stream.
+ * Any previous data will be forgotten, only 'dictionary'
+ * will remain in memory.
+ * Loading a size of 0 is allowed.
+ *
+ * Return : dictionary size, in bytes (necessarily <= 64 KB)
+ */
+int LZ4_loadDictHC(LZ4_streamHC_t *streamHCPtr, const char *dictionary,
+	int dictSize);
+
+/**
+ * LZ4_compress_HC_continue() - Compress 'src' using data from previously
+ *	compressed blocks as a dictionary using the HC algorithm
+ * @streamHCPtr: Pointer to the previous 'LZ4_streamHC_t' structure
+ * @src: source address of the original data
+ * @dst: output buffer address of the compressed data,
+ *	which must be already allocated
+ * @srcSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE
+ * @maxDstSize: full or partial size of buffer 'dest'
+ *	which must be already allocated
+ *
+ * These functions compress data in successive blocks of any size, using
+ * previous blocks as dictionary. One key assumption is that previous
+ * blocks (up to 64 KB) remain read-accessible while
+ * compressing next blocks. There is an exception for ring buffers,
+ * which can be smaller than 64 KB.
+ * The ring buffer scenario is automatically detected and handled by
+ * LZ4_compress_HC_continue().
+ * Before starting compression, state must be properly initialized,
+ * using LZ4_resetStreamHC().
+ * A first "fictional block" can then be designated as
+ * initial dictionary, using LZ4_loadDictHC() (Optional).
+ * Then, use LZ4_compress_HC_continue()
+ * to compress each successive block. Previous memory blocks
+ * (including initial dictionary when present) must remain accessible
+ * and unmodified during compression.
+ * 'dst' buffer should be sized to handle worst case scenarios, using
+ * LZ4_compressBound(), to ensure operation success.
+ * If, for any reason, previous data blocks can't be preserved unmodified
+ * in memory during the next compression block,
+ * you must save them to a safer memory space, using LZ4_saveDictHC().
+ * Return value of LZ4_saveDictHC() is the size of dictionary
+ * effectively saved into 'safeBuffer'.
+ * + * Return: Number of bytes written into buffer 'dst' or 0 if compression fails + */ +int LZ4_compress_HC_continue(LZ4_streamHC_t *streamHCPtr, const char *src, + char *dst, int srcSize, int maxDstSize); + +/** + * LZ4_saveDictHC() - Save static dictionary from LZ4HC_stream + * @streamHCPtr: pointer to the 'LZ4HC_stream_t' structure + * @safeBuffer: buffer to save dictionary to, must be already allocated + * @maxDictSize: size of 'safeBuffer' + * + * If previously compressed data block is not guaranteed + * to remain available at its memory location, + * save it into a safer place (char *safeBuffer). + * Note : you don't need to call LZ4_loadDictHC() afterwards, + * dictionary is immediately usable, you can therefore call + * LZ4_compress_HC_continue(). + * + * Return : saved dictionary size in bytes (necessarily <= maxDictSize), + * or 0 if error. + */ +int LZ4_saveDictHC(LZ4_streamHC_t *streamHCPtr, char *safeBuffer, + int maxDictSize); + +/*-********************************************* + * Streaming Compression Functions + ***********************************************/ + +/** + * LZ4_resetStream() - Init an allocated 'LZ4_stream_t' structure + * @LZ4_stream: pointer to the 'LZ4_stream_t' structure + * + * An LZ4_stream_t structure can be allocated once + * and re-used multiple times. + * Use this function to init an allocated `LZ4_stream_t` structure + * and start a new compression. + */ +void LZ4_resetStream(LZ4_stream_t *LZ4_stream); + +/** + * LZ4_loadDict() - Load a static dictionary into LZ4_stream + * @streamPtr: pointer to the LZ4_stream_t + * @dictionary: dictionary to load + * @dictSize: size of dictionary + * + * Use this function to load a static dictionary into LZ4_stream. + * Any previous data will be forgotten, only 'dictionary' + * will remain in memory. + * Loading a size of 0 is allowed. + * + * Return : dictionary size, in bytes (necessarily <= 64 KB) + */ +int LZ4_loadDict(LZ4_stream_t *streamPtr, const char *dictionary, + int dictSize); + +/** + * LZ4_saveDict() - Save static dictionary from LZ4_stream + * @streamPtr: pointer to the 'LZ4_stream_t' structure + * @safeBuffer: buffer to save dictionary to, must be already allocated + * @dictSize: size of 'safeBuffer' + * + * If previously compressed data block is not guaranteed + * to remain available at its memory location, + * save it into a safer place (char *safeBuffer). + * Note : you don't need to call LZ4_loadDict() afterwards, + * dictionary is immediately usable, you can therefore call + * LZ4_compress_fast_continue(). + * + * Return : saved dictionary size in bytes (necessarily <= dictSize), + * or 0 if error. + */ +int LZ4_saveDict(LZ4_stream_t *streamPtr, char *safeBuffer, int dictSize); + +/** + * LZ4_compress_fast_continue() - Compress 'src' using data from previously + * compressed blocks as a dictionary + * @streamPtr: Pointer to the previous 'LZ4_stream_t' structure + * @src: source address of the original data + * @dst: output buffer address of the compressed data, + * which must be already allocated + * @srcSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE + * @maxDstSize: full or partial size of buffer 'dest' + * which must be already allocated + * @acceleration: acceleration factor + * + * Compress buffer content 'src', using data from previously compressed blocks + * as dictionary to improve compression ratio. + * Important : Previous data blocks are assumed to still + * be present and unmodified ! 
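
A sketch of the block-wise streaming pattern just described, assuming the caller allocated the LZ4_stream_t and keeps every input block readable until the next call returns; names are hypothetical:

static int example_compress_stream(LZ4_stream_t *ctx, const char *blocks[],
				   const int sizes[], int nblocks,
				   char *dst, int dst_cap)
{
	int i, n, total = 0;

	LZ4_resetStream(ctx);
	for (i = 0; i < nblocks; i++) {
		n = LZ4_compress_fast_continue(ctx, blocks[i], dst + total,
					       sizes[i], dst_cap - total,
					       LZ4_ACCELERATION_DEFAULT);
		if (!n)
			return -E2BIG;	/* ran out of 'dst' space */
		total += n;
	}
	return total;
}
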
+ * If maxDstSize >= LZ4_compressBound(srcSize),
+ * compression is guaranteed to succeed, and runs faster.
+ *
+ * Return: Number of bytes written into buffer 'dst' or 0 if compression fails
+ */
+int LZ4_compress_fast_continue(LZ4_stream_t *streamPtr, const char *src,
+	char *dst, int srcSize, int maxDstSize, int acceleration);
+
+/**
+ * LZ4_setStreamDecode() - Instruct where to find dictionary
+ * @LZ4_streamDecode: the 'LZ4_streamDecode_t' structure
+ * @dictionary: dictionary to use
+ * @dictSize: size of dictionary
+ *
+ * Use this function to instruct where to find the dictionary.
+ * Setting a size of 0 is allowed (same effect as reset).
+ *
+ * Return: 1 if OK, 0 if error
+ */
+int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode,
+	const char *dictionary, int dictSize);
+
+/**
+ * LZ4_decompress_safe_continue() - Decompress blocks in streaming mode
+ * @LZ4_streamDecode: the 'LZ4_streamDecode_t' structure
+ * @source: source address of the compressed data
+ * @dest: output buffer address of the uncompressed data
+ *	which must be already allocated
+ * @compressedSize: is the precise full size of the compressed block
+ * @maxDecompressedSize: is the size of 'dest' buffer
+ *
+ * This decoding function allows decompression of multiple blocks
+ * in "streaming" mode.
+ * Previously decoded blocks *must* remain available at the memory position
+ * where they were decoded (up to 64 KB).
+ * In the case of ring buffers, the decoding buffer must be either :
+ * - Exactly same size as encoding buffer, with same update rule
+ *	(block boundaries at same positions). In which case,
+ *	the decoding & encoding ring buffer can have any size,
+ *	including very small ones ( < 64 KB).
+ * - Larger than encoding buffer, by a minimum of maxBlockSize more bytes.
+ *	maxBlockSize is implementation dependent.
+ *	It's the maximum size you intend to compress into a single block.
+ *	In which case, encoding and decoding buffers do not need
+ *	to be synchronized, and encoding ring buffer can have any size,
+ *	including small ones ( < 64 KB).
+ * - _At least_ 64 KB + 8 bytes + maxBlockSize.
+ *	In which case, encoding and decoding buffers do not need to be
+ *	synchronized, and encoding ring buffer can have any size,
+ *	including larger than decoding buffer.
+ * Whenever these conditions are not possible, save the last 64KB of decoded
+ * data into a safe buffer, and indicate where it is saved
+ * using LZ4_setStreamDecode().
+ *
+ * Return: number of bytes decompressed into destination buffer
+ *	(necessarily <= maxDecompressedSize)
+ *	or a negative result in case of error
+ */
+int LZ4_decompress_safe_continue(LZ4_streamDecode_t *LZ4_streamDecode,
+	const char *source, char *dest, int compressedSize,
+	int maxDecompressedSize);
+
+/**
+ * LZ4_decompress_fast_continue() - Decompress blocks in streaming mode
+ * @LZ4_streamDecode: the 'LZ4_streamDecode_t' structure
+ * @source: source address of the compressed data
+ * @dest: output buffer address of the uncompressed data
+ *	which must be already allocated with 'originalSize' bytes
+ * @originalSize: is the original and therefore uncompressed size
+ *
+ * This decoding function allows decompression of multiple blocks
+ * in "streaming" mode.
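
The decode-side counterpart, sketched for a single block with no preset dictionary; later blocks would reuse the same LZ4_streamDecode_t and must leave earlier output in place:

static int example_decompress_stream(LZ4_streamDecode_t *dctx,
				     const char *comp, int comp_len,
				     char *out, int out_cap)
{
	LZ4_setStreamDecode(dctx, NULL, 0);	/* size 0 == plain reset */
	return LZ4_decompress_safe_continue(dctx, comp, out,
					    comp_len, out_cap);
}
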
+ * Previously decoded blocks *must* remain available at the memory position
+ * where they were decoded (up to 64 KB).
+ * In the case of ring buffers, the decoding buffer must be either :
+ * - Exactly same size as encoding buffer, with same update rule
+ *	(block boundaries at same positions). In which case,
+ *	the decoding & encoding ring buffer can have any size,
+ *	including very small ones ( < 64 KB).
+ * - Larger than encoding buffer, by a minimum of maxBlockSize more bytes.
+ *	maxBlockSize is implementation dependent.
+ *	It's the maximum size you intend to compress into a single block.
+ *	In which case, encoding and decoding buffers do not need
+ *	to be synchronized, and encoding ring buffer can have any size,
+ *	including small ones ( < 64 KB).
+ * - _At least_ 64 KB + 8 bytes + maxBlockSize.
+ *	In which case, encoding and decoding buffers do not need to be
+ *	synchronized, and encoding ring buffer can have any size,
+ *	including larger than decoding buffer.
+ * Whenever these conditions are not possible, save the last 64KB of decoded
+ * data into a safe buffer, and indicate where it is saved
+ * using LZ4_setStreamDecode().
+ *
+ * Return: number of bytes decompressed into destination buffer
+ *	(necessarily <= maxDecompressedSize)
+ *	or a negative result in case of error
+ */
+int LZ4_decompress_fast_continue(LZ4_streamDecode_t *LZ4_streamDecode,
+	const char *source, char *dest, int originalSize);
+
+/**
+ * LZ4_decompress_safe_usingDict() - Same as LZ4_setStreamDecode()
+ *	followed by LZ4_decompress_safe_continue()
+ * @source: source address of the compressed data
+ * @dest: output buffer address of the uncompressed data
+ *	which must be already allocated
+ * @compressedSize: is the precise full size of the compressed block
+ * @maxDecompressedSize: is the size of 'dest' buffer
+ * @dictStart: pointer to the start of the dictionary in memory
+ * @dictSize: size of dictionary
+ *
+ * This decoding function works the same as
+ * a combination of LZ4_setStreamDecode() followed by
+ * LZ4_decompress_safe_continue().
+ * It is stand-alone, and doesn't need a LZ4_streamDecode_t structure.
+ *
+ * Return: number of bytes decompressed into destination buffer
+ *	(necessarily <= maxDecompressedSize)
+ *	or a negative result in case of error
+ */
+int LZ4_decompress_safe_usingDict(const char *source, char *dest,
+	int compressedSize, int maxDecompressedSize, const char *dictStart,
+	int dictSize);
+
+/**
+ * LZ4_decompress_fast_usingDict() - Same as LZ4_setStreamDecode()
+ *	followed by LZ4_decompress_fast_continue()
+ * @source: source address of the compressed data
+ * @dest: output buffer address of the uncompressed data
+ *	which must be already allocated with 'originalSize' bytes
+ * @originalSize: is the original and therefore uncompressed size
+ * @dictStart: pointer to the start of the dictionary in memory
+ * @dictSize: size of dictionary
+ *
+ * This decoding function works the same as
+ * a combination of LZ4_setStreamDecode() followed by
+ * LZ4_decompress_fast_continue().
+ * It is stand-alone, and doesn't need a LZ4_streamDecode_t structure.
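
For comparison, the stand-alone dictionary form needs no stream state at all; a short sketch:

static int example_decompress_with_dict(const char *comp, int comp_len,
					char *out, int out_cap,
					const char *dict, int dict_len)
{
	return LZ4_decompress_safe_usingDict(comp, out, comp_len, out_cap,
					     dict, dict_len);
}
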
+ * + * Return: number of bytes decompressed into destination buffer + * (necessarily <= maxDecompressedSize) + * or a negative result in case of error + */ +int LZ4_decompress_fast_usingDict(const char *source, char *dest, + int originalSize, const char *dictStart, int dictSize); + #endif diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h index a57f0dfb6db7..4055cf8cc978 100644 --- a/include/linux/marvell_phy.h +++ b/include/linux/marvell_phy.h @@ -17,8 +17,15 @@ #define MARVELL_PHY_ID_88E1116R 0x01410e40 #define MARVELL_PHY_ID_88E1510 0x01410dd0 #define MARVELL_PHY_ID_88E1540 0x01410eb0 +#define MARVELL_PHY_ID_88E1545 0x01410ea0 #define MARVELL_PHY_ID_88E3016 0x01410e60 +/* The MV88e6390 Ethernet switch contains embedded PHYs. These PHYs do + * not have a model ID. So the switch driver traps reads to the ID2 + * register and returns the switch family ID + */ +#define MARVELL_PHY_ID_88E6390 0x01410f90 + /* struct phy_device dev_flags definitions */ #define MARVELL_PHY_M1145_FLAGS_RESISTANCE 0x00000001 #define MARVELL_PHY_M1118_DNS323_LEDS 0x00000002 diff --git a/include/linux/mdio.h b/include/linux/mdio.h index bf9d1d750693..ca08ab16ecdc 100644 --- a/include/linux/mdio.h +++ b/include/linux/mdio.h @@ -10,6 +10,7 @@ #define __LINUX_MDIO_H__ #include <uapi/linux/mdio.h> +#include <linux/mod_devicetable.h> struct mii_bus; @@ -29,6 +30,7 @@ struct mdio_device { const struct dev_pm_ops *pm_ops; struct mii_bus *bus; + char modalias[MDIO_NAME_SIZE]; int (*bus_match)(struct device *dev, struct device_driver *drv); void (*device_free)(struct mdio_device *mdiodev); @@ -71,6 +73,7 @@ int mdio_device_register(struct mdio_device *mdiodev); void mdio_device_remove(struct mdio_device *mdiodev); int mdio_driver_register(struct mdio_driver *drv); void mdio_driver_unregister(struct mdio_driver *drv); +int mdio_device_bus_match(struct device *dev, struct device_driver *drv); static inline bool mdio_phy_id_is_c45(int phy_id) { @@ -130,6 +133,10 @@ extern int mdio45_nway_restart(const struct mdio_if_info *mdio); extern void mdio45_ethtool_gset_npage(const struct mdio_if_info *mdio, struct ethtool_cmd *ecmd, u32 npage_adv, u32 npage_lpa); +extern void +mdio45_ethtool_ksettings_get_npage(const struct mdio_if_info *mdio, + struct ethtool_link_ksettings *cmd, + u32 npage_adv, u32 npage_lpa); /** * mdio45_ethtool_gset - get settings for ETHTOOL_GSET @@ -147,6 +154,23 @@ static inline void mdio45_ethtool_gset(const struct mdio_if_info *mdio, mdio45_ethtool_gset_npage(mdio, ecmd, 0, 0); } +/** + * mdio45_ethtool_ksettings_get - get settings for ETHTOOL_GLINKSETTINGS + * @mdio: MDIO interface + * @cmd: Ethtool request structure + * + * Since the CSRs for auto-negotiation using next pages are not fully + * standardised, this function does not attempt to decode them. Use + * mdio45_ethtool_ksettings_get_npage() to specify advertisement bits + * from next pages. 
+ */ +static inline void +mdio45_ethtool_ksettings_get(const struct mdio_if_info *mdio, + struct ethtool_link_ksettings *cmd) +{ + mdio45_ethtool_ksettings_get_npage(mdio, cmd, 0, 0); +} + extern int mdio_mii_ioctl(const struct mdio_if_info *mdio, struct mii_ioctl_data *mii_data, int cmd); @@ -244,7 +268,7 @@ bool mdiobus_is_registered_device(struct mii_bus *bus, int addr); struct phy_device *mdiobus_get_phy(struct mii_bus *bus, int addr); /** - * module_mdio_driver() - Helper macro for registering mdio drivers + * mdio_module_driver() - Helper macro for registering mdio drivers * * Helper macro for MDIO drivers which do not do anything special in module * init/exit. Each module may only use this macro once, and calling it diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 5b759c9acf97..bdfc65af4152 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -42,6 +42,7 @@ struct memblock_type { unsigned long max; /* size of the allocated array */ phys_addr_t total_size; /* size of all regions */ struct memblock_region *regions; + char *name; }; struct memblock { @@ -203,6 +204,7 @@ int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn, unsigned long *end_pfn); void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn, unsigned long *out_end_pfn, int *out_nid); +unsigned long memblock_next_valid_pfn(unsigned long pfn, unsigned long max_pfn); /** * for_each_mem_pfn_range - early memory pfn range iterator diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 254698856b8f..5af377303880 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -253,6 +253,7 @@ struct mem_cgroup { /* Index in the kmem_cache->memcg_params.memcg_caches array */ int kmemcg_id; enum memcg_kmem_state kmem_state; + struct list_head kmem_caches; #endif int last_scanned_node; @@ -829,6 +830,7 @@ void memcg_kmem_uncharge(struct page *page, int order); #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB) extern struct static_key_false memcg_kmem_enabled_key; +extern struct workqueue_struct *memcg_kmem_cache_wq; extern int memcg_nr_cache_ids; void memcg_get_cache_ids(void); diff --git a/include/linux/memory.h b/include/linux/memory.h index 093607f90b91..b723a686fc10 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h @@ -109,9 +109,6 @@ extern void unregister_memory_notifier(struct notifier_block *nb); extern int register_memory_isolate_notifier(struct notifier_block *nb); extern void unregister_memory_isolate_notifier(struct notifier_block *nb); extern int register_new_memory(int, struct mem_section *); -extern int memory_block_change_state(struct memory_block *mem, - unsigned long to_state, - unsigned long from_state_req); #ifdef CONFIG_MEMORY_HOTREMOVE extern int unregister_memory_section(struct mem_section *); #endif diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h index 552cc1d61cc7..44412c9d26e1 100644 --- a/include/linux/mfd/abx500.h +++ b/include/linux/mfd/abx500.h @@ -45,7 +45,7 @@ enum abx500_adc_therm { * struct abx500_res_to_temp - defines one point in a temp to res curve. To * be used in battery packs that combines the identification resistor with a * NTC resistor. 
- * @temp: battery pack temperature in Celcius
+ * @temp: battery pack temperature in Celsius
  * @resist: NTC resistor net total resistance
  */
 struct abx500_res_to_temp {
diff --git a/include/linux/mfd/abx500/ab8500-bm.h b/include/linux/mfd/abx500/ab8500-bm.h
index 12a5b396921e..e63681eb6c62 100644
--- a/include/linux/mfd/abx500/ab8500-bm.h
+++ b/include/linux/mfd/abx500/ab8500-bm.h
@@ -279,7 +279,7 @@ enum bup_vch_sel {
  * struct res_to_temp - defines one point in a temp to res curve. To
  * be used in battery packs that combines the identification resistor with a
  * NTC resistor.
- * @temp: battery pack temperature in Celcius
+ * @temp: battery pack temperature in Celsius
  * @resist: NTC resistor net total resistance
  */
 struct res_to_temp {
@@ -290,7 +290,7 @@ struct res_to_temp {
 /**
  * struct batres_vs_temp - defines one point in a temp vs battery internal
  * resistance curve.
- * @temp: battery pack temperature in Celcius
+ * @temp: battery pack temperature in Celsius
  * @resist: battery internal resistance in mOhm
  */
 struct batres_vs_temp {
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index f848ee86a339..0d9a1ff38393 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -235,10 +235,20 @@ enum axp20x_variants {
 #define AXP22X_BATLOW_THRES1		0xe6
 
 /* AXP288 specific registers */
+#define AXP288_POWER_REASON		0x02
+#define AXP288_BC_GLOBAL		0x2c
+#define AXP288_BC_VBUS_CNTL		0x2d
+#define AXP288_BC_USB_STAT		0x2e
+#define AXP288_BC_DET_STAT		0x2f
 #define AXP288_PMIC_ADC_H		0x56
 #define AXP288_PMIC_ADC_L		0x57
+#define AXP288_TS_ADC_H			0x58
+#define AXP288_TS_ADC_L			0x59
+#define AXP288_GP_ADC_H			0x5a
+#define AXP288_GP_ADC_L			0x5b
 #define AXP288_ADC_TS_PIN_CTRL		0x84
-#define AXP288_PMIC_ADC_EN		0x84
+#define AXP288_RT_BATT_V_H		0xa0
+#define AXP288_RT_BATT_V_L		0xa1
 
 /* Fuel Gauge */
 #define AXP288_FG_RDC1_REG		0xba
@@ -515,14 +525,10 @@ enum axp809_irqs {
 	AXP809_IRQ_GPIO0_INPUT,
 };
 
-#define AXP288_TS_ADC_H		0x58
-#define AXP288_TS_ADC_L		0x59
-#define AXP288_GP_ADC_H		0x5a
-#define AXP288_GP_ADC_L		0x5b
-
 struct axp20x_dev {
 	struct device			*dev;
 	int				irq;
+	unsigned long			irq_flags;
 	struct regmap			*regmap;
 	struct regmap_irq_chip_data	*regmap_irqc;
 	long				variant;
@@ -582,7 +588,7 @@ int axp20x_match_device(struct axp20x_dev *axp20x);
 int axp20x_device_probe(struct axp20x_dev *axp20x);
 
 /**
- * axp20x_device_probe(): Remove a axp20x device
+ * axp20x_device_remove(): Remove an axp20x device
  *
  * @axp20x: axp20x device to remove
  *
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h
index f62043a75f43..7a01c94496f1 100644
--- a/include/linux/mfd/cros_ec.h
+++ b/include/linux/mfd/cros_ec.h
@@ -103,6 +103,7 @@ struct cros_ec_command {
  * @din_size: size of din buffer to allocate (zero to use static din)
  * @dout_size: size of dout buffer to allocate (zero to use static dout)
  * @wake_enabled: true if this device can wake the system from sleep
+ * @suspended: true if this device has been suspended
  * @cmd_xfer: send command to EC and get response
  *	Returns the number of bytes received if the communication succeeded, but
  *	that doesn't mean the EC was happy with the command.  The caller
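
The cmd_xfer contract (transport status in the return value, EC status in msg->result) can be exercised with the EC_CMD_HOST_SLEEP_EVENT command added further down in this patch. A hypothetical sketch, assuming the usual trailing data[] payload on struct cros_ec_command and <linux/slab.h>; in-tree callers normally go through a wrapper such as cros_ec_cmd_xfer() instead of calling the op directly:

static int example_notify_suspend(struct cros_ec_device *ec)
{
	struct ec_params_host_sleep_event *ev;
	struct cros_ec_command *msg;
	int ret;

	msg = kzalloc(sizeof(*msg) + sizeof(*ev), GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	msg->command = EC_CMD_HOST_SLEEP_EVENT;
	msg->outsize = sizeof(*ev);
	ev = (struct ec_params_host_sleep_event *)msg->data;
	ev->sleep_event = HOST_SLEEP_EVENT_S3_SUSPEND;

	ret = ec->cmd_xfer(ec, msg);		/* >= 0: bytes received */
	if (ret >= 0 && msg->result)
		ret = -EIO;			/* EC rejected the command */
	kfree(msg);
	return ret < 0 ? ret : 0;
}
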
@@ -136,6 +137,7 @@ struct cros_ec_device {
 	int din_size;
 	int dout_size;
 	bool wake_enabled;
+	bool suspended;
 	int (*cmd_xfer)(struct cros_ec_device *ec,
 			struct cros_ec_command *msg);
 	int (*pkt_xfer)(struct cros_ec_device *ec,
diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h
index 1683003603f3..f1ef6388c233 100644
--- a/include/linux/mfd/cros_ec_commands.h
+++ b/include/linux/mfd/cros_ec_commands.h
@@ -1441,7 +1441,8 @@ enum motionsensor_type {
 	MOTIONSENSE_TYPE_PROX = 3,
 	MOTIONSENSE_TYPE_LIGHT = 4,
 	MOTIONSENSE_TYPE_ACTIVITY = 5,
-	MOTIONSENSE_TYPE_MAX
+	MOTIONSENSE_TYPE_BARO = 6,
+	MOTIONSENSE_TYPE_MAX,
 };
 
 /* List of motion sensor locations. */
@@ -1839,18 +1840,69 @@ struct ec_response_tmp006_get_raw {
  *
  * Returns raw data for keyboard cols; see ec_response_mkbp_info.cols for
  * expected response size.
+ *
+ * NOTE: This has been superseded by EC_CMD_MKBP_GET_NEXT_EVENT. If you wish
+ * to obtain the instantaneous state, use EC_CMD_MKBP_INFO with the type
+ * EC_MKBP_INFO_CURRENT and event EC_MKBP_EVENT_KEY_MATRIX.
  */
 #define EC_CMD_MKBP_STATE 0x60
 
-/* Provide information about the matrix : number of rows and columns */
+/*
+ * Provide information about various MKBP things. See enum ec_mkbp_info_type.
+ */
 #define EC_CMD_MKBP_INFO 0x61
 
 struct ec_response_mkbp_info {
 	uint32_t rows;
 	uint32_t cols;
-	uint8_t switches;
+	/* Formerly "switches", which was 0. */
+	uint8_t reserved;
 } __packed;
 
+struct ec_params_mkbp_info {
+	uint8_t info_type;
+	uint8_t event_type;
+} __packed;
+
+enum ec_mkbp_info_type {
+	/*
+	 * Info about the keyboard matrix: number of rows and columns.
+	 *
+	 * Returns struct ec_response_mkbp_info.
+	 */
+	EC_MKBP_INFO_KBD = 0,
+
+	/*
+	 * For buttons and switches, info about which specifically are
+	 * supported. event_type must be set to one of the values in enum
+	 * ec_mkbp_event.
+	 *
+	 * For EC_MKBP_EVENT_BUTTON and EC_MKBP_EVENT_SWITCH, returns a 4 byte
+	 * bitmask indicating which buttons or switches are present. See the
+	 * bit indices below.
+	 */
+	EC_MKBP_INFO_SUPPORTED = 1,
+
+	/*
+	 * Instantaneous state of buttons and switches.
+	 *
+	 * event_type must be set to one of the values in enum ec_mkbp_event.
+	 *
+	 * For EC_MKBP_EVENT_KEY_MATRIX, returns uint8_t key_matrix[13]
+	 * indicating the current state of the keyboard matrix.
+	 *
+	 * For EC_MKBP_EVENT_HOST_EVENT, returns uint32_t host_event, the raw
+	 * event state.
+	 *
+	 * For EC_MKBP_EVENT_BUTTON, returns uint32_t buttons, indicating the
+	 * state of supported buttons.
+	 *
+	 * For EC_MKBP_EVENT_SWITCH, returns uint32_t switches, indicating the
+	 * state of supported switches.
+	 */
+	EC_MKBP_INFO_CURRENT = 2,
+};
+
 /* Simulate key press */
 #define EC_CMD_MKBP_SIMULATE_KEY 0x62
@@ -1983,6 +2035,12 @@ enum ec_mkbp_event {
 	/* New Sensor FIFO data. The event data is fifo_info structure. */
 	EC_MKBP_EVENT_SENSOR_FIFO = 2,
 
+	/* The state of the non-matrixed buttons has changed. */
+	EC_MKBP_EVENT_BUTTON = 3,
+
+	/* The state of the switches has changed.
*/ + EC_MKBP_EVENT_SWITCH = 4, + /* Number of MKBP events */ EC_MKBP_EVENT_COUNT, }; @@ -1992,6 +2050,9 @@ union ec_response_get_next_data { /* Unaligned */ uint32_t host_event; + + uint32_t buttons; + uint32_t switches; } __packed; struct ec_response_get_next_event { @@ -2000,6 +2061,16 @@ struct ec_response_get_next_event { union ec_response_get_next_data data; } __packed; +/* Bit indices for buttons and switches.*/ +/* Buttons */ +#define EC_MKBP_POWER_BUTTON 0 +#define EC_MKBP_VOL_UP 1 +#define EC_MKBP_VOL_DOWN 2 + +/* Switches */ +#define EC_MKBP_LID_OPEN 0 +#define EC_MKBP_TABLET_MODE 1 + /*****************************************************************************/ /* Temperature sensor commands */ @@ -2477,6 +2548,20 @@ struct ec_params_ext_power_current_limit { uint32_t limit; /* in mA */ } __packed; +/* Inform the EC when entering a sleep state */ +#define EC_CMD_HOST_SLEEP_EVENT 0xa9 + +enum host_sleep_event { + HOST_SLEEP_EVENT_S3_SUSPEND = 1, + HOST_SLEEP_EVENT_S3_RESUME = 2, + HOST_SLEEP_EVENT_S0IX_SUSPEND = 3, + HOST_SLEEP_EVENT_S0IX_RESUME = 4 +}; + +struct ec_params_host_sleep_event { + uint8_t sleep_event; +} __packed; + /*****************************************************************************/ /* Smart battery pass-through */ diff --git a/include/linux/mfd/motorola-cpcap.h b/include/linux/mfd/motorola-cpcap.h new file mode 100644 index 000000000000..b4031c2b2214 --- /dev/null +++ b/include/linux/mfd/motorola-cpcap.h @@ -0,0 +1,292 @@ +/* + * The register defines are based on earlier cpcap.h in Motorola Linux kernel + * tree. + * + * Copyright (C) 2007-2009 Motorola, Inc. + * + * Rewritten for the real register offsets instead of enumeration + * to make the defines usable with Linux kernel regmap support + * + * Copyright (C) 2016 Tony Lindgren <tony@atomide.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
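
Decoding the new EC_MKBP_EVENT_SWITCH payload against the bit indices above is a matter of masking; a short sketch with a hypothetical handler (BIT() from <linux/bitops.h>):

static void example_handle_switches(uint32_t switches)
{
	bool lid_open    = switches & BIT(EC_MKBP_LID_OPEN);
	bool tablet_mode = switches & BIT(EC_MKBP_TABLET_MODE);

	pr_debug("lid %s, tablet mode %s\n",
		 lid_open ? "open" : "closed",
		 tablet_mode ? "on" : "off");
}
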
+ */ + +#define CPCAP_VENDOR_ST 0 +#define CPCAP_VENDOR_TI 1 + +#define CPCAP_REVISION_MAJOR(r) (((r) >> 4) + 1) +#define CPCAP_REVISION_MINOR(r) ((r) & 0xf) + +#define CPCAP_REVISION_1_0 0x08 +#define CPCAP_REVISION_1_1 0x09 +#define CPCAP_REVISION_2_0 0x10 +#define CPCAP_REVISION_2_1 0x11 + +/* CPCAP registers */ +#define CPCAP_REG_INT1 0x0000 /* Interrupt 1 */ +#define CPCAP_REG_INT2 0x0004 /* Interrupt 2 */ +#define CPCAP_REG_INT3 0x0008 /* Interrupt 3 */ +#define CPCAP_REG_INT4 0x000c /* Interrupt 4 */ +#define CPCAP_REG_INTM1 0x0010 /* Interrupt Mask 1 */ +#define CPCAP_REG_INTM2 0x0014 /* Interrupt Mask 2 */ +#define CPCAP_REG_INTM3 0x0018 /* Interrupt Mask 3 */ +#define CPCAP_REG_INTM4 0x001c /* Interrupt Mask 4 */ +#define CPCAP_REG_INTS1 0x0020 /* Interrupt Sense 1 */ +#define CPCAP_REG_INTS2 0x0024 /* Interrupt Sense 2 */ +#define CPCAP_REG_INTS3 0x0028 /* Interrupt Sense 3 */ +#define CPCAP_REG_INTS4 0x002c /* Interrupt Sense 4 */ +#define CPCAP_REG_ASSIGN1 0x0030 /* Resource Assignment 1 */ +#define CPCAP_REG_ASSIGN2 0x0034 /* Resource Assignment 2 */ +#define CPCAP_REG_ASSIGN3 0x0038 /* Resource Assignment 3 */ +#define CPCAP_REG_ASSIGN4 0x003c /* Resource Assignment 4 */ +#define CPCAP_REG_ASSIGN5 0x0040 /* Resource Assignment 5 */ +#define CPCAP_REG_ASSIGN6 0x0044 /* Resource Assignment 6 */ +#define CPCAP_REG_VERSC1 0x0048 /* Version Control 1 */ +#define CPCAP_REG_VERSC2 0x004c /* Version Control 2 */ + +#define CPCAP_REG_MI1 0x0200 /* Macro Interrupt 1 */ +#define CPCAP_REG_MIM1 0x0204 /* Macro Interrupt Mask 1 */ +#define CPCAP_REG_MI2 0x0208 /* Macro Interrupt 2 */ +#define CPCAP_REG_MIM2 0x020c /* Macro Interrupt Mask 2 */ +#define CPCAP_REG_UCC1 0x0210 /* UC Control 1 */ +#define CPCAP_REG_UCC2 0x0214 /* UC Control 2 */ + +#define CPCAP_REG_PC1 0x021c /* Power Cut 1 */ +#define CPCAP_REG_PC2 0x0220 /* Power Cut 2 */ +#define CPCAP_REG_BPEOL 0x0224 /* BP and EOL */ +#define CPCAP_REG_PGC 0x0228 /* Power Gate and Control */ +#define CPCAP_REG_MT1 0x022c /* Memory Transfer 1 */ +#define CPCAP_REG_MT2 0x0230 /* Memory Transfer 2 */ +#define CPCAP_REG_MT3 0x0234 /* Memory Transfer 3 */ +#define CPCAP_REG_PF 0x0238 /* Print Format */ + +#define CPCAP_REG_SCC 0x0400 /* System Clock Control */ +#define CPCAP_REG_SW1 0x0404 /* Stop Watch 1 */ +#define CPCAP_REG_SW2 0x0408 /* Stop Watch 2 */ +#define CPCAP_REG_UCTM 0x040c /* UC Turbo Mode */ +#define CPCAP_REG_TOD1 0x0410 /* Time of Day 1 */ +#define CPCAP_REG_TOD2 0x0414 /* Time of Day 2 */ +#define CPCAP_REG_TODA1 0x0418 /* Time of Day Alarm 1 */ +#define CPCAP_REG_TODA2 0x041c /* Time of Day Alarm 2 */ +#define CPCAP_REG_DAY 0x0420 /* Day */ +#define CPCAP_REG_DAYA 0x0424 /* Day Alarm */ +#define CPCAP_REG_VAL1 0x0428 /* Validity 1 */ +#define CPCAP_REG_VAL2 0x042c /* Validity 2 */ + +#define CPCAP_REG_SDVSPLL 0x0600 /* Switcher DVS and PLL */ +#define CPCAP_REG_SI2CC1 0x0604 /* Switcher I2C Control 1 */ +#define CPCAP_REG_Si2CC2 0x0608 /* Switcher I2C Control 2 */ +#define CPCAP_REG_S1C1 0x060c /* Switcher 1 Control 1 */ +#define CPCAP_REG_S1C2 0x0610 /* Switcher 1 Control 2 */ +#define CPCAP_REG_S2C1 0x0614 /* Switcher 2 Control 1 */ +#define CPCAP_REG_S2C2 0x0618 /* Switcher 2 Control 2 */ +#define CPCAP_REG_S3C 0x061c /* Switcher 3 Control */ +#define CPCAP_REG_S4C1 0x0620 /* Switcher 4 Control 1 */ +#define CPCAP_REG_S4C2 0x0624 /* Switcher 4 Control 2 */ +#define CPCAP_REG_S5C 0x0628 /* Switcher 5 Control */ +#define CPCAP_REG_S6C 0x062c /* Switcher 6 Control */ +#define CPCAP_REG_VCAMC 0x0630 /* VCAM Control */ 
+#define CPCAP_REG_VCSIC 0x0634 /* VCSI Control */ +#define CPCAP_REG_VDACC 0x0638 /* VDAC Control */ +#define CPCAP_REG_VDIGC 0x063c /* VDIG Control */ +#define CPCAP_REG_VFUSEC 0x0640 /* VFUSE Control */ +#define CPCAP_REG_VHVIOC 0x0644 /* VHVIO Control */ +#define CPCAP_REG_VSDIOC 0x0648 /* VSDIO Control */ +#define CPCAP_REG_VPLLC 0x064c /* VPLL Control */ +#define CPCAP_REG_VRF1C 0x0650 /* VRF1 Control */ +#define CPCAP_REG_VRF2C 0x0654 /* VRF2 Control */ +#define CPCAP_REG_VRFREFC 0x0658 /* VRFREF Control */ +#define CPCAP_REG_VWLAN1C 0x065c /* VWLAN1 Control */ +#define CPCAP_REG_VWLAN2C 0x0660 /* VWLAN2 Control */ +#define CPCAP_REG_VSIMC 0x0664 /* VSIM Control */ +#define CPCAP_REG_VVIBC 0x0668 /* VVIB Control */ +#define CPCAP_REG_VUSBC 0x066c /* VUSB Control */ +#define CPCAP_REG_VUSBINT1C 0x0670 /* VUSBINT1 Control */ +#define CPCAP_REG_VUSBINT2C 0x0674 /* VUSBINT2 Control */ +#define CPCAP_REG_URT 0x0678 /* Useroff Regulator Trigger */ +#define CPCAP_REG_URM1 0x067c /* Useroff Regulator Mask 1 */ +#define CPCAP_REG_URM2 0x0680 /* Useroff Regulator Mask 2 */ + +#define CPCAP_REG_VAUDIOC 0x0800 /* VAUDIO Control */ +#define CPCAP_REG_CC 0x0804 /* Codec Control */ +#define CPCAP_REG_CDI 0x0808 /* Codec Digital Interface */ +#define CPCAP_REG_SDAC 0x080c /* Stereo DAC */ +#define CPCAP_REG_SDACDI 0x0810 /* Stereo DAC Digital Interface */ +#define CPCAP_REG_TXI 0x0814 /* TX Inputs */ +#define CPCAP_REG_TXMP 0x0818 /* TX MIC PGA's */ +#define CPCAP_REG_RXOA 0x081c /* RX Output Amplifiers */ +#define CPCAP_REG_RXVC 0x0820 /* RX Volume Control */ +#define CPCAP_REG_RXCOA 0x0824 /* RX Codec to Output Amps */ +#define CPCAP_REG_RXSDOA 0x0828 /* RX Stereo DAC to Output Amps */ +#define CPCAP_REG_RXEPOA 0x082c /* RX External PGA to Output Amps */ +#define CPCAP_REG_RXLL 0x0830 /* RX Low Latency */ +#define CPCAP_REG_A2LA 0x0834 /* A2 Loudspeaker Amplifier */ +#define CPCAP_REG_MIPIS1 0x0838 /* MIPI Slimbus 1 */ +#define CPCAP_REG_MIPIS2 0x083c /* MIPI Slimbus 2 */ +#define CPCAP_REG_MIPIS3 0x0840 /* MIPI Slimbus 3. */ +#define CPCAP_REG_LVAB 0x0844 /* LMR Volume and A4 Balanced. 
*/ + +#define CPCAP_REG_CCC1 0x0a00 /* Coulomb Counter Control 1 */ +#define CPCAP_REG_CRM 0x0a04 /* Charger and Reverse Mode */ +#define CPCAP_REG_CCCC2 0x0a08 /* Coincell and Coulomb Ctr Ctrl 2 */ +#define CPCAP_REG_CCS1 0x0a0c /* Coulomb Counter Sample 1 */ +#define CPCAP_REG_CCS2 0x0a10 /* Coulomb Counter Sample 2 */ +#define CPCAP_REG_CCA1 0x0a14 /* Coulomb Counter Accumulator 1 */ +#define CPCAP_REG_CCA2 0x0a18 /* Coulomb Counter Accumulator 2 */ +#define CPCAP_REG_CCM 0x0a1c /* Coulomb Counter Mode */ +#define CPCAP_REG_CCO 0x0a20 /* Coulomb Counter Offset */ +#define CPCAP_REG_CCI 0x0a24 /* Coulomb Counter Integrator */ + +#define CPCAP_REG_ADCC1 0x0c00 /* A/D Converter Configuration 1 */ +#define CPCAP_REG_ADCC2 0x0c04 /* A/D Converter Configuration 2 */ +#define CPCAP_REG_ADCD0 0x0c08 /* A/D Converter Data 0 */ +#define CPCAP_REG_ADCD1 0x0c0c /* A/D Converter Data 1 */ +#define CPCAP_REG_ADCD2 0x0c10 /* A/D Converter Data 2 */ +#define CPCAP_REG_ADCD3 0x0c14 /* A/D Converter Data 3 */ +#define CPCAP_REG_ADCD4 0x0c18 /* A/D Converter Data 4 */ +#define CPCAP_REG_ADCD5 0x0c1c /* A/D Converter Data 5 */ +#define CPCAP_REG_ADCD6 0x0c20 /* A/D Converter Data 6 */ +#define CPCAP_REG_ADCD7 0x0c24 /* A/D Converter Data 7 */ +#define CPCAP_REG_ADCAL1 0x0c28 /* A/D Converter Calibration 1 */ +#define CPCAP_REG_ADCAL2 0x0c2c /* A/D Converter Calibration 2 */ + +#define CPCAP_REG_USBC1 0x0e00 /* USB Control 1 */ +#define CPCAP_REG_USBC2 0x0e04 /* USB Control 2 */ +#define CPCAP_REG_USBC3 0x0e08 /* USB Control 3 */ +#define CPCAP_REG_UVIDL 0x0e0c /* ULPI Vendor ID Low */ +#define CPCAP_REG_UVIDH 0x0e10 /* ULPI Vendor ID High */ +#define CPCAP_REG_UPIDL 0x0e14 /* ULPI Product ID Low */ +#define CPCAP_REG_UPIDH 0x0e18 /* ULPI Product ID High */ +#define CPCAP_REG_UFC1 0x0e1c /* ULPI Function Control 1 */ +#define CPCAP_REG_UFC2 0x0e20 /* ULPI Function Control 2 */ +#define CPCAP_REG_UFC3 0x0e24 /* ULPI Function Control 3 */ +#define CPCAP_REG_UIC1 0x0e28 /* ULPI Interface Control 1 */ +#define CPCAP_REG_UIC2 0x0e2c /* ULPI Interface Control 2 */ +#define CPCAP_REG_UIC3 0x0e30 /* ULPI Interface Control 3 */ +#define CPCAP_REG_USBOTG1 0x0e34 /* USB OTG Control 1 */ +#define CPCAP_REG_USBOTG2 0x0e38 /* USB OTG Control 2 */ +#define CPCAP_REG_USBOTG3 0x0e3c /* USB OTG Control 3 */ +#define CPCAP_REG_UIER1 0x0e40 /* USB Interrupt Enable Rising 1 */ +#define CPCAP_REG_UIER2 0x0e44 /* USB Interrupt Enable Rising 2 */ +#define CPCAP_REG_UIER3 0x0e48 /* USB Interrupt Enable Rising 3 */ +#define CPCAP_REG_UIEF1 0x0e4c /* USB Interrupt Enable Falling 1 */ +#define CPCAP_REG_UIEF2 0x0e50 /* USB Interrupt Enable Falling 2 */ +#define CPCAP_REG_UIEF3 0x0e54 /* USB Interrupt Enable Falling 3 */ +#define CPCAP_REG_UIS 0x0e58 /* USB Interrupt Status */ +#define CPCAP_REG_UIL 0x0e5c /* USB Interrupt Latch */ +#define CPCAP_REG_USBD 0x0e60 /* USB Debug */ +#define CPCAP_REG_SCR1 0x0e64 /* Scratch 1 */ +#define CPCAP_REG_SCR2 0x0e68 /* Scratch 2 */ +#define CPCAP_REG_SCR3 0x0e6c /* Scratch 3 */ + +#define CPCAP_REG_VMC 0x0eac /* Video Mux Control */ +#define CPCAP_REG_OWDC 0x0eb0 /* One Wire Device Control */ +#define CPCAP_REG_GPIO0 0x0eb4 /* GPIO 0 Control */ + +#define CPCAP_REG_GPIO1 0x0ebc /* GPIO 1 Control */ + +#define CPCAP_REG_GPIO2 0x0ec4 /* GPIO 2 Control */ + +#define CPCAP_REG_GPIO3 0x0ecc /* GPIO 3 Control */ + +#define CPCAP_REG_GPIO4 0x0ed4 /* GPIO 4 Control */ + +#define CPCAP_REG_GPIO5 0x0edc /* GPIO 5 Control */ + +#define CPCAP_REG_GPIO6 0x0ee4 /* GPIO 6 Control */ + +#define CPCAP_REG_MDLC
0x1000 /* Main Display Lighting Control */ +#define CPCAP_REG_KLC 0x1004 /* Keypad Lighting Control */ +#define CPCAP_REG_ADLC 0x1008 /* Aux Display Lighting Control */ +#define CPCAP_REG_REDC 0x100c /* Red Triode Control */ +#define CPCAP_REG_GREENC 0x1010 /* Green Triode Control */ +#define CPCAP_REG_BLUEC 0x1014 /* Blue Triode Control */ +#define CPCAP_REG_CFC 0x1018 /* Camera Flash Control */ +#define CPCAP_REG_ABC 0x101c /* Adaptive Boost Control */ +#define CPCAP_REG_BLEDC 0x1020 /* Bluetooth LED Control */ +#define CPCAP_REG_CLEDC 0x1024 /* Camera Privacy LED Control */ + +#define CPCAP_REG_OW1C 0x1200 /* One Wire 1 Command */ +#define CPCAP_REG_OW1D 0x1204 /* One Wire 1 Data */ +#define CPCAP_REG_OW1I 0x1208 /* One Wire 1 Interrupt */ +#define CPCAP_REG_OW1IE 0x120c /* One Wire 1 Interrupt Enable */ + +#define CPCAP_REG_OW1 0x1214 /* One Wire 1 Control */ + +#define CPCAP_REG_OW2C 0x1220 /* One Wire 2 Command */ +#define CPCAP_REG_OW2D 0x1224 /* One Wire 2 Data */ +#define CPCAP_REG_OW2I 0x1228 /* One Wire 2 Interrupt */ +#define CPCAP_REG_OW2IE 0x122c /* One Wire 2 Interrupt Enable */ + +#define CPCAP_REG_OW2 0x1234 /* One Wire 2 Control */ + +#define CPCAP_REG_OW3C 0x1240 /* One Wire 3 Command */ +#define CPCAP_REG_OW3D 0x1244 /* One Wire 3 Data */ +#define CPCAP_REG_OW3I 0x1248 /* One Wire 3 Interrupt */ +#define CPCAP_REG_OW3IE 0x124c /* One Wire 3 Interrupt Enable */ + +#define CPCAP_REG_OW3 0x1254 /* One Wire 3 Control */ +#define CPCAP_REG_GCAIC 0x1258 /* GCAI Clock Control */ +#define CPCAP_REG_GCAIM 0x125c /* GCAI GPIO Mode */ +#define CPCAP_REG_LGDIR 0x1260 /* LMR GCAI GPIO Direction */ +#define CPCAP_REG_LGPU 0x1264 /* LMR GCAI GPIO Pull-up */ +#define CPCAP_REG_LGPIN 0x1268 /* LMR GCAI GPIO Pin */ +#define CPCAP_REG_LGMASK 0x126c /* LMR GCAI GPIO Mask */ +#define CPCAP_REG_LDEB 0x1270 /* LMR Debounce Settings */ +#define CPCAP_REG_LGDET 0x1274 /* LMR GCAI Detach Detect */ +#define CPCAP_REG_LMISC 0x1278 /* LMR Misc Bits */ +#define CPCAP_REG_LMACE 0x127c /* LMR Mace IC Support */ + +#define CPCAP_REG_TEST 0x7c00 /* Test */ + +#define CPCAP_REG_ST_TEST1 0x7d08 /* ST Test1 */ + +#define CPCAP_REG_ST_TEST2 0x7d18 /* ST Test2 */ + +/* + * Helpers for child devices to check the revision and vendor. + * + * REVISIT: No documentation for the bits below, please update + * to use proper names for defines when available. 
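 *
 * (Editorial note, inferred only from the helpers below and unverified
 * against CPCAP documentation: CPCAP_REG_VERSC1 appears to keep the
 * vendor in bits [8:6] and a 6-bit revision whose two 3-bit halves are
 * stored swapped, which is what the shift-and-mask in
 * cpcap_get_revision() undoes.)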
+ */ + +static inline int cpcap_get_revision(struct device *dev, + struct regmap *regmap, + u16 *revision) +{ + unsigned int val; + int ret; + + ret = regmap_read(regmap, CPCAP_REG_VERSC1, &val); + if (ret) { + dev_err(dev, "Could not read revision\n"); + + return ret; + } + + *revision = ((val >> 3) & 0x7) | ((val << 3) & 0x38); + + return 0; +} + +static inline int cpcap_get_vendor(struct device *dev, + struct regmap *regmap, + u16 *vendor) +{ + unsigned int val; + int ret; + + ret = regmap_read(regmap, CPCAP_REG_VERSC1, &val); + if (ret) { + dev_err(dev, "Could not read vendor\n"); + + return ret; + } + + *vendor = (val >> 6) & 0x7; + + return 0; +} diff --git a/include/linux/mfd/stm32-timers.h b/include/linux/mfd/stm32-timers.h new file mode 100644 index 000000000000..d0300045f04a --- /dev/null +++ b/include/linux/mfd/stm32-timers.h @@ -0,0 +1,71 @@ +/* + * Copyright (C) STMicroelectronics 2016 + * + * Author: Benjamin Gaignard <benjamin.gaignard@st.com> + * + * License terms: GNU General Public License (GPL), version 2 + */ + +#ifndef _LINUX_STM32_GPTIMER_H_ +#define _LINUX_STM32_GPTIMER_H_ + +#include <linux/clk.h> +#include <linux/regmap.h> + +#define TIM_CR1 0x00 /* Control Register 1 */ +#define TIM_CR2 0x04 /* Control Register 2 */ +#define TIM_SMCR 0x08 /* Slave mode control reg */ +#define TIM_DIER 0x0C /* DMA/interrupt register */ +#define TIM_SR 0x10 /* Status register */ +#define TIM_EGR 0x14 /* Event Generation Reg */ +#define TIM_CCMR1 0x18 /* Capt/Comp 1 Mode Reg */ +#define TIM_CCMR2 0x1C /* Capt/Comp 2 Mode Reg */ +#define TIM_CCER 0x20 /* Capt/Comp Enable Reg */ +#define TIM_PSC 0x28 /* Prescaler */ +#define TIM_ARR 0x2c /* Auto-Reload Register */ +#define TIM_CCR1 0x34 /* Capt/Comp Register 1 */ +#define TIM_CCR2 0x38 /* Capt/Comp Register 2 */ +#define TIM_CCR3 0x3C /* Capt/Comp Register 3 */ +#define TIM_CCR4 0x40 /* Capt/Comp Register 4 */ +#define TIM_BDTR 0x44 /* Break and Dead-Time Reg */ + +#define TIM_CR1_CEN BIT(0) /* Counter Enable */ +#define TIM_CR1_ARPE BIT(7) /* Auto-reload Preload Ena */ +#define TIM_CR2_MMS (BIT(4) | BIT(5) | BIT(6)) /* Master mode selection */ +#define TIM_SMCR_SMS (BIT(0) | BIT(1) | BIT(2)) /* Slave mode selection */ +#define TIM_SMCR_TS (BIT(4) | BIT(5) | BIT(6)) /* Trigger selection */ +#define TIM_DIER_UIE BIT(0) /* Update interrupt */ +#define TIM_SR_UIF BIT(0) /* Update interrupt flag */ +#define TIM_EGR_UG BIT(0) /* Update Generation */ +#define TIM_CCMR_PE BIT(3) /* Channel Preload Enable */ +#define TIM_CCMR_M1 (BIT(6) | BIT(5)) /* Channel PWM Mode 1 */ +#define TIM_CCER_CC1E BIT(0) /* Capt/Comp 1 out Ena */ +#define TIM_CCER_CC1P BIT(1) /* Capt/Comp 1 Polarity */ +#define TIM_CCER_CC1NE BIT(2) /* Capt/Comp 1N out Ena */ +#define TIM_CCER_CC1NP BIT(3) /* Capt/Comp 1N Polarity */ +#define TIM_CCER_CC2E BIT(4) /* Capt/Comp 2 out Ena */ +#define TIM_CCER_CC3E BIT(8) /* Capt/Comp 3 out Ena */ +#define TIM_CCER_CC4E BIT(12) /* Capt/Comp 4 out Ena */ +#define TIM_CCER_CCXE (BIT(0) | BIT(4) | BIT(8) | BIT(12)) +#define TIM_BDTR_BKE BIT(12) /* Break input enable */ +#define TIM_BDTR_BKP BIT(13) /* Break input polarity */ +#define TIM_BDTR_AOE BIT(14) /* Automatic Output Enable */ +#define TIM_BDTR_MOE BIT(15) /* Main Output Enable */ +#define TIM_BDTR_BKF (BIT(16) | BIT(17) | BIT(18) | BIT(19)) +#define TIM_BDTR_BK2F (BIT(20) | BIT(21) | BIT(22) | BIT(23)) +#define TIM_BDTR_BK2E BIT(24) /* Break 2 input enable */ +#define TIM_BDTR_BK2P BIT(25) /* Break 2 input polarity */ + +#define MAX_TIM_PSC 0xFFFF +#define TIM_CR2_MMS_SHIFT 4 
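(Editorial sketch, not part of the patch: one plausible way a child driver could kick the counter using the TIM_* registers above and the shared regmap handed out via struct stm32_timers just below. The helper name and the exact call sequence are illustrative assumptions only.)

static int stm32_timers_start_sketch(struct regmap *regmap, u32 arr)
{
	int ret;

	ret = regmap_write(regmap, TIM_PSC, 0);		/* no prescaling */
	if (ret)
		return ret;
	ret = regmap_write(regmap, TIM_ARR, arr);	/* auto-reload value */
	if (ret)
		return ret;
	/* Latch PSC/ARR with a forced update event, then enable the counter */
	ret = regmap_write(regmap, TIM_EGR, TIM_EGR_UG);
	if (ret)
		return ret;
	return regmap_update_bits(regmap, TIM_CR1, TIM_CR1_CEN, TIM_CR1_CEN);
}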
+#define TIM_SMCR_TS_SHIFT 4 +#define TIM_BDTR_BKF_MASK 0xF +#define TIM_BDTR_BKF_SHIFT 16 +#define TIM_BDTR_BK2F_SHIFT 20 + +struct stm32_timers { + struct clk *clk; + struct regmap *regmap; + u32 max_arr; +}; +#endif diff --git a/include/linux/mfd/tps65910.h b/include/linux/mfd/tps65910.h index 6483a6fdce59..ffb21e79204d 100644 --- a/include/linux/mfd/tps65910.h +++ b/include/linux/mfd/tps65910.h @@ -134,6 +134,7 @@ /* RTC_CTRL_REG bitfields */ #define TPS65910_RTC_CTRL_STOP_RTC 0x01 /*0=stop, 1=run */ +#define TPS65910_RTC_CTRL_AUTO_COMP 0x04 #define TPS65910_RTC_CTRL_GET_TIME 0x40 /* RTC_STATUS_REG bitfields */ diff --git a/include/linux/mic_bus.h b/include/linux/mic_bus.h index 27d7c95fd0da..504d54c71bdb 100644 --- a/include/linux/mic_bus.h +++ b/include/linux/mic_bus.h @@ -90,7 +90,7 @@ struct mbus_hw_ops { }; struct mbus_device * -mbus_register_device(struct device *pdev, int id, struct dma_map_ops *dma_ops, +mbus_register_device(struct device *pdev, int id, const struct dma_map_ops *dma_ops, struct mbus_hw_ops *hw_ops, int index, void __iomem *mmio_va); void mbus_unregister_device(struct mbus_device *mbdev); diff --git a/include/linux/migrate.h b/include/linux/migrate.h index ae8d475a9385..fa76b516fa47 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -37,7 +37,7 @@ extern int migrate_page(struct address_space *, struct page *, struct page *, enum migrate_mode); extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free, unsigned long private, enum migrate_mode mode, int reason); -extern bool isolate_movable_page(struct page *page, isolate_mode_t mode); +extern int isolate_movable_page(struct page *page, isolate_mode_t mode); extern void putback_movable_page(struct page *page); extern int migrate_prep(void); @@ -56,6 +56,8 @@ static inline int migrate_pages(struct list_head *l, new_page_t new, free_page_t free, unsigned long private, enum migrate_mode mode, int reason) { return -ENOSYS; } +static inline int isolate_movable_page(struct page *page, isolate_mode_t mode) + { return -EBUSY; } static inline int migrate_prep(void) { return -ENOSYS; } static inline int migrate_prep_local(void) { return -ENOSYS; } diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h index ed30d5d713e3..762b5fec3383 100644 --- a/include/linux/miscdevice.h +++ b/include/linux/miscdevice.h @@ -22,6 +22,7 @@ /*#define ADB_MOUSE_MINOR 10 FIXME OBSOLETE */ #define WATCHDOG_MINOR 130 /* Watchdog timer */ #define TEMP_MINOR 131 /* Temperature Sensor */ +#define APM_MINOR_DEV 134 #define RTC_MINOR 135 #define EFI_RTC_MINOR 136 /* EFI Time services */ #define VHCI_MINOR 137 @@ -31,6 +32,7 @@ #define SGI_MMTIMER 153 #define STORE_QUEUE_MINOR 155 /* unused */ #define I2O_MINOR 166 +#define HWRNG_MINOR 183 #define MICROCODE_MINOR 184 #define IRNET_MINOR 187 #define VFIO_MINOR 196 diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h index 1f3568694a57..7b74afcbbab2 100644 --- a/include/linux/mlx4/cmd.h +++ b/include/linux/mlx4/cmd.h @@ -308,7 +308,7 @@ int mlx4_get_counter_stats(struct mlx4_dev *dev, int counter_index, int mlx4_get_vf_stats(struct mlx4_dev *dev, int port, int vf_idx, struct ifla_vf_stats *vf_stats); u32 mlx4_comm_get_version(void); -int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac); +int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u8 *mac); int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos, __be16 proto); int mlx4_set_vf_rate(struct mlx4_dev *dev, int port, int vf, int 
min_tx_rate, diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 6533c16e27ad..7e66e4f62858 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -1374,6 +1374,7 @@ int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port); int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac); int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu, u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx); +int mlx4_SET_PORT_user_mtu(struct mlx4_dev *dev, u8 port, u16 user_mtu); int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn, u8 promisc); int mlx4_SET_PORT_BEACON(struct mlx4_dev *dev, u8 port, u16 time); @@ -1539,8 +1540,13 @@ enum mlx4_ptys_proto { MLX4_PTYS_EN = 1<<2, }; +enum mlx4_ptys_flags { + MLX4_PTYS_AN_DISABLE_CAP = 1 << 5, + MLX4_PTYS_AN_DISABLE_ADMIN = 1 << 6, +}; + struct mlx4_ptys_reg { - u8 resrvd1; + u8 flags; u8 local_port; u8 resrvd2; u8 proto_mask; diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h index bd0e7075ea6d..a858bcb6220b 100644 --- a/include/linux/mlx4/driver.h +++ b/include/linux/mlx4/driver.h @@ -104,4 +104,14 @@ static inline u64 mlx4_mac_to_u64(u8 *addr) return mac; } +static inline void mlx4_u64_to_mac(u8 *addr, u64 mac) +{ + int i; + + for (i = ETH_ALEN; i > 0; i--) { + addr[i - 1] = mac & 0xFF; + mac >>= 8; + } +} + #endif /* MLX4_DRIVER_H */ diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h index 7c3c0d3aca37..95898847c7d4 100644 --- a/include/linux/mlx5/cq.h +++ b/include/linux/mlx5/cq.h @@ -42,13 +42,13 @@ struct mlx5_core_cq { int cqe_sz; __be32 *set_ci_db; __be32 *arm_db; + struct mlx5_uars_page *uar; atomic_t refcount; struct completion free; unsigned vector; unsigned int irqn; void (*comp) (struct mlx5_core_cq *); void (*event) (struct mlx5_core_cq *, enum mlx5_event); - struct mlx5_uar *uar; u32 cons_index; unsigned arm_sn; struct mlx5_rsc_debug *dbg; @@ -144,7 +144,6 @@ enum { static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd, void __iomem *uar_page, - spinlock_t *doorbell_lock, u32 cons_index) { __be32 doorbell[2]; @@ -164,7 +163,7 @@ static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd, doorbell[0] = cpu_to_be32(sn << 28 | cmd | ci); doorbell[1] = cpu_to_be32(cq->cqn); - mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL, doorbell_lock); + mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL, NULL); } int mlx5_init_cq_table(struct mlx5_core_dev *dev); diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 52b437431c6a..dd9a263ed368 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -67,10 +67,11 @@ /* insert a value to a struct */ #define MLX5_SET(typ, p, fld, v) do { \ + u32 _v = v; \ BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \ *((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \ cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \ - (~__mlx5_dw_mask(typ, fld))) | (((v) & __mlx5_mask(typ, fld)) \ + (~__mlx5_dw_mask(typ, fld))) | (((_v) & __mlx5_mask(typ, fld)) \ << __mlx5_dw_bit_off(typ, fld))); \ } while (0) @@ -212,10 +213,20 @@ enum { }; enum { - MLX5_BF_REGS_PER_PAGE = 4, - MLX5_MAX_UAR_PAGES = 1 << 8, - MLX5_NON_FP_BF_REGS_PER_PAGE = 2, - MLX5_MAX_UUARS = MLX5_MAX_UAR_PAGES * MLX5_NON_FP_BF_REGS_PER_PAGE, + MLX5_ADAPTER_PAGE_SHIFT = 12, + MLX5_ADAPTER_PAGE_SIZE = 1 << MLX5_ADAPTER_PAGE_SHIFT, +}; + +enum { + MLX5_BFREGS_PER_UAR = 4, + MLX5_MAX_UARS = 1 << 8, + MLX5_NON_FP_BFREGS_PER_UAR = 2, + MLX5_FP_BFREGS_PER_UAR = MLX5_BFREGS_PER_UAR - + 
MLX5_NON_FP_BFREGS_PER_UAR, + MLX5_MAX_BFREGS = MLX5_MAX_UARS * + MLX5_NON_FP_BFREGS_PER_UAR, + MLX5_UARS_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE, + MLX5_NON_FP_BFREGS_IN_PAGE = MLX5_NON_FP_BFREGS_PER_UAR * MLX5_UARS_IN_PAGE, }; enum { @@ -279,6 +290,7 @@ enum mlx5_event { MLX5_EVENT_TYPE_GPIO_EVENT = 0x15, MLX5_EVENT_TYPE_PORT_MODULE_EVENT = 0x16, MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19, + MLX5_EVENT_TYPE_PPS_EVENT = 0x25, MLX5_EVENT_TYPE_DB_BF_CONGESTION = 0x1a, MLX5_EVENT_TYPE_STALL_EVENT = 0x1b, @@ -389,11 +401,6 @@ enum { }; enum { - MLX5_ADAPTER_PAGE_SHIFT = 12, - MLX5_ADAPTER_PAGE_SIZE = 1 << MLX5_ADAPTER_PAGE_SHIFT, -}; - -enum { MLX5_CAP_OFF_CMDIF_CSUM = 46, }; @@ -534,7 +541,9 @@ struct mlx5_eqe_page_fault { __be16 wqe_index; u16 reserved2; __be16 packet_length; - u8 reserved3[12]; + __be32 token; + u8 reserved4[8]; + __be32 pftype_wq; } __packed wqe; struct { __be32 r_key; @@ -542,9 +551,9 @@ struct mlx5_eqe_page_fault { __be16 packet_length; __be32 rdma_op_len; __be64 rdma_va; + __be32 pftype_token; } __packed rdma; } __packed; - __be32 flags_qpn; } __packed; struct mlx5_eqe_vport_change { @@ -562,6 +571,22 @@ struct mlx5_eqe_port_module { u8 error_type; } __packed; +struct mlx5_eqe_pps { + u8 rsvd0[3]; + u8 pin; + u8 rsvd1[4]; + union { + struct { + __be32 time_sec; + __be32 time_nsec; + }; + struct { + __be64 time_stamp; + }; + }; + u8 rsvd2[12]; +} __packed; + union ev_data { __be32 raw[7]; struct mlx5_eqe_cmd cmd; @@ -576,6 +601,7 @@ union ev_data { struct mlx5_eqe_page_fault page_fault; struct mlx5_eqe_vport_change vport_change; struct mlx5_eqe_port_module port_module; + struct mlx5_eqe_pps pps; } __packed; struct mlx5_eqe { @@ -945,38 +971,54 @@ enum mlx5_cap_type { MLX5_CAP_NUM }; +enum mlx5_pcam_reg_groups { + MLX5_PCAM_REGS_5000_TO_507F = 0x0, +}; + +enum mlx5_pcam_feature_groups { + MLX5_PCAM_FEATURE_ENHANCED_FEATURES = 0x0, +}; + +enum mlx5_mcam_reg_groups { + MLX5_MCAM_REGS_FIRST_128 = 0x0, +}; + +enum mlx5_mcam_feature_groups { + MLX5_MCAM_FEATURE_ENHANCED_FEATURES = 0x0, +}; + /* GET Dev Caps macros */ #define MLX5_CAP_GEN(mdev, cap) \ - MLX5_GET(cmd_hca_cap, mdev->hca_caps_cur[MLX5_CAP_GENERAL], cap) + MLX5_GET(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap) #define MLX5_CAP_GEN_MAX(mdev, cap) \ - MLX5_GET(cmd_hca_cap, mdev->hca_caps_max[MLX5_CAP_GENERAL], cap) + MLX5_GET(cmd_hca_cap, mdev->caps.hca_max[MLX5_CAP_GENERAL], cap) #define MLX5_CAP_ETH(mdev, cap) \ MLX5_GET(per_protocol_networking_offload_caps,\ - mdev->hca_caps_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap) + mdev->caps.hca_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap) #define MLX5_CAP_ETH_MAX(mdev, cap) \ MLX5_GET(per_protocol_networking_offload_caps,\ - mdev->hca_caps_max[MLX5_CAP_ETHERNET_OFFLOADS], cap) + mdev->caps.hca_max[MLX5_CAP_ETHERNET_OFFLOADS], cap) #define MLX5_CAP_ROCE(mdev, cap) \ - MLX5_GET(roce_cap, mdev->hca_caps_cur[MLX5_CAP_ROCE], cap) + MLX5_GET(roce_cap, mdev->caps.hca_cur[MLX5_CAP_ROCE], cap) #define MLX5_CAP_ROCE_MAX(mdev, cap) \ - MLX5_GET(roce_cap, mdev->hca_caps_max[MLX5_CAP_ROCE], cap) + MLX5_GET(roce_cap, mdev->caps.hca_max[MLX5_CAP_ROCE], cap) #define MLX5_CAP_ATOMIC(mdev, cap) \ - MLX5_GET(atomic_caps, mdev->hca_caps_cur[MLX5_CAP_ATOMIC], cap) + MLX5_GET(atomic_caps, mdev->caps.hca_cur[MLX5_CAP_ATOMIC], cap) #define MLX5_CAP_ATOMIC_MAX(mdev, cap) \ - MLX5_GET(atomic_caps, mdev->hca_caps_max[MLX5_CAP_ATOMIC], cap) + MLX5_GET(atomic_caps, mdev->caps.hca_max[MLX5_CAP_ATOMIC], cap) #define MLX5_CAP_FLOWTABLE(mdev, cap) \ - MLX5_GET(flow_table_nic_cap, 
mdev->hca_caps_cur[MLX5_CAP_FLOW_TABLE], cap) + MLX5_GET(flow_table_nic_cap, mdev->caps.hca_cur[MLX5_CAP_FLOW_TABLE], cap) #define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \ - MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap) + MLX5_GET(flow_table_nic_cap, mdev->caps.hca_max[MLX5_CAP_FLOW_TABLE], cap) #define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \ MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap) @@ -998,11 +1040,11 @@ enum mlx5_cap_type { #define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \ MLX5_GET(flow_table_eswitch_cap, \ - mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) + mdev->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) #define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \ MLX5_GET(flow_table_eswitch_cap, \ - mdev->hca_caps_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) + mdev->caps.hca_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) #define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \ MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap) @@ -1024,21 +1066,27 @@ enum mlx5_cap_type { #define MLX5_CAP_ESW(mdev, cap) \ MLX5_GET(e_switch_cap, \ - mdev->hca_caps_cur[MLX5_CAP_ESWITCH], cap) + mdev->caps.hca_cur[MLX5_CAP_ESWITCH], cap) #define MLX5_CAP_ESW_MAX(mdev, cap) \ MLX5_GET(e_switch_cap, \ - mdev->hca_caps_max[MLX5_CAP_ESWITCH], cap) + mdev->caps.hca_max[MLX5_CAP_ESWITCH], cap) #define MLX5_CAP_ODP(mdev, cap)\ - MLX5_GET(odp_cap, mdev->hca_caps_cur[MLX5_CAP_ODP], cap) + MLX5_GET(odp_cap, mdev->caps.hca_cur[MLX5_CAP_ODP], cap) #define MLX5_CAP_VECTOR_CALC(mdev, cap) \ MLX5_GET(vector_calc_cap, \ - mdev->hca_caps_cur[MLX5_CAP_VECTOR_CALC], cap) + mdev->caps.hca_cur[MLX5_CAP_VECTOR_CALC], cap) #define MLX5_CAP_QOS(mdev, cap)\ - MLX5_GET(qos_cap, mdev->hca_caps_cur[MLX5_CAP_QOS], cap) + MLX5_GET(qos_cap, mdev->caps.hca_cur[MLX5_CAP_QOS], cap) + +#define MLX5_CAP_PCAM_FEATURE(mdev, fld) \ + MLX5_GET(pcam_reg, (mdev)->caps.pcam, feature_cap_mask.enhanced_features.fld) + +#define MLX5_CAP_MCAM_FEATURE(mdev, fld) \ + MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld) enum { MLX5_CMD_STAT_OK = 0x0, @@ -1068,9 +1116,14 @@ enum { MLX5_PER_PRIORITY_COUNTERS_GROUP = 0x10, MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP = 0x12, + MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP = 0x16, MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20, }; +enum { + MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP = 0x0, +}; + static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz) { if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE) diff --git a/include/linux/mlx5/doorbell.h b/include/linux/mlx5/doorbell.h index afc78a3f4462..0787de28f2fc 100644 --- a/include/linux/mlx5/doorbell.h +++ b/include/linux/mlx5/doorbell.h @@ -68,10 +68,12 @@ static inline void mlx5_write64(__be32 val[2], void __iomem *dest, { unsigned long flags; - spin_lock_irqsave(doorbell_lock, flags); + if (doorbell_lock) + spin_lock_irqsave(doorbell_lock, flags); __raw_writel((__force u32) val[0], dest); __raw_writel((__force u32) val[1], dest + 4); - spin_unlock_irqrestore(doorbell_lock, flags); + if (doorbell_lock) + spin_unlock_irqrestore(doorbell_lock, flags); } #endif diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 735b36335f29..2fcff6b4503f 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -42,6 +42,7 @@ #include <linux/vmalloc.h> #include <linux/radix-tree.h> #include <linux/workqueue.h> +#include <linux/mempool.h> #include <linux/interrupt.h> #include <linux/mlx5/device.h> @@ -83,6 +84,7 @@ enum { MLX5_EQ_VEC_PAGES = 0, MLX5_EQ_VEC_CMD = 1, 
MLX5_EQ_VEC_ASYNC = 2, + MLX5_EQ_VEC_PFAULT = 3, MLX5_EQ_VEC_COMP_BASE, }; @@ -119,10 +121,15 @@ enum { MLX5_REG_PVLC = 0x500f, MLX5_REG_PCMR = 0x5041, MLX5_REG_PMLP = 0x5002, + MLX5_REG_PCAM = 0x507f, MLX5_REG_NODE_DESC = 0x6001, MLX5_REG_HOST_ENDIANNESS = 0x7004, MLX5_REG_MCIA = 0x9014, MLX5_REG_MLCR = 0x902b, + MLX5_REG_MPCNT = 0x9051, + MLX5_REG_MTPPS = 0x9053, + MLX5_REG_MTPPSE = 0x9054, + MLX5_REG_MCAM = 0x907f, }; enum mlx5_dcbx_oper_mode { @@ -170,6 +177,7 @@ enum mlx5_dev_event { MLX5_DEV_EVENT_PKEY_CHANGE, MLX5_DEV_EVENT_GUID_CHANGE, MLX5_DEV_EVENT_CLIENT_REREG, + MLX5_DEV_EVENT_PPS, }; enum mlx5_port_status { @@ -177,36 +185,26 @@ enum mlx5_port_status { MLX5_PORT_DOWN = 2, }; -struct mlx5_uuar_info { - struct mlx5_uar *uars; - int num_uars; - int num_low_latency_uuars; - unsigned long *bitmap; +enum mlx5_eq_type { + MLX5_EQ_TYPE_COMP, + MLX5_EQ_TYPE_ASYNC, +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + MLX5_EQ_TYPE_PF, +#endif +}; + +struct mlx5_bfreg_info { + u32 *sys_pages; + int num_low_latency_bfregs; unsigned int *count; - struct mlx5_bf *bfs; /* - * protect uuar allocation data structs + * protect bfreg allocation data structs */ struct mutex lock; u32 ver; -}; - -struct mlx5_bf { - void __iomem *reg; - void __iomem *regreg; - int buf_size; - struct mlx5_uar *uar; - unsigned long offset; - int need_lock; - /* protect blue flame buffer selection when needed - */ - spinlock_t lock; - - /* serialize 64 bit writes when done as two 32 bit accesses - */ - spinlock_t lock32; - int uuarn; + bool lib_uar_4k; + u32 num_sys_pages; }; struct mlx5_cmd_first { @@ -297,6 +295,7 @@ struct mlx5_port_caps { int gid_table_len; int pkey_table_len; u8 ext_port_cap; + bool has_smi; }; struct mlx5_cmd_mailbox { @@ -332,6 +331,14 @@ struct mlx5_eq_tasklet { spinlock_t lock; }; +struct mlx5_eq_pagefault { + struct work_struct work; + /* Pagefaults lock */ + spinlock_t lock; + struct workqueue_struct *wq; + mempool_t *pool; +}; + struct mlx5_eq { struct mlx5_core_dev *dev; __be32 __iomem *doorbell; @@ -345,7 +352,13 @@ struct mlx5_eq { struct list_head list; int index; struct mlx5_rsc_debug *dbg; - struct mlx5_eq_tasklet tasklet_ctx; + enum mlx5_eq_type type; + union { + struct mlx5_eq_tasklet tasklet_ctx; +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + struct mlx5_eq_pagefault pf_ctx; +#endif + }; }; struct mlx5_core_psv { @@ -369,13 +382,21 @@ struct mlx5_core_sig_ctx { u32 sigerr_count; }; +enum { + MLX5_MKEY_MR = 1, + MLX5_MKEY_MW, +}; + struct mlx5_core_mkey { u64 iova; u64 size; u32 key; u32 pd; + u32 type; }; +#define MLX5_24BIT_MASK ((1 << 24) - 1) + enum mlx5_res_type { MLX5_RES_QP = MLX5_EVENT_QUEUE_TYPE_QP, MLX5_RES_RQ = MLX5_EVENT_QUEUE_TYPE_RQ, @@ -410,20 +431,47 @@ struct mlx5_eq_table { struct mlx5_eq pages_eq; struct mlx5_eq async_eq; struct mlx5_eq cmd_eq; +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + struct mlx5_eq pfault_eq; +#endif int num_comp_vectors; /* protect EQs list */ spinlock_t lock; }; -struct mlx5_uar { - u32 index; - struct list_head bf_list; - unsigned free_bf_bmap; - void __iomem *bf_map; +struct mlx5_uars_page { void __iomem *map; + bool wc; + u32 index; + struct list_head list; + unsigned int bfregs; + unsigned long *reg_bitmap; /* for non fast path bf regs */ + unsigned long *fp_bitmap; + unsigned int reg_avail; + unsigned int fp_avail; + struct kref ref_count; + struct mlx5_core_dev *mdev; +}; + +struct mlx5_bfreg_head { + /* protect blue flame registers allocations */ + struct mutex lock; + struct list_head list; +}; + +struct mlx5_bfreg_data { + struct 
mlx5_bfreg_head reg_head; + struct mlx5_bfreg_head wc_head; }; +struct mlx5_sq_bfreg { + void __iomem *map; + struct mlx5_uars_page *up; + bool wc; + u32 index; + unsigned int offset; +}; struct mlx5_core_health { struct health_buffer __iomem *health; @@ -496,6 +544,7 @@ struct mlx5_fc_stats { struct mlx5_eswitch; struct mlx5_lag; +struct mlx5_pagefault; struct mlx5_rl_entry { u32 rate; @@ -542,8 +591,6 @@ struct mlx5_priv { struct mlx5_eq_table eq_table; struct msix_entry *msix_arr; struct mlx5_irq_info *irq_info; - struct mlx5_uuar_info uuari; - MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock); /* pages stuff */ struct workqueue_struct *pg_wq; @@ -600,6 +647,16 @@ struct mlx5_priv { struct mlx5_rl_table rl_table; struct mlx5_port_module_event_stats pme_stats; + +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + void (*pfault)(struct mlx5_core_dev *dev, + void *context, + struct mlx5_pagefault *pfault); + void *pfault_ctx; + struct srcu_struct pfault_srcu; +#endif + struct mlx5_bfreg_data bfregs; + struct mlx5_uars_page *uar; }; enum mlx5_device_state { @@ -618,13 +675,56 @@ enum mlx5_pci_status { MLX5_PCI_STATUS_ENABLED, }; +enum mlx5_pagefault_type_flags { + MLX5_PFAULT_REQUESTOR = 1 << 0, + MLX5_PFAULT_WRITE = 1 << 1, + MLX5_PFAULT_RDMA = 1 << 2, +}; + +/* Contains the details of a pagefault. */ +struct mlx5_pagefault { + u32 bytes_committed; + u32 token; + u8 event_subtype; + u8 type; + union { + /* Initiator or send message responder pagefault details. */ + struct { + /* Received packet size, only valid for responders. */ + u32 packet_size; + /* + * Number of resource holding WQE, depends on type. + */ + u32 wq_num; + /* + * WQE index. Refers to either the send queue or + * receive queue, according to event_subtype. + */ + u16 wqe_index; + } wqe; + /* RDMA responder pagefault details */ + struct { + u32 r_key; + /* + * Received packet size, minimal size page fault + * resolution required for forward progress. 
+ */ + u32 packet_size; + u32 rdma_op_len; + u64 rdma_va; + } rdma; + }; + + struct mlx5_eq *eq; + struct work_struct work; +}; + struct mlx5_td { struct list_head tirs_list; u32 tdn; }; struct mlx5e_resources { - struct mlx5_uar cq_uar; u32 pdn; struct mlx5_td td; struct mlx5_core_mkey mkey; @@ -639,8 +739,12 @@ struct mlx5_core_dev { char board_id[MLX5_BOARD_ID_LEN]; struct mlx5_cmd cmd; struct mlx5_port_caps port_caps[MLX5_MAX_PORTS]; - u32 hca_caps_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)]; - u32 hca_caps_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)]; + struct { + u32 hca_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)]; + u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)]; + u32 pcam[MLX5_ST_SZ_DW(pcam_reg)]; + u32 mcam[MLX5_ST_SZ_DW(mcam_reg)]; + } caps; phys_addr_t iseg_base; struct mlx5_init_seg __iomem *iseg; enum mlx5_device_state state; @@ -814,11 +918,6 @@ void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome); int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type); int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn); int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn); -int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari); -int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari); -int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar, - bool map_wc); -void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar); void mlx5_health_cleanup(struct mlx5_core_dev *dev); int mlx5_health_init(struct mlx5_core_dev *dev); void mlx5_start_health_poll(struct mlx5_core_dev *dev); @@ -878,15 +977,13 @@ void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas); void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas); void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn); void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type); -#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING -void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe); -#endif void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type); struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn); void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec); void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type); int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, - int nent, u64 mask, const char *name, struct mlx5_uar *uar); + int nent, u64 mask, const char *name, + enum mlx5_eq_type type); int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq); int mlx5_start_eqs(struct mlx5_core_dev *dev); int mlx5_stop_eqs(struct mlx5_core_dev *dev); @@ -925,12 +1022,19 @@ int mlx5_query_odp_caps(struct mlx5_core_dev *dev, struct mlx5_odp_caps *odp_caps); int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev, u8 port_num, void *out, size_t sz); +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING +int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token, + u32 wq_num, u8 type, int error); +#endif int mlx5_init_rl_table(struct mlx5_core_dev *dev); void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev); int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index); void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate); bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate); +int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg, + bool map_wc, bool fast_path); +void mlx5_free_bfreg(struct mlx5_core_dev *mdev, 
struct mlx5_sq_bfreg *bfreg); static inline int fw_initializing(struct mlx5_core_dev *dev) { @@ -958,7 +1062,10 @@ enum { }; enum { - MAX_MR_CACHE_ENTRIES = 16, + MAX_UMR_CACHE_ENTRY = 20, + MLX5_IMR_MTT_CACHE_ENTRY, + MLX5_IMR_KSM_CACHE_ENTRY, + MAX_MR_CACHE_ENTRIES }; enum { @@ -973,6 +1080,9 @@ struct mlx5_interface { void (*detach)(struct mlx5_core_dev *dev, void *context); void (*event)(struct mlx5_core_dev *dev, void *context, enum mlx5_dev_event event, unsigned long param); + void (*pfault)(struct mlx5_core_dev *dev, + void *context, + struct mlx5_pagefault *pfault); void * (*get_dev)(void *context); int protocol; struct list_head list; @@ -987,6 +1097,8 @@ int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev); int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev); bool mlx5_lag_is_active(struct mlx5_core_dev *dev); struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev); +struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev); +void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up); struct mlx5_profile { u64 mask; diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index a852e9db6f0d..838242697541 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -328,7 +328,7 @@ struct mlx5_ifc_odp_per_transport_service_cap_bits { u8 receive[0x1]; u8 write[0x1]; u8 read[0x1]; - u8 reserved_at_4[0x1]; + u8 atomic[0x1]; u8 srq_receive[0x1]; u8 reserved_at_6[0x1a]; }; @@ -365,8 +365,8 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits { u8 ip_protocol[0x8]; u8 ip_dscp[0x6]; u8 ip_ecn[0x2]; - u8 vlan_tag[0x1]; - u8 reserved_at_91[0x1]; + u8 cvlan_tag[0x1]; + u8 svlan_tag[0x1]; u8 frag[0x1]; u8 reserved_at_93[0x4]; u8 tcp_flags[0x9]; @@ -398,9 +398,11 @@ struct mlx5_ifc_fte_match_set_misc_bits { u8 inner_second_cfi[0x1]; u8 inner_second_vid[0xc]; - u8 outer_second_vlan_tag[0x1]; - u8 inner_second_vlan_tag[0x1]; - u8 reserved_at_62[0xe]; + u8 outer_second_cvlan_tag[0x1]; + u8 inner_second_cvlan_tag[0x1]; + u8 outer_second_svlan_tag[0x1]; + u8 inner_second_svlan_tag[0x1]; + u8 reserved_at_64[0xc]; u8 gre_protocol[0x10]; u8 gre_key_h[0x18]; @@ -545,7 +547,9 @@ struct mlx5_ifc_e_switch_cap_bits { struct mlx5_ifc_qos_cap_bits { u8 packet_pacing[0x1]; u8 esw_scheduling[0x1]; - u8 reserved_at_2[0x1e]; + u8 esw_bw_share[0x1]; + u8 esw_rate_limit[0x1]; + u8 reserved_at_4[0x1c]; u8 reserved_at_20[0x20]; @@ -573,7 +577,8 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits { u8 lro_cap[0x1]; u8 lro_psh_flag[0x1]; u8 lro_time_stamp[0x1]; - u8 reserved_at_5[0x3]; + u8 reserved_at_5[0x2]; + u8 wqe_vlan_insert[0x1]; u8 self_lb_en_modifiable[0x1]; u8 reserved_at_9[0x2]; u8 max_lso_cap[0x5]; @@ -782,11 +787,12 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 log_max_eq[0x4]; u8 max_indirection[0x8]; - u8 reserved_at_108[0x1]; + u8 fixed_buffer_size[0x1]; u8 log_max_mrw_sz[0x7]; u8 reserved_at_110[0x2]; u8 log_max_bsf_list_size[0x6]; - u8 reserved_at_118[0x2]; + u8 umr_extended_translation_offset[0x1]; + u8 null_mkey[0x1]; u8 log_max_klm_list_size[0x6]; u8 reserved_at_120[0xa]; @@ -799,10 +805,12 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_150[0xa]; u8 log_max_ra_res_qp[0x6]; - u8 pad_cap[0x1]; + u8 end_pad[0x1]; u8 cc_query_allowed[0x1]; u8 cc_modify_allowed[0x1]; - u8 reserved_at_163[0xd]; + u8 start_pad[0x1]; + u8 cache_line_128byte[0x1]; + u8 reserved_at_163[0xb]; u8 gid_table_size[0x10]; u8 out_of_seq_cnt[0x1]; @@ -823,18 +831,21 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 nic_flow_table[0x1]; u8 
eswitch_flow_table[0x1]; u8 early_vf_enable[0x1]; - u8 reserved_at_1a9[0x2]; + u8 mcam_reg[0x1]; + u8 pcam_reg[0x1]; u8 local_ca_ack_delay[0x5]; u8 port_module_event[0x1]; - u8 reserved_at_1b0[0x1]; + u8 reserved_at_1b1[0x1]; u8 ports_check[0x1]; - u8 reserved_at_1b2[0x1]; + u8 reserved_at_1b3[0x1]; u8 disable_link_up[0x1]; u8 beacon_led[0x1]; u8 port_type[0x2]; u8 num_ports[0x8]; - u8 reserved_at_1c0[0x3]; + u8 reserved_at_1c0[0x1]; + u8 pps[0x1]; + u8 pps_modify[0x1]; u8 log_max_msg[0x5]; u8 reserved_at_1c8[0x4]; u8 max_tc[0x4]; @@ -858,7 +869,7 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 compact_address_vector[0x1]; u8 striding_rq[0x1]; - u8 reserved_at_201[0x2]; + u8 reserved_at_202[0x2]; u8 ipoib_basic_offloads[0x1]; u8 reserved_at_205[0xa]; u8 drain_sigerr[0x1]; @@ -904,7 +915,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 uc[0x1]; u8 rc[0x1]; - u8 reserved_at_240[0xa]; + u8 uar_4k[0x1]; + u8 reserved_at_241[0x9]; u8 uar_sz[0x6]; u8 reserved_at_250[0x8]; u8 log_pg_sz[0x8]; @@ -996,7 +1008,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 device_frequency_mhz[0x20]; u8 device_frequency_khz[0x20]; - u8 reserved_at_500[0x80]; + u8 reserved_at_500[0x20]; + u8 num_of_uars_per_page[0x20]; + u8 reserved_at_540[0x40]; u8 reserved_at_580[0x3f]; u8 cqe_compression[0x1]; @@ -1009,10 +1023,10 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 rndv_offload_rc[0x1]; u8 rndv_offload_dc[0x1]; u8 log_tag_matching_list_sz[0x5]; - u8 reserved_at_5e8[0x3]; + u8 reserved_at_5f8[0x3]; u8 log_max_xrq[0x5]; - u8 reserved_at_5f0[0x200]; + u8 reserved_at_600[0x200]; }; enum mlx5_flow_destination_type { @@ -1375,6 +1389,42 @@ struct mlx5_ifc_phys_layer_cntrs_bits { u8 reserved_at_640[0x180]; }; +struct mlx5_ifc_phys_layer_statistical_cntrs_bits { + u8 time_since_last_clear_high[0x20]; + + u8 time_since_last_clear_low[0x20]; + + u8 phy_received_bits_high[0x20]; + + u8 phy_received_bits_low[0x20]; + + u8 phy_symbol_errors_high[0x20]; + + u8 phy_symbol_errors_low[0x20]; + + u8 phy_corrected_bits_high[0x20]; + + u8 phy_corrected_bits_low[0x20]; + + u8 phy_corrected_bits_lane0_high[0x20]; + + u8 phy_corrected_bits_lane0_low[0x20]; + + u8 phy_corrected_bits_lane1_high[0x20]; + + u8 phy_corrected_bits_lane1_low[0x20]; + + u8 phy_corrected_bits_lane2_high[0x20]; + + u8 phy_corrected_bits_lane2_low[0x20]; + + u8 phy_corrected_bits_lane3_high[0x20]; + + u8 phy_corrected_bits_lane3_low[0x20]; + + u8 reserved_at_200[0x5c0]; +}; + struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits { u8 symbol_error_counter[0x10]; @@ -1757,6 +1807,30 @@ struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits { u8 reserved_at_4c0[0x300]; }; +struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits { + u8 life_time_counter_high[0x20]; + + u8 life_time_counter_low[0x20]; + + u8 rx_errors[0x20]; + + u8 tx_errors[0x20]; + + u8 l0_to_recovery_eieos[0x20]; + + u8 l0_to_recovery_ts[0x20]; + + u8 l0_to_recovery_framing[0x20]; + + u8 l0_to_recovery_retrain[0x20]; + + u8 crc_error_dllp[0x20]; + + u8 crc_error_tlp[0x20]; + + u8 reserved_at_140[0x680]; +}; + struct mlx5_ifc_cmd_inter_comp_event_bits { u8 command_completion_vector[0x20]; @@ -2495,6 +2569,7 @@ enum { MLX5_MKC_ACCESS_MODE_PA = 0x0, MLX5_MKC_ACCESS_MODE_MTT = 0x1, MLX5_MKC_ACCESS_MODE_KLMS = 0x2, + MLX5_MKC_ACCESS_MODE_KSM = 0x3, }; struct mlx5_ifc_mkc_bits { @@ -2918,6 +2993,12 @@ union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits { struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout; struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits ib_port_cntrs_grp_data_layout; struct 
mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs; + struct mlx5_ifc_phys_layer_statistical_cntrs_bits phys_layer_statistical_cntrs; + u8 reserved_at_0[0x7c0]; +}; + +union mlx5_ifc_pcie_cntrs_grp_data_layout_auto_bits { + struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits pcie_perf_cntrs_grp_data_layout; u8 reserved_at_0[0x7c0]; }; @@ -3597,6 +3678,10 @@ struct mlx5_ifc_query_special_contexts_out_bits { u8 dump_fill_mkey[0x20]; u8 resd_lkey[0x20]; + + u8 null_mkey[0x20]; + + u8 reserved_at_a0[0x60]; }; struct mlx5_ifc_query_special_contexts_in_bits { @@ -4689,12 +4774,11 @@ struct mlx5_ifc_page_fault_resume_in_bits { u8 error[0x1]; u8 reserved_at_41[0x4]; - u8 rdma[0x1]; - u8 read_write[0x1]; - u8 req_res[0x1]; - u8 qpn[0x18]; + u8 page_fault_type[0x3]; + u8 wq_number[0x18]; - u8 reserved_at_60[0x20]; + u8 reserved_at_60[0x8]; + u8 token[0x18]; }; struct mlx5_ifc_nop_out_bits { @@ -4929,7 +5013,7 @@ struct mlx5_ifc_modify_rq_out_bits { enum { MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD = 1ULL << 1, - MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_MODIFY_RQ_COUNTER_SET_ID = 1ULL << 3, + MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID = 1ULL << 3, }; struct mlx5_ifc_modify_rq_in_bits { @@ -7240,6 +7324,18 @@ struct mlx5_ifc_ppcnt_reg_bits { union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set; }; +struct mlx5_ifc_mpcnt_reg_bits { + u8 reserved_at_0[0x8]; + u8 pcie_index[0x8]; + u8 reserved_at_10[0xa]; + u8 grp[0x6]; + + u8 clr[0x1]; + u8 reserved_at_21[0x1f]; + + union mlx5_ifc_pcie_cntrs_grp_data_layout_auto_bits counter_set; +}; + struct mlx5_ifc_ppad_reg_bits { u8 reserved_at_0[0x3]; u8 single_mac[0x1]; @@ -7469,6 +7565,63 @@ struct mlx5_ifc_peir_reg_bits { u8 error_type[0x8]; }; +struct mlx5_ifc_pcam_enhanced_features_bits { + u8 reserved_at_0[0x7e]; + + u8 ppcnt_discard_group[0x1]; + u8 ppcnt_statistical_group[0x1]; +}; + +struct mlx5_ifc_pcam_reg_bits { + u8 reserved_at_0[0x8]; + u8 feature_group[0x8]; + u8 reserved_at_10[0x8]; + u8 access_reg_group[0x8]; + + u8 reserved_at_20[0x20]; + + union { + u8 reserved_at_0[0x80]; + } port_access_reg_cap_mask; + + u8 reserved_at_c0[0x80]; + + union { + struct mlx5_ifc_pcam_enhanced_features_bits enhanced_features; + u8 reserved_at_0[0x80]; + } feature_cap_mask; + + u8 reserved_at_1c0[0xc0]; +}; + +struct mlx5_ifc_mcam_enhanced_features_bits { + u8 reserved_at_0[0x7f]; + + u8 pcie_performance_group[0x1]; +}; + +struct mlx5_ifc_mcam_reg_bits { + u8 reserved_at_0[0x8]; + u8 feature_group[0x8]; + u8 reserved_at_10[0x8]; + u8 access_reg_group[0x8]; + + u8 reserved_at_20[0x20]; + + union { + u8 reserved_at_0[0x80]; + } mng_access_reg_cap_mask; + + u8 reserved_at_c0[0x80]; + + union { + struct mlx5_ifc_mcam_enhanced_features_bits enhanced_features; + u8 reserved_at_0[0x80]; + } mng_feature_cap_mask; + + u8 reserved_at_1c0[0x80]; +}; + struct mlx5_ifc_pcap_reg_bits { u8 reserved_at_0[0x8]; u8 local_port[0x8]; @@ -7813,6 +7966,60 @@ struct mlx5_ifc_initial_seg_bits { u8 reserved_at_80a0[0x17fc0]; }; +struct mlx5_ifc_mtpps_reg_bits { + u8 reserved_at_0[0xc]; + u8 cap_number_of_pps_pins[0x4]; + u8 reserved_at_10[0x4]; + u8 cap_max_num_of_pps_in_pins[0x4]; + u8 reserved_at_18[0x4]; + u8 cap_max_num_of_pps_out_pins[0x4]; + + u8 reserved_at_20[0x24]; + u8 cap_pin_3_mode[0x4]; + u8 reserved_at_48[0x4]; + u8 cap_pin_2_mode[0x4]; + u8 reserved_at_50[0x4]; + u8 cap_pin_1_mode[0x4]; + u8 reserved_at_58[0x4]; + u8 cap_pin_0_mode[0x4]; + + u8 reserved_at_60[0x4]; + u8 cap_pin_7_mode[0x4]; + u8 reserved_at_68[0x4]; + u8 cap_pin_6_mode[0x4]; + u8 reserved_at_70[0x4]; + u8 
cap_pin_5_mode[0x4]; + u8 reserved_at_78[0x4]; + u8 cap_pin_4_mode[0x4]; + + u8 reserved_at_80[0x80]; + + u8 enable[0x1]; + u8 reserved_at_101[0xb]; + u8 pattern[0x4]; + u8 reserved_at_110[0x4]; + u8 pin_mode[0x4]; + u8 pin[0x8]; + + u8 reserved_at_120[0x20]; + + u8 time_stamp[0x40]; + + u8 out_pulse_duration[0x10]; + u8 out_periodic_adjustment[0x10]; + + u8 reserved_at_1a0[0x60]; +}; + +struct mlx5_ifc_mtppse_reg_bits { + u8 reserved_at_0[0x18]; + u8 pin[0x8]; + u8 event_arm[0x1]; + u8 reserved_at_21[0x1b]; + u8 event_generation_mode[0x4]; + u8 reserved_at_40[0x40]; +}; + union mlx5_ifc_ports_control_registers_document_bits { struct mlx5_ifc_bufferx_reg_bits bufferx_reg; struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout; @@ -7845,6 +8052,7 @@ union mlx5_ifc_ports_control_registers_document_bits { struct mlx5_ifc_pmtu_reg_bits pmtu_reg; struct mlx5_ifc_ppad_reg_bits ppad_reg; struct mlx5_ifc_ppcnt_reg_bits ppcnt_reg; + struct mlx5_ifc_mpcnt_reg_bits mpcnt_reg; struct mlx5_ifc_pplm_reg_bits pplm_reg; struct mlx5_ifc_pplr_reg_bits pplr_reg; struct mlx5_ifc_ppsc_reg_bits ppsc_reg; @@ -7857,6 +8065,8 @@ union mlx5_ifc_ports_control_registers_document_bits { struct mlx5_ifc_pvlc_reg_bits pvlc_reg; struct mlx5_ifc_slrg_reg_bits slrg_reg; struct mlx5_ifc_sltp_reg_bits sltp_reg; + struct mlx5_ifc_mtpps_reg_bits mtpps_reg; + struct mlx5_ifc_mtppse_reg_bits mtppse_reg; u8 reserved_at_0[0x60e0]; }; diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index 0aacb2a7480d..3096370fe831 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h @@ -50,9 +50,6 @@ #define MLX5_BSF_APPTAG_ESCAPE 0x1 #define MLX5_BSF_APPREF_ESCAPE 0x2 -#define MLX5_QPN_BITS 24 -#define MLX5_QPN_MASK ((1 << MLX5_QPN_BITS) - 1) - enum mlx5_qp_optpar { MLX5_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0, MLX5_QP_OPTPAR_RRE = 1 << 1, @@ -215,6 +212,7 @@ struct mlx5_wqe_ctrl_seg { #define MLX5_WQE_CTRL_OPCODE_MASK 0xff #define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00 #define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8 +#define MLX5_WQE_AV_EXT 0x80000000 enum { MLX5_ETH_WQE_L3_INNER_CSUM = 1 << 4, @@ -223,14 +221,26 @@ enum { MLX5_ETH_WQE_L4_CSUM = 1 << 7, }; +enum { + MLX5_ETH_WQE_INSERT_VLAN = 1 << 15, +}; + struct mlx5_wqe_eth_seg { u8 rsvd0[4]; u8 cs_flags; u8 rsvd1; __be16 mss; __be32 rsvd2; - __be16 inline_hdr_sz; - u8 inline_hdr_start[2]; + union { + struct { + __be16 sz; + u8 start[2]; + } inline_hdr; + struct { + __be16 type; + __be16 vlan_tci; + } insert; + }; }; struct mlx5_wqe_xrc_seg { @@ -245,6 +255,23 @@ struct mlx5_wqe_masked_atomic_seg { __be64 compare_mask; }; +struct mlx5_base_av { + union { + struct { + __be32 qkey; + __be32 reserved; + } qkey; + __be64 dc_key; + } key; + __be32 dqp_dct; + u8 stat_rate_sl; + u8 fl_mlid; + union { + __be16 rlid; + __be16 udp_sport; + }; +}; + struct mlx5_av { union { struct { @@ -292,10 +319,14 @@ struct mlx5_wqe_data_seg { struct mlx5_wqe_umr_ctrl_seg { u8 flags; u8 rsvd0[3]; - __be16 klm_octowords; - __be16 bsf_octowords; + __be16 xlt_octowords; + union { + __be16 xlt_offset; + __be16 bsf_octowords; + }; __be64 mkey_mask; - u8 rsvd1[32]; + __be32 xlt_offset_47_16; + u8 rsvd1[28]; }; struct mlx5_seg_set_psv { @@ -389,6 +420,10 @@ struct mlx5_bsf { struct mlx5_bsf_inl m_inl; }; +struct mlx5_mtt { + __be64 ptag; +}; + struct mlx5_klm { __be32 bcount; __be32 key; @@ -410,46 +445,9 @@ struct mlx5_stride_block_ctrl_seg { __be16 num_entries; }; -enum mlx5_pagefault_flags { - MLX5_PFAULT_REQUESTOR = 1 << 0, - MLX5_PFAULT_WRITE = 1 << 1, - MLX5_PFAULT_RDMA = 1 
<< 2, -}; - -/* Contains the details of a pagefault. */ -struct mlx5_pagefault { - u32 bytes_committed; - u8 event_subtype; - enum mlx5_pagefault_flags flags; - union { - /* Initiator or send message responder pagefault details. */ - struct { - /* Received packet size, only valid for responders. */ - u32 packet_size; - /* - * WQE index. Refers to either the send queue or - * receive queue, according to event_subtype. - */ - u16 wqe_index; - } wqe; - /* RDMA responder pagefault details */ - struct { - u32 r_key; - /* - * Received packet size, minimal size page fault - * resolution required for forward progress. - */ - u32 packet_size; - u32 rdma_op_len; - u64 rdma_va; - } rdma; - }; -}; - struct mlx5_core_qp { struct mlx5_core_rsc_common common; /* must be first */ void (*event) (struct mlx5_core_qp *, int); - void (*pfault_handler)(struct mlx5_core_qp *, struct mlx5_pagefault *); int qpn; struct mlx5_rsc_debug *dbg; int pid; @@ -549,10 +547,6 @@ void mlx5_init_qp_table(struct mlx5_core_dev *dev); void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev); int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp); void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp); -#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING -int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn, - u8 context, int error); -#endif int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen, struct mlx5_core_qp *rq); void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev, diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h index ec35157ea725..656c70b65dd2 100644 --- a/include/linux/mlx5/vport.h +++ b/include/linux/mlx5/vport.h @@ -51,6 +51,7 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, u16 vport, u8 *addr); int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev, u16 vport, u8 *min_inline); +void mlx5_query_min_inline(struct mlx5_core_dev *mdev, u8 *min_inline); int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev, u16 vport, u8 min_inline); int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev, diff --git a/include/linux/mm.h b/include/linux/mm.h index b84615b0f64c..5f01c88f0800 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -76,6 +76,10 @@ extern int mmap_rnd_compat_bits __read_mostly; #define page_to_virt(x) __va(PFN_PHYS(page_to_pfn(x))) #endif +#ifndef lm_alias +#define lm_alias(x) __va(__pa_symbol(x)) +#endif + /* * To prevent common memory management code establishing * a zero page mapping on a read fault. @@ -281,6 +285,17 @@ extern pgprot_t protection_map[16]; #define FAULT_FLAG_REMOTE 0x80 /* faulting for non current tsk/mm */ #define FAULT_FLAG_INSTRUCTION 0x100 /* The fault was during an instruction fetch */ +#define FAULT_FLAG_TRACE \ + { FAULT_FLAG_WRITE, "WRITE" }, \ + { FAULT_FLAG_MKWRITE, "MKWRITE" }, \ + { FAULT_FLAG_ALLOW_RETRY, "ALLOW_RETRY" }, \ + { FAULT_FLAG_RETRY_NOWAIT, "RETRY_NOWAIT" }, \ + { FAULT_FLAG_KILLABLE, "KILLABLE" }, \ + { FAULT_FLAG_TRIED, "TRIED" }, \ + { FAULT_FLAG_USER, "USER" }, \ + { FAULT_FLAG_REMOTE, "REMOTE" }, \ + { FAULT_FLAG_INSTRUCTION, "INSTRUCTION" } + /* * vm_fault is filled by the pagefault handler and passed to the vma's * ->fault function.
The vma's ->fault is responsible for returning a bitmask @@ -299,6 +314,9 @@ struct vm_fault { unsigned long address; /* Faulting virtual address */ pmd_t *pmd; /* Pointer to pmd entry matching * the 'address' */ + pud_t *pud; /* Pointer to pud entry matching + * the 'address' + */ pte_t orig_pte; /* Value of PTE at the time of fault */ struct page *cow_page; /* Page handler may use for COW fault */ @@ -326,6 +344,13 @@ struct vm_fault { */ }; +/* page entry size for vm->huge_fault() */ +enum page_entry_size { + PE_SIZE_PTE = 0, + PE_SIZE_PMD, + PE_SIZE_PUD, +}; + /* * These are the virtual MM functions - opening of an area, closing and * unmapping it (needed to keep files on disk up-to-date etc), pointer @@ -335,18 +360,17 @@ struct vm_operations_struct { void (*open)(struct vm_area_struct * area); void (*close)(struct vm_area_struct * area); int (*mremap)(struct vm_area_struct * area); - int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf); - int (*pmd_fault)(struct vm_area_struct *, unsigned long address, - pmd_t *, unsigned int flags); + int (*fault)(struct vm_fault *vmf); + int (*huge_fault)(struct vm_fault *vmf, enum page_entry_size pe_size); void (*map_pages)(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff); /* notification that a previously read-only page is about to become * writable, if an error is returned it will cause a SIGBUS */ - int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf); + int (*page_mkwrite)(struct vm_fault *vmf); /* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */ - int (*pfn_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf); + int (*pfn_mkwrite)(struct vm_fault *vmf); /* called by access_process_vm when get_user_pages() fails, typically * for use by special VMAs that can switch between memory and hardware @@ -402,6 +426,10 @@ static inline int pmd_devmap(pmd_t pmd) { return 0; } +static inline int pud_devmap(pud_t pud) +{ + return 0; +} #endif /* @@ -1107,6 +1135,20 @@ static inline void clear_page_pfmemalloc(struct page *page) VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | \ VM_FAULT_FALLBACK) +#define VM_FAULT_RESULT_TRACE \ + { VM_FAULT_OOM, "OOM" }, \ + { VM_FAULT_SIGBUS, "SIGBUS" }, \ + { VM_FAULT_MAJOR, "MAJOR" }, \ + { VM_FAULT_WRITE, "WRITE" }, \ + { VM_FAULT_HWPOISON, "HWPOISON" }, \ + { VM_FAULT_HWPOISON_LARGE, "HWPOISON_LARGE" }, \ + { VM_FAULT_SIGSEGV, "SIGSEGV" }, \ + { VM_FAULT_NOPAGE, "NOPAGE" }, \ + { VM_FAULT_LOCKED, "LOCKED" }, \ + { VM_FAULT_RETRY, "RETRY" }, \ + { VM_FAULT_FALLBACK, "FALLBACK" }, \ + { VM_FAULT_DONE_COW, "DONE_COW" } + /* Encode hstate index for a hwpoisoned large page */ #define VM_FAULT_SET_HINDEX(x) ((x) << 12) #define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf) @@ -1124,18 +1166,7 @@ extern void pagefault_out_of_memory(void); */ #define SHOW_MEM_FILTER_NODES (0x0001u) /* disallowed nodes */ -extern void show_free_areas(unsigned int flags); -extern bool skip_free_areas_node(unsigned int flags, int nid); - -int shmem_zero_setup(struct vm_area_struct *); -#ifdef CONFIG_SHMEM -bool shmem_mapping(struct address_space *mapping); -#else -static inline bool shmem_mapping(struct address_space *mapping) -{ - return false; -} -#endif +extern void show_free_areas(unsigned int flags, nodemask_t *nodemask); extern bool can_do_mlock(void); extern int user_shm_lock(size_t, struct user_struct *); @@ -1148,8 +1179,6 @@ struct zap_details { struct address_space *check_mapping; /* Check page->mapping if set */ pgoff_t first_index; /* Lowest page->index to unmap */ pgoff_t last_index; 
/* Highest page->index to unmap */ - bool ignore_dirty; /* Ignore dirty pages */ - bool check_swap_entries; /* Check also swap entries */ }; struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, @@ -1160,12 +1189,16 @@ struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr, int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, unsigned long size); void zap_page_range(struct vm_area_struct *vma, unsigned long address, - unsigned long size, struct zap_details *); + unsigned long size); void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma, unsigned long start, unsigned long end); /** * mm_walk - callbacks for walk_page_range + * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry + * this handler should only handle pud_trans_huge() puds. + * the pmd_entry or pte_entry callbacks will be used for + * regular PUDs. * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry * this handler is required to be able to handle * pmd_trans_huge() pmds. They may simply choose to @@ -1185,6 +1218,8 @@ void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma, * (see the comment on walk_page_range() for more details) */ struct mm_walk { + int (*pud_entry)(pud_t *pud, unsigned long addr, + unsigned long next, struct mm_walk *walk); int (*pmd_entry)(pmd_t *pmd, unsigned long addr, unsigned long next, struct mm_walk *walk); int (*pte_entry)(pte_t *pte, unsigned long addr, @@ -1355,6 +1390,16 @@ static inline bool vma_is_anonymous(struct vm_area_struct *vma) return !vma->vm_ops; } +#ifdef CONFIG_SHMEM +/* + * The vma_is_shmem is not inline because it is used only by slow + * paths in userfault. + */ +bool vma_is_shmem(struct vm_area_struct *vma); +#else +static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; } +#endif + static inline int stack_guard_page_start(struct vm_area_struct *vma, unsigned long addr) { @@ -1515,14 +1560,24 @@ static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, return ptep; } +#ifdef __PAGETABLE_P4D_FOLDED +static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, + unsigned long address) +{ + return 0; +} +#else +int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address); +#endif + #ifdef __PAGETABLE_PUD_FOLDED -static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, +static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address) { return 0; } #else -int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address); +int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address); #endif #if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU) @@ -1574,11 +1629,22 @@ int __pte_alloc_kernel(pmd_t *pmd, unsigned long address); * Remove it when 4level-fixup.h has been removed. */ #if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK) -static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) + +#ifndef __ARCH_HAS_5LEVEL_HACK +static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd, + unsigned long address) +{ + return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ? + NULL : p4d_offset(pgd, address); +} + +static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d, + unsigned long address) { - return (unlikely(pgd_none(*pgd)) && __pud_alloc(mm, pgd, address))? - NULL: pud_offset(pgd, address); + return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ? 
+ NULL : pud_offset(p4d, address); } +#endif /* !__ARCH_HAS_5LEVEL_HACK */ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) { @@ -1758,8 +1824,26 @@ static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd) return ptl; } -extern void __init pagecache_init(void); +/* + * No scalability reason to split PUD locks yet, but follow the same pattern + * as the PMD locks to make it easier if we decide to. The VM should not be + * considered ready to switch to split PUD locks yet; there may be places + * which need to be converted from page_table_lock. + */ +static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud) +{ + return &mm->page_table_lock; +} + +static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud) +{ + spinlock_t *ptl = pud_lockptr(mm, pud); + + spin_lock(ptl); + return ptl; +} +extern void __init pagecache_init(void); extern void free_area_init(unsigned long * zones_size); extern void free_area_init_node(int nid, unsigned long * zones_size, unsigned long zone_start_pfn, unsigned long *zholes_size); @@ -1896,7 +1980,7 @@ extern void setup_per_zone_wmarks(void); extern int __meminit init_per_zone_wmark_min(void); extern void mem_init(void); extern void __init mmap_init(void); -extern void show_mem(unsigned int flags); +extern void show_mem(unsigned int flags, nodemask_t *nodemask); extern long si_mem_available(void); extern void si_meminfo(struct sysinfo * val); extern void si_meminfo_node(struct sysinfo *val, int nid); @@ -1904,8 +1988,8 @@ extern void si_meminfo_node(struct sysinfo *val, int nid); extern unsigned long arch_reserved_kernel_pages(void); #endif -extern __printf(2, 3) -void warn_alloc(gfp_t gfp_mask, const char *fmt, ...); +extern __printf(3, 4) +void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...); extern void setup_per_cpu_pageset(void); @@ -1968,8 +2052,10 @@ extern struct vm_area_struct *vma_merge(struct mm_struct *, unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t, struct mempolicy *, struct vm_userfaultfd_ctx); extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *); -extern int split_vma(struct mm_struct *, - struct vm_area_struct *, unsigned long addr, int new_below); +extern int __split_vma(struct mm_struct *, struct vm_area_struct *, + unsigned long addr, int new_below); +extern int split_vma(struct mm_struct *, struct vm_area_struct *, + unsigned long addr, int new_below); extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *); extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *, struct rb_node **, struct rb_node *); @@ -2017,18 +2103,22 @@ extern int install_special_mapping(struct mm_struct *mm, extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); extern unsigned long mmap_region(struct file *file, unsigned long addr, - unsigned long len, vm_flags_t vm_flags, unsigned long pgoff); + unsigned long len, vm_flags_t vm_flags, unsigned long pgoff, + struct list_head *uf); extern unsigned long do_mmap(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, - vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate); -extern int do_munmap(struct mm_struct *, unsigned long, size_t); + vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate, + struct list_head *uf); +extern int do_munmap(struct mm_struct *, unsigned long, size_t, + struct list_head *uf); static inline unsigned long 
do_mmap_pgoff(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, - unsigned long pgoff, unsigned long *populate) + unsigned long pgoff, unsigned long *populate, + struct list_head *uf) { - return do_mmap(file, addr, len, prot, flags, 0, pgoff, populate); + return do_mmap(file, addr, len, prot, flags, 0, pgoff, populate, uf); } #ifdef CONFIG_MMU @@ -2045,6 +2135,7 @@ static inline void mm_populate(unsigned long addr, unsigned long len) {} /* These take the mm semaphore themselves */ extern int __must_check vm_brk(unsigned long, unsigned long); +extern int __must_check vm_brk_flags(unsigned long, unsigned long, unsigned long); extern int vm_munmap(unsigned long, size_t); extern unsigned long __must_check vm_mmap(struct file *, unsigned long, unsigned long, unsigned long, @@ -2088,10 +2179,10 @@ extern void truncate_inode_pages_range(struct address_space *, extern void truncate_inode_pages_final(struct address_space *); /* generic vm_area_ops exported for stackable file systems */ -extern int filemap_fault(struct vm_area_struct *, struct vm_fault *); +extern int filemap_fault(struct vm_fault *vmf); extern void filemap_map_pages(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff); -extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); +extern int filemap_page_mkwrite(struct vm_fault *vmf); /* mm/page-writeback.c */ int write_one_page(struct page *page, int wait); @@ -2315,7 +2406,8 @@ void sparse_mem_maps_populate_node(struct page **map_map, struct page *sparse_mem_map_populate(unsigned long pnum, int nid); pgd_t *vmemmap_pgd_populate(unsigned long addr, int node); -pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node); +p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node); +pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node); pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node); pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node); void *vmemmap_alloc_block(unsigned long size, int node); @@ -2396,6 +2488,10 @@ extern void clear_huge_page(struct page *page, extern void copy_user_huge_page(struct page *dst, struct page *src, unsigned long addr, struct vm_area_struct *vma, unsigned int pages_per_huge_page); +extern long copy_huge_page_from_user(struct page *dst_page, + const void __user *usr_src, + unsigned int pages_per_huge_page, + bool allow_pagefault); #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ extern struct page_ext_operations debug_guardpage_ops; diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index 41d376e7116d..e030a68ead7e 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -50,6 +50,13 @@ static __always_inline void add_page_to_lru_list(struct page *page, list_add(&page->lru, &lruvec->lists[lru]); } +static __always_inline void add_page_to_lru_list_tail(struct page *page, + struct lruvec *lruvec, enum lru_list lru) +{ + update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page)); + list_add_tail(&page->lru, &lruvec->lists[lru]); +} + static __always_inline void del_page_from_lru_list(struct page *page, struct lruvec *lruvec, enum lru_list lru) { diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 808751d7b737..f60f45fe226f 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -1,9 +1,9 @@ #ifndef _LINUX_MM_TYPES_H #define _LINUX_MM_TYPES_H +#include <linux/mm_types_task.h> + #include <linux/auxvec.h> -#include 
<linux/types.h> -#include <linux/threads.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/rbtree.h> @@ -13,7 +13,7 @@ #include <linux/uprobes.h> #include <linux/page-flags-layout.h> #include <linux/workqueue.h> -#include <asm/page.h> + #include <asm/mmu.h> #ifndef AT_VECTOR_SIZE_ARCH @@ -24,11 +24,6 @@ struct address_space; struct mem_cgroup; -#define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS) -#define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \ - IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK)) -#define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8) - /* * Each physical page in the system has a struct page associated with * it to keep track of whatever it is we are using the page for at the @@ -231,17 +226,6 @@ struct page { #endif ; -struct page_frag { - struct page *page; -#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) - __u32 offset; - __u32 size; -#else - __u16 offset; - __u16 size; -#endif -}; - #define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK) #define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE) @@ -371,27 +355,6 @@ struct core_state { struct completion startup; }; -enum { - MM_FILEPAGES, /* Resident file mapping pages */ - MM_ANONPAGES, /* Resident anonymous pages */ - MM_SWAPENTS, /* Anonymous swap entries */ - MM_SHMEMPAGES, /* Resident shared memory pages */ - NR_MM_COUNTERS -}; - -#if USE_SPLIT_PTE_PTLOCKS && defined(CONFIG_MMU) -#define SPLIT_RSS_COUNTING -/* per-thread cached information, */ -struct task_rss_stat { - int events; /* for synchronization threshold */ - int count[NR_MM_COUNTERS]; -}; -#endif /* USE_SPLIT_PTE_PTLOCKS */ - -struct mm_rss_stat { - atomic_long_t count[NR_MM_COUNTERS]; -}; - struct kioctx_table; struct mm_struct { struct vm_area_struct *mmap; /* list of VMAs */ @@ -407,8 +370,27 @@ struct mm_struct { unsigned long task_size; /* size of task vm space */ unsigned long highest_vm_end; /* highest vma end address */ pgd_t * pgd; - atomic_t mm_users; /* How many users with user space? */ - atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */ + + /** + * @mm_users: The number of users including userspace. + * + * Use mmget()/mmget_not_zero()/mmput() to modify. When this drops + * to 0 (i.e. when the task exits and there are no other temporary + * reference holders), we also release a reference on @mm_count + * (which may then free the &struct mm_struct if @mm_count also + * drops to 0). + */ + atomic_t mm_users; + + /** + * @mm_count: The number of references to &struct mm_struct + * (@mm_users count as 1). + * + * Use mmgrab()/mmdrop() to modify. When this drops to 0, the + * &struct mm_struct is freed. + */ + atomic_t mm_count; + atomic_long_t nr_ptes; /* PTE page table pages */ #if CONFIG_PGTABLE_LEVELS > 2 atomic_long_t nr_pmds; /* PMD page table pages */ @@ -515,6 +497,8 @@ struct mm_struct { struct work_struct async_put_work; }; +extern struct mm_struct init_mm; + static inline void mm_init_cpumask(struct mm_struct *mm) { #ifdef CONFIG_CPUMASK_OFFSTACK diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h new file mode 100644 index 000000000000..136dfdf63ba1 --- /dev/null +++ b/include/linux/mm_types_task.h @@ -0,0 +1,87 @@ +#ifndef _LINUX_MM_TYPES_TASK_H +#define _LINUX_MM_TYPES_TASK_H + +/* + * Here are the definitions of the MM data types that are embedded in 'struct task_struct'. + * + * (These are defined separately to decouple sched.h from mm_types.h as much as possible.) 
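A caller-side sketch of the @mm_users/@mm_count rules documented in the kernel-doc above; the helpers are the ones the comments name (mmgrab/mmdrop, mmget_not_zero/mmput), while the surrounding functions are hypothetical:

static void hold_mm_struct(struct mm_struct *mm)
{
        mmgrab(mm);     /* pin the struct mm_struct itself */
        /* mm stays valid here even after all userspace users have gone */
        mmdrop(mm);     /* may free the struct once mm_count drops to 0 */
}

static int hold_address_space(struct mm_struct *mm)
{
        if (!mmget_not_zero(mm))        /* refuses once mm_users has hit 0 */
                return -ESRCH;
        /* page tables and VMAs may safely be used here */
        mmput(mm);      /* drops mm_users and, at 0, its mm_count reference */
        return 0;
}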
+ */
+
+#include <linux/types.h>
+#include <linux/threads.h>
+#include <linux/atomic.h>
+#include <linux/cpumask.h>
+
+#include <asm/page.h>
+
+#define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
+#define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \
+		IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
+#define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8)
+
+/*
+ * The per task VMA cache array:
+ */
+#define VMACACHE_BITS 2
+#define VMACACHE_SIZE (1U << VMACACHE_BITS)
+#define VMACACHE_MASK (VMACACHE_SIZE - 1)
+
+struct vmacache {
+	u32 seqnum;
+	struct vm_area_struct *vmas[VMACACHE_SIZE];
+};
+
+enum {
+	MM_FILEPAGES,	/* Resident file mapping pages */
+	MM_ANONPAGES,	/* Resident anonymous pages */
+	MM_SWAPENTS,	/* Anonymous swap entries */
+	MM_SHMEMPAGES,	/* Resident shared memory pages */
+	NR_MM_COUNTERS
+};
+
+#if USE_SPLIT_PTE_PTLOCKS && defined(CONFIG_MMU)
+#define SPLIT_RSS_COUNTING
+/* per-thread cached information, */
+struct task_rss_stat {
+	int events;	/* for synchronization threshold */
+	int count[NR_MM_COUNTERS];
+};
+#endif /* USE_SPLIT_PTE_PTLOCKS */
+
+struct mm_rss_stat {
+	atomic_long_t count[NR_MM_COUNTERS];
+};
+
+struct page_frag {
+	struct page *page;
+#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
+	__u32 offset;
+	__u32 size;
+#else
+	__u16 offset;
+	__u16 size;
+#endif
+};
+
+/* Track pages that require TLB flushes */
+struct tlbflush_unmap_batch {
+#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+	/*
+	 * Each bit set is a CPU that potentially has a TLB entry for one of
+	 * the PFNs being flushed. See set_tlb_ubc_flush_pending().
+	 */
+	struct cpumask cpumask;
+
+	/* True if any bit in cpumask is set */
+	bool flush_required;
+
+	/*
+	 * If true then the PTE was dirty when unmapped. The entry must be
+	 * flushed before IO is initiated or a stale TLB entry potentially
+	 * allows an update without redirtying the page.
+ */ + bool writable; +#endif +}; + +#endif /* _LINUX_MM_TYPES_TASK_H */ diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h index 46794a7a531c..b733eb404ffc 100644 --- a/include/linux/mmc/sdio_ids.h +++ b/include/linux/mmc/sdio_ids.h @@ -36,6 +36,7 @@ #define SDIO_DEVICE_ID_BROADCOM_43362 0xa962 #define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6 #define SDIO_DEVICE_ID_BROADCOM_4345 0x4345 +#define SDIO_DEVICE_ID_BROADCOM_43455 0xa9bf #define SDIO_DEVICE_ID_BROADCOM_4354 0x4354 #define SDIO_DEVICE_ID_BROADCOM_4356 0x4356 diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index a1a210d59961..51891fb0d3ce 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h @@ -381,6 +381,19 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) ___pmd; \ }) +#define pudp_huge_clear_flush_notify(__vma, __haddr, __pud) \ +({ \ + unsigned long ___haddr = __haddr & HPAGE_PUD_MASK; \ + struct mm_struct *___mm = (__vma)->vm_mm; \ + pud_t ___pud; \ + \ + ___pud = pudp_huge_clear_flush(__vma, __haddr, __pud); \ + mmu_notifier_invalidate_range(___mm, ___haddr, \ + ___haddr + HPAGE_PUD_SIZE); \ + \ + ___pud; \ +}) + #define pmdp_huge_get_and_clear_notify(__mm, __haddr, __pmd) \ ({ \ unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \ @@ -475,6 +488,7 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) #define pmdp_clear_young_notify pmdp_test_and_clear_young #define ptep_clear_flush_notify ptep_clear_flush #define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush +#define pudp_huge_clear_flush_notify pudp_huge_clear_flush #define pmdp_huge_get_and_clear_notify pmdp_huge_get_and_clear #define set_pte_at_notify set_pte_at diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index f4aac87adcc3..8e02b3750fe0 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -236,8 +236,6 @@ struct lruvec { #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON)) #define LRU_ALL ((1 << NR_LRU_LISTS) - 1) -/* Isolate clean file */ -#define ISOLATE_CLEAN ((__force isolate_mode_t)0x1) /* Isolate unmapped file */ #define ISOLATE_UNMAPPED ((__force isolate_mode_t)0x2) /* Isolate for asynchronous migration */ @@ -779,7 +777,7 @@ static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec) #endif } -extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru); +extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx); #ifdef CONFIG_HAVE_MEMORY_PRESENT void memory_present(int nid, unsigned long start, unsigned long end); diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 8a57f0b1242d..8850fcaf50db 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -501,6 +501,7 @@ struct platform_device_id { kernel_ulong_t driver_data; }; +#define MDIO_NAME_SIZE 32 #define MDIO_MODULE_PREFIX "mdio:" #define MDIO_ID_FMT "%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d" diff --git a/include/linux/module.h b/include/linux/module.h index 5cddadff2c25..0297c5cd7cdf 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -126,13 +126,13 @@ extern void cleanup_module(void); /* Each module must use one module_init(). */ #define module_init(initfn) \ - static inline initcall_t __inittest(void) \ + static inline initcall_t __maybe_unused __inittest(void) \ { return initfn; } \ int init_module(void) __attribute__((alias(#initfn))); /* This is only required if you want to be unloadable. 
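A minimal module built on these two macros, as an illustrative sketch rather than part of the patch; the __maybe_unused annotations added above keep the hidden __inittest()/__exittest() type-check helpers from triggering -Wunused-function warnings in such a module:

#include <linux/module.h>

static int __init example_init(void)
{
        return 0;       /* set up resources */
}

static void __exit example_exit(void)
{
        /* tear down resources */
}

module_init(example_init);      /* type-checked against initcall_t */
module_exit(example_exit);      /* type-checked against exitcall_t */
MODULE_LICENSE("GPL");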
*/ #define module_exit(exitfn) \ - static inline exitcall_t __exittest(void) \ + static inline exitcall_t __maybe_unused __exittest(void) \ { return exitfn; } \ void cleanup_module(void) __attribute__((alias(#exitfn))); @@ -281,8 +281,6 @@ enum module_state { MODULE_STATE_UNFORMED, /* Still setting it up. */ }; -struct module; - struct mod_tree_node { struct module *mod; struct latch_tree_node node; @@ -763,7 +761,7 @@ extern int module_sysfs_initialized; #define __MODULE_STRING(x) __stringify(x) -#ifdef CONFIG_DEBUG_SET_MODULE_RONX +#ifdef CONFIG_STRICT_MODULE_RWX extern void set_all_modules_text_rw(void); extern void set_all_modules_text_ro(void); extern void module_enable_ro(const struct module *mod, bool after_init); diff --git a/include/linux/mount.h b/include/linux/mount.h index c6f55158d5e5..8e0352af06b7 100644 --- a/include/linux/mount.h +++ b/include/linux/mount.h @@ -90,6 +90,9 @@ struct file_system_type; extern struct vfsmount *vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void *data); +extern struct vfsmount *vfs_submount(const struct dentry *mountpoint, + struct file_system_type *type, + const char *name, void *data); extern void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list); extern void mark_mounts_for_expiry(struct list_head *mounts); diff --git a/include/linux/mroute.h b/include/linux/mroute.h index e5fb81376e92..d7f63339ef0b 100644 --- a/include/linux/mroute.h +++ b/include/linux/mroute.h @@ -3,6 +3,7 @@ #include <linux/in.h> #include <linux/pim.h> +#include <linux/rhashtable.h> #include <net/sock.h> #include <uapi/linux/mroute.h> @@ -60,7 +61,6 @@ struct vif_device { #define VIFF_STATIC 0x8000 #define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL) -#define MFC_LINES 64 struct mr_table { struct list_head list; @@ -69,8 +69,9 @@ struct mr_table { struct sock __rcu *mroute_sk; struct timer_list ipmr_expire_timer; struct list_head mfc_unres_queue; - struct list_head mfc_cache_array[MFC_LINES]; struct vif_device vif_table[MAXVIFS]; + struct rhltable mfc_hash; + struct list_head mfc_cache_list; int maxvif; atomic_t cache_resolve_queue_len; bool mroute_do_assert; @@ -85,17 +86,48 @@ enum { MFC_STATIC = BIT(0), }; +struct mfc_cache_cmp_arg { + __be32 mfc_mcastgrp; + __be32 mfc_origin; +}; + +/** + * struct mfc_cache - multicast routing entries + * @mnode: rhashtable list + * @mfc_mcastgrp: destination multicast group address + * @mfc_origin: source address + * @cmparg: used for rhashtable comparisons + * @mfc_parent: source interface (iif) + * @mfc_flags: entry flags + * @expires: unresolved entry expire time + * @unresolved: unresolved cached skbs + * @last_assert: time of last assert + * @minvif: minimum VIF id + * @maxvif: maximum VIF id + * @bytes: bytes that have passed for this entry + * @pkt: packets that have passed for this entry + * @wrong_if: number of wrong source interface hits + * @lastuse: time of last use of the group (traffic or update) + * @ttls: OIF TTL threshold array + * @list: global entry list + * @rcu: used for entry destruction + */ struct mfc_cache { - struct list_head list; - __be32 mfc_mcastgrp; /* Group the entry belongs to */ - __be32 mfc_origin; /* Source of packet */ - vifi_t mfc_parent; /* Source interface */ - int mfc_flags; /* Flags on line */ + struct rhlist_head mnode; + union { + struct { + __be32 mfc_mcastgrp; + __be32 mfc_origin; + }; + struct mfc_cache_cmp_arg cmparg; + }; + vifi_t mfc_parent; + int mfc_flags; union { struct { unsigned long expires; - struct sk_buff_head 
unresolved; /* Unresolved buffers */ + struct sk_buff_head unresolved; } unres; struct { unsigned long last_assert; @@ -105,20 +137,15 @@ struct mfc_cache { unsigned long pkt; unsigned long wrong_if; unsigned long lastuse; - unsigned char ttls[MAXVIFS]; /* TTL thresholds */ + unsigned char ttls[MAXVIFS]; } res; } mfc_un; + struct list_head list; struct rcu_head rcu; }; -#ifdef __BIG_ENDIAN -#define MFC_HASH(a,b) (((((__force u32)(__be32)a)>>24)^(((__force u32)(__be32)b)>>26))&(MFC_LINES-1)) -#else -#define MFC_HASH(a,b) ((((__force u32)(__be32)a)^(((__force u32)(__be32)b)>>2))&(MFC_LINES-1)) -#endif - struct rtmsg; int ipmr_get_route(struct net *net, struct sk_buff *skb, __be32 saddr, __be32 daddr, - struct rtmsg *rtm, int nowait, u32 portid); + struct rtmsg *rtm, u32 portid); #endif diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h index 19a1c0c2993b..ce44e3e96d27 100644 --- a/include/linux/mroute6.h +++ b/include/linux/mroute6.h @@ -116,7 +116,7 @@ struct mfc6_cache { struct rtmsg; extern int ip6mr_get_route(struct net *net, struct sk_buff *skb, - struct rtmsg *rtm, int nowait, u32 portid); + struct rtmsg *rtm, u32 portid); #ifdef CONFIG_IPV6_MROUTE extern struct sock *mroute6_socket(struct net *net, struct sk_buff *skb); diff --git a/include/linux/msi.h b/include/linux/msi.h index a83b84ff70e5..df6d59201d31 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -325,12 +325,6 @@ void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg); struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode, struct msi_domain_info *info, struct irq_domain *parent); -int pci_msi_domain_alloc_irqs(struct irq_domain *domain, struct pci_dev *dev, - int nvec, int type); -void pci_msi_domain_free_irqs(struct irq_domain *domain, struct pci_dev *dev); -struct irq_domain *pci_msi_create_default_irq_domain(struct fwnode_handle *fwnode, - struct msi_domain_info *info, struct irq_domain *parent); - irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev, struct msi_desc *desc); int pci_msi_domain_check_cap(struct irq_domain *domain, diff --git a/include/linux/mtd/qinfo.h b/include/linux/mtd/qinfo.h index 7b3d487d8b3f..b532ce524dae 100644 --- a/include/linux/mtd/qinfo.h +++ b/include/linux/mtd/qinfo.h @@ -14,7 +14,7 @@ * @DevId - Chip Device ID * @qinfo - pointer to qinfo records describing the chip * @numchips - number of chips including virual RWW partitions - * @chipshift - Chip/partiton size 2^chipshift + * @chipshift - Chip/partition size 2^chipshift * @chips - per-chip data structure */ struct lpddr_private { diff --git a/include/linux/net.h b/include/linux/net.h index cd0c8bd0a1de..0620f5e18c96 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -146,7 +146,7 @@ struct proto_ops { int (*socketpair)(struct socket *sock1, struct socket *sock2); int (*accept) (struct socket *sock, - struct socket *newsock, int flags); + struct socket *newsock, int flags, bool kern); int (*getname) (struct socket *sock, struct sockaddr *addr, int *sockaddr_len, int peer); diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index 9c6c8ef2e9e7..9a0419594e84 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h @@ -71,7 +71,6 @@ enum { NETIF_F_HW_VLAN_STAG_RX_BIT, /* Receive VLAN STAG HW acceleration */ NETIF_F_HW_VLAN_STAG_FILTER_BIT,/* Receive filtering on VLAN STAGs */ NETIF_F_HW_L2FW_DOFFLOAD_BIT, /* Allow L2 Forwarding in Hardware */ - NETIF_F_BUSY_POLL_BIT, /* Busy poll */ NETIF_F_HW_TC_BIT, /* 
Offload TC infrastructure */ @@ -134,7 +133,6 @@ enum { #define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX) #define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX) #define NETIF_F_HW_L2FW_DOFFLOAD __NETIF_F(HW_L2FW_DOFFLOAD) -#define NETIF_F_BUSY_POLL __NETIF_F(BUSY_POLL) #define NETIF_F_HW_TC __NETIF_F(HW_TC) #define for_each_netdev_feature(mask_addr, bit) \ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 27914672602d..97456b2539e4 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -330,6 +330,7 @@ struct napi_struct { enum { NAPI_STATE_SCHED, /* Poll is scheduled */ + NAPI_STATE_MISSED, /* reschedule a napi */ NAPI_STATE_DISABLE, /* Disable pending */ NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */ NAPI_STATE_HASHED, /* In NAPI hash (busy polling possible) */ @@ -338,12 +339,13 @@ enum { }; enum { - NAPIF_STATE_SCHED = (1UL << NAPI_STATE_SCHED), - NAPIF_STATE_DISABLE = (1UL << NAPI_STATE_DISABLE), - NAPIF_STATE_NPSVC = (1UL << NAPI_STATE_NPSVC), - NAPIF_STATE_HASHED = (1UL << NAPI_STATE_HASHED), - NAPIF_STATE_NO_BUSY_POLL = (1UL << NAPI_STATE_NO_BUSY_POLL), - NAPIF_STATE_IN_BUSY_POLL = (1UL << NAPI_STATE_IN_BUSY_POLL), + NAPIF_STATE_SCHED = BIT(NAPI_STATE_SCHED), + NAPIF_STATE_MISSED = BIT(NAPI_STATE_MISSED), + NAPIF_STATE_DISABLE = BIT(NAPI_STATE_DISABLE), + NAPIF_STATE_NPSVC = BIT(NAPI_STATE_NPSVC), + NAPIF_STATE_HASHED = BIT(NAPI_STATE_HASHED), + NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL), + NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL), }; enum gro_result { @@ -352,6 +354,7 @@ enum gro_result { GRO_HELD, GRO_NORMAL, GRO_DROP, + GRO_CONSUMED, }; typedef enum gro_result gro_result_t; @@ -413,20 +416,7 @@ static inline bool napi_disable_pending(struct napi_struct *n) return test_bit(NAPI_STATE_DISABLE, &n->state); } -/** - * napi_schedule_prep - check if NAPI can be scheduled - * @n: NAPI context - * - * Test if NAPI routine is already running, and if not mark - * it as running. This is used as a condition variable to - * insure only one NAPI poll instance runs. We also make - * sure there is no pending NAPI disable. - */ -static inline bool napi_schedule_prep(struct napi_struct *n) -{ - return !napi_disable_pending(n) && - !test_and_set_bit(NAPI_STATE_SCHED, &n->state); -} +bool napi_schedule_prep(struct napi_struct *n); /** * napi_schedule - schedule NAPI poll @@ -463,7 +453,6 @@ static inline bool napi_reschedule(struct napi_struct *napi) return false; } -bool __napi_complete(struct napi_struct *n); bool napi_complete_done(struct napi_struct *n, int work_done); /** * napi_complete - NAPI processing complete @@ -917,8 +906,8 @@ struct netdev_xdp { * Callback used when the transmitter has not made any progress * for dev->watchdog ticks. * - * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev, - * struct rtnl_link_stats64 *storage); + * void (*ndo_get_stats64)(struct net_device *dev, + * struct rtnl_link_stats64 *storage); * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); * Called when a user wants to get the network device usage * statistics. Drivers must do one of the following: @@ -968,11 +957,12 @@ struct netdev_xdp { * with PF and querying it may introduce a theoretical security risk. 
* int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting); * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb); - * int (*ndo_setup_tc)(struct net_device *dev, u8 tc) - * Called to setup 'tc' number of traffic classes in the net device. This - * is always called from the stack with the rtnl lock held and netif tx - * queues stopped. This allows the netdevice to perform queue management - * safely. + * int (*ndo_setup_tc)(struct net_device *dev, u32 handle, + * __be16 protocol, struct tc_to_netdev *tc); + * Called to setup any 'tc' scheduler, classifier or action on @dev. + * This is always called from the stack with the rtnl lock held and netif + * tx queues stopped. This allows the netdevice to perform queue + * management safely. * * Fiber Channel over Ethernet (FCoE) offload functions. * int (*ndo_fcoe_enable)(struct net_device *dev); @@ -1166,8 +1156,8 @@ struct net_device_ops { struct neigh_parms *); void (*ndo_tx_timeout) (struct net_device *dev); - struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev, - struct rtnl_link_stats64 *storage); + void (*ndo_get_stats64)(struct net_device *dev, + struct rtnl_link_stats64 *storage); bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id); int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev, @@ -1184,9 +1174,6 @@ struct net_device_ops { struct netpoll_info *info); void (*ndo_netpoll_cleanup)(struct net_device *dev); #endif -#ifdef CONFIG_NET_RX_BUSY_POLL - int (*ndo_busy_poll)(struct napi_struct *dev); -#endif int (*ndo_set_vf_mac)(struct net_device *dev, int queue, u8 *mac); int (*ndo_set_vf_vlan)(struct net_device *dev, @@ -1553,7 +1540,6 @@ enum netdev_priv_flags { * @ax25_ptr: AX.25 specific data * @ieee80211_ptr: IEEE 802.11 specific data, assign before registering * - * @last_rx: Time of last Rx * @dev_addr: Hw address (before bcast, * because most packets are unicast) * @@ -1780,8 +1766,6 @@ struct net_device { /* * Cache lines mostly used on receive path (including eth_type_trans()) */ - unsigned long last_rx; - /* Interface address info used in eth_type_trans() */ unsigned char *dev_addr; @@ -1871,8 +1855,12 @@ struct net_device { struct pcpu_vstats __percpu *vstats; }; +#if IS_ENABLED(CONFIG_GARP) struct garp_port __rcu *garp_port; +#endif +#if IS_ENABLED(CONFIG_MRP) struct mrp_port __rcu *mrp_port; +#endif struct device dev; const struct attribute_group *sysfs_groups[4]; @@ -2669,6 +2657,19 @@ static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb, remcsum_unadjust((__sum16 *)ptr, grc->delta); } +#ifdef CONFIG_XFRM_OFFLOAD +static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush) +{ + if (PTR_ERR(pp) != -EINPROGRESS) + NAPI_GRO_CB(skb)->flush |= flush; +} +#else +static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush) +{ + NAPI_GRO_CB(skb)->flush |= flush; +} +#endif + static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, @@ -3111,7 +3112,19 @@ static inline bool netif_subqueue_stopped(const struct net_device *dev, return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb)); } -void netif_wake_subqueue(struct net_device *dev, u16 queue_index); +/** + * netif_wake_subqueue - allow sending packets on subqueue + * @dev: network device + * @queue_index: sub queue index + * + * Resume individual transmit queue of a device with multiple transmit queues. 
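A caller-side sketch of where this helper is typically used, for instance a driver's TX-completion path (function and queue bookkeeping hypothetical):

static void example_tx_clean(struct net_device *dev, u16 q)
{
        /* descriptors reclaimed; restart the queue if it had been stopped */
        if (__netif_subqueue_stopped(dev, q))
                netif_wake_subqueue(dev, q);
}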
+ */ +static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index) +{ + struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); + + netif_tx_wake_queue(txq); +} #ifdef CONFIG_XPS int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, @@ -3805,6 +3818,10 @@ void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, extern int netdev_max_backlog; extern int netdev_tstamp_prequeue; extern int weight_p; +extern int dev_weight_rx_bias; +extern int dev_weight_tx_bias; +extern int dev_rx_weight; +extern int dev_tx_weight; bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev); struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, @@ -3882,10 +3899,6 @@ void *netdev_lower_dev_get_private(struct net_device *dev, struct net_device *lower_dev); void netdev_lower_state_changed(struct net_device *lower_dev, void *lower_state_info); -int netdev_default_l2upper_neigh_construct(struct net_device *dev, - struct neighbour *n); -void netdev_default_l2upper_neigh_destroy(struct net_device *dev, - struct neighbour *n); /* RSS keys are 40 or 52 bytes long */ #define NETDEV_RSS_KEY_LEN 52 @@ -4338,6 +4351,15 @@ do { \ }) #endif +/* if @cond then downgrade to debug, else print at @level */ +#define netif_cond_dbg(priv, type, netdev, cond, level, fmt, args...) \ + do { \ + if (cond) \ + netif_dbg(priv, type, netdev, fmt, ##args); \ + else \ + netif_ ## level(priv, type, netdev, fmt, ##args); \ + } while (0) + #if defined(VERBOSE_DEBUG) #define netif_vdbg netif_dbg #else diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h index 1d82dd5e9a08..1b49209dd5c7 100644 --- a/include/linux/netfilter/nfnetlink.h +++ b/include/linux/netfilter/nfnetlink.h @@ -28,6 +28,7 @@ struct nfnetlink_subsystem { const struct nfnl_callback *cb; /* callback for individual types */ int (*commit)(struct net *net, struct sk_buff *skb); int (*abort)(struct net *net, struct sk_buff *skb); + bool (*valid_genid)(struct net *net, u32 genid); }; int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n); diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index 5117e4d2ddfa..be378cf47fcc 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -167,6 +167,7 @@ struct xt_match { const char *table; unsigned int matchsize; + unsigned int usersize; #ifdef CONFIG_COMPAT unsigned int compatsize; #endif @@ -207,6 +208,7 @@ struct xt_target { const char *table; unsigned int targetsize; + unsigned int usersize; #ifdef CONFIG_COMPAT unsigned int compatsize; #endif @@ -287,6 +289,13 @@ int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto, int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto, bool inv_proto); +int xt_match_to_user(const struct xt_entry_match *m, + struct xt_entry_match __user *u); +int xt_target_to_user(const struct xt_entry_target *t, + struct xt_entry_target __user *u); +int xt_data_to_user(void __user *dst, const void *src, + int usersize, int size); + void *xt_copy_counters_from_user(const void __user *user, unsigned int len, struct xt_counters_info *info, bool compat); diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index f1da8c8dd473..287f34161086 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -335,7 +335,7 @@ extern int nfs_refresh_inode(struct inode *, struct nfs_fattr *); extern int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr 
*fattr); extern int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr); extern int nfs_post_op_update_inode_force_wcc_locked(struct inode *inode, struct nfs_fattr *fattr); -extern int nfs_getattr(struct vfsmount *, struct dentry *, struct kstat *); +extern int nfs_getattr(const struct path *, struct kstat *, u32, unsigned int); extern void nfs_access_add_cache(struct inode *, struct nfs_access_entry *); extern void nfs_access_set_mask(struct nfs_access_entry *, u32); extern int nfs_permission(struct inode *, int); diff --git a/include/linux/nmi.h b/include/linux/nmi.h index 0a3fadc32693..aa3cd0878270 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h @@ -7,6 +7,43 @@ #include <linux/sched.h> #include <asm/irq.h> +#ifdef CONFIG_LOCKUP_DETECTOR +extern void touch_softlockup_watchdog_sched(void); +extern void touch_softlockup_watchdog(void); +extern void touch_softlockup_watchdog_sync(void); +extern void touch_all_softlockup_watchdogs(void); +extern int proc_dowatchdog_thresh(struct ctl_table *table, int write, + void __user *buffer, + size_t *lenp, loff_t *ppos); +extern unsigned int softlockup_panic; +extern unsigned int hardlockup_panic; +void lockup_detector_init(void); +#else +static inline void touch_softlockup_watchdog_sched(void) +{ +} +static inline void touch_softlockup_watchdog(void) +{ +} +static inline void touch_softlockup_watchdog_sync(void) +{ +} +static inline void touch_all_softlockup_watchdogs(void) +{ +} +static inline void lockup_detector_init(void) +{ +} +#endif + +#ifdef CONFIG_DETECT_HUNG_TASK +void reset_hung_task_detector(void); +#else +static inline void reset_hung_task_detector(void) +{ +} +#endif + /* * The run state of the lockup detectors is controlled by the content of the * 'watchdog_enabled' variable. 
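A sketch of the usual consumer of these hooks: a legitimately slow kernel loop telling the soft-lockup detector that it is still making progress (the work function is hypothetical):

        while (!done) {
                do_one_slow_step();             /* long-running, but progressing */
                touch_softlockup_watchdog();    /* reset this CPU's watchdog */
                cond_resched();
        }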
Each lockup detector has its dedicated bit - diff --git a/include/linux/nvme-rdma.h b/include/linux/nvme-rdma.h index bf240a3cbf99..a72fd04aa5e1 100644 --- a/include/linux/nvme-rdma.h +++ b/include/linux/nvme-rdma.h @@ -29,6 +29,30 @@ enum nvme_rdma_cm_status { NVME_RDMA_CM_INVALID_ORD = 0x08, }; +static inline const char *nvme_rdma_cm_msg(enum nvme_rdma_cm_status status) +{ + switch (status) { + case NVME_RDMA_CM_INVALID_LEN: + return "invalid length"; + case NVME_RDMA_CM_INVALID_RECFMT: + return "invalid record format"; + case NVME_RDMA_CM_INVALID_QID: + return "invalid queue ID"; + case NVME_RDMA_CM_INVALID_HSQSIZE: + return "invalid host SQ size"; + case NVME_RDMA_CM_INVALID_HRQSIZE: + return "invalid host RQ size"; + case NVME_RDMA_CM_NO_RSC: + return "resource not found"; + case NVME_RDMA_CM_INVALID_IRD: + return "invalid IRD"; + case NVME_RDMA_CM_INVALID_ORD: + return "Invalid ORD"; + default: + return "unrecognized reason"; + } +} + /** * struct nvme_rdma_cm_req - rdma connect request * diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 0b676a02cf3e..c43d435d4225 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -579,6 +579,12 @@ struct nvme_write_zeroes_cmd { __le16 appmask; }; +/* Features */ + +struct nvme_feat_auto_pst { + __le64 entries[32]; +}; + /* Admin commands */ enum nvme_admin_opcode { @@ -644,7 +650,9 @@ struct nvme_identify { __le32 nsid; __u64 rsvd2[2]; union nvme_data_ptr dptr; - __le32 cns; + __u8 cns; + __u8 rsvd3; + __le16 ctrlid; __u32 rsvd11[5]; }; diff --git a/include/linux/of.h b/include/linux/of.h index d72f01009297..21e6323de0f3 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -280,6 +280,7 @@ extern struct device_node *of_get_child_by_name(const struct device_node *node, /* cache lookup */ extern struct device_node *of_find_next_cache_node(const struct device_node *); +extern int of_find_last_cache_level(unsigned int cpu); extern struct device_node *of_find_node_with_property( struct device_node *from, const char *prop_name); diff --git a/include/linux/of_device.h b/include/linux/of_device.h index cc7dd687a89d..c12dace043f3 100644 --- a/include/linux/of_device.h +++ b/include/linux/of_device.h @@ -13,7 +13,6 @@ struct device; #ifdef CONFIG_OF extern const struct of_device_id *of_match_device( const struct of_device_id *matches, const struct device *dev); -extern void of_device_make_bus_id(struct device *dev); /** * of_driver_match_device - Tell if a driver's of_match_table matches a device. 
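A probe-time sketch of how of_match_device() is commonly consumed (driver, table, and compatible string hypothetical; assumes a platform driver context):

static const struct of_device_id example_of_ids[] = {
        { .compatible = "vendor,example-ip" },
        { /* sentinel */ }
};

static int example_probe(struct platform_device *pdev)
{
        const struct of_device_id *match;

        match = of_match_device(example_of_ids, &pdev->dev);
        if (!match)
                return -ENODEV;
        /* match->data, if set in the table, carries per-compatible data */
        return 0;
}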
@@ -37,6 +36,7 @@ extern const void *of_device_get_match_data(const struct device *dev); extern ssize_t of_device_get_modalias(struct device *dev, char *str, ssize_t len); +extern int of_device_request_module(struct device *dev); extern void of_device_uevent(struct device *dev, struct kobj_uevent_env *env); extern int of_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env); @@ -78,6 +78,11 @@ static inline int of_device_get_modalias(struct device *dev, return -ENODEV; } +static inline int of_device_request_module(struct device *dev) +{ + return -ENODEV; +} + static inline int of_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env) { diff --git a/include/linux/of_graph.h b/include/linux/of_graph.h index bb3a5a2cd570..abdb02eaef06 100644 --- a/include/linux/of_graph.h +++ b/include/linux/of_graph.h @@ -51,6 +51,8 @@ struct device_node *of_graph_get_endpoint_by_regs( struct device_node *of_graph_get_remote_port_parent( const struct device_node *node); struct device_node *of_graph_get_remote_port(const struct device_node *node); +struct device_node *of_graph_get_remote_node(const struct device_node *node, + u32 port, u32 endpoint); #else static inline int of_graph_parse_endpoint(const struct device_node *node, @@ -89,6 +91,12 @@ static inline struct device_node *of_graph_get_remote_port( { return NULL; } +static inline struct device_node *of_graph_get_remote_node( + const struct device_node *node, + u32 port, u32 endpoint) +{ + return NULL; +} #endif /* CONFIG_OF */ diff --git a/include/linux/oom.h b/include/linux/oom.h index b4e36e92bc87..8a266e2be5a6 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h @@ -2,7 +2,7 @@ #define __INCLUDE_LINUX_OOM_H -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/types.h> #include <linux/nodemask.h> #include <uapi/linux/oom.h> diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 324c8dbad1e1..84943e8057ef 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -266,7 +266,6 @@ static inline struct page *find_get_page_flags(struct address_space *mapping, /** * find_lock_page - locate, pin and lock a pagecache page - * pagecache_get_page - find and get a page reference * @mapping: the address_space to search * @offset: the page index * @@ -482,19 +481,11 @@ static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm, } /* - * This is exported only for wait_on_page_locked/wait_on_page_writeback, - * and for filesystems which need to wait on PG_private. + * This is exported only for wait_on_page_locked/wait_on_page_writeback, etc., + * and should not be used directly. */ extern void wait_on_page_bit(struct page *page, int bit_nr); extern int wait_on_page_bit_killable(struct page *page, int bit_nr); -extern void wake_up_page_bit(struct page *page, int bit_nr); - -static inline void wake_up_page(struct page *page, int bit) -{ - if (!PageWaiters(page)) - return; - wake_up_page_bit(page, bit); -} /* * Wait for a page to be unlocked. diff --git a/include/linux/parman.h b/include/linux/parman.h new file mode 100644 index 000000000000..3c8cccc7d4da --- /dev/null +++ b/include/linux/parman.h @@ -0,0 +1,76 @@ +/* + * include/linux/parman.h - Manager for linear priority array areas + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. 
+ * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _PARMAN_H
+#define _PARMAN_H
+
+#include <linux/list.h>
+
+enum parman_algo_type {
+	PARMAN_ALGO_TYPE_LSORT,
+};
+
+struct parman_item {
+	struct list_head list;
+	unsigned long index;
+};
+
+struct parman_prio {
+	struct list_head list;
+	struct list_head item_list;
+	unsigned long priority;
+};
+
+struct parman_ops {
+	unsigned long base_count;
+	unsigned long resize_step;
+	int (*resize)(void *priv, unsigned long new_count);
+	void (*move)(void *priv, unsigned long from_index,
+		     unsigned long to_index, unsigned long count);
+	enum parman_algo_type algo;
+};
+
+struct parman;
+
+struct parman *parman_create(const struct parman_ops *ops, void *priv);
+void parman_destroy(struct parman *parman);
+void parman_prio_init(struct parman *parman, struct parman_prio *prio,
+		      unsigned long priority);
+void parman_prio_fini(struct parman_prio *prio);
+int parman_item_add(struct parman *parman, struct parman_prio *prio,
+		    struct parman_item *item);
+void parman_item_remove(struct parman *parman, struct parman_prio *prio,
+			struct parman_item *item);
+
+#endif
diff --git a/include/linux/pci.h b/include/linux/pci.h
index e2d1a124216a..eb3da1a04e6c 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -678,9 +678,6 @@ struct pci_error_handlers {
 	/* MMIO has been re-enabled, but not DMA */
 	pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev);
 
-	/* PCI Express link has been reset */
-	pci_ers_result_t (*link_reset)(struct pci_dev *dev);
-
 	/* PCI slot has been reset */
 	pci_ers_result_t (*slot_reset)(struct pci_dev *dev);
 
@@ -885,7 +882,6 @@ void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type);
 void pci_sort_breadthfirst(void);
 #define dev_is_pci(d) ((d)->bus == &pci_bus_type)
 #define dev_is_pf(d) ((dev_is_pci(d) ?
to_pci_dev(d)->is_physfn : false)) -#define dev_num_vf(d) ((dev_is_pci(d) ? pci_num_vf(to_pci_dev(d)) : 0)) /* Generic PCI functions exported to card drivers */ @@ -1309,14 +1305,7 @@ void pci_msix_shutdown(struct pci_dev *dev); void pci_disable_msix(struct pci_dev *dev); void pci_restore_msi_state(struct pci_dev *dev); int pci_msi_enabled(void); -int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec); -static inline int pci_enable_msi_exact(struct pci_dev *dev, int nvec) -{ - int rc = pci_enable_msi_range(dev, nvec, nvec); - if (rc < 0) - return rc; - return 0; -} +int pci_enable_msi(struct pci_dev *dev); int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, int minvec, int maxvec); static inline int pci_enable_msix_exact(struct pci_dev *dev, @@ -1334,6 +1323,7 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, void pci_free_irq_vectors(struct pci_dev *dev); int pci_irq_vector(struct pci_dev *dev, unsigned int nr); const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, int vec); +int pci_irq_get_node(struct pci_dev *pdev, int vec); #else static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; } @@ -1347,10 +1337,7 @@ static inline void pci_msix_shutdown(struct pci_dev *dev) { } static inline void pci_disable_msix(struct pci_dev *dev) { } static inline void pci_restore_msi_state(struct pci_dev *dev) { } static inline int pci_msi_enabled(void) { return 0; } -static inline int pci_enable_msi_range(struct pci_dev *dev, int minvec, - int maxvec) -{ return -ENOSYS; } -static inline int pci_enable_msi_exact(struct pci_dev *dev, int nvec) +static inline int pci_enable_msi(struct pci_dev *dev) { return -ENOSYS; } static inline int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, int minvec, int maxvec) @@ -1384,6 +1371,11 @@ static inline const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, { return cpu_possible_mask; } + +static inline int pci_irq_get_node(struct pci_dev *pdev, int vec) +{ + return first_online_node; +} #endif static inline int @@ -1426,8 +1418,6 @@ static inline void pcie_set_ecrc_checking(struct pci_dev *dev) { } static inline void pcie_ecrc_get_policy(char *str) { } #endif -#define pci_enable_msi(pdev) pci_enable_msi_exact(pdev, 1) - #ifdef CONFIG_HT_IRQ /* The functions a driver should call */ int ht_create_irq(struct pci_dev *dev, int idx); @@ -1630,7 +1620,6 @@ static inline int pci_get_new_domain_nr(void) { return -ENOSYS; } #define dev_is_pci(d) (false) #define dev_is_pf(d) (false) -#define dev_num_vf(d) (0) #endif /* CONFIG_PCI */ /* Include architecture-dependent settings and functions */ diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 73dda0edcb97..a4f77feecbb0 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2516,6 +2516,8 @@ #define PCI_DEVICE_ID_KORENIX_JETCARDF2 0x1700 #define PCI_DEVICE_ID_KORENIX_JETCARDF3 0x17ff +#define PCI_VENDOR_ID_HUAWEI 0x19e5 + #define PCI_VENDOR_ID_NETRONOME 0x19ee #define PCI_DEVICE_ID_NETRONOME_NFP3200 0x3200 #define PCI_DEVICE_ID_NETRONOME_NFP3240 0x3240 diff --git a/include/linux/perf_regs.h b/include/linux/perf_regs.h index a5f98d53d732..9b7dd59fe28d 100644 --- a/include/linux/perf_regs.h +++ b/include/linux/perf_regs.h @@ -1,6 +1,8 @@ #ifndef _LINUX_PERF_REGS_H #define _LINUX_PERF_REGS_H +#include <linux/sched/task_stack.h> + struct perf_regs { __u64 abi; struct pt_regs *regs; diff --git a/include/linux/pfn_t.h b/include/linux/pfn_t.h index 
a3d90b9da18d..a49b3259cad7 100644 --- a/include/linux/pfn_t.h +++ b/include/linux/pfn_t.h @@ -15,6 +15,12 @@ #define PFN_DEV (1ULL << (BITS_PER_LONG_LONG - 3)) #define PFN_MAP (1ULL << (BITS_PER_LONG_LONG - 4)) +#define PFN_FLAGS_TRACE \ + { PFN_SG_CHAIN, "SG_CHAIN" }, \ + { PFN_SG_LAST, "SG_LAST" }, \ + { PFN_DEV, "DEV" }, \ + { PFN_MAP, "MAP" } + static inline pfn_t __pfn_to_pfn_t(unsigned long pfn, u64 flags) { pfn_t pfn_t = { .val = pfn | (flags & PFN_FLAGS_MASK), }; @@ -84,6 +90,13 @@ static inline pmd_t pfn_t_pmd(pfn_t pfn, pgprot_t pgprot) { return pfn_pmd(pfn_t_to_pfn(pfn), pgprot); } + +#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD +static inline pud_t pfn_t_pud(pfn_t pfn, pgprot_t pgprot) +{ + return pfn_pud(pfn_t_to_pfn(pfn), pgprot); +} +#endif #endif #ifdef __HAVE_ARCH_PTE_DEVMAP @@ -100,5 +113,10 @@ static inline bool pfn_t_devmap(pfn_t pfn) } pte_t pte_mkdevmap(pte_t pte); pmd_t pmd_mkdevmap(pmd_t pmd); +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ + defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) +pud_t pud_mkdevmap(pud_t pud); #endif +#endif /* __HAVE_ARCH_PTE_DEVMAP */ + #endif /* _LINUX_PFN_T_H_ */ diff --git a/include/linux/phy.h b/include/linux/phy.h index 7fc1105605bf..43a774873aa9 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -81,6 +81,9 @@ typedef enum { PHY_INTERFACE_MODE_MOCA, PHY_INTERFACE_MODE_QSGMII, PHY_INTERFACE_MODE_TRGMII, + PHY_INTERFACE_MODE_1000BASEX, + PHY_INTERFACE_MODE_2500BASEX, + PHY_INTERFACE_MODE_RXAUI, PHY_INTERFACE_MODE_MAX, } phy_interface_t; @@ -141,6 +144,12 @@ static inline const char *phy_modes(phy_interface_t interface) return "qsgmii"; case PHY_INTERFACE_MODE_TRGMII: return "trgmii"; + case PHY_INTERFACE_MODE_1000BASEX: + return "1000base-x"; + case PHY_INTERFACE_MODE_2500BASEX: + return "2500base-x"; + case PHY_INTERFACE_MODE_RXAUI: + return "rxaui"; default: return "unknown"; } @@ -157,11 +166,7 @@ static inline const char *phy_modes(phy_interface_t interface) /* Used when trying to connect to a specific phy (mii bus id:phy device id) */ #define PHY_ID_FMT "%s:%02x" -/* - * Need to be a little smaller than phydev->dev.bus_id to leave room - * for the ":%02x" - */ -#define MII_BUS_ID_SIZE (20 - 3) +#define MII_BUS_ID_SIZE 61 /* Or MII_ADDR_C45 into regnum for read/write on mii_bus to enable the 21 bit IEEE 802.3ae clause 45 addressing mode used by 10GIGE phy chips. 
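A sketch of PHY_ID_FMT and the enlarged MII_BUS_ID_SIZE in use when attaching to a PHY (variables and the adjust-link callback are hypothetical):

        char phy_id[MII_BUS_ID_SIZE + 3];

        /* bus id plus the ":%02x" address suffix, e.g. "e0022000.mdio:01" */
        snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, bus->id, addr);
        phydev = phy_connect(ndev, phy_id, example_adjust_link,
                             PHY_INTERFACE_MODE_RGMII);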
*/ @@ -631,7 +636,7 @@ struct phy_driver { /* A Structure for boards to register fixups with the PHY Lib */ struct phy_fixup { struct list_head list; - char bus_id[20]; + char bus_id[MII_BUS_ID_SIZE + 3]; u32 phy_uid; u32 phy_uid_mask; int (*run)(struct phy_device *phydev); @@ -802,6 +807,9 @@ int phy_stop_interrupts(struct phy_device *phydev); static inline int phy_read_status(struct phy_device *phydev) { + if (!phydev->drv) + return -EIO; + return phydev->drv->read_status(phydev); } @@ -829,6 +837,10 @@ int genphy_read_status(struct phy_device *phydev); int genphy_suspend(struct phy_device *phydev); int genphy_resume(struct phy_device *phydev); int genphy_soft_reset(struct phy_device *phydev); +static inline int genphy_no_soft_reset(struct phy_device *phydev) +{ + return 0; +} void phy_driver_unregister(struct phy_driver *drv); void phy_drivers_unregister(struct phy_driver *drv, int n); int phy_driver_register(struct phy_driver *new_driver, struct module *owner); @@ -881,6 +893,25 @@ void mdio_bus_exit(void); extern struct bus_type mdio_bus_type; +struct mdio_board_info { + const char *bus_id; + char modalias[MDIO_NAME_SIZE]; + int mdio_addr; + const void *platform_data; +}; + +#if IS_ENABLED(CONFIG_PHYLIB) +int mdiobus_register_board_info(const struct mdio_board_info *info, + unsigned int n); +#else +static inline int mdiobus_register_board_info(const struct mdio_board_info *i, + unsigned int n) +{ + return 0; +} +#endif + + /** * module_phy_driver() - Helper macro for registering PHY drivers * @__phy_drivers: array of PHY drivers to register diff --git a/include/linux/pid.h b/include/linux/pid.h index 23705a53abba..4d179316e431 100644 --- a/include/linux/pid.h +++ b/include/linux/pid.h @@ -1,7 +1,7 @@ #ifndef _LINUX_PID_H #define _LINUX_PID_H -#include <linux/rcupdate.h> +#include <linux/rculist.h> enum pid_type { @@ -191,10 +191,10 @@ pid_t pid_vnr(struct pid *pid); #define do_each_pid_thread(pid, type, task) \ do_each_pid_task(pid, type, task) { \ struct task_struct *tg___ = task; \ - do { + for_each_thread(tg___, task) { #define while_each_pid_thread(pid, type, task) \ - } while_each_thread(tg___, task); \ + } \ task = tg___; \ } while_each_pid_task(pid, type, task) #endif /* _LINUX_PID_H */ diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h index 34cce96741bc..c2a989dee876 100644 --- a/include/linux/pid_namespace.h +++ b/include/linux/pid_namespace.h @@ -21,6 +21,12 @@ struct pidmap { struct fs_pin; +enum { /* definitions for pid_namespace's hide_pid field */ + HIDEPID_OFF = 0, + HIDEPID_NO_ACCESS = 1, + HIDEPID_INVISIBLE = 2, +}; + struct pid_namespace { struct kref kref; struct pidmap pidmap[PIDMAP_ENTRIES]; diff --git a/include/linux/platform_data/asoc-s3c.h b/include/linux/platform_data/asoc-s3c.h index 15bf56ee8af7..90641a5daaf0 100644 --- a/include/linux/platform_data/asoc-s3c.h +++ b/include/linux/platform_data/asoc-s3c.h @@ -18,7 +18,7 @@ extern void s3c64xx_ac97_setup_gpio(int); -struct samsung_i2s { +struct samsung_i2s_type { /* If the Primary DAI has 5.1 Channels */ #define QUIRK_PRI_6CHAN (1 << 0) /* If the I2S block has a Stereo Overlay Channel */ @@ -47,7 +47,5 @@ struct s3c_audio_pdata { void *dma_capture; void *dma_play_sec; void *dma_capture_mic; - union { - struct samsung_i2s i2s; - } type; + struct samsung_i2s_type type; }; diff --git a/include/linux/platform_data/gpio-davinci.h b/include/linux/platform_data/gpio-davinci.h index 6ace3fd32b6a..90ae19ca828f 100644 --- a/include/linux/platform_data/gpio-davinci.h +++ 
b/include/linux/platform_data/gpio-davinci.h @@ -21,23 +21,28 @@ #include <asm-generic/gpio.h> +#define MAX_REGS_BANKS 5 + struct davinci_gpio_platform_data { u32 ngpio; u32 gpio_unbanked; }; +struct davinci_gpio_irq_data { + void __iomem *regs; + struct davinci_gpio_controller *chip; + int bank_num; +}; struct davinci_gpio_controller { struct gpio_chip chip; struct irq_domain *irq_domain; /* Serialize access to GPIO registers */ spinlock_t lock; - void __iomem *regs; - void __iomem *set_data; - void __iomem *clr_data; - void __iomem *in_data; + void __iomem *regs[MAX_REGS_BANKS]; int gpio_unbanked; - unsigned gpio_irq; + unsigned int base_irq; + unsigned int base; }; /* diff --git a/include/linux/platform_data/rtc-m48t86.h b/include/linux/platform_data/rtc-m48t86.h deleted file mode 100644 index 915d6b4f0f89..000000000000 --- a/include/linux/platform_data/rtc-m48t86.h +++ /dev/null @@ -1,16 +0,0 @@ -/* - * ST M48T86 / Dallas DS12887 RTC driver - * Copyright (c) 2006 Tower Technologies - * - * Author: Alessandro Zummo <a.zummo@towertech.it> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. -*/ - -struct m48t86_ops -{ - void (*writebyte)(unsigned char value, unsigned long addr); - unsigned char (*readbyte)(unsigned long addr); -}; diff --git a/include/linux/platform_data/ti-aemif.h b/include/linux/platform_data/ti-aemif.h new file mode 100644 index 000000000000..ac72e115093c --- /dev/null +++ b/include/linux/platform_data/ti-aemif.h @@ -0,0 +1,23 @@ +/* + * TI DaVinci AEMIF platform glue. + * + * Copyright (C) 2017 BayLibre SAS + * + * Author: + * Bartosz Golaszewski <bgolaszewski@baylibre.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __TI_DAVINCI_AEMIF_DATA_H__ +#define __TI_DAVINCI_AEMIF_DATA_H__ + +#include <linux/of_platform.h> + +struct aemif_platform_data { + struct of_dev_auxdata *dev_lookup; +}; + +#endif /* __TI_DAVINCI_AEMIF_DATA_H__ */ diff --git a/include/linux/platform_data/video-imxfb.h b/include/linux/platform_data/video-imxfb.h index 18e908324549..a5c0a71ec914 100644 --- a/include/linux/platform_data/video-imxfb.h +++ b/include/linux/platform_data/video-imxfb.h @@ -47,10 +47,6 @@ #define LSCR1_GRAY2(x) (((x) & 0xf) << 4) #define LSCR1_GRAY1(x) (((x) & 0xf)) -#define DMACR_BURST (1 << 31) -#define DMACR_HM(x) (((x) & 0xf) << 16) -#define DMACR_TM(x) ((x) & 0xf) - struct imx_fb_videomode { struct fb_videomode mode; u32 pcr; diff --git a/include/linux/platform_data/x86/clk-pmc-atom.h b/include/linux/platform_data/x86/clk-pmc-atom.h new file mode 100644 index 000000000000..3ab892208343 --- /dev/null +++ b/include/linux/platform_data/x86/clk-pmc-atom.h @@ -0,0 +1,44 @@ +/* + * Intel Atom platform clocks for BayTrail and CherryTrail SoC. + * + * Copyright (C) 2016, Intel Corporation + * Author: Irina Tirdea <irina.tirdea@intel.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. + */ + +#ifndef __PLATFORM_DATA_X86_CLK_PMC_ATOM_H +#define __PLATFORM_DATA_X86_CLK_PMC_ATOM_H + +/** + * struct pmc_clk - PMC platform clock configuration + * + * @name: identifier, typically pmc_plt_clk_<x>, x=[0..5] + * @freq: in Hz, 19.2MHz and 25MHz (Baytrail only) supported + * @parent_name: one of 'xtal' or 'osc' + */ +struct pmc_clk { + const char *name; + unsigned long freq; + const char *parent_name; +}; + +/** + * struct pmc_clk_data - common PMC clock configuration + * + * @base: PMC clock register base offset + * @clks: pointer to set of registered clocks, typically 0..5 + */ +struct pmc_clk_data { + void __iomem *base; + const struct pmc_clk *clks; +}; + +#endif /* __PLATFORM_DATA_X86_CLK_PMC_ATOM_H */ diff --git a/include/linux/platform_data/x86/pmc_atom.h b/include/linux/platform_data/x86/pmc_atom.h new file mode 100644 index 000000000000..e4905fe69c38 --- /dev/null +++ b/include/linux/platform_data/x86/pmc_atom.h @@ -0,0 +1,158 @@ +/* + * Intel Atom SOC Power Management Controller Header File + * Copyright (c) 2014, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#ifndef PMC_ATOM_H +#define PMC_ATOM_H + +/* ValleyView Power Control Unit PCI Device ID */ +#define PCI_DEVICE_ID_VLV_PMC 0x0F1C +/* CherryTrail Power Control Unit PCI Device ID */ +#define PCI_DEVICE_ID_CHT_PMC 0x229C + +/* PMC Memory mapped IO registers */ +#define PMC_BASE_ADDR_OFFSET 0x44 +#define PMC_BASE_ADDR_MASK 0xFFFFFE00 +#define PMC_MMIO_REG_LEN 0x100 +#define PMC_REG_BIT_WIDTH 32 + +/* BIOS uses FUNC_DIS to disable specific functions */ +#define PMC_FUNC_DIS 0x34 +#define PMC_FUNC_DIS_2 0x38 + +/* CHT specific bits in FUNC_DIS2 register */ +#define BIT_FD_GMM BIT(3) +#define BIT_FD_ISH BIT(4) + +/* S0ix wake event control */ +#define PMC_S0IX_WAKE_EN 0x3C + +#define BIT_LPC_CLOCK_RUN BIT(4) +#define BIT_SHARED_IRQ_GPSC BIT(5) +#define BIT_ORED_DEDICATED_IRQ_GPSS BIT(18) +#define BIT_ORED_DEDICATED_IRQ_GPSC BIT(19) +#define BIT_SHARED_IRQ_GPSS BIT(20) + +#define PMC_WAKE_EN_SETTING ~(BIT_LPC_CLOCK_RUN | \ + BIT_SHARED_IRQ_GPSC | \ + BIT_ORED_DEDICATED_IRQ_GPSS | \ + BIT_ORED_DEDICATED_IRQ_GPSC | \ + BIT_SHARED_IRQ_GPSS) + +/* The timers accumulate time spent in sleep state */ +#define PMC_S0IR_TMR 0x80 +#define PMC_S0I1_TMR 0x84 +#define PMC_S0I2_TMR 0x88 +#define PMC_S0I3_TMR 0x8C +#define PMC_S0_TMR 0x90 +/* Sleep state counter is in units of 32us */ +#define PMC_TMR_SHIFT 5 + +/* Power status of power islands */ +#define PMC_PSS 0x98 + +#define PMC_PSS_BIT_GBE BIT(0) +#define PMC_PSS_BIT_SATA BIT(1) +#define PMC_PSS_BIT_HDA BIT(2) +#define PMC_PSS_BIT_SEC BIT(3) +#define PMC_PSS_BIT_PCIE BIT(4) +#define PMC_PSS_BIT_LPSS BIT(5) +#define PMC_PSS_BIT_LPE BIT(6) +#define PMC_PSS_BIT_DFX BIT(7) +#define PMC_PSS_BIT_USH_CTRL BIT(8) +#define PMC_PSS_BIT_USH_SUS BIT(9) +#define PMC_PSS_BIT_USH_VCCS BIT(10) +#define PMC_PSS_BIT_USH_VCCA BIT(11) +#define PMC_PSS_BIT_OTG_CTRL BIT(12) +#define PMC_PSS_BIT_OTG_VCCS BIT(13) +#define PMC_PSS_BIT_OTG_VCCA_CLK BIT(14) +#define PMC_PSS_BIT_OTG_VCCA BIT(15) +#define
PMC_PSS_BIT_USB BIT(16) +#define PMC_PSS_BIT_USB_SUS BIT(17) + +/* CHT specific bits in PSS register */ +#define PMC_PSS_BIT_CHT_UFS BIT(7) +#define PMC_PSS_BIT_CHT_UXD BIT(11) +#define PMC_PSS_BIT_CHT_UXD_FD BIT(12) +#define PMC_PSS_BIT_CHT_UX_ENG BIT(15) +#define PMC_PSS_BIT_CHT_USB_SUS BIT(16) +#define PMC_PSS_BIT_CHT_GMM BIT(17) +#define PMC_PSS_BIT_CHT_ISH BIT(18) +#define PMC_PSS_BIT_CHT_DFX_MASTER BIT(26) +#define PMC_PSS_BIT_CHT_DFX_CLUSTER1 BIT(27) +#define PMC_PSS_BIT_CHT_DFX_CLUSTER2 BIT(28) +#define PMC_PSS_BIT_CHT_DFX_CLUSTER3 BIT(29) +#define PMC_PSS_BIT_CHT_DFX_CLUSTER4 BIT(30) +#define PMC_PSS_BIT_CHT_DFX_CLUSTER5 BIT(31) + +/* These registers reflect D3 status of functions */ +#define PMC_D3_STS_0 0xA0 + +#define BIT_LPSS1_F0_DMA BIT(0) +#define BIT_LPSS1_F1_PWM1 BIT(1) +#define BIT_LPSS1_F2_PWM2 BIT(2) +#define BIT_LPSS1_F3_HSUART1 BIT(3) +#define BIT_LPSS1_F4_HSUART2 BIT(4) +#define BIT_LPSS1_F5_SPI BIT(5) +#define BIT_LPSS1_F6_XXX BIT(6) +#define BIT_LPSS1_F7_XXX BIT(7) +#define BIT_SCC_EMMC BIT(8) +#define BIT_SCC_SDIO BIT(9) +#define BIT_SCC_SDCARD BIT(10) +#define BIT_SCC_MIPI BIT(11) +#define BIT_HDA BIT(12) +#define BIT_LPE BIT(13) +#define BIT_OTG BIT(14) +#define BIT_USH BIT(15) +#define BIT_GBE BIT(16) +#define BIT_SATA BIT(17) +#define BIT_USB_EHCI BIT(18) +#define BIT_SEC BIT(19) +#define BIT_PCIE_PORT0 BIT(20) +#define BIT_PCIE_PORT1 BIT(21) +#define BIT_PCIE_PORT2 BIT(22) +#define BIT_PCIE_PORT3 BIT(23) +#define BIT_LPSS2_F0_DMA BIT(24) +#define BIT_LPSS2_F1_I2C1 BIT(25) +#define BIT_LPSS2_F2_I2C2 BIT(26) +#define BIT_LPSS2_F3_I2C3 BIT(27) +#define BIT_LPSS2_F4_I2C4 BIT(28) +#define BIT_LPSS2_F5_I2C5 BIT(29) +#define BIT_LPSS2_F6_I2C6 BIT(30) +#define BIT_LPSS2_F7_I2C7 BIT(31) + +#define PMC_D3_STS_1 0xA4 +#define BIT_SMB BIT(0) +#define BIT_OTG_SS_PHY BIT(1) +#define BIT_USH_SS_PHY BIT(2) +#define BIT_DFX BIT(3) + +/* CHT specific bits in PMC_D3_STS_1 register */ +#define BIT_STS_GMM BIT(1) +#define BIT_STS_ISH BIT(2) + +/* PMC I/O Registers */ +#define ACPI_BASE_ADDR_OFFSET 0x40 +#define ACPI_BASE_ADDR_MASK 0xFFFFFE00 +#define ACPI_MMIO_REG_LEN 0x100 + +#define PM1_CNT 0x4 +#define SLEEP_TYPE_MASK 0xFFFFECFF +#define SLEEP_TYPE_S5 0x1C00 +#define SLEEP_ENABLE 0x2000 + +extern int pmc_atom_read(int offset, u32 *value); +extern int pmc_atom_write(int offset, u32 value); + +#endif /* PMC_ATOM_H */ diff --git a/include/linux/pm.h b/include/linux/pm.h index f926af41e122..a0894bc52bb4 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -64,24 +64,7 @@ typedef struct pm_message { } pm_message_t; /** - * struct dev_pm_ops - device PM callbacks - * - * Several device power state transitions are externally visible, affecting - * the state of pending I/O queues and (for drivers that touch hardware) - * interrupts, wakeups, DMA, and other hardware state. There may also be - * internal transitions to various low-power modes which are transparent - * to the rest of the driver stack (such as a driver that's ON gating off - * clocks which are not in active use). - * - * The externally visible transitions are handled with the help of callbacks - * included in this structure in such a way that two levels of callbacks are - * involved. First, the PM core executes callbacks provided by PM domains, - * device types, classes and bus types. They are the subsystem-level callbacks - * supposed to execute callbacks provided by device drivers, although they may - * choose not to do that. 
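A usage note on the pmc_atom interface above: the sleep-state timers tick once per 32us, which is exactly what PMC_TMR_SHIFT encodes (1 << 5 == 32). A minimal sketch of converting a raw counter into microseconds with the pmc_atom_read() accessor; the helper name is hypothetical, not part of the header:

#include <linux/types.h>
#include <linux/platform_data/x86/pmc_atom.h>

/* Hypothetical helper: report S0i3 residency in microseconds. */
static int pmc_s0i3_residency_us(u64 *us)
{
	u32 ticks;
	int ret;

	ret = pmc_atom_read(PMC_S0I3_TMR, &ticks);	/* raw 32us ticks */
	if (ret)
		return ret;

	*us = (u64)ticks << PMC_TMR_SHIFT;		/* 1 tick = 32us */
	return 0;
}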
If the driver callbacks are executed, they have to - * collaborate with the subsystem-level callbacks to achieve the goals - * appropriate for the given system transition, given transition phase and the - * subsystem the device belongs to. + * struct dev_pm_ops - device PM callbacks. * * @prepare: The principal role of this callback is to prevent new children of * the device from being registered after it has returned (the driver's @@ -240,34 +223,6 @@ typedef struct pm_message { * driver's interrupt handler, which is guaranteed not to run while * @restore_noirq() is being executed. Analogous to @resume_noirq(). * - * All of the above callbacks, except for @complete(), return error codes. - * However, the error codes returned by the resume operations, @resume(), - * @thaw(), @restore(), @resume_noirq(), @thaw_noirq(), and @restore_noirq(), do - * not cause the PM core to abort the resume transition during which they are - * returned. The error codes returned in those cases are only printed by the PM - * core to the system logs for debugging purposes. Still, it is recommended - * that drivers only return error codes from their resume methods in case of an - * unrecoverable failure (i.e. when the device being handled refuses to resume - * and becomes unusable) to allow us to modify the PM core in the future, so - * that it can avoid attempting to handle devices that failed to resume and - * their children. - * - * It is allowed to unregister devices while the above callbacks are being - * executed. However, a callback routine must NOT try to unregister the device - * it was called for, although it may unregister children of that device (for - * example, if it detects that a child was unplugged while the system was - * asleep). - * - * Refer to Documentation/power/admin-guide/devices.rst for more information about the role - * of the above callbacks in the system suspend process. - * - * There also are callbacks related to runtime power management of devices. - * Again, these callbacks are executed by the PM core only for subsystems - * (PM domains, device types, classes and bus types) and the subsystem-level - * callbacks are supposed to invoke the driver callbacks. Moreover, the exact - * actions to be performed by a device driver's callbacks generally depend on - * the platform and subsystem the device belongs to. - * * @runtime_suspend: Prepare the device for a condition in which it won't be * able to communicate with the CPU(s) and RAM due to power management. * This need not mean that the device should be put into a low-power state. @@ -287,11 +242,51 @@ typedef struct pm_message { * Check these conditions, and return 0 if it's appropriate to let the PM * core queue a suspend request for the device. * - * Refer to Documentation/power/runtime_pm.txt for more information about the - * role of the above callbacks in device runtime power management. + * Several device power state transitions are externally visible, affecting + * the state of pending I/O queues and (for drivers that touch hardware) + * interrupts, wakeups, DMA, and other hardware state. There may also be + * internal transitions to various low-power modes which are transparent + * to the rest of the driver stack (such as a driver that's ON gating off + * clocks which are not in active use). * + * The externally visible transitions are handled with the help of callbacks + * included in this structure in such a way that, typically, two levels of + * callbacks are involved. 
First, the PM core executes callbacks provided by PM + * domains, device types, classes and bus types. They are the subsystem-level + * callbacks expected to execute callbacks provided by device drivers, although + * they may choose not to do that. If the driver callbacks are executed, they + * have to collaborate with the subsystem-level callbacks to achieve the goals + * appropriate for the given system transition, given transition phase and the + * subsystem the device belongs to. + * + * All of the above callbacks, except for @complete(), return error codes. + * However, the error codes returned by @resume(), @thaw(), @restore(), + * @resume_noirq(), @thaw_noirq(), and @restore_noirq(), do not cause the PM + * core to abort the resume transition during which they are returned. The + * error codes returned in those cases are only printed to the system logs for + * debugging purposes. Still, it is recommended that drivers only return error + * codes from their resume methods in case of an unrecoverable failure (i.e. + * when the device being handled refuses to resume and becomes unusable) to + * allow the PM core to be modified in the future, so that it can avoid + * attempting to handle devices that failed to resume and their children. + * + * It is allowed to unregister devices while the above callbacks are being + * executed. However, a callback routine MUST NOT try to unregister the device + * it was called for, although it may unregister children of that device (for + * example, if it detects that a child was unplugged while the system was + * asleep). + * + * There also are callbacks related to runtime power management of devices. + * Again, as a rule these callbacks are executed by the PM core for subsystems + * (PM domains, device types, classes and bus types) and the subsystem-level + * callbacks are expected to invoke the driver callbacks. Moreover, the exact + * actions to be performed by a device driver's callbacks generally depend on + * the platform and subsystem the device belongs to. + * + * Refer to Documentation/power/runtime_pm.txt for more information about the + * role of the @runtime_suspend(), @runtime_resume() and @runtime_idle() + * callbacks in device runtime power management. */ - struct dev_pm_ops { int (*prepare)(struct device *dev); void (*complete)(struct device *dev); @@ -391,7 +386,7 @@ const struct dev_pm_ops name = { \ SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \ } -/** +/* * PM_EVENT_ messages * * The following PM_EVENT_ messages are defined for the internal use of the PM @@ -487,7 +482,7 @@ const struct dev_pm_ops name = { \ #define PMSG_IS_AUTO(msg) (((msg).event & PM_EVENT_AUTO) != 0) -/** +/* * Device run-time power management status. * * These status labels are used internally by the PM core to indicate the @@ -517,7 +512,7 @@ enum rpm_status { RPM_SUSPENDING, }; -/** +/* * Device run-time power management request types. * * RPM_REQ_NONE Do nothing. @@ -616,15 +611,18 @@ extern void update_pm_runtime_accounting(struct device *dev); extern int dev_pm_get_subsys_data(struct device *dev); extern void dev_pm_put_subsys_data(struct device *dev); -/* - * Power domains provide callbacks that are executed during system suspend, - * hibernation, system resume and during runtime PM transitions along with - * subsystem-level and driver-level callbacks. +/** + * struct dev_pm_domain - power management domain representation. * + * @ops: Power management operations associated with this domain. * @detach: Called when removing a device from the domain. 
* @activate: Called before executing probe routines for bus types and drivers. * @sync: Called after successful driver probe. * @dismiss: Called after unsuccessful driver probe and after driver removal. + * + * Power domains provide callbacks that are executed during system suspend, + * hibernation, system resume and during runtime PM transitions instead of + * subsystem-level and driver-level callbacks. */ struct dev_pm_domain { struct dev_pm_ops ops; diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h index d4d34791e463..032b55909145 100644 --- a/include/linux/pm_qos.h +++ b/include/linux/pm_qos.h @@ -146,8 +146,6 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier); int dev_pm_qos_remove_notifier(struct device *dev, struct notifier_block *notifier); -int dev_pm_qos_add_global_notifier(struct notifier_block *notifier); -int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier); void dev_pm_qos_constraints_init(struct device *dev); void dev_pm_qos_constraints_destroy(struct device *dev); int dev_pm_qos_add_ancestor_request(struct device *dev, @@ -172,6 +170,12 @@ static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return dev->power.qos->flags_req->data.flr.flags; } + +static inline s32 dev_pm_qos_raw_read_value(struct device *dev) +{ + return IS_ERR_OR_NULL(dev->power.qos) ? + 0 : pm_qos_read_value(&dev->power.qos->resume_latency); +} #else static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask) @@ -199,12 +203,6 @@ static inline int dev_pm_qos_add_notifier(struct device *dev, static inline int dev_pm_qos_remove_notifier(struct device *dev, struct notifier_block *notifier) { return 0; } -static inline int dev_pm_qos_add_global_notifier( - struct notifier_block *notifier) - { return 0; } -static inline int dev_pm_qos_remove_global_notifier( - struct notifier_block *notifier) - { return 0; } static inline void dev_pm_qos_constraints_init(struct device *dev) { dev->power.power_state = PMSG_ON; @@ -236,6 +234,7 @@ static inline void dev_pm_qos_hide_latency_tolerance(struct device *dev) {} static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) { return 0; } static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; } +static inline s32 dev_pm_qos_raw_read_value(struct device *dev) { return 0; } #endif #endif diff --git a/include/linux/preempt.h b/include/linux/preempt.h index 7eeceac52dea..cae461224948 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -55,6 +55,27 @@ /* We use the MSB mostly because it's available */ #define PREEMPT_NEED_RESCHED 0x80000000 +#define PREEMPT_DISABLED (PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED) + +/* + * Disable preemption until the scheduler is running -- use an unconditional + * value so that it also works on !PREEMPT_COUNT kernels. + * + * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count(). + */ +#define INIT_PREEMPT_COUNT PREEMPT_OFFSET + +/* + * Initial preempt_count value; reflects the preempt_count schedule invariant + * which states that during context switches: + * + * preempt_count() == 2*PREEMPT_DISABLE_OFFSET + * + * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels. + * Note: See finish_task_switch().
+ */ +#define FORK_PREEMPT_COUNT (2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED) + /* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */ #include <asm/preempt.h> diff --git a/include/linux/prime_numbers.h b/include/linux/prime_numbers.h new file mode 100644 index 000000000000..14ec4f567342 --- /dev/null +++ b/include/linux/prime_numbers.h @@ -0,0 +1,37 @@ +#ifndef __LINUX_PRIME_NUMBERS_H +#define __LINUX_PRIME_NUMBERS_H + +#include <linux/types.h> + +bool is_prime_number(unsigned long x); +unsigned long next_prime_number(unsigned long x); + +/** + * for_each_prime_number - iterate over each prime up to a value + * @prime: the current prime number in this iteration + * @max: the upper limit + * + * Starting from the first prime number 2 iterate over each prime number up to + * the @max value. On each iteration, @prime is set to the current prime number. + * @max should be less than ULONG_MAX to ensure termination. To begin with + * @prime set to 1 on the first iteration, use for_each_prime_number_from() + * instead. + */ +#define for_each_prime_number(prime, max) \ + for_each_prime_number_from((prime), 2, (max)) + +/** + * for_each_prime_number_from - iterate over each prime up to a value + * @prime: the current prime number in this iteration + * @from: the initial value + * @max: the upper limit + * + * Starting from @from iterate over each successive prime number up to the + * @max value. On each iteration, @prime is set to the current prime number. + * @max should be less than ULONG_MAX, and @from less than @max, to ensure + * termination. + */ +#define for_each_prime_number_from(prime, from, max) \ + for (prime = (from); prime <= (max); prime = next_prime_number(prime)) + +#endif /* !__LINUX_PRIME_NUMBERS_H */ diff --git a/include/linux/printk.h b/include/linux/printk.h index 3472cc6b7a60..571257e0f53d 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -147,17 +147,11 @@ void early_printk(const char *s, ...)
{ } #endif #ifdef CONFIG_PRINTK_NMI -extern void printk_nmi_init(void); extern void printk_nmi_enter(void); extern void printk_nmi_exit(void); -extern void printk_nmi_flush(void); -extern void printk_nmi_flush_on_panic(void); #else -static inline void printk_nmi_init(void) { } static inline void printk_nmi_enter(void) { } static inline void printk_nmi_exit(void) { } -static inline void printk_nmi_flush(void) { } -static inline void printk_nmi_flush_on_panic(void) { } #endif /* PRINTK_NMI */ #ifdef CONFIG_PRINTK @@ -209,6 +203,9 @@ void __init setup_log_buf(int early); __printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...); void dump_stack_print_info(const char *log_lvl); void show_regs_print_info(const char *log_lvl); +extern void printk_safe_init(void); +extern void printk_safe_flush(void); +extern void printk_safe_flush_on_panic(void); #else static inline __printf(1, 0) int vprintk(const char *s, va_list args) @@ -268,6 +265,18 @@ static inline void dump_stack_print_info(const char *log_lvl) static inline void show_regs_print_info(const char *log_lvl) { } + +static inline void printk_safe_init(void) +{ +} + +static inline void printk_safe_flush(void) +{ +} + +static inline void printk_safe_flush_on_panic(void) +{ +} #endif extern asmlinkage void dump_stack(void) __cold; diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h index 2052011bf9fb..6c70444da3b9 100644 --- a/include/linux/ptr_ring.h +++ b/include/linux/ptr_ring.h @@ -111,6 +111,11 @@ static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr) return 0; } +/* + * Note: resize (below) nests producer lock within consumer lock, so if you + * consume in interrupt or BH context, you must disable interrupts/BH when + * calling this. + */ static inline int ptr_ring_produce(struct ptr_ring *r, void *ptr) { int ret; @@ -242,6 +247,11 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r) return ptr; } +/* + * Note: resize (below) nests producer lock within consumer lock, so if you + * call this in interrupt or BH context, you must disable interrupts/BH when + * producing. + */ static inline void *ptr_ring_consume(struct ptr_ring *r) { void *ptr; @@ -357,7 +367,7 @@ static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue, void **old; void *ptr; - while ((ptr = ptr_ring_consume(r))) + while ((ptr = __ptr_ring_consume(r))) if (producer < size) queue[producer++] = ptr; else if (destroy) @@ -372,6 +382,12 @@ static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue, return old; } +/* + * Note: producer lock is nested within consumer lock, so if you + * resize you must make sure all uses nest correctly. + * In particular if you consume ring in interrupt or BH context, you must + * disable interrupts/BH when doing so. + */ static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp, void (*destroy)(void *)) { @@ -382,17 +398,25 @@ static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp, if (!queue) return -ENOMEM; - spin_lock_irqsave(&(r)->producer_lock, flags); + spin_lock_irqsave(&(r)->consumer_lock, flags); + spin_lock(&(r)->producer_lock); old = __ptr_ring_swap_queue(r, queue, size, gfp, destroy); - spin_unlock_irqrestore(&(r)->producer_lock, flags); + spin_unlock(&(r)->producer_lock); + spin_unlock_irqrestore(&(r)->consumer_lock, flags); kfree(old); return 0; } +/* + * Note: producer lock is nested within consumer lock, so if you + * resize you must make sure all uses nest correctly. 
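The locking notes above deserve a concrete illustration: ptr_ring_resize() takes the consumer lock and then nests the producer lock inside it, so a producer must mask whichever context the consumer runs in. A minimal sketch, assuming the consumer runs in BH context (the function name is illustrative; ptr_ring_produce_bh() is the stock variant that disables BH around the producer lock):

#include <linux/ptr_ring.h>

/* Illustrative producer for a ring whose consumer runs in BH context
 * (e.g. a NAPI poll loop): the _bh variant keeps the lock nesting safe
 * against a concurrent ptr_ring_resize(). */
static int example_produce(struct ptr_ring *r, void *item)
{
	return ptr_ring_produce_bh(r, item);	/* -ENOSPC when the ring is full */
}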
+ * In particular if you consume ring in interrupt or BH context, you must + * disable interrupts/BH when doing so. + */ static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings, int size, gfp_t gfp, void (*destroy)(void *)) @@ -412,10 +436,12 @@ static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings, } for (i = 0; i < nrings; ++i) { - spin_lock_irqsave(&(rings[i])->producer_lock, flags); + spin_lock_irqsave(&(rings[i])->consumer_lock, flags); + spin_lock(&(rings[i])->producer_lock); queues[i] = __ptr_ring_swap_queue(rings[i], queues[i], size, gfp, destroy); - spin_unlock_irqrestore(&(rings[i])->producer_lock, flags); + spin_unlock(&(rings[i])->producer_lock); + spin_unlock_irqrestore(&(rings[i])->consumer_lock, flags); } for (i = 0; i < nrings; ++i) diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index e0e539321ab9..422bc2e4cb6a 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -3,6 +3,7 @@ #include <linux/compiler.h> /* For unlikely. */ #include <linux/sched.h> /* For struct task_struct. */ +#include <linux/sched/signal.h> /* For send_sig(), same_thread_group(), etc. */ #include <linux/err.h> /* for IS_ERR_VALUE */ #include <linux/bug.h> /* For BUG_ON. */ #include <linux/pid_namespace.h> /* For task_active_pid_ns. */ diff --git a/include/linux/purgatory.h b/include/linux/purgatory.h new file mode 100644 index 000000000000..d60d4e278609 --- /dev/null +++ b/include/linux/purgatory.h @@ -0,0 +1,23 @@ +#ifndef _LINUX_PURGATORY_H +#define _LINUX_PURGATORY_H + +#include <linux/types.h> +#include <crypto/sha.h> +#include <uapi/linux/kexec.h> + +struct kexec_sha_region { + unsigned long start; + unsigned long len; +}; + +/* + * These forward declarations serve two purposes: + * + * 1) Make sparse happy when checking arch/purgatory + * 2) Document that these are required to be global so the symbol + * lookup in kexec works + */ +extern struct kexec_sha_region purgatory_sha_regions[KEXEC_SEGMENT_MAX]; +extern u8 purgatory_sha256_digest[SHA256_DIGEST_SIZE]; + +#endif diff --git a/include/linux/pwm.h b/include/linux/pwm.h index 2c6c5114c089..08fad7c6a471 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -287,8 +287,6 @@ struct pwm_ops { * @pwms: array of PWM devices allocated by the framework * @of_xlate: request a PWM device given a device tree PWM specifier * @of_pwm_n_cells: number of cells expected in the device tree PWM specifier - * @can_sleep: must be true if the .config(), .enable() or .disable() - * operations may sleep */ struct pwm_chip { struct device *dev; @@ -302,7 +300,6 @@ struct pwm_chip { struct pwm_device * (*of_xlate)(struct pwm_chip *pc, const struct of_phandle_args *args); unsigned int of_pwm_n_cells; - bool can_sleep; }; /** @@ -451,8 +448,6 @@ struct pwm_device *devm_pwm_get(struct device *dev, const char *con_id); struct pwm_device *devm_of_pwm_get(struct device *dev, struct device_node *np, const char *con_id); void devm_pwm_put(struct device *dev, struct pwm_device *pwm); - -bool pwm_can_sleep(struct pwm_device *pwm); #else static inline struct pwm_device *pwm_request(int pwm_id, const char *label) { @@ -566,11 +561,6 @@ static inline struct pwm_device *devm_of_pwm_get(struct device *dev, static inline void devm_pwm_put(struct device *dev, struct pwm_device *pwm) { } - -static inline bool pwm_can_sleep(struct pwm_device *pwm) -{ - return false; -} #endif static inline void pwm_apply_args(struct pwm_device *pwm) @@ -613,18 +603,25 @@ struct pwm_lookup { const char *con_id; unsigned int 
period; enum pwm_polarity polarity; + const char *module; /* optional, may be NULL */ }; -#define PWM_LOOKUP(_provider, _index, _dev_id, _con_id, _period, _polarity) \ - { \ - .provider = _provider, \ - .index = _index, \ - .dev_id = _dev_id, \ - .con_id = _con_id, \ - .period = _period, \ - .polarity = _polarity \ +#define PWM_LOOKUP_WITH_MODULE(_provider, _index, _dev_id, _con_id, \ + _period, _polarity, _module) \ + { \ + .provider = _provider, \ + .index = _index, \ + .dev_id = _dev_id, \ + .con_id = _con_id, \ + .period = _period, \ + .polarity = _polarity, \ + .module = _module, \ } +#define PWM_LOOKUP(_provider, _index, _dev_id, _con_id, _period, _polarity) \ + PWM_LOOKUP_WITH_MODULE(_provider, _index, _dev_id, _con_id, _period, \ + _polarity, NULL) + #if IS_ENABLED(CONFIG_PWM) void pwm_add_table(struct pwm_lookup *table, size_t num); void pwm_remove_table(struct pwm_lookup *table, size_t num); diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h index cc32ab852fbc..d32f6f1a5225 100644 --- a/include/linux/qcom_scm.h +++ b/include/linux/qcom_scm.h @@ -13,9 +13,9 @@ #ifndef __QCOM_SCM_H #define __QCOM_SCM_H -extern int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus); -extern int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus); - +#define QCOM_SCM_VERSION(major, minor) (((major) << 16) | ((minor) & 0xFF)) +#define QCOM_SCM_CPU_PWR_DOWN_L2_ON 0x0 +#define QCOM_SCM_CPU_PWR_DOWN_L2_OFF 0x1 #define QCOM_SCM_HDCP_MAX_REQ_CNT 5 struct qcom_scm_hdcp_req { @@ -23,27 +23,49 @@ struct qcom_scm_hdcp_req { u32 val; }; +#if IS_ENABLED(CONFIG_QCOM_SCM) +extern int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus); +extern int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus); extern bool qcom_scm_is_available(void); - extern bool qcom_scm_hdcp_available(void); extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, - u32 *resp); - + u32 *resp); extern bool qcom_scm_pas_supported(u32 peripheral); extern int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, - size_t size); + size_t size); extern int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, - phys_addr_t size); + phys_addr_t size); extern int qcom_scm_pas_auth_and_reset(u32 peripheral); extern int qcom_scm_pas_shutdown(u32 peripheral); - -#define QCOM_SCM_CPU_PWR_DOWN_L2_ON 0x0 -#define QCOM_SCM_CPU_PWR_DOWN_L2_OFF 0x1 - extern void qcom_scm_cpu_power_down(u32 flags); - -#define QCOM_SCM_VERSION(major, minor) (((major) << 16) | ((minor) & 0xFF)) - extern u32 qcom_scm_get_version(void); - +extern int qcom_scm_set_remote_state(u32 state, u32 id); +#else +static inline +int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus) +{ + return -ENODEV; +} +static inline +int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus) +{ + return -ENODEV; +} +static inline bool qcom_scm_is_available(void) { return false; } +static inline bool qcom_scm_hdcp_available(void) { return false; } +static inline int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, + u32 *resp) { return -ENODEV; } +static inline bool qcom_scm_pas_supported(u32 peripheral) { return false; } +static inline int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, + size_t size) { return -ENODEV; } +static inline int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, + phys_addr_t size) { return -ENODEV; } +static inline int +qcom_scm_pas_auth_and_reset(u32 peripheral) { return -ENODEV; } +static inline int qcom_scm_pas_shutdown(u32 
peripheral) { return -ENODEV; } +static inline void qcom_scm_cpu_power_down(u32 flags) {} +static inline u32 qcom_scm_get_version(void) { return 0; } +static inline int +qcom_scm_set_remote_state(u32 state, u32 id) { return -ENODEV; } +#endif #endif diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h index 734deb094618..52966b9bfde3 100644 --- a/include/linux/qed/common_hsi.h +++ b/include/linux/qed/common_hsi.h @@ -1,10 +1,35 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2016 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT.
*/ + #ifndef _COMMON_HSI_H #define _COMMON_HSI_H #include <linux/types.h> @@ -37,6 +62,7 @@ #define COMMON_QUEUE_ENTRY_MAX_BYTE_SIZE 64 #define ISCSI_CDU_TASK_SEG_TYPE 0 +#define FCOE_CDU_TASK_SEG_TYPE 0 #define RDMA_CDU_TASK_SEG_TYPE 1 #define FW_ASSERT_GENERAL_ATTN_IDX 32 @@ -180,6 +206,9 @@ #define DQ_XCM_ETH_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 #define DQ_XCM_ETH_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 #define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5 +#define DQ_XCM_FCOE_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 +#define DQ_XCM_FCOE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_FCOE_X_FERQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD5 #define DQ_XCM_ISCSI_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 #define DQ_XCM_ISCSI_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 #define DQ_XCM_ISCSI_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3 @@ -236,6 +265,7 @@ #define DQ_XCM_ETH_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) #define DQ_XCM_ETH_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) #define DQ_XCM_ETH_TPH_EN_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23) +#define DQ_XCM_FCOE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) #define DQ_XCM_ISCSI_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) #define DQ_XCM_ISCSI_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) #define DQ_XCM_ISCSI_PROC_ONLY_CLEANUP_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23) @@ -266,6 +296,9 @@ #define DQ_TCM_AGG_FLG_SHIFT_CF6 6 #define DQ_TCM_AGG_FLG_SHIFT_CF7 7 /* TCM agg counter flag selection (FW) */ +#define DQ_TCM_FCOE_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1) +#define DQ_TCM_FCOE_DUMMY_TIMER_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF2) +#define DQ_TCM_FCOE_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3) #define DQ_TCM_ISCSI_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1) #define DQ_TCM_ISCSI_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3) @@ -703,7 +736,7 @@ enum mf_mode { /* Per-protocol connection types */ enum protocol_type { PROTOCOLID_ISCSI, - PROTOCOLID_RESERVED2, + PROTOCOLID_FCOE, PROTOCOLID_ROCE, PROTOCOLID_CORE, PROTOCOLID_ETH, diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h index 1aa0727c4136..4b402fb0eaad 100644 --- a/include/linux/qed/eth_common.h +++ b/include/linux/qed/eth_common.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #ifndef __ETH_COMMON__ diff --git a/include/linux/qed/fcoe_common.h b/include/linux/qed/fcoe_common.h new file mode 100644 index 000000000000..2e417a45c5f7 --- /dev/null +++ b/include/linux/qed/fcoe_common.h @@ -0,0 +1,715 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ + +#ifndef __FCOE_COMMON__ +#define __FCOE_COMMON__ +/*********************/ +/* FCOE FW CONSTANTS */ +/*********************/ + +#define FC_ABTS_REPLY_MAX_PAYLOAD_LEN 12 +#define FCOE_MAX_SIZE_FCP_DATA_SUPER (8600) + +struct fcoe_abts_pkt { + __le32 abts_rsp_fc_payload_lo; + __le16 abts_rsp_rx_id; + u8 abts_rsp_rctl; + u8 reserved2; +}; + +/* FCoE additional WQE (Sq/XferQ) information */ +union fcoe_additional_info_union { + __le32 previous_tid; + __le32 parent_tid; + __le32 burst_length; + __le32 seq_rec_updated_offset; +}; + +struct fcoe_exp_ro { + __le32 data_offset; + __le32 reserved; +}; + +union fcoe_cleanup_addr_exp_ro_union { + struct regpair abts_rsp_fc_payload_hi; + struct fcoe_exp_ro exp_ro; +}; + +/* FCoE Ramrod Command IDs */ +enum fcoe_completion_status { + FCOE_COMPLETION_STATUS_SUCCESS, + FCOE_COMPLETION_STATUS_FCOE_VER_ERR, + FCOE_COMPLETION_STATUS_SRC_MAC_ADD_ARR_ERR, + MAX_FCOE_COMPLETION_STATUS +}; + +struct fc_addr_nw { + u8 addr_lo; + u8 addr_mid; + u8 addr_hi; +}; + +/* FCoE connection offload */ +struct fcoe_conn_offload_ramrod_data { + struct regpair sq_pbl_addr; + struct regpair sq_curr_page_addr; + struct regpair sq_next_page_addr; + struct regpair xferq_pbl_addr; + struct regpair xferq_curr_page_addr; + struct regpair xferq_next_page_addr; + struct regpair respq_pbl_addr; + struct regpair respq_curr_page_addr; + struct regpair respq_next_page_addr; + __le16 dst_mac_addr_lo; + __le16 dst_mac_addr_mid; + __le16 dst_mac_addr_hi; + __le16 src_mac_addr_lo; + __le16 src_mac_addr_mid; + __le16 src_mac_addr_hi; + __le16 tx_max_fc_pay_len; + __le16 e_d_tov_timer_val; + __le16 rx_max_fc_pay_len; + __le16 vlan_tag; +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_MASK 0xFFF +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT 0 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_MASK 0x1 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_SHIFT 12 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_MASK 0x7 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT 13 + __le16 physical_q0; + __le16 rec_rr_tov_timer_val; + struct fc_addr_nw s_id; + u8 max_conc_seqs_c3; + struct fc_addr_nw d_id; + u8 flags; +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_MASK 0x1 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_SHIFT 0 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_MASK 0x1 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_SHIFT 1 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_MASK 0x1 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT 2 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK 0x1 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT 3 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_MASK 0x3 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_SHIFT 4 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_MASK 0x3 
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_SHIFT 6 + __le16 conn_id; + u8 def_q_idx; + u8 reserved[5]; +}; + +/* FCoE terminate connection request */ +struct fcoe_conn_terminate_ramrod_data { + struct regpair terminate_params_addr; +}; + +struct fcoe_fast_sgl_ctx { + struct regpair sgl_start_addr; + __le32 sgl_byte_offset; + __le16 task_reuse_cnt; + __le16 init_offset_in_first_sge; +}; + +struct fcoe_slow_sgl_ctx { + struct regpair base_sgl_addr; + __le16 curr_sge_off; + __le16 remainder_num_sges; + __le16 curr_sgl_index; + __le16 reserved; +}; + +struct fcoe_sge { + struct regpair sge_addr; + __le16 size; + __le16 reserved0; + u8 reserved1[3]; + u8 is_valid_sge; +}; + +union fcoe_data_desc_ctx { + struct fcoe_fast_sgl_ctx fast; + struct fcoe_slow_sgl_ctx slow; + struct fcoe_sge single_sge; +}; + +union fcoe_dix_desc_ctx { + struct fcoe_slow_sgl_ctx dix_sgl; + struct fcoe_sge cached_dix_sge; +}; + +struct fcoe_fcp_cmd_payload { + __le32 opaque[8]; +}; + +struct fcoe_fcp_rsp_payload { + __le32 opaque[6]; +}; + +struct fcoe_fcp_xfer_payload { + __le32 opaque[3]; +}; + +/* FCoE firmware function init */ +struct fcoe_init_func_ramrod_data { + struct scsi_init_func_params func_params; + struct scsi_init_func_queues q_params; + __le16 mtu; + __le16 sq_num_pages_in_pbl; + __le32 reserved; +}; + +/* FCoE: Mode of the connection: Target or Initiator or both */ +enum fcoe_mode_type { + FCOE_INITIATOR_MODE = 0x0, + FCOE_TARGET_MODE = 0x1, + FCOE_BOTH_OR_NOT_CHOSEN = 0x3, + MAX_FCOE_MODE_TYPE +}; + +struct fcoe_mstorm_fcoe_task_st_ctx_fp { + __le16 flags; +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_RSRV0_MASK 0x7FFF +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_RSRV0_SHIFT 0 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_MP_INCLUDE_FC_HEADER_MASK 0x1 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_MP_INCLUDE_FC_HEADER_SHIFT 15 + __le16 difDataResidue; + __le16 parent_id; + __le16 single_sge_saved_offset; + __le32 data_2_trns_rem; + __le32 offset_in_io; + union fcoe_dix_desc_ctx dix_desc; + union fcoe_data_desc_ctx data_desc; +}; + +struct fcoe_mstorm_fcoe_task_st_ctx_non_fp { + __le16 flags; +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HOST_INTERFACE_MASK 0x3 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HOST_INTERFACE_SHIFT 0 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_TO_PEER_MASK 0x1 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_TO_PEER_SHIFT 2 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_APP_TAG_MASK 0x1 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_APP_TAG_SHIFT 3 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_INTERVAL_SIZE_LOG_MASK 0xF +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_INTERVAL_SIZE_LOG_SHIFT 4 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_BLOCK_SIZE_MASK 0x3 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_BLOCK_SIZE_SHIFT 8 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RESERVED_MASK 0x1 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RESERVED_SHIFT 10 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HAS_FIRST_PACKET_ARRIVED_MASK 0x1 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HAS_FIRST_PACKET_ARRIVED_SHIFT 11 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_REF_TAG_MASK 0x1 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_REF_TAG_SHIFT 12 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_CACHED_SGE_FLG_MASK 0x1 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_CACHED_SGE_FLG_SHIFT 13 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_OFFSET_IN_IO_VALID_MASK 0x1 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_OFFSET_IN_IO_VALID_SHIFT 14 +#define 
FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_SUPPORTED_MASK 0x1 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_SUPPORTED_SHIFT 15 + u8 tx_rx_sgl_mode; +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_TX_SGL_MODE_MASK 0x7 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_TX_SGL_MODE_SHIFT 0 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE_MASK 0x7 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE_SHIFT 3 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RSRV1_MASK 0x3 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RSRV1_SHIFT 6 + u8 rsrv2; + __le32 num_prm_zero_read; + struct regpair rsp_buf_addr; +}; + +struct fcoe_rx_stat { + struct regpair fcoe_rx_byte_cnt; + struct regpair fcoe_rx_data_pkt_cnt; + struct regpair fcoe_rx_xfer_pkt_cnt; + struct regpair fcoe_rx_other_pkt_cnt; + __le32 fcoe_silent_drop_pkt_cmdq_full_cnt; + __le32 fcoe_silent_drop_pkt_rq_full_cnt; + __le32 fcoe_silent_drop_pkt_crc_error_cnt; + __le32 fcoe_silent_drop_pkt_task_invalid_cnt; + __le32 fcoe_silent_drop_total_pkt_cnt; + __le32 rsrv; +}; + +enum fcoe_sgl_mode { + FCOE_SLOW_SGL, + FCOE_SINGLE_FAST_SGE, + FCOE_2_FAST_SGE, + FCOE_3_FAST_SGE, + FCOE_4_FAST_SGE, + FCOE_MUL_FAST_SGES, + MAX_FCOE_SGL_MODE +}; + +struct fcoe_stat_ramrod_data { + struct regpair stat_params_addr; +}; + +struct protection_info_ctx { + __le16 flags; +#define PROTECTION_INFO_CTX_HOST_INTERFACE_MASK 0x3 +#define PROTECTION_INFO_CTX_HOST_INTERFACE_SHIFT 0 +#define PROTECTION_INFO_CTX_DIF_TO_PEER_MASK 0x1 +#define PROTECTION_INFO_CTX_DIF_TO_PEER_SHIFT 2 +#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_MASK 0x1 +#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_SHIFT 3 +#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_MASK 0xF +#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_SHIFT 4 +#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_MASK 0x1 +#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_SHIFT 8 +#define PROTECTION_INFO_CTX_RESERVED0_MASK 0x7F +#define PROTECTION_INFO_CTX_RESERVED0_SHIFT 9 + u8 dix_block_size; + u8 dst_size; +}; + +union protection_info_union_ctx { + struct protection_info_ctx info; + __le32 value; +}; + +struct fcp_rsp_payload_padded { + struct fcoe_fcp_rsp_payload rsp_payload; + __le32 reserved[2]; +}; + +struct fcp_xfer_payload_padded { + struct fcoe_fcp_xfer_payload xfer_payload; + __le32 reserved[5]; +}; + +struct fcoe_tx_data_params { + __le32 data_offset; + __le32 offset_in_io; + u8 flags; +#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_MASK 0x1 +#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_SHIFT 0 +#define FCOE_TX_DATA_PARAMS_DROP_DATA_MASK 0x1 +#define FCOE_TX_DATA_PARAMS_DROP_DATA_SHIFT 1 +#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_MASK 0x1 +#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_SHIFT 2 +#define FCOE_TX_DATA_PARAMS_RESERVED0_MASK 0x1F +#define FCOE_TX_DATA_PARAMS_RESERVED0_SHIFT 3 + u8 dif_residual; + __le16 seq_cnt; + __le16 single_sge_saved_offset; + __le16 next_dif_offset; + __le16 seq_id; + __le16 reserved3; +}; + +struct fcoe_tx_mid_path_params { + __le32 parameter; + u8 r_ctl; + u8 type; + u8 cs_ctl; + u8 df_ctl; + __le16 rx_id; + __le16 ox_id; +}; + +struct fcoe_tx_params { + struct fcoe_tx_data_params data; + struct fcoe_tx_mid_path_params mid_path; +}; + +union fcoe_tx_info_union_ctx { + struct fcoe_fcp_cmd_payload fcp_cmd_payload; + struct fcp_rsp_payload_padded fcp_rsp_payload; + struct fcp_xfer_payload_padded fcp_xfer_payload; + struct fcoe_tx_params tx_params; +}; + +struct ystorm_fcoe_task_st_ctx { + u8 task_type; + u8 sgl_mode; +#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x7 +#define 
YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 0 +#define YSTORM_FCOE_TASK_ST_CTX_RSRV_MASK 0x1F +#define YSTORM_FCOE_TASK_ST_CTX_RSRV_SHIFT 3 + u8 cached_dix_sge; + u8 expect_first_xfer; + __le32 num_pbf_zero_write; + union protection_info_union_ctx protection_info_union; + __le32 data_2_trns_rem; + union fcoe_tx_info_union_ctx tx_info_union; + union fcoe_dix_desc_ctx dix_desc; + union fcoe_data_desc_ctx data_desc; + __le16 ox_id; + __le16 rx_id; + __le32 task_rety_identifier; + __le32 reserved1[2]; +}; + +struct ystorm_fcoe_task_ag_ctx { + u8 byte0; + u8 byte1; + __le16 word0; + u8 flags0; +#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_MASK 0xF +#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_SHIFT 0 +#define YSTORM_FCOE_TASK_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_BIT0_SHIFT 4 +#define YSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 +#define YSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6 +#define YSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7 + u8 flags1; +#define YSTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3 +#define YSTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 0 +#define YSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 +#define YSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2 +#define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_MASK 0x3 +#define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_SHIFT 4 +#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 6 +#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7 + u8 flags2; +#define YSTORM_FCOE_TASK_AG_CTX_BIT4_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_BIT4_SHIFT 0 +#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1 +#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2 +#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3 +#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4 +#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5 +#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 6 +#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7 + u8 byte2; + __le32 reg0; + u8 byte3; + u8 byte4; + __le16 rx_id; + __le16 word2; + __le16 word3; + __le16 word4; + __le16 word5; + __le32 reg1; + __le32 reg2; +}; + +struct tstorm_fcoe_task_ag_ctx { + u8 reserved; + u8 byte1; + __le16 icid; + u8 flags0; +#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define TSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 +#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_SHIFT 6 +#define TSTORM_FCOE_TASK_AG_CTX_VALID_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_VALID_SHIFT 7 + u8 flags1; +#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_SHIFT 0 +#define TSTORM_FCOE_TASK_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_BIT5_SHIFT 1 +#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_MASK 0x3 +#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_SHIFT 2 +#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_MASK 0x3 +#define 
TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_SHIFT 4 +#define TSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 +#define TSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 6 + u8 flags2; +#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_MASK 0x3 +#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_SHIFT 0 +#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3 +#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 2 +#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_MASK 0x3 +#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_SHIFT 4 +#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_MASK 0x3 +#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_SHIFT 6 + u8 flags3; +#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_MASK 0x3 +#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_SHIFT 0 +#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_SHIFT 2 +#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_SHIFT 3 +#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 4 +#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 5 +#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6 +#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_SHIFT 7 + u8 flags4; +#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_SHIFT 0 +#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_SHIFT 1 +#define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 2 +#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 3 +#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 4 +#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 5 +#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 6 +#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 7 + u8 cleanup_state; + __le16 last_sent_tid; + __le32 rec_rr_tov_exp_timeout; + u8 byte3; + u8 byte4; + __le16 word2; + __le16 word3; + __le16 word4; + __le32 data_offset_end_of_seq; + __le32 data_offset_next; +}; + +struct fcoe_tstorm_fcoe_task_st_ctx_read_write { + union fcoe_cleanup_addr_exp_ro_union cleanup_addr_exp_ro_union; + __le16 flags; +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_MASK 0x7 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_SHIFT 0 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_MASK 0x1 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_SHIFT 3 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_MASK 0x1 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_SHIFT 4 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_MASK 0x1 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_SHIFT 5 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_MASK 0x1 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_SHIFT 6 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_MASK 0x1 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_SHIFT 7 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_MASK 0x3 +#define 
FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_SHIFT 8 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_MASK 0x3F +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_SHIFT 10 + __le16 seq_cnt; + u8 seq_id; + u8 ooo_rx_seq_id; + __le16 rx_id; + struct fcoe_abts_pkt abts_data; + __le32 e_d_tov_exp_timeout_val; + __le16 ooo_rx_seq_cnt; + __le16 reserved1; +}; + +struct fcoe_tstorm_fcoe_task_st_ctx_read_only { + u8 task_type; + u8 dev_type; + u8 conf_supported; + u8 glbl_q_num; + __le32 cid; + __le32 fcp_cmd_trns_size; + __le32 rsrv; +}; + +struct tstorm_fcoe_task_st_ctx { + struct fcoe_tstorm_fcoe_task_st_ctx_read_write read_write; + struct fcoe_tstorm_fcoe_task_st_ctx_read_only read_only; +}; + +struct mstorm_fcoe_task_ag_ctx { + u8 byte0; + u8 byte1; + __le16 icid; + u8 flags0; +#define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_SHIFT 5 +#define MSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6 +#define MSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7 + u8 flags1; +#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3 +#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 0 +#define MSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 +#define MSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2 +#define MSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 +#define MSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 4 +#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6 +#define MSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7 + u8 flags2; +#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 0 +#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1 +#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2 +#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3 +#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4 +#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5 +#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_SHIFT 6 +#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7 + u8 cleanup_state; + __le32 received_bytes; + u8 byte3; + u8 glbl_q_num; + __le16 word1; + __le16 tid_to_xfer; + __le16 word3; + __le16 word4; + __le16 word5; + __le32 expected_bytes; + __le32 reg2; +}; + +struct mstorm_fcoe_task_st_ctx { + struct fcoe_mstorm_fcoe_task_st_ctx_non_fp non_fp; + struct fcoe_mstorm_fcoe_task_st_ctx_fp fp; +}; + +struct ustorm_fcoe_task_ag_ctx { + u8 reserved; + u8 byte1; + __le16 icid; + u8 flags0; +#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define USTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 +#define USTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3 +#define USTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 6 + u8 flags1; +#define USTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 
+#define USTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 0 +#define USTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 +#define USTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 2 +#define USTORM_FCOE_TASK_AG_CTX_CF3_MASK 0x3 +#define USTORM_FCOE_TASK_AG_CTX_CF3_SHIFT 4 +#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 +#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6 + u8 flags2; +#define USTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 0 +#define USTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_FCOE_TASK_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4 +#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 5 +#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 6 +#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 7 + u8 flags3; +#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 0 +#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 1 +#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 2 +#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 3 +#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF +#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4 + __le32 dif_err_intervals; + __le32 dif_error_1st_interval; + __le32 global_cq_num; + __le32 reg3; + __le32 reg4; + __le32 reg5; +}; + +struct fcoe_task_context { + struct ystorm_fcoe_task_st_ctx ystorm_st_context; + struct tdif_task_context tdif_context; + struct ystorm_fcoe_task_ag_ctx ystorm_ag_context; + struct tstorm_fcoe_task_ag_ctx tstorm_ag_context; + struct timers_context timer_context; + struct tstorm_fcoe_task_st_ctx tstorm_st_context; + struct regpair tstorm_st_padding[2]; + struct mstorm_fcoe_task_ag_ctx mstorm_ag_context; + struct mstorm_fcoe_task_st_ctx mstorm_st_context; + struct ustorm_fcoe_task_ag_ctx ustorm_ag_context; + struct rdif_task_context rdif_context; +}; + +struct fcoe_tx_stat { + struct regpair fcoe_tx_byte_cnt; + struct regpair fcoe_tx_data_pkt_cnt; + struct regpair fcoe_tx_xfer_pkt_cnt; + struct regpair fcoe_tx_other_pkt_cnt; +}; + +struct fcoe_wqe { + __le16 task_id; + __le16 flags; +#define FCOE_WQE_REQ_TYPE_MASK 0xF +#define FCOE_WQE_REQ_TYPE_SHIFT 0 +#define FCOE_WQE_SGL_MODE_MASK 0x7 +#define FCOE_WQE_SGL_MODE_SHIFT 4 +#define FCOE_WQE_CONTINUATION_MASK 0x1 +#define FCOE_WQE_CONTINUATION_SHIFT 7 +#define FCOE_WQE_INVALIDATE_PTU_MASK 0x1 +#define FCOE_WQE_INVALIDATE_PTU_SHIFT 8 +#define FCOE_WQE_SUPER_IO_MASK 0x1 +#define FCOE_WQE_SUPER_IO_SHIFT 9 +#define FCOE_WQE_SEND_AUTO_RSP_MASK 0x1 +#define FCOE_WQE_SEND_AUTO_RSP_SHIFT 10 +#define FCOE_WQE_RESERVED0_MASK 0x1F +#define FCOE_WQE_RESERVED0_SHIFT 11 + union fcoe_additional_info_union additional_info_union; +}; + +struct xfrqe_prot_flags { + u8 flags; +#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF +#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0 +#define XFRQE_PROT_FLAGS_DIF_TO_PEER_MASK 0x1 +#define XFRQE_PROT_FLAGS_DIF_TO_PEER_SHIFT 4 +#define XFRQE_PROT_FLAGS_HOST_INTERFACE_MASK 0x3 +#define XFRQE_PROT_FLAGS_HOST_INTERFACE_SHIFT 5 +#define 
XFRQE_PROT_FLAGS_RESERVED_MASK 0x1 +#define XFRQE_PROT_FLAGS_RESERVED_SHIFT 7 +}; + +struct fcoe_db_data { + u8 params; +#define FCOE_DB_DATA_DEST_MASK 0x3 +#define FCOE_DB_DATA_DEST_SHIFT 0 +#define FCOE_DB_DATA_AGG_CMD_MASK 0x3 +#define FCOE_DB_DATA_AGG_CMD_SHIFT 2 +#define FCOE_DB_DATA_BYPASS_EN_MASK 0x1 +#define FCOE_DB_DATA_BYPASS_EN_SHIFT 4 +#define FCOE_DB_DATA_RESERVED_MASK 0x1 +#define FCOE_DB_DATA_RESERVED_SHIFT 5 +#define FCOE_DB_DATA_AGG_VAL_SEL_MASK 0x3 +#define FCOE_DB_DATA_AGG_VAL_SEL_SHIFT 6 + u8 agg_flags; + __le16 sq_prod; +}; +#endif /* __FCOE_COMMON__ */ diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h index 8f64b1223c2f..4c5747babcf6 100644 --- a/include/linux/qed/iscsi_common.h +++ b/include/linux/qed/iscsi_common.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #ifndef __ISCSI_COMMON__ diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index 37dfba101c6c..5cd7a4608c9b 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation - * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #ifndef _QED_CHAIN_H diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h index 7a52f7c58c37..4cd1f0ccfa36 100644 --- a/include/linux/qed/qed_eth_if.h +++ b/include/linux/qed/qed_eth_if.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
*/ #ifndef _QED_ETH_IF_H @@ -53,7 +77,7 @@ struct qed_dev_eth_info { }; struct qed_update_vport_rss_params { - u16 rss_ind_table[128]; + void *rss_ind_table[128]; u32 rss_key[10]; u8 rss_caps; }; @@ -72,6 +96,7 @@ struct qed_update_vport_params { struct qed_start_vport_params { bool remove_inner_vlan; + bool handle_ptp_pkts; bool gro_enable; bool drop_ttl0; u8 vport_id; @@ -135,6 +160,15 @@ struct qed_eth_cb_ops { void (*force_mac) (void *dev, u8 *mac, bool forced); }; +#define QED_MAX_PHC_DRIFT_PPB 291666666 + +enum qed_ptp_filter_type { + QED_PTP_FILTER_L2, + QED_PTP_FILTER_IPV4, + QED_PTP_FILTER_IPV4_IPV6, + QED_PTP_FILTER_L2_IPV4_IPV6 +}; + #ifdef CONFIG_DCB /* Prototype declaration of qed_eth_dcbnl_ops should match with the declaration * of dcbnl_rtnl_ops structure. @@ -194,6 +228,17 @@ struct qed_eth_dcbnl_ops { }; #endif +struct qed_eth_ptp_ops { + int (*hwtstamp_tx_on)(struct qed_dev *); + int (*cfg_rx_filters)(struct qed_dev *, enum qed_ptp_filter_type); + int (*read_rx_ts)(struct qed_dev *, u64 *); + int (*read_tx_ts)(struct qed_dev *, u64 *); + int (*read_cc)(struct qed_dev *, u64 *); + int (*disable)(struct qed_dev *); + int (*adjfreq)(struct qed_dev *, s32); + int (*enable)(struct qed_dev *); +}; + struct qed_eth_ops { const struct qed_common_ops *common; #ifdef CONFIG_QED_SRIOV @@ -202,6 +247,7 @@ struct qed_eth_ops { #ifdef CONFIG_DCB const struct qed_eth_dcbnl_ops *dcb; #endif + const struct qed_eth_ptp_ops *ptp; int (*fill_dev_info)(struct qed_dev *cdev, struct qed_dev_eth_info *info); diff --git a/include/linux/qed/qed_fcoe_if.h b/include/linux/qed/qed_fcoe_if.h new file mode 100644 index 000000000000..bd6bcb809415 --- /dev/null +++ b/include/linux/qed/qed_fcoe_if.h @@ -0,0 +1,145 @@ +#ifndef _QED_FCOE_IF_H +#define _QED_FCOE_IF_H +#include <linux/types.h> +#include <linux/qed/qed_if.h> +struct qed_fcoe_stats { + u64 fcoe_rx_byte_cnt; + u64 fcoe_rx_data_pkt_cnt; + u64 fcoe_rx_xfer_pkt_cnt; + u64 fcoe_rx_other_pkt_cnt; + u32 fcoe_silent_drop_pkt_cmdq_full_cnt; + u32 fcoe_silent_drop_pkt_rq_full_cnt; + u32 fcoe_silent_drop_pkt_crc_error_cnt; + u32 fcoe_silent_drop_pkt_task_invalid_cnt; + u32 fcoe_silent_drop_total_pkt_cnt; + + u64 fcoe_tx_byte_cnt; + u64 fcoe_tx_data_pkt_cnt; + u64 fcoe_tx_xfer_pkt_cnt; + u64 fcoe_tx_other_pkt_cnt; +}; + +struct qed_dev_fcoe_info { + struct qed_dev_info common; + + void __iomem *primary_dbq_rq_addr; + void __iomem *secondary_bdq_rq_addr; +}; + +struct qed_fcoe_params_offload { + dma_addr_t sq_pbl_addr; + dma_addr_t sq_curr_page_addr; + dma_addr_t sq_next_page_addr; + + u8 src_mac[ETH_ALEN]; + u8 dst_mac[ETH_ALEN]; + + u16 tx_max_fc_pay_len; + u16 e_d_tov_timer_val; + u16 rec_tov_timer_val; + u16 rx_max_fc_pay_len; + u16 vlan_tag; + + struct fc_addr_nw s_id; + u8 max_conc_seqs_c3; + struct fc_addr_nw d_id; + u8 flags; + u8 def_q_idx; +}; + +#define MAX_TID_BLOCKS_FCOE (512) +struct qed_fcoe_tid { + u32 size; /* In bytes per task */ + u32 num_tids_per_block; + u8 *blocks[MAX_TID_BLOCKS_FCOE]; +}; + +struct qed_fcoe_cb_ops { + struct qed_common_cb_ops common; + u32 (*get_login_failures)(void *cookie); +}; + +void qed_fcoe_set_pf_params(struct qed_dev *cdev, + struct qed_fcoe_pf_params *params); + +/** + * struct qed_fcoe_ops - qed FCoE operations. + * @common: common operations pointer + * @fill_dev_info: fills FCoE-specific information + * @param cdev + * @param info + * @return 0 on success, otherwise error value.
+ * @register_ops: register FCoE operations + * @param cdev + * @param ops - specified using qed_fcoe_cb_ops + * @param cookie - driver private + * @ll2: light L2 operations pointer + * @start: starts fcoe in FW + * @param cdev + * @param tasks - qed will fill information about tasks + * return 0 on success, otherwise error value. + * @stop: stops fcoe in FW + * @param cdev + * return 0 on success, otherwise error value. + * @acquire_conn: acquire a new fcoe connection + * @param cdev + * @param handle - qed will fill handle that should be + * used henceforth as identifier of the + * connection. + * @param p_doorbell - qed will fill the address of the + * doorbell. + * return 0 on success, otherwise error value. + * @release_conn: release a previously acquired fcoe connection + * @param cdev + * @param handle - the connection handle. + * return 0 on success, otherwise error value. + * @offload_conn: configures an offloaded connection + * @param cdev + * @param handle - the connection handle. + * @param conn_info - the configuration to use for the + * offload. + * return 0 on success, otherwise error value. + * @destroy_conn: stops an offloaded connection + * @param cdev + * @param handle - the connection handle. + * @param terminate_params + * return 0 on success, otherwise error value. + * @get_stats: gets FCoE related statistics + * @param cdev + * @param stats - pointer to struct that would be filled + * with stats + * return 0 on success, error otherwise. + */ +struct qed_fcoe_ops { + const struct qed_common_ops *common; + + int (*fill_dev_info)(struct qed_dev *cdev, + struct qed_dev_fcoe_info *info); + + void (*register_ops)(struct qed_dev *cdev, + struct qed_fcoe_cb_ops *ops, void *cookie); + + const struct qed_ll2_ops *ll2; + + int (*start)(struct qed_dev *cdev, struct qed_fcoe_tid *tasks); + + int (*stop)(struct qed_dev *cdev); + + int (*acquire_conn)(struct qed_dev *cdev, + u32 *handle, + u32 *fw_cid, void __iomem **p_doorbell); + + int (*release_conn)(struct qed_dev *cdev, u32 handle); + + int (*offload_conn)(struct qed_dev *cdev, + u32 handle, + struct qed_fcoe_params_offload *conn_info); + int (*destroy_conn)(struct qed_dev *cdev, + u32 handle, dma_addr_t terminate_params); + + int (*get_stats)(struct qed_dev *cdev, struct qed_fcoe_stats *stats); +}; + +const struct qed_fcoe_ops *qed_get_fcoe_ops(void); +void qed_put_fcoe_ops(void); +#endif diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 4b454f4f5b25..fde56c436f71 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -1,10 +1,33 @@ /* QLogic qed NIC Driver - * - * Copyright (c) 2015 QLogic Corporation - * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer.
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #ifndef _QED_IF_H @@ -36,7 +59,6 @@ enum dcbx_protocol_type { #define QED_ROCE_PROTOCOL_INDEX (3) -#ifdef CONFIG_DCB #define QED_LLDP_CHASSIS_ID_STAT_LEN 4 #define QED_LLDP_PORT_ID_STAT_LEN 4 #define QED_DCBX_MAX_APP_PROTOCOL 32 @@ -132,7 +154,6 @@ struct qed_dcbx_get { struct qed_dcbx_remote_params remote; struct qed_dcbx_admin_params local; }; -#endif enum qed_led_mode { QED_LED_MODE_OFF, @@ -159,6 +180,38 @@ struct qed_eth_pf_params { u16 num_cons; }; +struct qed_fcoe_pf_params { + /* The following parameters are used during protocol-init */ + u64 glbl_q_params_addr; + u64 bdq_pbl_base_addr[2]; + + /* The following parameters are used during HW-init + * and these parameters need to be passed as arguments + * to update_pf_params routine invoked before slowpath start + */ + u16 num_cons; + u16 num_tasks; + + /* The following parameters are used during protocol-init */ + u16 sq_num_pbl_pages; + + u16 cq_num_entries; + u16 cmdq_num_entries; + u16 rq_buffer_log_size; + u16 mtu; + u16 dummy_icid; + u16 bdq_xoff_threshold[2]; + u16 bdq_xon_threshold[2]; + u16 rq_buffer_size; + u8 num_cqs; /* num of global CQs */ + u8 log_page_size; + u8 gl_rq_pi; + u8 gl_cmd_pi; + u8 debug_mode; + u8 is_target; + u8 bdq_pbl_num_entries[2]; +}; + /* Most of the the parameters below are described in the FW iSCSI / TCP HSI */ struct qed_iscsi_pf_params { u64 glbl_q_params_addr; @@ -222,6 +275,7 @@ struct qed_rdma_pf_params { struct qed_pf_params { struct qed_eth_pf_params eth_pf_params; + struct qed_fcoe_pf_params fcoe_pf_params; struct qed_iscsi_pf_params iscsi_pf_params; struct qed_rdma_pf_params rdma_pf_params; }; @@ -282,6 +336,7 @@ enum qed_sb_type { enum qed_protocol { QED_PROTOCOL_ETH, QED_PROTOCOL_ISCSI, + QED_PROTOCOL_FCOE, }; enum qed_link_mode_bits { @@ -368,6 +423,7 @@ struct qed_int_info { struct qed_common_cb_ops { void (*link_update)(void *dev, struct qed_link_output *link); + void (*dcbx_aen)(void *dev, struct qed_dcbx_get *get, u32 mib_type); }; struct qed_selftest_ops { @@ -471,6 +527,10 @@ struct qed_common_ops { void (*simd_handler_clean)(struct qed_dev *cdev, int index); + int (*dbg_grc)(struct qed_dev *cdev, + void *buffer, u32 *num_dumped_bytes); + + int (*dbg_grc_size)(struct qed_dev *cdev); int (*dbg_all_data) (struct qed_dev *cdev, void *buffer); diff --git a/include/linux/qed/qed_iov_if.h b/include/linux/qed/qed_iov_if.h index 5a4f8d0899e9..ac2e6a3199a3 100644 --- a/include/linux/qed/qed_iov_if.h +++ b/include/linux/qed/qed_iov_if.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. 
+ * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #ifndef _QED_IOV_IF_H @@ -29,6 +53,8 @@ struct qed_iov_hv_ops { int (*set_rate) (struct qed_dev *cdev, int vfid, u32 min_rate, u32 max_rate); + + int (*set_trust) (struct qed_dev *cdev, int vfid, bool trust); }; #endif diff --git a/include/linux/qed/qed_iscsi_if.h b/include/linux/qed/qed_iscsi_if.h index d27912480cb3..f70bb81b8b6a 100644 --- a/include/linux/qed/qed_iscsi_if.h +++ b/include/linux/qed/qed_iscsi_if.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
*/ #ifndef _QED_ISCSI_IF_H diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h index fd75c265dba3..4fb4666ea879 100644 --- a/include/linux/qed/qed_ll2_if.h +++ b/include/linux/qed/qed_ll2_if.h @@ -1,10 +1,33 @@ /* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation * - * Copyright (c) 2015 QLogic Corporation + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #ifndef _QED_LL2_IF_H diff --git a/include/linux/qed/qed_roce_if.h b/include/linux/qed/qed_roce_if.h index 53047d3fa678..f742d4312c9d 100644 --- a/include/linux/qed/qed_roce_if.h +++ b/include/linux/qed/qed_roce_if.h @@ -1,5 +1,5 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015-2016 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/include/linux/qed/qede_roce.h b/include/linux/qed/qede_roce.h index f48d64b0e2fb..3b8dd551a98c 100644 --- a/include/linux/qed/qede_roce.h +++ b/include/linux/qed/qede_roce.h @@ -1,5 +1,5 @@ /* QLogic qedr NIC Driver - * Copyright (c) 2015-2016 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h index 7663725faa94..f773aa5e746f 100644 --- a/include/linux/qed/rdma_common.h +++ b/include/linux/qed/rdma_common.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #ifndef __RDMA_COMMON__ diff --git a/include/linux/qed/roce_common.h b/include/linux/qed/roce_common.h index 2eeaf3dc6646..bad02df213df 100644 --- a/include/linux/qed/roce_common.h +++ b/include/linux/qed/roce_common.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
*/ #ifndef __ROCE_COMMON__ diff --git a/include/linux/qed/storage_common.h b/include/linux/qed/storage_common.h index 3b8e1efd9bc2..03f3e37ab059 100644 --- a/include/linux/qed/storage_common.h +++ b/include/linux/qed/storage_common.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #ifndef __STORAGE_COMMON__ diff --git a/include/linux/qed/tcp_common.h b/include/linux/qed/tcp_common.h index dc3889d1bbe6..46fe7856f1b2 100644 --- a/include/linux/qed/tcp_common.h +++ b/include/linux/qed/tcp_common.h @@ -1,9 +1,33 @@ /* QLogic qed NIC Driver - * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2015-2017 QLogic Corporation * - * This software is available under the terms of the GNU General Public License - * (GPL) Version 2, available from the file COPYING in the main directory of - * this source tree. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. */ #ifndef __TCP_COMMON__ diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index 52bda854593b..3e5735064b71 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h @@ -22,11 +22,13 @@ #define _LINUX_RADIX_TREE_H #include <linux/bitops.h> -#include <linux/preempt.h> -#include <linux/types.h> #include <linux/bug.h> #include <linux/kernel.h> +#include <linux/list.h> +#include <linux/preempt.h> #include <linux/rcupdate.h> +#include <linux/spinlock.h> +#include <linux/types.h> /* * The bottom two bits of the slot determine how the remaining bits in the @@ -94,7 +96,7 @@ struct radix_tree_node { unsigned char count; /* Total entry count */ unsigned char exceptional; /* Exceptional entry count */ struct radix_tree_node *parent; /* Used when ascending tree */ - void *private_data; /* For tree user */ + struct radix_tree_root *root; /* The tree we belong to */ union { struct list_head private_list; /* For tree user */ struct rcu_head rcu_head; /* Used when freeing node */ @@ -103,7 +105,10 @@ struct radix_tree_node { unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS]; }; -/* root tags are stored in gfp_mask, shifted by __GFP_BITS_SHIFT */ +/* The top bits of gfp_mask are used to store the root tags and the IDR flag */ +#define ROOT_IS_IDR ((__force gfp_t)(1 << __GFP_BITS_SHIFT)) +#define ROOT_TAG_SHIFT (__GFP_BITS_SHIFT + 1) + struct radix_tree_root { gfp_t gfp_mask; struct radix_tree_node __rcu *rnode; @@ -123,7 +128,7 @@ do { \ (root)->rnode = NULL; \ } while (0) -static inline bool radix_tree_empty(struct radix_tree_root *root) +static inline bool radix_tree_empty(const struct radix_tree_root *root) { return root->rnode == NULL; } @@ -216,10 +221,8 @@ static inline unsigned int iter_shift(const struct radix_tree_iter *iter) */ /** - * radix_tree_deref_slot - dereference a slot - * @pslot: pointer to slot, returned by radix_tree_lookup_slot - * Returns: item that was stored in that slot with any direct pointer flag - * removed. + * radix_tree_deref_slot - dereference a slot + * @slot: slot pointer, returned by radix_tree_lookup_slot * * For use with radix_tree_lookup_slot(). Caller must hold tree at least read * locked across slot lookup and dereference. Not required if write lock is @@ -227,26 +230,27 @@ static inline unsigned int iter_shift(const struct radix_tree_iter *iter) * * radix_tree_deref_retry must be used to confirm validity of the pointer if * only the read lock is held. + * + * Return: entry stored in that slot. */ -static inline void *radix_tree_deref_slot(void **pslot) +static inline void *radix_tree_deref_slot(void __rcu **slot) { - return rcu_dereference(*pslot); + return rcu_dereference(*slot); } /** - * radix_tree_deref_slot_protected - dereference a slot without RCU lock but with tree lock held - * @pslot: pointer to slot, returned by radix_tree_lookup_slot - * Returns: item that was stored in that slot with any direct pointer flag - * removed. - * - * Similar to radix_tree_deref_slot but only used during migration when a pages - * mapping is being moved. The caller does not hold the RCU read lock but it - * must hold the tree lock to prevent parallel updates. 
+ * radix_tree_deref_slot_protected - dereference a slot with tree lock held + * @slot: slot pointer, returned by radix_tree_lookup_slot + * + * Similar to radix_tree_deref_slot. The caller does not hold the RCU read + * lock but it must hold the tree lock to prevent parallel updates. + * + * Return: entry stored in that slot. */ -static inline void *radix_tree_deref_slot_protected(void **pslot, +static inline void *radix_tree_deref_slot_protected(void __rcu **slot, spinlock_t *treelock) { - return rcu_dereference_protected(*pslot, lockdep_is_held(treelock)); + return rcu_dereference_protected(*slot, lockdep_is_held(treelock)); } /** @@ -282,9 +286,9 @@ static inline int radix_tree_exception(void *arg) return unlikely((unsigned long)arg & RADIX_TREE_ENTRY_MASK); } -int __radix_tree_create(struct radix_tree_root *root, unsigned long index, +int __radix_tree_create(struct radix_tree_root *, unsigned long index, unsigned order, struct radix_tree_node **nodep, - void ***slotp); + void __rcu ***slotp); int __radix_tree_insert(struct radix_tree_root *, unsigned long index, unsigned order, void *); static inline int radix_tree_insert(struct radix_tree_root *root, @@ -292,55 +296,56 @@ static inline int radix_tree_insert(struct radix_tree_root *root, { return __radix_tree_insert(root, index, 0, entry); } -void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index, - struct radix_tree_node **nodep, void ***slotp); -void *radix_tree_lookup(struct radix_tree_root *, unsigned long); -void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long); +void *__radix_tree_lookup(const struct radix_tree_root *, unsigned long index, + struct radix_tree_node **nodep, void __rcu ***slotp); +void *radix_tree_lookup(const struct radix_tree_root *, unsigned long); +void __rcu **radix_tree_lookup_slot(const struct radix_tree_root *, + unsigned long index); typedef void (*radix_tree_update_node_t)(struct radix_tree_node *, void *); -void __radix_tree_replace(struct radix_tree_root *root, - struct radix_tree_node *node, - void **slot, void *item, +void __radix_tree_replace(struct radix_tree_root *, struct radix_tree_node *, + void __rcu **slot, void *entry, radix_tree_update_node_t update_node, void *private); void radix_tree_iter_replace(struct radix_tree_root *, - const struct radix_tree_iter *, void **slot, void *item); -void radix_tree_replace_slot(struct radix_tree_root *root, - void **slot, void *item); -void __radix_tree_delete_node(struct radix_tree_root *root, - struct radix_tree_node *node, + const struct radix_tree_iter *, void __rcu **slot, void *entry); +void radix_tree_replace_slot(struct radix_tree_root *, + void __rcu **slot, void *entry); +void __radix_tree_delete_node(struct radix_tree_root *, + struct radix_tree_node *, radix_tree_update_node_t update_node, void *private); +void radix_tree_iter_delete(struct radix_tree_root *, + struct radix_tree_iter *iter, void __rcu **slot); void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *); void *radix_tree_delete(struct radix_tree_root *, unsigned long); -void radix_tree_clear_tags(struct radix_tree_root *root, - struct radix_tree_node *node, - void **slot); -unsigned int radix_tree_gang_lookup(struct radix_tree_root *root, +void radix_tree_clear_tags(struct radix_tree_root *, struct radix_tree_node *, + void __rcu **slot); +unsigned int radix_tree_gang_lookup(const struct radix_tree_root *, void **results, unsigned long first_index, unsigned int max_items); -unsigned int radix_tree_gang_lookup_slot(struct 
radix_tree_root *root, - void ***results, unsigned long *indices, +unsigned int radix_tree_gang_lookup_slot(const struct radix_tree_root *, + void __rcu ***results, unsigned long *indices, unsigned long first_index, unsigned int max_items); int radix_tree_preload(gfp_t gfp_mask); int radix_tree_maybe_preload(gfp_t gfp_mask); int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order); void radix_tree_init(void); -void *radix_tree_tag_set(struct radix_tree_root *root, +void *radix_tree_tag_set(struct radix_tree_root *, unsigned long index, unsigned int tag); -void *radix_tree_tag_clear(struct radix_tree_root *root, +void *radix_tree_tag_clear(struct radix_tree_root *, unsigned long index, unsigned int tag); -int radix_tree_tag_get(struct radix_tree_root *root, +int radix_tree_tag_get(const struct radix_tree_root *, unsigned long index, unsigned int tag); -void radix_tree_iter_tag_set(struct radix_tree_root *root, +void radix_tree_iter_tag_set(struct radix_tree_root *, + const struct radix_tree_iter *iter, unsigned int tag); +void radix_tree_iter_tag_clear(struct radix_tree_root *, const struct radix_tree_iter *iter, unsigned int tag); -unsigned int -radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, - unsigned long first_index, unsigned int max_items, - unsigned int tag); -unsigned int -radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results, - unsigned long first_index, unsigned int max_items, - unsigned int tag); -int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag); +unsigned int radix_tree_gang_lookup_tag(const struct radix_tree_root *, + void **results, unsigned long first_index, + unsigned int max_items, unsigned int tag); +unsigned int radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *, + void __rcu ***results, unsigned long first_index, + unsigned int max_items, unsigned int tag); +int radix_tree_tagged(const struct radix_tree_root *, unsigned int tag); static inline void radix_tree_preload_end(void) { @@ -352,10 +357,14 @@ int radix_tree_split(struct radix_tree_root *, unsigned long index, unsigned new_order); int radix_tree_join(struct radix_tree_root *, unsigned long index, unsigned new_order, void *); +void __rcu **idr_get_free(struct radix_tree_root *, struct radix_tree_iter *, + gfp_t, int end); -#define RADIX_TREE_ITER_TAG_MASK 0x00FF /* tag index in lower byte */ -#define RADIX_TREE_ITER_TAGGED 0x0100 /* lookup tagged slots */ -#define RADIX_TREE_ITER_CONTIG 0x0200 /* stop at first hole */ +enum { + RADIX_TREE_ITER_TAG_MASK = 0x0f, /* tag index in lower nybble */ + RADIX_TREE_ITER_TAGGED = 0x10, /* lookup tagged slots */ + RADIX_TREE_ITER_CONTIG = 0x20, /* stop at first hole */ +}; /** * radix_tree_iter_init - initialize radix tree iterator @@ -364,7 +373,7 @@ int radix_tree_join(struct radix_tree_root *, unsigned long index, * @start: iteration starting index * Returns: NULL */ -static __always_inline void ** +static __always_inline void __rcu ** radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start) { /* @@ -393,10 +402,46 @@ radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start) * Also it fills @iter with data about chunk: position in the tree (index), * its end (next_index), and constructs a bit mask for tagged iterating (tags). 
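 *
 * A minimal open-coded loop over all present entries, for illustration
 * only (this example is not part of the patch; real callers normally use
 * the radix_tree_for_each_slot() wrapper, which expands to roughly this):
 *
 *	void __rcu **slot;
 *	struct radix_tree_iter iter;
 *
 *	radix_tree_iter_init(&iter, 0);
 *	while ((slot = radix_tree_next_chunk(root, &iter, 0))) {
 *		do {
 *			pr_info("index %lu\n", iter.index);
 *		} while ((slot = radix_tree_next_slot(slot, &iter, 0)));
 *	}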
*/ -void **radix_tree_next_chunk(struct radix_tree_root *root, +void __rcu **radix_tree_next_chunk(const struct radix_tree_root *, struct radix_tree_iter *iter, unsigned flags); /** + * radix_tree_iter_lookup - look up an index in the radix tree + * @root: radix tree root + * @iter: iterator state + * @index: key to look up + * + * If @index is present in the radix tree, this function returns the slot + * containing it and updates @iter to describe the entry. If @index is not + * present, it returns NULL. + */ +static inline void __rcu ** +radix_tree_iter_lookup(const struct radix_tree_root *root, + struct radix_tree_iter *iter, unsigned long index) +{ + radix_tree_iter_init(iter, index); + return radix_tree_next_chunk(root, iter, RADIX_TREE_ITER_CONTIG); +} + +/** + * radix_tree_iter_find - find a present entry + * @root: radix tree root + * @iter: iterator state + * @index: start location + * + * This function returns the slot containing the entry with the lowest index + * which is at least @index. If @index is larger than any present entry, this + * function returns NULL. The @iter is updated to describe the entry found. + */ +static inline void __rcu ** +radix_tree_iter_find(const struct radix_tree_root *root, + struct radix_tree_iter *iter, unsigned long index) +{ + radix_tree_iter_init(iter, index); + return radix_tree_next_chunk(root, iter, 0); +} + +/** * radix_tree_iter_retry - retry this chunk of the iteration * @iter: iterator state * @@ -406,7 +451,7 @@ void **radix_tree_next_chunk(struct radix_tree_root *root, * and continue the iteration. */ static inline __must_check -void **radix_tree_iter_retry(struct radix_tree_iter *iter) +void __rcu **radix_tree_iter_retry(struct radix_tree_iter *iter) { iter->next_index = iter->index; iter->tags = 0; @@ -429,7 +474,7 @@ __radix_tree_iter_add(struct radix_tree_iter *iter, unsigned long slots) * have been invalidated by an insertion or deletion. Call this function * before releasing the lock to continue the iteration from the next index. */ -void **__must_check radix_tree_iter_resume(void **slot, +void __rcu **__must_check radix_tree_iter_resume(void __rcu **slot, struct radix_tree_iter *iter); /** @@ -445,11 +490,11 @@ radix_tree_chunk_size(struct radix_tree_iter *iter) } #ifdef CONFIG_RADIX_TREE_MULTIORDER -void ** __radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, - unsigned flags); +void __rcu **__radix_tree_next_slot(void __rcu **slot, + struct radix_tree_iter *iter, unsigned flags); #else /* Can't happen without sibling entries, but the compiler can't tell that */ -static inline void ** __radix_tree_next_slot(void **slot, +static inline void __rcu **__radix_tree_next_slot(void __rcu **slot, struct radix_tree_iter *iter, unsigned flags) { return slot; @@ -475,8 +520,8 @@ static inline void ** __radix_tree_next_slot(void **slot, * b) we are doing non-tagged iteration, and iter->index and iter->next_index * have been set up so that radix_tree_chunk_size() returns 1 or 0. 
*/ -static __always_inline void ** -radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags) +static __always_inline void __rcu **radix_tree_next_slot(void __rcu **slot, + struct radix_tree_iter *iter, unsigned flags) { if (flags & RADIX_TREE_ITER_TAGGED) { iter->tags >>= 1; @@ -514,7 +559,7 @@ radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags) return NULL; found: - if (unlikely(radix_tree_is_internal_node(*slot))) + if (unlikely(radix_tree_is_internal_node(rcu_dereference_raw(*slot)))) return __radix_tree_next_slot(slot, iter, flags); return slot; } diff --git a/include/linux/random.h b/include/linux/random.h index 7bd2403e4fef..ed5c3838780d 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -37,14 +37,26 @@ extern void get_random_bytes(void *buf, int nbytes); extern int add_random_ready_callback(struct random_ready_callback *rdy); extern void del_random_ready_callback(struct random_ready_callback *rdy); extern void get_random_bytes_arch(void *buf, int nbytes); -extern int random_int_secret_init(void); #ifndef MODULE extern const struct file_operations random_fops, urandom_fops; #endif -unsigned int get_random_int(void); -unsigned long get_random_long(void); +u32 get_random_u32(void); +u64 get_random_u64(void); +static inline unsigned int get_random_int(void) +{ + return get_random_u32(); +} +static inline unsigned long get_random_long(void) +{ +#if BITS_PER_LONG == 64 + return get_random_u64(); +#else + return get_random_u32(); +#endif +} + unsigned long randomize_page(unsigned long start, unsigned long range); u32 prandom_u32(void); diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h index d076183e49be..9702b6e183bc 100644 --- a/include/linux/rbtree_augmented.h +++ b/include/linux/rbtree_augmented.h @@ -90,7 +90,9 @@ rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \ old->rbaugmented = rbcompute(old); \ } \ rbstatic const struct rb_augment_callbacks rbname = { \ - rbname ## _propagate, rbname ## _copy, rbname ## _rotate \ + .propagate = rbname ## _propagate, \ + .copy = rbname ## _copy, \ + .rotate = rbname ## _rotate \ }; diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h index 4ae95f7e8597..a23a33153180 100644 --- a/include/linux/rculist_nulls.h +++ b/include/linux/rculist_nulls.h @@ -156,5 +156,19 @@ static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n, ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \ pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos))) +/** + * hlist_nulls_for_each_entry_safe - + * iterate over list of given type safe against removal of list entry + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct hlist_nulls_node to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the hlist_nulls_node within the struct. 
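+ *
+ * Illustrative use only (not part of this patch; assumes a hypothetical
+ * struct foo that embeds a struct hlist_nulls_node named "node" and a
+ * "dead" flag):
+ *
+ *	struct foo *tpos;
+ *	struct hlist_nulls_node *pos;
+ *
+ *	hlist_nulls_for_each_entry_safe(tpos, pos, head, node)
+ *		if (tpos->dead)
+ *			hlist_nulls_del_rcu(&tpos->node);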
+ */ +#define hlist_nulls_for_each_entry_safe(tpos, pos, head, member) \ + for (({barrier();}), \ + pos = rcu_dereference_raw(hlist_nulls_first_rcu(head)); \ + (!is_a_nulls(pos)) && \ + ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); \ + pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos)); 1; });) #endif #endif diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 6ade6a52d9d4..de88b33c0974 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -40,7 +40,6 @@ #include <linux/cpumask.h> #include <linux/seqlock.h> #include <linux/lockdep.h> -#include <linux/completion.h> #include <linux/debugobjects.h> #include <linux/bug.h> #include <linux/compiler.h> @@ -226,45 +225,6 @@ void call_rcu_sched(struct rcu_head *head, void synchronize_sched(void); -/* - * Structure allowing asynchronous waiting on RCU. - */ -struct rcu_synchronize { - struct rcu_head head; - struct completion completion; -}; -void wakeme_after_rcu(struct rcu_head *head); - -void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array, - struct rcu_synchronize *rs_array); - -#define _wait_rcu_gp(checktiny, ...) \ -do { \ - call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \ - struct rcu_synchronize __rs_array[ARRAY_SIZE(__crcu_array)]; \ - __wait_rcu_gp(checktiny, ARRAY_SIZE(__crcu_array), \ - __crcu_array, __rs_array); \ -} while (0) - -#define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__) - -/** - * synchronize_rcu_mult - Wait concurrently for multiple grace periods - * @...: List of call_rcu() functions for the flavors to wait on. - * - * This macro waits concurrently for multiple flavors of RCU grace periods. - * For example, synchronize_rcu_mult(call_rcu, call_rcu_bh) would wait - * on concurrent RCU and RCU-bh grace periods. Waiting on a give SRCU - * domain requires you to write a wrapper function for that SRCU domain's - * call_srcu() function, supplying the corresponding srcu_struct. - * - * If Tiny RCU, tell _wait_rcu_gp() not to bother waiting for RCU - * or RCU-bh, given that anywhere synchronize_rcu_mult() can be called - * is automatically a grace period. - */ -#define synchronize_rcu_mult(...) \ - _wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __VA_ARGS__) - /** * call_rcu_tasks() - Queue an RCU for invocation task-based grace period * @head: structure to be used for queueing the RCU updates. diff --git a/include/linux/rcupdate_wait.h b/include/linux/rcupdate_wait.h new file mode 100644 index 000000000000..e774b4f5f220 --- /dev/null +++ b/include/linux/rcupdate_wait.h @@ -0,0 +1,50 @@ +#ifndef _LINUX_SCHED_RCUPDATE_WAIT_H +#define _LINUX_SCHED_RCUPDATE_WAIT_H + +/* + * RCU synchronization types and methods: + */ + +#include <linux/rcupdate.h> +#include <linux/completion.h> + +/* + * Structure allowing asynchronous waiting on RCU. + */ +struct rcu_synchronize { + struct rcu_head head; + struct completion completion; +}; +void wakeme_after_rcu(struct rcu_head *head); + +void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array, + struct rcu_synchronize *rs_array); + +#define _wait_rcu_gp(checktiny, ...) \ +do { \ + call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \ + struct rcu_synchronize __rs_array[ARRAY_SIZE(__crcu_array)]; \ + __wait_rcu_gp(checktiny, ARRAY_SIZE(__crcu_array), \ + __crcu_array, __rs_array); \ +} while (0) + +#define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__) + +/** + * synchronize_rcu_mult - Wait concurrently for multiple grace periods + * @...: List of call_rcu() functions for the flavors to wait on. 
+ * + * This macro waits concurrently for multiple flavors of RCU grace periods. + * For example, synchronize_rcu_mult(call_rcu, call_rcu_bh) would wait + * on concurrent RCU and RCU-bh grace periods. Waiting on a given SRCU + * domain requires you to write a wrapper function for that SRCU domain's + * call_srcu() function, supplying the corresponding srcu_struct. + * + * If Tiny RCU, tell _wait_rcu_gp() not to bother waiting for RCU + * or RCU-bh, given that anywhere synchronize_rcu_mult() can be called + * is automatically a grace period. + */ +#define synchronize_rcu_mult(...) \ + _wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __VA_ARGS__) + +#endif /* _LINUX_SCHED_RCUPDATE_WAIT_H */ diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 4f9b2fa2173d..b452953e21c8 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -53,15 +53,8 @@ static inline void cond_synchronize_sched(unsigned long oldstate) might_sleep(); } -static inline void rcu_barrier_bh(void) -{ - wait_rcu_gp(call_rcu_bh); -} - -static inline void rcu_barrier_sched(void) -{ - wait_rcu_gp(call_rcu_sched); -} +extern void rcu_barrier_bh(void); +extern void rcu_barrier_sched(void); static inline void synchronize_rcu_expedited(void) { diff --git a/include/linux/refcount.h b/include/linux/refcount.h index 600aadf9cca4..0023fee4bbbc 100644 --- a/include/linux/refcount.h +++ b/include/linux/refcount.h @@ -1,54 +1,10 @@ #ifndef _LINUX_REFCOUNT_H #define _LINUX_REFCOUNT_H -/* - * Variant of atomic_t specialized for reference counts. - * - * The interface matches the atomic_t interface (to aid in porting) but only - * provides the few functions one should use for reference counting. - * - * It differs in that the counter saturates at UINT_MAX and will not move once - * there. This avoids wrapping the counter and causing 'spurious' - * use-after-free issues. - * - * Memory ordering rules are slightly relaxed wrt regular atomic_t functions - * and provide only what is strictly required for refcounts. - * - * The increments are fully relaxed; these will not provide ordering. The - * rationale is that whatever is used to obtain the object we're increasing the - * reference count on will provide the ordering. For locked data structures, - * its the lock acquire, for RCU/lockless data structures its the dependent - * load. - * - * Do note that inc_not_zero() provides a control dependency which will order - * future stores against the inc, this ensures we'll never modify the object - * if we did not in fact acquire a reference. - * - * The decrements will provide release order, such that all the prior loads and - * stores will be issued before, it also provides a control dependency, which - * will order us against the subsequent free(). - * - * The control dependency is against the load of the cmpxchg (ll/sc) that - * succeeded. This means the stores aren't fully ordered, but this is fine - * because the 1->0 transition indicates no concurrency. - * - * Note that the allocator is responsible for ordering things between free() - * and alloc().
- * - */ - #include <linux/atomic.h> -#include <linux/bug.h> #include <linux/mutex.h> #include <linux/spinlock.h> - -#ifdef CONFIG_DEBUG_REFCOUNT -#define REFCOUNT_WARN(cond, str) WARN_ON(cond) -#define __refcount_check __must_check -#else -#define REFCOUNT_WARN(cond, str) (void)(cond) -#define __refcount_check -#endif +#include <linux/kernel.h> typedef struct refcount_struct { atomic_t refs; @@ -66,229 +22,21 @@ static inline unsigned int refcount_read(const refcount_t *r) return atomic_read(&r->refs); } -static inline __refcount_check -bool refcount_add_not_zero(unsigned int i, refcount_t *r) -{ - unsigned int old, new, val = atomic_read(&r->refs); - - for (;;) { - if (!val) - return false; - - if (unlikely(val == UINT_MAX)) - return true; - - new = val + i; - if (new < val) - new = UINT_MAX; - old = atomic_cmpxchg_relaxed(&r->refs, val, new); - if (old == val) - break; - - val = old; - } - - REFCOUNT_WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n"); - - return true; -} - -static inline void refcount_add(unsigned int i, refcount_t *r) -{ - REFCOUNT_WARN(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n"); -} - -/* - * Similar to atomic_inc_not_zero(), will saturate at UINT_MAX and WARN. - * - * Provides no memory ordering, it is assumed the caller has guaranteed the - * object memory to be stable (RCU, etc.). It does provide a control dependency - * and thereby orders future stores. See the comment on top. - */ -static inline __refcount_check -bool refcount_inc_not_zero(refcount_t *r) -{ - unsigned int old, new, val = atomic_read(&r->refs); - - for (;;) { - new = val + 1; - - if (!val) - return false; - - if (unlikely(!new)) - return true; - - old = atomic_cmpxchg_relaxed(&r->refs, val, new); - if (old == val) - break; - - val = old; - } - - REFCOUNT_WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n"); - - return true; -} - -/* - * Similar to atomic_inc(), will saturate at UINT_MAX and WARN. - * - * Provides no memory ordering, it is assumed the caller already has a - * reference on the object, will WARN when this is not so. - */ -static inline void refcount_inc(refcount_t *r) -{ - REFCOUNT_WARN(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n"); -} - -/* - * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to - * decrement when saturated at UINT_MAX. - * - * Provides release memory ordering, such that prior loads and stores are done - * before, and provides a control dependency such that free() must come after. - * See the comment on top. - */ -static inline __refcount_check -bool refcount_sub_and_test(unsigned int i, refcount_t *r) -{ - unsigned int old, new, val = atomic_read(&r->refs); - - for (;;) { - if (unlikely(val == UINT_MAX)) - return false; - - new = val - i; - if (new > val) { - REFCOUNT_WARN(new > val, "refcount_t: underflow; use-after-free.\n"); - return false; - } - - old = atomic_cmpxchg_release(&r->refs, val, new); - if (old == val) - break; - - val = old; - } - - return !new; -} - -static inline __refcount_check -bool refcount_dec_and_test(refcount_t *r) -{ - return refcount_sub_and_test(1, r); -} +extern __must_check bool refcount_add_not_zero(unsigned int i, refcount_t *r); +extern void refcount_add(unsigned int i, refcount_t *r); -/* - * Similar to atomic_dec(), it will WARN on underflow and fail to decrement - * when saturated at UINT_MAX. - * - * Provides release memory ordering, such that prior loads and stores are done - * before. 
- */ -static inline -void refcount_dec(refcount_t *r) -{ - REFCOUNT_WARN(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n"); -} - -/* - * No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the - * success thereof. - * - * Like all decrement operations, it provides release memory order and provides - * a control dependency. - * - * It can be used like a try-delete operator; this explicit case is provided - * and not cmpxchg in generic, because that would allow implementing unsafe - * operations. - */ -static inline __refcount_check -bool refcount_dec_if_one(refcount_t *r) -{ - return atomic_cmpxchg_release(&r->refs, 1, 0) == 1; -} - -/* - * No atomic_t counterpart, it decrements unless the value is 1, in which case - * it will return false. - * - * Was often done like: atomic_add_unless(&var, -1, 1) - */ -static inline __refcount_check -bool refcount_dec_not_one(refcount_t *r) -{ - unsigned int old, new, val = atomic_read(&r->refs); +extern __must_check bool refcount_inc_not_zero(refcount_t *r); +extern void refcount_inc(refcount_t *r); - for (;;) { - if (unlikely(val == UINT_MAX)) - return true; +extern __must_check bool refcount_sub_and_test(unsigned int i, refcount_t *r); +extern void refcount_sub(unsigned int i, refcount_t *r); - if (val == 1) - return false; +extern __must_check bool refcount_dec_and_test(refcount_t *r); +extern void refcount_dec(refcount_t *r); - new = val - 1; - if (new > val) { - REFCOUNT_WARN(new > val, "refcount_t: underflow; use-after-free.\n"); - return true; - } - - old = atomic_cmpxchg_release(&r->refs, val, new); - if (old == val) - break; - - val = old; - } - - return true; -} - -/* - * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail - * to decrement when saturated at UINT_MAX. - * - * Provides release memory ordering, such that prior loads and stores are done - * before, and provides a control dependency such that free() must come after. - * See the comment on top. - */ -static inline __refcount_check -bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock) -{ - if (refcount_dec_not_one(r)) - return false; - - mutex_lock(lock); - if (!refcount_dec_and_test(r)) { - mutex_unlock(lock); - return false; - } - - return true; -} - -/* - * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to - * decrement when saturated at UINT_MAX. - * - * Provides release memory ordering, such that prior loads and stores are done - * before, and provides a control dependency such that free() must come after. - * See the comment on top. 
- */ -static inline __refcount_check -bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock) -{ - if (refcount_dec_not_one(r)) - return false; - - spin_lock(lock); - if (!refcount_dec_and_test(r)) { - spin_unlock(lock); - return false; - } - - return true; -} +extern __must_check bool refcount_dec_if_one(refcount_t *r); +extern __must_check bool refcount_dec_not_one(refcount_t *r); +extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock); +extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock); #endif /* _LINUX_REFCOUNT_H */ diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h index ad3e5158e586..c9f795e9a2ee 100644 --- a/include/linux/regulator/machine.h +++ b/include/linux/regulator/machine.h @@ -65,7 +65,7 @@ struct regulator_state { int uV; /* suspend voltage */ unsigned int mode; /* suspend regulator operating mode */ int enabled; /* is regulator enabled in this suspend state */ - int disabled; /* is the regulator disbled in this suspend state */ + int disabled; /* is the regulator disabled in this suspend state */ }; /** diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index 8265d351c9f0..81da49564ff4 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -346,6 +346,7 @@ struct rproc_ops { * a message. * @RPROC_RUNNING: device is up and running * @RPROC_CRASHED: device has crashed; need to start recovery + * @RPROC_DELETED: device is deleted * @RPROC_LAST: just keep this one at the end * * Please note that the values of these states are used as indices @@ -359,7 +360,8 @@ enum rproc_state { RPROC_SUSPENDED = 1, RPROC_RUNNING = 2, RPROC_CRASHED = 3, - RPROC_LAST = 4, + RPROC_DELETED = 4, + RPROC_LAST = 5, }; /** @@ -397,7 +399,6 @@ enum rproc_crash_type { * @num_traces: number of trace buffers * @carveouts: list of physically contiguous memory allocations * @mappings: list of iommu mappings we initiated, needed on shutdown - * @firmware_loading_complete: marks e/o asynchronous firmware loading * @bootaddr: address of first instruction to boot rproc with (optional) * @rvdevs: list of remote virtio devices * @subdevs: list of subdevices, to following the running state @@ -429,7 +430,6 @@ struct rproc { int num_traces; struct list_head carveouts; struct list_head mappings; - struct completion firmware_loading_complete; u32 bootaddr; struct list_head rvdevs; struct list_head subdevs; diff --git a/include/linux/reservation.h b/include/linux/reservation.h index d9706a6f5ae2..2b5a4679daea 100644 --- a/include/linux/reservation.h +++ b/include/linux/reservation.h @@ -145,6 +145,40 @@ reservation_object_get_list(struct reservation_object *obj) } /** + * reservation_object_lock - lock the reservation object + * @obj: the reservation object + * @ctx: the locking context + * + * Locks the reservation object for exclusive access and modification. Note + * that the lock is only against other writers; readers will run concurrently + * with a writer under RCU. The seqlock is used to notify readers if they + * overlap with a writer. + * + * As the reservation object may be locked by multiple parties in an + * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle + * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation + * object may be locked by itself by passing NULL as @ctx.
+ */ +static inline int +reservation_object_lock(struct reservation_object *obj, + struct ww_acquire_ctx *ctx) +{ + return ww_mutex_lock(&obj->lock, ctx); +} + +/** + * reservation_object_unlock - unlock the reservation object + * @obj: the reservation object + * + * Unlocks the reservation object following exclusive access. + */ +static inline void +reservation_object_unlock(struct reservation_object *obj) +{ + ww_mutex_unlock(&obj->lock); +} + +/** * reservation_object_get_excl - get the reservation object's * exclusive fence, with update-side lock held * @obj: the reservation object diff --git a/include/linux/reset.h b/include/linux/reset.h index 5daff15722d3..86b4ed75359e 100644 --- a/include/linux/reset.h +++ b/include/linux/reset.h @@ -13,10 +13,12 @@ int reset_control_deassert(struct reset_control *rstc); int reset_control_status(struct reset_control *rstc); struct reset_control *__of_reset_control_get(struct device_node *node, - const char *id, int index, int shared); + const char *id, int index, bool shared, + bool optional); void reset_control_put(struct reset_control *rstc); struct reset_control *__devm_reset_control_get(struct device *dev, - const char *id, int index, int shared); + const char *id, int index, bool shared, + bool optional); int __must_check device_reset(struct device *dev); @@ -69,14 +71,15 @@ static inline int device_reset_optional(struct device *dev) static inline struct reset_control *__of_reset_control_get( struct device_node *node, - const char *id, int index, int shared) + const char *id, int index, bool shared, + bool optional) { return ERR_PTR(-ENOTSUPP); } static inline struct reset_control *__devm_reset_control_get( - struct device *dev, - const char *id, int index, int shared) + struct device *dev, const char *id, + int index, bool shared, bool optional) { return ERR_PTR(-ENOTSUPP); } @@ -104,7 +107,8 @@ __must_check reset_control_get_exclusive(struct device *dev, const char *id) #ifndef CONFIG_RESET_CONTROLLER WARN_ON(1); #endif - return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 0); + return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, false, + false); } /** @@ -132,19 +136,22 @@ __must_check reset_control_get_exclusive(struct device *dev, const char *id) static inline struct reset_control *reset_control_get_shared( struct device *dev, const char *id) { - return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 1); + return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, true, + false); } static inline struct reset_control *reset_control_get_optional_exclusive( struct device *dev, const char *id) { - return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 0); + return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, false, + true); } static inline struct reset_control *reset_control_get_optional_shared( struct device *dev, const char *id) { - return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 1); + return __of_reset_control_get(dev ? 
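A sketch of the intended calling pattern for the two helpers defined above; for a single reservation object no acquire context is needed, so NULL is passed as @ctx (obj and the fence manipulation are illustrative):

	/* Single object: ww_mutex_lock(&obj->lock, NULL) cannot return
	 * -EDEADLK, so the return value is zero here. */
	reservation_object_lock(obj, NULL);
	/* ... install or replace fences under the lock ... */
	reservation_object_unlock(obj);

When several reservation objects must be held at once, callers are expected to share one ww_acquire_ctx (see ww_acquire_init()) and back off and retry on -EDEADLK, as with any ww_mutex.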
dev->of_node : NULL, id, 0, true, + true); } /** @@ -160,7 +167,7 @@ static inline struct reset_control *reset_control_get_optional_shared( static inline struct reset_control *of_reset_control_get_exclusive( struct device_node *node, const char *id) { - return __of_reset_control_get(node, id, 0, 0); + return __of_reset_control_get(node, id, 0, false, false); } /** @@ -185,7 +192,7 @@ static inline struct reset_control *of_reset_control_get_exclusive( static inline struct reset_control *of_reset_control_get_shared( struct device_node *node, const char *id) { - return __of_reset_control_get(node, id, 0, 1); + return __of_reset_control_get(node, id, 0, true, false); } /** @@ -202,7 +209,7 @@ static inline struct reset_control *of_reset_control_get_shared( static inline struct reset_control *of_reset_control_get_exclusive_by_index( struct device_node *node, int index) { - return __of_reset_control_get(node, NULL, index, 0); + return __of_reset_control_get(node, NULL, index, false, false); } /** @@ -230,7 +237,7 @@ static inline struct reset_control *of_reset_control_get_exclusive_by_index( static inline struct reset_control *of_reset_control_get_shared_by_index( struct device_node *node, int index) { - return __of_reset_control_get(node, NULL, index, 1); + return __of_reset_control_get(node, NULL, index, true, false); } /** @@ -252,7 +259,7 @@ __must_check devm_reset_control_get_exclusive(struct device *dev, #ifndef CONFIG_RESET_CONTROLLER WARN_ON(1); #endif - return __devm_reset_control_get(dev, id, 0, 0); + return __devm_reset_control_get(dev, id, 0, false, false); } /** @@ -267,19 +274,19 @@ __must_check devm_reset_control_get_exclusive(struct device *dev, static inline struct reset_control *devm_reset_control_get_shared( struct device *dev, const char *id) { - return __devm_reset_control_get(dev, id, 0, 1); + return __devm_reset_control_get(dev, id, 0, true, false); } static inline struct reset_control *devm_reset_control_get_optional_exclusive( struct device *dev, const char *id) { - return __devm_reset_control_get(dev, id, 0, 0); + return __devm_reset_control_get(dev, id, 0, false, true); } static inline struct reset_control *devm_reset_control_get_optional_shared( struct device *dev, const char *id) { - return __devm_reset_control_get(dev, id, 0, 1); + return __devm_reset_control_get(dev, id, 0, true, true); } /** @@ -297,7 +304,7 @@ static inline struct reset_control *devm_reset_control_get_optional_shared( static inline struct reset_control * devm_reset_control_get_exclusive_by_index(struct device *dev, int index) { - return __devm_reset_control_get(dev, NULL, index, 0); + return __devm_reset_control_get(dev, NULL, index, false, false); } /** @@ -313,7 +320,7 @@ devm_reset_control_get_exclusive_by_index(struct device *dev, int index) static inline struct reset_control * devm_reset_control_get_shared_by_index(struct device *dev, int index) { - return __devm_reset_control_get(dev, NULL, index, 1); + return __devm_reset_control_get(dev, NULL, index, true, false); } /* diff --git a/include/linux/rfkill-regulator.h b/include/linux/rfkill-regulator.h deleted file mode 100644 index aca36bc83315..000000000000 --- a/include/linux/rfkill-regulator.h +++ /dev/null @@ -1,48 +0,0 @@ -/* - * rfkill-regulator.c - Regulator consumer driver for rfkill - * - * Copyright (C) 2009 Guiming Zhuo <gmzhuo@gmail.com> - * Copyright (C) 2011 Antonio Ospite <ospite@studenti.unina.it> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public 
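The new bool arguments distinguish shared vs. exclusive and optional vs. required requests. A probe-path sketch of the optional variant (dev and the NULL id are illustrative; this assumes, per the intent of the optional getters, that they return NULL rather than an error when the device has no reset line, and that the reset_control_*() calls treat a NULL handle as a no-op):

	struct reset_control *rst;

	rst = devm_reset_control_get_optional_exclusive(dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);	/* real errors still propagate */

	/* rst may be NULL here if no reset line is described; asserting
	 * or deasserting a NULL handle then simply does nothing. */
	reset_control_assert(rst);
	reset_control_deassert(rst);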
License version 2 as - * published by the Free Software Foundation. - * - */ - -#ifndef __LINUX_RFKILL_REGULATOR_H -#define __LINUX_RFKILL_REGULATOR_H - -/* - * Use "vrfkill" as supply id when declaring the regulator consumer: - * - * static struct regulator_consumer_supply pcap_regulator_V6_consumers [] = { - * { .dev_name = "rfkill-regulator.0", .supply = "vrfkill" }, - * }; - * - * If you have several regulator driven rfkill, you can append a numerical id to - * .dev_name as done above, and use the same id when declaring the platform - * device: - * - * static struct rfkill_regulator_platform_data ezx_rfkill_bt_data = { - * .name = "ezx-bluetooth", - * .type = RFKILL_TYPE_BLUETOOTH, - * }; - * - * static struct platform_device a910_rfkill = { - * .name = "rfkill-regulator", - * .id = 0, - * .dev = { - * .platform_data = &ezx_rfkill_bt_data, - * }, - * }; - */ - -#include <linux/rfkill.h> - -struct rfkill_regulator_platform_data { - char *name; /* the name for the rfkill switch */ - enum rfkill_type type; /* the type as specified in rfkill.h */ -}; - -#endif /* __LINUX_RFKILL_REGULATOR_H */ diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index 5c132d3188be..092292b6675e 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -25,7 +25,7 @@ #include <linux/list_nulls.h> #include <linux/workqueue.h> #include <linux/mutex.h> -#include <linux/rcupdate.h> +#include <linux/rculist.h> /* * The end of the chain is marked with a special nulls marks which has @@ -61,6 +61,7 @@ struct rhlist_head { /** * struct bucket_table - Table of hash buckets * @size: Number of hash buckets + * @nest: Number of bits of first-level nested table. * @rehash: Current bucket being rehashed * @hash_rnd: Random seed to fold into hash * @locks_mask: Mask to apply before accessing locks[] @@ -68,10 +69,12 @@ struct rhlist_head { * @walkers: List of active walkers * @rcu: RCU structure for freeing the table * @future_tbl: Table under construction during rehashing + * @ntbl: Nested table used when out of memory. * @buckets: size * hash buckets */ struct bucket_table { unsigned int size; + unsigned int nest; unsigned int rehash; u32 hash_rnd; unsigned int locks_mask; @@ -81,7 +84,7 @@ struct bucket_table { struct bucket_table __rcu *future_tbl; - struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp; + struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp; }; /** @@ -374,6 +377,12 @@ void rhashtable_free_and_destroy(struct rhashtable *ht, void *arg); void rhashtable_destroy(struct rhashtable *ht); +struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl, + unsigned int hash); +struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht, + struct bucket_table *tbl, + unsigned int hash); + #define rht_dereference(p, ht) \ rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht)) @@ -389,6 +398,27 @@ void rhashtable_destroy(struct rhashtable *ht); #define rht_entry(tpos, pos, member) \ ({ tpos = container_of(pos, typeof(*tpos), member); 1; }) +static inline struct rhash_head __rcu *const *rht_bucket( + const struct bucket_table *tbl, unsigned int hash) +{ + return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) : + &tbl->buckets[hash]; +} + +static inline struct rhash_head __rcu **rht_bucket_var( + struct bucket_table *tbl, unsigned int hash) +{ + return unlikely(tbl->nest) ? 
rht_bucket_nested(tbl, hash) : + &tbl->buckets[hash]; +} + +static inline struct rhash_head __rcu **rht_bucket_insert( + struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash) +{ + return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) : + &tbl->buckets[hash]; +} + /** * rht_for_each_continue - continue iterating over hash chain * @pos: the &struct rhash_head to use as a loop cursor. @@ -408,7 +438,7 @@ void rhashtable_destroy(struct rhashtable *ht); * @hash: the hash value / bucket index */ #define rht_for_each(pos, tbl, hash) \ - rht_for_each_continue(pos, (tbl)->buckets[hash], tbl, hash) + rht_for_each_continue(pos, *rht_bucket(tbl, hash), tbl, hash) /** * rht_for_each_entry_continue - continue iterating over hash chain @@ -433,7 +463,7 @@ void rhashtable_destroy(struct rhashtable *ht); * @member: name of the &struct rhash_head within the hashable struct. */ #define rht_for_each_entry(tpos, pos, tbl, hash, member) \ - rht_for_each_entry_continue(tpos, pos, (tbl)->buckets[hash], \ + rht_for_each_entry_continue(tpos, pos, *rht_bucket(tbl, hash), \ tbl, hash, member) /** @@ -448,13 +478,13 @@ void rhashtable_destroy(struct rhashtable *ht); * This hash chain list-traversal primitive allows for the looped code to * remove the loop cursor from the list. */ -#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member) \ - for (pos = rht_dereference_bucket((tbl)->buckets[hash], tbl, hash), \ - next = !rht_is_a_nulls(pos) ? \ - rht_dereference_bucket(pos->next, tbl, hash) : NULL; \ - (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \ - pos = next, \ - next = !rht_is_a_nulls(pos) ? \ +#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member) \ + for (pos = rht_dereference_bucket(*rht_bucket(tbl, hash), tbl, hash), \ + next = !rht_is_a_nulls(pos) ? \ + rht_dereference_bucket(pos->next, tbl, hash) : NULL; \ + (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \ + pos = next, \ + next = !rht_is_a_nulls(pos) ? \ rht_dereference_bucket(pos->next, tbl, hash) : NULL) /** @@ -485,7 +515,7 @@ void rhashtable_destroy(struct rhashtable *ht); * traversal is guarded by rcu_read_lock(). */ #define rht_for_each_rcu(pos, tbl, hash) \ - rht_for_each_rcu_continue(pos, (tbl)->buckets[hash], tbl, hash) + rht_for_each_rcu_continue(pos, *rht_bucket(tbl, hash), tbl, hash) /** * rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain @@ -518,8 +548,8 @@ void rhashtable_destroy(struct rhashtable *ht); * the _rcu mutation primitives such as rhashtable_insert() as long as the * traversal is guarded by rcu_read_lock(). 
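A reader-side sketch showing that lookups are unaffected by nested tables: the iteration macros here now fetch the chain head via rht_bucket(), which falls back to rht_bucket_nested() when tbl->nest is non-zero (struct my_obj and its node member are illustrative):

	struct rhash_head *pos;
	struct my_obj *obj;

	rcu_read_lock();
	rht_for_each_rcu(pos, tbl, hash) {
		obj = container_of(pos, struct my_obj, node);
		/* ... compare obj's key, use obj under RCU ... */
	}
	rcu_read_unlock();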
*/ -#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \ - rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\ +#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \ + rht_for_each_entry_rcu_continue(tpos, pos, *rht_bucket(tbl, hash), \ tbl, hash, member) /** @@ -565,7 +595,7 @@ static inline struct rhash_head *__rhashtable_lookup( .ht = ht, .key = key, }; - const struct bucket_table *tbl; + struct bucket_table *tbl; struct rhash_head *he; unsigned int hash; @@ -697,8 +727,12 @@ slow_path: } elasticity = ht->elasticity; - pprev = &tbl->buckets[hash]; - rht_for_each(head, tbl, hash) { + pprev = rht_bucket_insert(ht, tbl, hash); + data = ERR_PTR(-ENOMEM); + if (!pprev) + goto out; + + rht_for_each_continue(head, *pprev, tbl, hash) { struct rhlist_head *plist; struct rhlist_head *list; @@ -736,7 +770,7 @@ slow_path: if (unlikely(rht_grow_above_100(ht, tbl))) goto slow_path; - head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash); + head = rht_dereference_bucket(*pprev, tbl, hash); RCU_INIT_POINTER(obj->next, head); if (rhlist) { @@ -746,7 +780,7 @@ slow_path: RCU_INIT_POINTER(list->next, NULL); } - rcu_assign_pointer(tbl->buckets[hash], obj); + rcu_assign_pointer(*pprev, obj); atomic_inc(&ht->nelems); if (rht_grow_above_75(ht, tbl)) @@ -955,8 +989,8 @@ static inline int __rhashtable_remove_fast_one( spin_lock_bh(lock); - pprev = &tbl->buckets[hash]; - rht_for_each(he, tbl, hash) { + pprev = rht_bucket_var(tbl, hash); + rht_for_each_continue(he, *pprev, tbl, hash) { struct rhlist_head *list; list = container_of(he, struct rhlist_head, rhead); @@ -1107,8 +1141,8 @@ static inline int __rhashtable_replace_fast( spin_lock_bh(lock); - pprev = &tbl->buckets[hash]; - rht_for_each(he, tbl, hash) { + pprev = rht_bucket_var(tbl, hash); + rht_for_each_continue(he, *pprev, tbl, hash) { if (he != obj_old) { pprev = &he->next; continue; diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 15321fb1df6b..8c89e902df3e 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -9,6 +9,7 @@ #include <linux/mm.h> #include <linux/rwsem.h> #include <linux/memcontrol.h> +#include <linux/highmem.h> /* * The anon_vma heads a list of private "related" vmas, to scan if * @@ -196,41 +197,30 @@ int page_referenced(struct page *, int is_locked, int try_to_unmap(struct page *, enum ttu_flags flags); -/* - * Used by uprobes to replace a userspace page safely - */ -pte_t *__page_check_address(struct page *, struct mm_struct *, - unsigned long, spinlock_t **, int); - -static inline pte_t *page_check_address(struct page *page, struct mm_struct *mm, - unsigned long address, - spinlock_t **ptlp, int sync) -{ - pte_t *ptep; +/* Avoid racy checks */ +#define PVMW_SYNC (1 << 0) +/* Look for migration entries rather than present PTEs */ +#define PVMW_MIGRATION (1 << 1) - __cond_lock(*ptlp, ptep = __page_check_address(page, mm, address, - ptlp, sync)); - return ptep; -} +struct page_vma_mapped_walk { + struct page *page; + struct vm_area_struct *vma; + unsigned long address; + pmd_t *pmd; + pte_t *pte; + spinlock_t *ptl; + unsigned int flags; +}; -/* - * Used by idle page tracking to check if a page was referenced via page - * tables.
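The struct above drives the new iterator that replaces the page_check_address*() family: callers fill in page/vma/address (plus flags) and loop until no further mapping is found. A sketch of the calling convention (the loop body is illustrative):

	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
	};

	while (page_vma_mapped_walk(&pvmw)) {
		/* Here either pvmw.pte or pvmw.pmd points at a mapping of
		 * the page, with pvmw.ptl held; inspect or modify it. */
	}

	/* A normal (false) return has already unmapped and unlocked; only
	 * a caller that breaks out early calls page_vma_mapped_walk_done(). */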
- */ -#ifdef CONFIG_TRANSPARENT_HUGEPAGE -bool page_check_address_transhuge(struct page *page, struct mm_struct *mm, - unsigned long address, pmd_t **pmdp, - pte_t **ptep, spinlock_t **ptlp); -#else -static inline bool page_check_address_transhuge(struct page *page, - struct mm_struct *mm, unsigned long address, - pmd_t **pmdp, pte_t **ptep, spinlock_t **ptlp) +static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw) { - *ptep = page_check_address(page, mm, address, ptlp, 0); - *pmdp = NULL; - return !!*ptep; + if (pvmw->pte) + pte_unmap(pvmw->pte); + if (pvmw->ptl) + spin_unlock(pvmw->ptl); } -#endif + +bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw); /* * Used by swapoff to help locate where page is expected in vma. diff --git a/include/linux/rodata_test.h b/include/linux/rodata_test.h new file mode 100644 index 000000000000..ea05f6c51413 --- /dev/null +++ b/include/linux/rodata_test.h @@ -0,0 +1,23 @@ +/* + * rodata_test.h: functional test for mark_rodata_ro function + * + * (C) Copyright 2008 Intel Corporation + * Author: Arjan van de Ven <arjan@linux.intel.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License. + */ + +#ifndef _RODATA_TEST_H +#define _RODATA_TEST_H + +#ifdef CONFIG_DEBUG_RODATA_TEST +extern const int rodata_test_data; +void rodata_test(void); +#else +static inline void rodata_test(void) {} +#endif + +#endif /* _RODATA_TEST_H */ diff --git a/include/linux/rpmsg.h b/include/linux/rpmsg.h index 18f9e1ae4b7e..10d6ae8bbb7d 100644 --- a/include/linux/rpmsg.h +++ b/include/linux/rpmsg.h @@ -41,6 +41,7 @@ #include <linux/mod_devicetable.h> #include <linux/kref.h> #include <linux/mutex.h> +#include <linux/poll.h> #define RPMSG_ADDR_ANY 0xFFFFFFFF @@ -156,6 +157,9 @@ int rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst); int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst, void *data, int len); +unsigned int rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp, + poll_table *wait); + #else static inline int register_rpmsg_device(struct rpmsg_device *dev) @@ -254,6 +258,15 @@ static inline int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, return -ENXIO; } +static inline unsigned int rpmsg_poll(struct rpmsg_endpoint *ept, + struct file *filp, poll_table *wait) +{ + /* This shouldn't be possible */ + WARN_ON(1); + + return 0; +} + #endif /* IS_ENABLED(CONFIG_RPMSG) */ /* use a macro to avoid include chaining to get THIS_MODULE */ diff --git a/include/linux/rpmsg/qcom_smd.h b/include/linux/rpmsg/qcom_smd.h index e674b2e3074b..8ec8b6439b25 100644 --- a/include/linux/rpmsg/qcom_smd.h +++ b/include/linux/rpmsg/qcom_smd.h @@ -18,14 +18,12 @@ static inline struct qcom_smd_edge * qcom_smd_register_edge(struct device *parent, struct device_node *node) { - return ERR_PTR(-ENXIO); + return NULL; } static inline int qcom_smd_unregister_edge(struct qcom_smd_edge *edge) { - /* This shouldn't be possible */ - WARN_ON(1); - return -ENXIO; + return 0; } #endif diff --git a/include/linux/sched.h b/include/linux/sched.h index c8e519d0b4a3..d67eee84fd43 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1,197 +1,57 @@ #ifndef _LINUX_SCHED_H #define _LINUX_SCHED_H -#include <uapi/linux/sched.h> - -#include <linux/sched/prio.h> - - -struct sched_param { - int sched_priority; -}; - -#include <asm/param.h> /* for HZ */ +/* + * 
Define 'struct task_struct' and provide the main scheduler + * APIs (schedule(), wakeup variants, etc.) + */ -#include <linux/capability.h> -#include <linux/threads.h> -#include <linux/kernel.h> -#include <linux/types.h> -#include <linux/timex.h> -#include <linux/jiffies.h> -#include <linux/plist.h> -#include <linux/rbtree.h> -#include <linux/thread_info.h> -#include <linux/cpumask.h> -#include <linux/errno.h> -#include <linux/nodemask.h> -#include <linux/mm_types.h> -#include <linux/preempt.h> +#include <uapi/linux/sched.h> -#include <asm/page.h> -#include <asm/ptrace.h> +#include <asm/current.h> -#include <linux/smp.h> +#include <linux/pid.h> #include <linux/sem.h> #include <linux/shm.h> -#include <linux/signal.h> -#include <linux/compiler.h> -#include <linux/completion.h> -#include <linux/pid.h> -#include <linux/percpu.h> -#include <linux/topology.h> +#include <linux/kcov.h> +#include <linux/mutex.h> +#include <linux/plist.h> +#include <linux/hrtimer.h> #include <linux/seccomp.h> +#include <linux/nodemask.h> #include <linux/rcupdate.h> -#include <linux/rculist.h> -#include <linux/rtmutex.h> - -#include <linux/time.h> -#include <linux/param.h> #include <linux/resource.h> -#include <linux/timer.h> -#include <linux/hrtimer.h> -#include <linux/kcov.h> -#include <linux/task_io_accounting.h> #include <linux/latencytop.h> -#include <linux/cred.h> -#include <linux/llist.h> -#include <linux/uidgid.h> -#include <linux/gfp.h> -#include <linux/magic.h> -#include <linux/cgroup-defs.h> - -#include <asm/processor.h> - -#define SCHED_ATTR_SIZE_VER0 48 /* sizeof first published struct */ - -/* - * Extended scheduling parameters data structure. - * - * This is needed because the original struct sched_param can not be - * altered without introducing ABI issues with legacy applications - * (e.g., in sched_getparam()). - * - * However, the possibility of specifying more than just a priority for - * the tasks may be useful for a wide variety of application fields, e.g., - * multimedia, streaming, automation and control, and many others. - * - * This variant (sched_attr) is meant at describing a so-called - * sporadic time-constrained task. In such model a task is specified by: - * - the activation period or minimum instance inter-arrival time; - * - the maximum (or average, depending on the actual scheduling - * discipline) computation time of all instances, a.k.a. runtime; - * - the deadline (relative to the actual activation time) of each - * instance. - * Very briefly, a periodic (sporadic) task asks for the execution of - * some specific computation --which is typically called an instance-- - * (at most) every period. Moreover, each instance typically lasts no more - * than the runtime and must be completed by time instant t equal to - * the instance activation time + the deadline. - * - * This is reflected by the actual fields of the sched_attr structure: - * - * @size size of the structure, for fwd/bwd compat. - * - * @sched_policy task's scheduling policy - * @sched_flags for customizing the scheduler behaviour - * @sched_nice task's nice value (SCHED_NORMAL/BATCH) - * @sched_priority task's static priority (SCHED_FIFO/RR) - * @sched_deadline representative of the task's deadline - * @sched_runtime representative of the task's runtime - * @sched_period representative of the task's period - * - * Given this task model, there are a multiplicity of scheduling algorithms - * and policies, that can be used to ensure all the tasks will make their - * timing constraints. 
- * - * As of now, the SCHED_DEADLINE policy (sched_dl scheduling class) is the - * only user of this new interface. More information about the algorithm - * available in the scheduling class file or in Documentation/. - */ -struct sched_attr { - u32 size; - - u32 sched_policy; - u64 sched_flags; - - /* SCHED_NORMAL, SCHED_BATCH */ - s32 sched_nice; - - /* SCHED_FIFO, SCHED_RR */ - u32 sched_priority; - - /* SCHED_DEADLINE */ - u64 sched_runtime; - u64 sched_deadline; - u64 sched_period; -}; +#include <linux/sched/prio.h> +#include <linux/signal_types.h> +#include <linux/mm_types_task.h> +#include <linux/task_io_accounting.h> -struct futex_pi_state; -struct robust_list_head; +/* task_struct member predeclarations (sorted alphabetically): */ +struct audit_context; +struct backing_dev_info; struct bio_list; -struct fs_struct; -struct perf_event_context; struct blk_plug; -struct filename; +struct cfs_rq; +struct fs_struct; +struct futex_pi_state; +struct io_context; +struct mempolicy; struct nameidata; - -#define VMACACHE_BITS 2 -#define VMACACHE_SIZE (1U << VMACACHE_BITS) -#define VMACACHE_MASK (VMACACHE_SIZE - 1) - -/* - * These are the constant used to fake the fixed-point load-average - * counting. Some notes: - * - 11 bit fractions expand to 22 bits by the multiplies: this gives - * a load-average precision of 10 bits integer + 11 bits fractional - * - if you want to count load-averages more often, you need more - * precision, or rounding will get you. With 2-second counting freq, - * the EXP_n values would be 1981, 2034 and 2043 if still using only - * 11 bit fractions. - */ -extern unsigned long avenrun[]; /* Load averages */ -extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift); - -#define FSHIFT 11 /* nr of bits of precision */ -#define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */ -#define LOAD_FREQ (5*HZ+1) /* 5 sec intervals */ -#define EXP_1 1884 /* 1/exp(5sec/1min) as fixed-point */ -#define EXP_5 2014 /* 1/exp(5sec/5min) */ -#define EXP_15 2037 /* 1/exp(5sec/15min) */ - -#define CALC_LOAD(load,exp,n) \ - load *= exp; \ - load += n*(FIXED_1-exp); \ - load >>= FSHIFT; - -extern unsigned long total_forks; -extern int nr_threads; -DECLARE_PER_CPU(unsigned long, process_counts); -extern int nr_processes(void); -extern unsigned long nr_running(void); -extern bool single_task_running(void); -extern unsigned long nr_iowait(void); -extern unsigned long nr_iowait_cpu(int cpu); -extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load); - -extern void calc_global_load(unsigned long ticks); - -#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) -extern void cpu_load_update_nohz_start(void); -extern void cpu_load_update_nohz_stop(void); -#else -static inline void cpu_load_update_nohz_start(void) { } -static inline void cpu_load_update_nohz_stop(void) { } -#endif - -extern void dump_cpu_task(int cpu); - +struct nsproxy; +struct perf_event_context; +struct pid_namespace; +struct pipe_inode_info; +struct rcu_node; +struct reclaim_state; +struct robust_list_head; +struct sched_attr; +struct sched_param; struct seq_file; -struct cfs_rq; +struct sighand_struct; +struct signal_struct; +struct task_delay_info; struct task_group; -#ifdef CONFIG_SCHED_DEBUG -extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m); -extern void proc_sched_set_task(struct task_struct *p); -#endif /* * Task state bitmask. NOTE! 
These bits are also @@ -203,53 +63,53 @@ extern void proc_sched_set_task(struct task_struct *p); * modifying one set can't modify the other one by * mistake. */ -#define TASK_RUNNING 0 -#define TASK_INTERRUPTIBLE 1 -#define TASK_UNINTERRUPTIBLE 2 -#define __TASK_STOPPED 4 -#define __TASK_TRACED 8 -/* in tsk->exit_state */ -#define EXIT_DEAD 16 -#define EXIT_ZOMBIE 32 -#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD) -/* in tsk->state again */ -#define TASK_DEAD 64 -#define TASK_WAKEKILL 128 -#define TASK_WAKING 256 -#define TASK_PARKED 512 -#define TASK_NOLOAD 1024 -#define TASK_NEW 2048 -#define TASK_STATE_MAX 4096 - -#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPNn" - -extern char ___assert_task_state[1 - 2*!!( - sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)]; - -/* Convenience macros for the sake of set_current_state */ -#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE) -#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED) -#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED) - -#define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD) - -/* Convenience macros for the sake of wake_up */ -#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE) -#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED) - -/* get_task_state() */ -#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \ - TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \ - __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD) - -#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0) -#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0) -#define task_is_stopped_or_traced(task) \ - ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0) -#define task_contributes_to_load(task) \ - ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \ - (task->flags & PF_FROZEN) == 0 && \ - (task->state & TASK_NOLOAD) == 0) + +/* Used in tsk->state: */ +#define TASK_RUNNING 0 +#define TASK_INTERRUPTIBLE 1 +#define TASK_UNINTERRUPTIBLE 2 +#define __TASK_STOPPED 4 +#define __TASK_TRACED 8 +/* Used in tsk->exit_state: */ +#define EXIT_DEAD 16 +#define EXIT_ZOMBIE 32 +#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD) +/* Used in tsk->state again: */ +#define TASK_DEAD 64 +#define TASK_WAKEKILL 128 +#define TASK_WAKING 256 +#define TASK_PARKED 512 +#define TASK_NOLOAD 1024 +#define TASK_NEW 2048 +#define TASK_STATE_MAX 4096 + +#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPNn" + +/* Convenience macros for the sake of set_current_state: */ +#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE) +#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED) +#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED) + +#define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD) + +/* Convenience macros for the sake of wake_up(): */ +#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE) +#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED) + +/* get_task_state(): */ +#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \ + TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \ + __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD) + +#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0) + +#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0) + +#define task_is_stopped_or_traced(task) ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0) + +#define task_contributes_to_load(task) ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \ + (task->flags & PF_FROZEN) == 0 && \ + (task->state & TASK_NOLOAD) == 0) #ifdef CONFIG_DEBUG_ATOMIC_SLEEP @@ -299,139 +159,24 @@ extern char ___assert_task_state[1 - 2*!!( * * Also see the 
comments of try_to_wake_up(). */ -#define __set_current_state(state_value) \ - do { current->state = (state_value); } while (0) -#define set_current_state(state_value) \ - smp_store_mb(current->state, (state_value)) - -#endif - -/* Task command name length */ -#define TASK_COMM_LEN 16 - -#include <linux/spinlock.h> - -/* - * This serializes "schedule()" and also protects - * the run-queue from deletions/modifications (but - * _adding_ to the beginning of the run-queue has - * a separate lock). - */ -extern rwlock_t tasklist_lock; -extern spinlock_t mmlist_lock; - -struct task_struct; - -#ifdef CONFIG_PROVE_RCU -extern int lockdep_tasklist_lock_is_held(void); -#endif /* #ifdef CONFIG_PROVE_RCU */ - -extern void sched_init(void); -extern void sched_init_smp(void); -extern asmlinkage void schedule_tail(struct task_struct *prev); -extern void init_idle(struct task_struct *idle, int cpu); -extern void init_idle_bootup_task(struct task_struct *idle); - -extern cpumask_var_t cpu_isolated_map; - -extern int runqueue_is_locked(int cpu); - -#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) -extern void nohz_balance_enter_idle(int cpu); -extern void set_cpu_sd_state_idle(void); -extern int get_nohz_timer_target(void); -#else -static inline void nohz_balance_enter_idle(int cpu) { } -static inline void set_cpu_sd_state_idle(void) { } +#define __set_current_state(state_value) do { current->state = (state_value); } while (0) +#define set_current_state(state_value) smp_store_mb(current->state, (state_value)) #endif -/* - * Only dump TASK_* tasks. (0 for all tasks) - */ -extern void show_state_filter(unsigned long state_filter); - -static inline void show_state(void) -{ - show_state_filter(0); -} - -extern void show_regs(struct pt_regs *); +/* Task command name length: */ +#define TASK_COMM_LEN 16 -/* - * TASK is a pointer to the task whose backtrace we want to see (or NULL for current - * task), SP is the stack pointer of the first frame that should be shown in the back - * trace (or NULL if the entire call-chain of the task should be shown). 
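These two macros are the building blocks of the canonical sleep/wakeup loop; a sketch (condition and the wakeup side are illustrative):

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (condition)
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);

The smp_store_mb() in set_current_state() orders the state store before the condition load, pairing with the waker, which sets the condition and then calls wake_up_process().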
- */ -extern void show_stack(struct task_struct *task, unsigned long *sp); +extern cpumask_var_t cpu_isolated_map; -extern void cpu_init (void); -extern void trap_init(void); -extern void update_process_times(int user); extern void scheduler_tick(void); -extern int sched_cpu_starting(unsigned int cpu); -extern int sched_cpu_activate(unsigned int cpu); -extern int sched_cpu_deactivate(unsigned int cpu); -#ifdef CONFIG_HOTPLUG_CPU -extern int sched_cpu_dying(unsigned int cpu); -#else -# define sched_cpu_dying NULL -#endif - -extern void sched_show_task(struct task_struct *p); - -#ifdef CONFIG_LOCKUP_DETECTOR -extern void touch_softlockup_watchdog_sched(void); -extern void touch_softlockup_watchdog(void); -extern void touch_softlockup_watchdog_sync(void); -extern void touch_all_softlockup_watchdogs(void); -extern int proc_dowatchdog_thresh(struct ctl_table *table, int write, - void __user *buffer, - size_t *lenp, loff_t *ppos); -extern unsigned int softlockup_panic; -extern unsigned int hardlockup_panic; -void lockup_detector_init(void); -#else -static inline void touch_softlockup_watchdog_sched(void) -{ -} -static inline void touch_softlockup_watchdog(void) -{ -} -static inline void touch_softlockup_watchdog_sync(void) -{ -} -static inline void touch_all_softlockup_watchdogs(void) -{ -} -static inline void lockup_detector_init(void) -{ -} -#endif - -#ifdef CONFIG_DETECT_HUNG_TASK -void reset_hung_task_detector(void); -#else -static inline void reset_hung_task_detector(void) -{ -} -#endif - -/* Attach to any functions which should be ignored in wchan output. */ -#define __sched __attribute__((__section__(".sched.text"))) +#define MAX_SCHEDULE_TIMEOUT LONG_MAX -/* Linker adds these: start and end of __sched functions */ -extern char __sched_text_start[], __sched_text_end[]; - -/* Is this address in the __sched functions? 
*/ -extern int in_sched_functions(unsigned long addr); - -#define MAX_SCHEDULE_TIMEOUT LONG_MAX -extern signed long schedule_timeout(signed long timeout); -extern signed long schedule_timeout_interruptible(signed long timeout); -extern signed long schedule_timeout_killable(signed long timeout); -extern signed long schedule_timeout_uninterruptible(signed long timeout); -extern signed long schedule_timeout_idle(signed long timeout); +extern long schedule_timeout(long timeout); +extern long schedule_timeout_interruptible(long timeout); +extern long schedule_timeout_killable(long timeout); +extern long schedule_timeout_uninterruptible(long timeout); +extern long schedule_timeout_idle(long timeout); asmlinkage void schedule(void); extern void schedule_preempt_disabled(void); @@ -440,112 +185,6 @@ extern void io_schedule_finish(int token); extern long io_schedule_timeout(long timeout); extern void io_schedule(void); -void __noreturn do_task_dead(void); - -struct nsproxy; -struct user_namespace; - -#ifdef CONFIG_MMU -extern void arch_pick_mmap_layout(struct mm_struct *mm); -extern unsigned long -arch_get_unmapped_area(struct file *, unsigned long, unsigned long, - unsigned long, unsigned long); -extern unsigned long -arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, - unsigned long len, unsigned long pgoff, - unsigned long flags); -#else -static inline void arch_pick_mmap_layout(struct mm_struct *mm) {} -#endif - -#define SUID_DUMP_DISABLE 0 /* No setuid dumping */ -#define SUID_DUMP_USER 1 /* Dump as user of process */ -#define SUID_DUMP_ROOT 2 /* Dump as root */ - -/* mm flags */ - -/* for SUID_DUMP_* above */ -#define MMF_DUMPABLE_BITS 2 -#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1) - -extern void set_dumpable(struct mm_struct *mm, int value); -/* - * This returns the actual value of the suid_dumpable flag. For things - * that are using this for checking for privilege transitions, it must - * test against SUID_DUMP_USER rather than treating it as a boolean - * value. 
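The s/signed long/long/ change above leaves the signatures identical in practice; the usual bounded-sleep pattern built on these looks like this (the 100ms bound is illustrative):

	long remaining;

	set_current_state(TASK_UNINTERRUPTIBLE);
	remaining = schedule_timeout(msecs_to_jiffies(100));
	/* remaining is 0 if the full timeout elapsed, or the jiffies left
	 * if the task was woken early; MAX_SCHEDULE_TIMEOUT means "no
	 * timeout, sleep until explicitly woken". */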
- */ -static inline int __get_dumpable(unsigned long mm_flags) -{ - return mm_flags & MMF_DUMPABLE_MASK; -} - -static inline int get_dumpable(struct mm_struct *mm) -{ - return __get_dumpable(mm->flags); -} - -/* coredump filter bits */ -#define MMF_DUMP_ANON_PRIVATE 2 -#define MMF_DUMP_ANON_SHARED 3 -#define MMF_DUMP_MAPPED_PRIVATE 4 -#define MMF_DUMP_MAPPED_SHARED 5 -#define MMF_DUMP_ELF_HEADERS 6 -#define MMF_DUMP_HUGETLB_PRIVATE 7 -#define MMF_DUMP_HUGETLB_SHARED 8 -#define MMF_DUMP_DAX_PRIVATE 9 -#define MMF_DUMP_DAX_SHARED 10 - -#define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS -#define MMF_DUMP_FILTER_BITS 9 -#define MMF_DUMP_FILTER_MASK \ - (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT) -#define MMF_DUMP_FILTER_DEFAULT \ - ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\ - (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF) - -#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS -# define MMF_DUMP_MASK_DEFAULT_ELF (1 << MMF_DUMP_ELF_HEADERS) -#else -# define MMF_DUMP_MASK_DEFAULT_ELF 0 -#endif - /* leave room for more dump flags */ -#define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */ -#define MMF_VM_HUGEPAGE 17 /* set when VM_HUGEPAGE is set on vma */ -/* - * This one-shot flag is dropped due to necessity of changing exe once again - * on NFS restore - */ -//#define MMF_EXE_FILE_CHANGED 18 /* see prctl_set_mm_exe_file() */ - -#define MMF_HAS_UPROBES 19 /* has uprobes */ -#define MMF_RECALC_UPROBES 20 /* MMF_HAS_UPROBES can be wrong */ -#define MMF_OOM_SKIP 21 /* mm is of no interest for the OOM killer */ -#define MMF_UNSTABLE 22 /* mm is unstable for copy_from_user */ -#define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */ - -#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK) - -struct sighand_struct { - atomic_t count; - struct k_sigaction action[_NSIG]; - spinlock_t siglock; - wait_queue_head_t signalfd_wqh; -}; - -struct pacct_struct { - int ac_flag; - long ac_exitcode; - unsigned long ac_mem; - u64 ac_utime, ac_stime; - unsigned long ac_minflt, ac_majflt; -}; - -struct cpu_itimer { - u64 expires; - u64 incr; -}; - /** * struct prev_cputime - snaphsot of system and user cputime * @utime: time spent in user mode @@ -557,20 +196,12 @@ struct cpu_itimer { */ struct prev_cputime { #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE - u64 utime; - u64 stime; - raw_spinlock_t lock; + u64 utime; + u64 stime; + raw_spinlock_t lock; #endif }; -static inline void prev_cputime_init(struct prev_cputime *prev) -{ -#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE - prev->utime = prev->stime = 0; - raw_spin_lock_init(&prev->lock); -#endif -} - /** * struct task_cputime - collected CPU time counts * @utime: time spent in user mode, in nanoseconds @@ -582,380 +213,35 @@ static inline void prev_cputime_init(struct prev_cputime *prev) * these counts together and treat all three of them in parallel. */ struct task_cputime { - u64 utime; - u64 stime; - unsigned long long sum_exec_runtime; -}; - -/* Alternate field names when used to cache expirations. */ -#define virt_exp utime -#define prof_exp stime -#define sched_exp sum_exec_runtime - -/* - * This is the atomic variant of task_cputime, which can be used for - * storing and updating task_cputime statistics without locking. 
- */ -struct task_cputime_atomic { - atomic64_t utime; - atomic64_t stime; - atomic64_t sum_exec_runtime; -}; - -#define INIT_CPUTIME_ATOMIC \ - (struct task_cputime_atomic) { \ - .utime = ATOMIC64_INIT(0), \ - .stime = ATOMIC64_INIT(0), \ - .sum_exec_runtime = ATOMIC64_INIT(0), \ - } - -#define PREEMPT_DISABLED (PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED) - -/* - * Disable preemption until the scheduler is running -- use an unconditional - * value so that it also works on !PREEMPT_COUNT kernels. - * - * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count(). - */ -#define INIT_PREEMPT_COUNT PREEMPT_OFFSET - -/* - * Initial preempt_count value; reflects the preempt_count schedule invariant - * which states that during context switches: - * - * preempt_count() == 2*PREEMPT_DISABLE_OFFSET - * - * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels. - * Note: See finish_task_switch(). - */ -#define FORK_PREEMPT_COUNT (2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED) - -/** - * struct thread_group_cputimer - thread group interval timer counts - * @cputime_atomic: atomic thread group interval timers. - * @running: true when there are timers running and - * @cputime_atomic receives updates. - * @checking_timer: true when a thread in the group is in the - * process of checking for thread group timers. - * - * This structure contains the version of task_cputime, above, that is - * used for thread group CPU timer calculations. - */ -struct thread_group_cputimer { - struct task_cputime_atomic cputime_atomic; - bool running; - bool checking_timer; -}; - -#include <linux/rwsem.h> -struct autogroup; - -/* - * NOTE! "signal_struct" does not have its own - * locking, because a shared signal_struct always - * implies a shared sighand_struct, so locking - * sighand_struct is always a proper superset of - * the locking of signal_struct. - */ -struct signal_struct { - atomic_t sigcnt; - atomic_t live; - int nr_threads; - struct list_head thread_head; - - wait_queue_head_t wait_chldexit; /* for wait4() */ - - /* current thread group signal load-balancing target: */ - struct task_struct *curr_target; - - /* shared signal handling: */ - struct sigpending shared_pending; - - /* thread group exit support */ - int group_exit_code; - /* overloaded: - * - notify group_exit_task when ->count is equal to notify_count - * - everyone except group_exit_task is stopped during signal delivery - * of fatal signals, group_exit_task processes the signal. - */ - int notify_count; - struct task_struct *group_exit_task; - - /* thread group stop support, overloads group_exit_code too */ - int group_stop_count; - unsigned int flags; /* see SIGNAL_* flags below */ - - /* - * PR_SET_CHILD_SUBREAPER marks a process, like a service - * manager, to re-parent orphan (double-forking) child processes - * to this process instead of 'init'. The service manager is - * able to receive SIGCHLD signals and is able to investigate - * the process until it calls wait(). All children of this - * process will inherit a flag if they should look for a - * child_subreaper process at exit. 
- */ - unsigned int is_child_subreaper:1; - unsigned int has_child_subreaper:1; - -#ifdef CONFIG_POSIX_TIMERS - - /* POSIX.1b Interval Timers */ - int posix_timer_id; - struct list_head posix_timers; - - /* ITIMER_REAL timer for the process */ - struct hrtimer real_timer; - ktime_t it_real_incr; - - /* - * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use - * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these - * values are defined to 0 and 1 respectively - */ - struct cpu_itimer it[2]; - - /* - * Thread group totals for process CPU timers. - * See thread_group_cputimer(), et al, for details. - */ - struct thread_group_cputimer cputimer; - - /* Earliest-expiration cache. */ - struct task_cputime cputime_expires; - - struct list_head cpu_timers[3]; - -#endif - - struct pid *leader_pid; - -#ifdef CONFIG_NO_HZ_FULL - atomic_t tick_dep_mask; -#endif - - struct pid *tty_old_pgrp; - - /* boolean value for session group leader */ - int leader; - - struct tty_struct *tty; /* NULL if no tty */ - -#ifdef CONFIG_SCHED_AUTOGROUP - struct autogroup *autogroup; -#endif - /* - * Cumulative resource counters for dead threads in the group, - * and for reaped dead child processes forked by this group. - * Live threads maintain their own counters and add to these - * in __exit_signal, except for the group leader. - */ - seqlock_t stats_lock; - u64 utime, stime, cutime, cstime; - u64 gtime; - u64 cgtime; - struct prev_cputime prev_cputime; - unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw; - unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt; - unsigned long inblock, oublock, cinblock, coublock; - unsigned long maxrss, cmaxrss; - struct task_io_accounting ioac; - - /* - * Cumulative ns of schedule CPU time fo dead threads in the - * group, not including a zombie group leader, (This only differs - * from jiffies_to_ns(utime + stime) if sched_clock uses something - * other than jiffies.) - */ - unsigned long long sum_sched_runtime; - - /* - * We don't bother to synchronize most readers of this at all, - * because there is no reader checking a limit that actually needs - * to get both rlim_cur and rlim_max atomically, and either one - * alone is a single word that can safely be read normally. - * getrlimit/setrlimit use task_lock(current->group_leader) to - * protect this instead of the siglock, because they really - * have no need to disable irqs. - */ - struct rlimit rlim[RLIM_NLIMITS]; - -#ifdef CONFIG_BSD_PROCESS_ACCT - struct pacct_struct pacct; /* per-process accounting information */ -#endif -#ifdef CONFIG_TASKSTATS - struct taskstats *stats; -#endif -#ifdef CONFIG_AUDIT - unsigned audit_tty; - struct tty_audit_buf *tty_audit_buf; -#endif - - /* - * Thread is the potential origin of an oom condition; kill first on - * oom - */ - bool oom_flag_origin; - short oom_score_adj; /* OOM kill score adjustment */ - short oom_score_adj_min; /* OOM kill score adjustment min value. - * Only settable by CAP_SYS_RESOURCE. */ - struct mm_struct *oom_mm; /* recorded mm when the thread group got - * killed by the oom killer */ - - struct mutex cred_guard_mutex; /* guard against foreign influences on - * credential calculations - * (notably. ptrace) */ + u64 utime; + u64 stime; + unsigned long long sum_exec_runtime; }; -/* - * Bits in flags field of signal_struct. 
- */ -#define SIGNAL_STOP_STOPPED 0x00000001 /* job control stop in effect */ -#define SIGNAL_STOP_CONTINUED 0x00000002 /* SIGCONT since WCONTINUED reap */ -#define SIGNAL_GROUP_EXIT 0x00000004 /* group exit in progress */ -#define SIGNAL_GROUP_COREDUMP 0x00000008 /* coredump in progress */ -/* - * Pending notifications to parent. - */ -#define SIGNAL_CLD_STOPPED 0x00000010 -#define SIGNAL_CLD_CONTINUED 0x00000020 -#define SIGNAL_CLD_MASK (SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED) - -#define SIGNAL_UNKILLABLE 0x00000040 /* for init: ignore fatal signals */ - -#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \ - SIGNAL_STOP_CONTINUED) - -static inline void signal_set_stop_flags(struct signal_struct *sig, - unsigned int flags) -{ - WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP)); - sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags; -} - -/* If true, all threads except ->group_exit_task have pending SIGKILL */ -static inline int signal_group_exit(const struct signal_struct *sig) -{ - return (sig->flags & SIGNAL_GROUP_EXIT) || - (sig->group_exit_task != NULL); -} - -/* - * Some day this will be a full-fledged user tracking system.. - */ -struct user_struct { - atomic_t __count; /* reference count */ - atomic_t processes; /* How many processes does this user have? */ - atomic_t sigpending; /* How many pending signals does this user have? */ -#ifdef CONFIG_INOTIFY_USER - atomic_t inotify_watches; /* How many inotify watches does this user have? */ - atomic_t inotify_devs; /* How many inotify devs does this user have opened? */ -#endif -#ifdef CONFIG_FANOTIFY - atomic_t fanotify_listeners; -#endif -#ifdef CONFIG_EPOLL - atomic_long_t epoll_watches; /* The number of file descriptors currently watched */ -#endif -#ifdef CONFIG_POSIX_MQUEUE - /* protected by mq_lock */ - unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */ -#endif - unsigned long locked_shm; /* How many pages of mlocked shm ? 
*/ - unsigned long unix_inflight; /* How many files in flight in unix sockets */ - atomic_long_t pipe_bufs; /* how many pages are allocated in pipe buffers */ +/* Alternate field names when used on cache expirations: */ +#define virt_exp utime +#define prof_exp stime +#define sched_exp sum_exec_runtime -#ifdef CONFIG_KEYS - struct key *uid_keyring; /* UID specific keyring */ - struct key *session_keyring; /* UID's default session keyring */ -#endif - - /* Hash table maintenance information */ - struct hlist_node uidhash_node; - kuid_t uid; - -#if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL) - atomic_long_t locked_vm; -#endif -}; - -extern int uids_sysfs_init(void); - -extern struct user_struct *find_user(kuid_t); - -extern struct user_struct root_user; -#define INIT_USER (&root_user) - - -struct backing_dev_info; -struct reclaim_state; - -#ifdef CONFIG_SCHED_INFO struct sched_info { - /* cumulative counters */ - unsigned long pcount; /* # of times run on this cpu */ - unsigned long long run_delay; /* time spent waiting on a runqueue */ - - /* timestamps */ - unsigned long long last_arrival,/* when we last ran on a cpu */ - last_queued; /* when we were last queued to run */ -}; -#endif /* CONFIG_SCHED_INFO */ +#ifdef CONFIG_SCHED_INFO + /* Cumulative counters: */ -#ifdef CONFIG_TASK_DELAY_ACCT -struct task_delay_info { - spinlock_t lock; - unsigned int flags; /* Private per-task flags */ + /* # of times we have run on this CPU: */ + unsigned long pcount; - /* For each stat XXX, add following, aligned appropriately - * - * struct timespec XXX_start, XXX_end; - * u64 XXX_delay; - * u32 XXX_count; - * - * Atomicity of updates to XXX_delay, XXX_count protected by - * single lock above (split into XXX_lock if contention is an issue). - */ + /* Time spent waiting on a runqueue: */ + unsigned long long run_delay; - /* - * XXX_count is incremented on every XXX operation, the delay - * associated with the operation is added to XXX_delay. - * XXX_delay contains the accumulated delay time in nanoseconds. - */ - u64 blkio_start; /* Shared by blkio, swapin */ - u64 blkio_delay; /* wait for sync block io completion */ - u64 swapin_delay; /* wait for swapin block io completion */ - u32 blkio_count; /* total count of the number of sync block */ - /* io operations performed */ - u32 swapin_count; /* total count of the number of swapin block */ - /* io operations performed */ - - u64 freepages_start; - u64 freepages_delay; /* wait for memory reclaim */ - u32 freepages_count; /* total count of memory reclaim */ -}; -#endif /* CONFIG_TASK_DELAY_ACCT */ + /* Timestamps: */ -static inline int sched_info_on(void) -{ -#ifdef CONFIG_SCHEDSTATS - return 1; -#elif defined(CONFIG_TASK_DELAY_ACCT) - extern int delayacct_on; - return delayacct_on; -#else - return 0; -#endif -} + /* When did we last run on a CPU? */ + unsigned long long last_arrival; -#ifdef CONFIG_SCHEDSTATS -void force_schedstat_enabled(void); -#endif + /* When were we last queued to run? */ + unsigned long long last_queued; -enum cpu_idle_type { - CPU_IDLE, - CPU_NOT_IDLE, - CPU_NEWLY_IDLE, - CPU_MAX_IDLE_TYPES +#endif /* CONFIG_SCHED_INFO */ }; /* @@ -965,290 +251,12 @@ enum cpu_idle_type { * We define a basic fixed point arithmetic range, and then formalize * all these metrics based on that basic range. 
*/ -# define SCHED_FIXEDPOINT_SHIFT 10 -# define SCHED_FIXEDPOINT_SCALE (1L << SCHED_FIXEDPOINT_SHIFT) - -/* - * Increase resolution of cpu_capacity calculations - */ -#define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT -#define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT) - -/* - * Wake-queues are lists of tasks with a pending wakeup, whose - * callers have already marked the task as woken internally, - * and can thus carry on. A common use case is being able to - * do the wakeups once the corresponding user lock as been - * released. - * - * We hold reference to each task in the list across the wakeup, - * thus guaranteeing that the memory is still valid by the time - * the actual wakeups are performed in wake_up_q(). - * - * One per task suffices, because there's never a need for a task to be - * in two wake queues simultaneously; it is forbidden to abandon a task - * in a wake queue (a call to wake_up_q() _must_ follow), so if a task is - * already in a wake queue, the wakeup will happen soon and the second - * waker can just skip it. - * - * The DEFINE_WAKE_Q macro declares and initializes the list head. - * wake_up_q() does NOT reinitialize the list; it's expected to be - * called near the end of a function. Otherwise, the list can be - * re-initialized for later re-use by wake_q_init(). - * - * Note that this can cause spurious wakeups. schedule() callers - * must ensure the call is done inside a loop, confirming that the - * wakeup condition has in fact occurred. - */ -struct wake_q_node { - struct wake_q_node *next; -}; - -struct wake_q_head { - struct wake_q_node *first; - struct wake_q_node **lastp; -}; - -#define WAKE_Q_TAIL ((struct wake_q_node *) 0x01) - -#define DEFINE_WAKE_Q(name) \ - struct wake_q_head name = { WAKE_Q_TAIL, &name.first } - -static inline void wake_q_init(struct wake_q_head *head) -{ - head->first = WAKE_Q_TAIL; - head->lastp = &head->first; -} - -extern void wake_q_add(struct wake_q_head *head, - struct task_struct *task); -extern void wake_up_q(struct wake_q_head *head); - -/* - * sched-domains (multiprocessor balancing) declarations: - */ -#ifdef CONFIG_SMP -#define SD_LOAD_BALANCE 0x0001 /* Do load balancing on this domain. 
*/ -#define SD_BALANCE_NEWIDLE 0x0002 /* Balance when about to become idle */ -#define SD_BALANCE_EXEC 0x0004 /* Balance on exec */ -#define SD_BALANCE_FORK 0x0008 /* Balance on fork, clone */ -#define SD_BALANCE_WAKE 0x0010 /* Balance on wakeup */ -#define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */ -#define SD_ASYM_CPUCAPACITY 0x0040 /* Groups have different max cpu capacities */ -#define SD_SHARE_CPUCAPACITY 0x0080 /* Domain members share cpu capacity */ -#define SD_SHARE_POWERDOMAIN 0x0100 /* Domain members share power domain */ -#define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */ -#define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */ -#define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */ -#define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */ -#define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */ -#define SD_NUMA 0x4000 /* cross-node balancing */ - -#ifdef CONFIG_SCHED_SMT -static inline int cpu_smt_flags(void) -{ - return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES; -} -#endif - -#ifdef CONFIG_SCHED_MC -static inline int cpu_core_flags(void) -{ - return SD_SHARE_PKG_RESOURCES; -} -#endif - -#ifdef CONFIG_NUMA -static inline int cpu_numa_flags(void) -{ - return SD_NUMA; -} -#endif - -extern int arch_asym_cpu_priority(int cpu); - -struct sched_domain_attr { - int relax_domain_level; -}; - -#define SD_ATTR_INIT (struct sched_domain_attr) { \ - .relax_domain_level = -1, \ -} - -extern int sched_domain_level_max; - -struct sched_group; - -struct sched_domain_shared { - atomic_t ref; - atomic_t nr_busy_cpus; - int has_idle_cores; -}; - -struct sched_domain { - /* These fields must be setup */ - struct sched_domain *parent; /* top domain must be null terminated */ - struct sched_domain *child; /* bottom domain must be null terminated */ - struct sched_group *groups; /* the balancing groups of the domain */ - unsigned long min_interval; /* Minimum balance interval ms */ - unsigned long max_interval; /* Maximum balance interval ms */ - unsigned int busy_factor; /* less balancing by factor if busy */ - unsigned int imbalance_pct; /* No balance until over watermark */ - unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */ - unsigned int busy_idx; - unsigned int idle_idx; - unsigned int newidle_idx; - unsigned int wake_idx; - unsigned int forkexec_idx; - unsigned int smt_gain; - - int nohz_idle; /* NOHZ IDLE status */ - int flags; /* See SD_* */ - int level; - - /* Runtime fields. */ - unsigned long last_balance; /* init to jiffies. units in jiffies */ - unsigned int balance_interval; /* initialise to 1. units in ms. 
*/ - unsigned int nr_balance_failed; /* initialise to 0 */ - - /* idle_balance() stats */ - u64 max_newidle_lb_cost; - unsigned long next_decay_max_lb_cost; - - u64 avg_scan_cost; /* select_idle_sibling */ - -#ifdef CONFIG_SCHEDSTATS - /* load_balance() stats */ - unsigned int lb_count[CPU_MAX_IDLE_TYPES]; - unsigned int lb_failed[CPU_MAX_IDLE_TYPES]; - unsigned int lb_balanced[CPU_MAX_IDLE_TYPES]; - unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES]; - unsigned int lb_gained[CPU_MAX_IDLE_TYPES]; - unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES]; - unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES]; - unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES]; - - /* Active load balancing */ - unsigned int alb_count; - unsigned int alb_failed; - unsigned int alb_pushed; - - /* SD_BALANCE_EXEC stats */ - unsigned int sbe_count; - unsigned int sbe_balanced; - unsigned int sbe_pushed; - - /* SD_BALANCE_FORK stats */ - unsigned int sbf_count; - unsigned int sbf_balanced; - unsigned int sbf_pushed; - - /* try_to_wake_up() stats */ - unsigned int ttwu_wake_remote; - unsigned int ttwu_move_affine; - unsigned int ttwu_move_balance; -#endif -#ifdef CONFIG_SCHED_DEBUG - char *name; -#endif - union { - void *private; /* used during construction */ - struct rcu_head rcu; /* used during destruction */ - }; - struct sched_domain_shared *shared; - - unsigned int span_weight; - /* - * Span of all CPUs in this domain. - * - * NOTE: this field is variable length. (Allocated dynamically - * by attaching extra space to the end of the structure, - * depending on how many CPUs the kernel has booted up with) - */ - unsigned long span[0]; -}; - -static inline struct cpumask *sched_domain_span(struct sched_domain *sd) -{ - return to_cpumask(sd->span); -} - -extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], - struct sched_domain_attr *dattr_new); - -/* Allocate an array of sched domains, for partition_sched_domains(). 
*/ -cpumask_var_t *alloc_sched_domains(unsigned int ndoms); -void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms); - -bool cpus_share_cache(int this_cpu, int that_cpu); - -typedef const struct cpumask *(*sched_domain_mask_f)(int cpu); -typedef int (*sched_domain_flags_f)(void); - -#define SDTL_OVERLAP 0x01 - -struct sd_data { - struct sched_domain **__percpu sd; - struct sched_domain_shared **__percpu sds; - struct sched_group **__percpu sg; - struct sched_group_capacity **__percpu sgc; -}; - -struct sched_domain_topology_level { - sched_domain_mask_f mask; - sched_domain_flags_f sd_flags; - int flags; - int numa_level; - struct sd_data data; -#ifdef CONFIG_SCHED_DEBUG - char *name; -#endif -}; - -extern void set_sched_topology(struct sched_domain_topology_level *tl); -extern void wake_up_if_idle(int cpu); - -#ifdef CONFIG_SCHED_DEBUG -# define SD_INIT_NAME(type) .name = #type -#else -# define SD_INIT_NAME(type) -#endif - -#else /* CONFIG_SMP */ - -struct sched_domain_attr; - -static inline void -partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], - struct sched_domain_attr *dattr_new) -{ -} - -static inline bool cpus_share_cache(int this_cpu, int that_cpu) -{ - return true; -} - -#endif /* !CONFIG_SMP */ - - -struct io_context; /* See blkdev.h */ - - -#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK -extern void prefetch_stack(struct task_struct *t); -#else -static inline void prefetch_stack(struct task_struct *t) { } -#endif - -struct audit_context; /* See audit.c */ -struct mempolicy; -struct pipe_inode_info; -struct uts_namespace; +# define SCHED_FIXEDPOINT_SHIFT 10 +# define SCHED_FIXEDPOINT_SCALE (1L << SCHED_FIXEDPOINT_SHIFT) struct load_weight { - unsigned long weight; - u32 inv_weight; + unsigned long weight; + u32 inv_weight; }; /* @@ -1304,71 +312,73 @@ struct load_weight { * issues. 
*/ struct sched_avg { - u64 last_update_time, load_sum; - u32 util_sum, period_contrib; - unsigned long load_avg, util_avg; + u64 last_update_time; + u64 load_sum; + u32 util_sum; + u32 period_contrib; + unsigned long load_avg; + unsigned long util_avg; }; -#ifdef CONFIG_SCHEDSTATS struct sched_statistics { - u64 wait_start; - u64 wait_max; - u64 wait_count; - u64 wait_sum; - u64 iowait_count; - u64 iowait_sum; - - u64 sleep_start; - u64 sleep_max; - s64 sum_sleep_runtime; - - u64 block_start; - u64 block_max; - u64 exec_max; - u64 slice_max; - - u64 nr_migrations_cold; - u64 nr_failed_migrations_affine; - u64 nr_failed_migrations_running; - u64 nr_failed_migrations_hot; - u64 nr_forced_migrations; - - u64 nr_wakeups; - u64 nr_wakeups_sync; - u64 nr_wakeups_migrate; - u64 nr_wakeups_local; - u64 nr_wakeups_remote; - u64 nr_wakeups_affine; - u64 nr_wakeups_affine_attempts; - u64 nr_wakeups_passive; - u64 nr_wakeups_idle; -}; +#ifdef CONFIG_SCHEDSTATS + u64 wait_start; + u64 wait_max; + u64 wait_count; + u64 wait_sum; + u64 iowait_count; + u64 iowait_sum; + + u64 sleep_start; + u64 sleep_max; + s64 sum_sleep_runtime; + + u64 block_start; + u64 block_max; + u64 exec_max; + u64 slice_max; + + u64 nr_migrations_cold; + u64 nr_failed_migrations_affine; + u64 nr_failed_migrations_running; + u64 nr_failed_migrations_hot; + u64 nr_forced_migrations; + + u64 nr_wakeups; + u64 nr_wakeups_sync; + u64 nr_wakeups_migrate; + u64 nr_wakeups_local; + u64 nr_wakeups_remote; + u64 nr_wakeups_affine; + u64 nr_wakeups_affine_attempts; + u64 nr_wakeups_passive; + u64 nr_wakeups_idle; #endif +}; struct sched_entity { - struct load_weight load; /* for load-balancing */ - struct rb_node run_node; - struct list_head group_node; - unsigned int on_rq; + /* For load-balancing: */ + struct load_weight load; + struct rb_node run_node; + struct list_head group_node; + unsigned int on_rq; - u64 exec_start; - u64 sum_exec_runtime; - u64 vruntime; - u64 prev_sum_exec_runtime; + u64 exec_start; + u64 sum_exec_runtime; + u64 vruntime; + u64 prev_sum_exec_runtime; - u64 nr_migrations; + u64 nr_migrations; -#ifdef CONFIG_SCHEDSTATS - struct sched_statistics statistics; -#endif + struct sched_statistics statistics; #ifdef CONFIG_FAIR_GROUP_SCHED - int depth; - struct sched_entity *parent; + int depth; + struct sched_entity *parent; /* rq on which this entity is (to be) queued: */ - struct cfs_rq *cfs_rq; + struct cfs_rq *cfs_rq; /* rq "owned" by this entity/group: */ - struct cfs_rq *my_q; + struct cfs_rq *my_q; #endif #ifdef CONFIG_SMP @@ -1378,49 +388,49 @@ struct sched_entity { * Put into separate cache line so it does not * collide with read-mostly values above. 
 */
- struct sched_avg avg ____cacheline_aligned_in_smp;
+ struct sched_avg avg ____cacheline_aligned_in_smp;
 #endif
 };
 struct sched_rt_entity {
- struct list_head run_list;
- unsigned long timeout;
- unsigned long watchdog_stamp;
- unsigned int time_slice;
- unsigned short on_rq;
- unsigned short on_list;
-
- struct sched_rt_entity *back;
+ struct list_head run_list;
+ unsigned long timeout;
+ unsigned long watchdog_stamp;
+ unsigned int time_slice;
+ unsigned short on_rq;
+ unsigned short on_list;
+
+ struct sched_rt_entity *back;
 #ifdef CONFIG_RT_GROUP_SCHED
- struct sched_rt_entity *parent;
+ struct sched_rt_entity *parent;
 /* rq on which this entity is (to be) queued: */
- struct rt_rq *rt_rq;
+ struct rt_rq *rt_rq;
 /* rq "owned" by this entity/group: */
- struct rt_rq *my_q;
+ struct rt_rq *my_q;
 #endif
 };
 struct sched_dl_entity {
- struct rb_node rb_node;
+ struct rb_node rb_node;
 /*
 * Original scheduling parameters. Copied here from sched_attr
 * during sched_setattr(), they will remain the same until
 * the next sched_setattr().
 */
- u64 dl_runtime; /* maximum runtime for each instance */
- u64 dl_deadline; /* relative deadline of each instance */
- u64 dl_period; /* separation of two instances (period) */
- u64 dl_bw; /* dl_runtime / dl_deadline */
+ u64 dl_runtime; /* Maximum runtime for each instance */
+ u64 dl_deadline; /* Relative deadline of each instance */
+ u64 dl_period; /* Separation of two instances (period) */
+ u64 dl_bw; /* dl_runtime / dl_deadline */
 /*
 * Actual scheduling parameters. Initialized with the values above,
 * they are continuously updated during task execution. Note that
 * the remaining runtime could be < 0 in case we are in overrun.
 */
- s64 runtime; /* remaining runtime for this instance */
- u64 deadline; /* absolute deadline for this instance */
- unsigned int flags; /* specifying the scheduler behaviour */
+ s64 runtime; /* Remaining runtime for this instance */
+ u64 deadline; /* Absolute deadline for this instance */
+ unsigned int flags; /* Specifying the scheduler behaviour */
 /*
 * Some bool flags:
@@ -1433,28 +443,31 @@ struct sched_dl_entity {
 * outside bandwidth enforcement mechanism (but only until we
 * exit the critical section);
 *
- * @dl_yielded tells if task gave up the cpu before consuming
+ * @dl_yielded tells if task gave up the CPU before consuming
 * all its available runtime during the last job.
 */
- int dl_throttled, dl_boosted, dl_yielded;
+ int dl_throttled;
+ int dl_boosted;
+ int dl_yielded;
 /*
 * Bandwidth enforcement timer. Each -deadline task has its
 * own bandwidth to be enforced, thus we need one timer per task.
 */
- struct hrtimer dl_timer;
+ struct hrtimer dl_timer;
 };
 union rcu_special {
 struct {
- u8 blocked;
- u8 need_qs;
- u8 exp_need_qs;
- u8 pad; /* Otherwise the compiler can store garbage here. */
+ u8 blocked;
+ u8 need_qs;
+ u8 exp_need_qs;
+
+ /* Otherwise the compiler can store garbage here: */
+ u8 pad;
 } b; /* Bits. */
 u32 s; /* Set of bits. */
 };
-struct rcu_node;
 enum perf_event_task_context {
 perf_invalid_context = -1,
@@ -1463,23 +476,8 @@ enum perf_event_task_context {
 perf_nr_task_contexts,
 };
-/* Track pages that require TLB flushes */
-struct tlbflush_unmap_batch {
- /*
- * Each bit set is a CPU that potentially has a TLB entry for one of
- * the PFNs being flushed. See set_tlb_ubc_flush_pending().
- */
- struct cpumask cpumask;
-
- /* True if any bit in cpumask is set */
- bool flush_required;
-
- /*
- * If true then the PTE was dirty when unmapped.
The entry must be - * flushed before IO is initiated or a stale TLB entry potentially - * allows an update without redirtying the page. - */ - bool writable; +struct wake_q_node { + struct wake_q_node *next; }; struct task_struct { @@ -1488,362 +486,417 @@ struct task_struct { * For reasons of header soup (see current_thread_info()), this * must be the first element of task_struct. */ - struct thread_info thread_info; + struct thread_info thread_info; #endif - volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ - void *stack; - atomic_t usage; - unsigned int flags; /* per process flags, defined below */ - unsigned int ptrace; + /* -1 unrunnable, 0 runnable, >0 stopped: */ + volatile long state; + void *stack; + atomic_t usage; + /* Per task flags (PF_*), defined further below: */ + unsigned int flags; + unsigned int ptrace; #ifdef CONFIG_SMP - struct llist_node wake_entry; - int on_cpu; + struct llist_node wake_entry; + int on_cpu; #ifdef CONFIG_THREAD_INFO_IN_TASK - unsigned int cpu; /* current CPU */ + /* Current CPU: */ + unsigned int cpu; #endif - unsigned int wakee_flips; - unsigned long wakee_flip_decay_ts; - struct task_struct *last_wakee; + unsigned int wakee_flips; + unsigned long wakee_flip_decay_ts; + struct task_struct *last_wakee; - int wake_cpu; + int wake_cpu; #endif - int on_rq; + int on_rq; - int prio, static_prio, normal_prio; - unsigned int rt_priority; - const struct sched_class *sched_class; - struct sched_entity se; - struct sched_rt_entity rt; + int prio; + int static_prio; + int normal_prio; + unsigned int rt_priority; + + const struct sched_class *sched_class; + struct sched_entity se; + struct sched_rt_entity rt; #ifdef CONFIG_CGROUP_SCHED - struct task_group *sched_task_group; + struct task_group *sched_task_group; #endif - struct sched_dl_entity dl; + struct sched_dl_entity dl; #ifdef CONFIG_PREEMPT_NOTIFIERS - /* list of struct preempt_notifier: */ - struct hlist_head preempt_notifiers; + /* List of struct preempt_notifier: */ + struct hlist_head preempt_notifiers; #endif #ifdef CONFIG_BLK_DEV_IO_TRACE - unsigned int btrace_seq; + unsigned int btrace_seq; #endif - unsigned int policy; - int nr_cpus_allowed; - cpumask_t cpus_allowed; + unsigned int policy; + int nr_cpus_allowed; + cpumask_t cpus_allowed; #ifdef CONFIG_PREEMPT_RCU - int rcu_read_lock_nesting; - union rcu_special rcu_read_unlock_special; - struct list_head rcu_node_entry; - struct rcu_node *rcu_blocked_node; + int rcu_read_lock_nesting; + union rcu_special rcu_read_unlock_special; + struct list_head rcu_node_entry; + struct rcu_node *rcu_blocked_node; #endif /* #ifdef CONFIG_PREEMPT_RCU */ + #ifdef CONFIG_TASKS_RCU - unsigned long rcu_tasks_nvcsw; - bool rcu_tasks_holdout; - struct list_head rcu_tasks_holdout_list; - int rcu_tasks_idle_cpu; + unsigned long rcu_tasks_nvcsw; + bool rcu_tasks_holdout; + struct list_head rcu_tasks_holdout_list; + int rcu_tasks_idle_cpu; #endif /* #ifdef CONFIG_TASKS_RCU */ -#ifdef CONFIG_SCHED_INFO - struct sched_info sched_info; -#endif + struct sched_info sched_info; - struct list_head tasks; + struct list_head tasks; #ifdef CONFIG_SMP - struct plist_node pushable_tasks; - struct rb_node pushable_dl_tasks; -#endif - - struct mm_struct *mm, *active_mm; - /* per-thread vma caching */ - u32 vmacache_seqnum; - struct vm_area_struct *vmacache[VMACACHE_SIZE]; -#if defined(SPLIT_RSS_COUNTING) - struct task_rss_stat rss_stat; -#endif -/* task state */ - int exit_state; - int exit_code, exit_signal; - int pdeath_signal; /* The signal sent when the parent dies */ 
- unsigned long jobctl; /* JOBCTL_*, siglock protected */ - - /* Used for emulating ABI behavior of previous Linux versions */ - unsigned int personality; - - /* scheduler bits, serialized by scheduler locks */ - unsigned sched_reset_on_fork:1; - unsigned sched_contributes_to_load:1; - unsigned sched_migrated:1; - unsigned sched_remote_wakeup:1; - unsigned :0; /* force alignment to the next boundary */ - - /* unserialized, strictly 'current' */ - unsigned in_execve:1; /* bit to tell LSMs we're in execve */ - unsigned in_iowait:1; -#if !defined(TIF_RESTORE_SIGMASK) - unsigned restore_sigmask:1; + struct plist_node pushable_tasks; + struct rb_node pushable_dl_tasks; +#endif + + struct mm_struct *mm; + struct mm_struct *active_mm; + + /* Per-thread vma caching: */ + struct vmacache vmacache; + +#ifdef SPLIT_RSS_COUNTING + struct task_rss_stat rss_stat; +#endif + int exit_state; + int exit_code; + int exit_signal; + /* The signal sent when the parent dies: */ + int pdeath_signal; + /* JOBCTL_*, siglock protected: */ + unsigned long jobctl; + + /* Used for emulating ABI behavior of previous Linux versions: */ + unsigned int personality; + + /* Scheduler bits, serialized by scheduler locks: */ + unsigned sched_reset_on_fork:1; + unsigned sched_contributes_to_load:1; + unsigned sched_migrated:1; + unsigned sched_remote_wakeup:1; + /* Force alignment to the next boundary: */ + unsigned :0; + + /* Unserialized, strictly 'current' */ + + /* Bit to tell LSMs we're in execve(): */ + unsigned in_execve:1; + unsigned in_iowait:1; +#ifndef TIF_RESTORE_SIGMASK + unsigned restore_sigmask:1; #endif #ifdef CONFIG_MEMCG - unsigned memcg_may_oom:1; + unsigned memcg_may_oom:1; #ifndef CONFIG_SLOB - unsigned memcg_kmem_skip_account:1; + unsigned memcg_kmem_skip_account:1; #endif #endif #ifdef CONFIG_COMPAT_BRK - unsigned brk_randomized:1; + unsigned brk_randomized:1; #endif - unsigned long atomic_flags; /* Flags needing atomic access. */ + unsigned long atomic_flags; /* Flags requiring atomic access. */ - struct restart_block restart_block; + struct restart_block restart_block; - pid_t pid; - pid_t tgid; + pid_t pid; + pid_t tgid; #ifdef CONFIG_CC_STACKPROTECTOR - /* Canary value for the -fstack-protector gcc feature */ - unsigned long stack_canary; + /* Canary value for the -fstack-protector GCC feature: */ + unsigned long stack_canary; #endif /* - * pointers to (original) parent process, youngest child, younger sibling, + * Pointers to the (original) parent process, youngest child, younger sibling, * older sibling, respectively. (p->father can be replaced with * p->real_parent->pid) */ - struct task_struct __rcu *real_parent; /* real parent process */ - struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */ + + /* Real parent process: */ + struct task_struct __rcu *real_parent; + + /* Recipient of SIGCHLD, wait4() reports: */ + struct task_struct __rcu *parent; + /* - * children/sibling forms the list of my natural children + * Children/sibling form the list of natural children: */ - struct list_head children; /* list of my children */ - struct list_head sibling; /* linkage in my parent's children list */ - struct task_struct *group_leader; /* threadgroup leader */ + struct list_head children; + struct list_head sibling; + struct task_struct *group_leader; /* - * ptraced is the list of tasks this task is using ptrace on. + * 'ptraced' is the list of tasks this task is using ptrace() on. + * * This includes both natural children and PTRACE_ATTACH targets. 
- * p->ptrace_entry is p's link on the p->parent->ptraced list. + * 'ptrace_entry' is this task's link on the p->parent->ptraced list. */ - struct list_head ptraced; - struct list_head ptrace_entry; + struct list_head ptraced; + struct list_head ptrace_entry; /* PID/PID hash table linkage. */ - struct pid_link pids[PIDTYPE_MAX]; - struct list_head thread_group; - struct list_head thread_node; + struct pid_link pids[PIDTYPE_MAX]; + struct list_head thread_group; + struct list_head thread_node; + + struct completion *vfork_done; - struct completion *vfork_done; /* for vfork() */ - int __user *set_child_tid; /* CLONE_CHILD_SETTID */ - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */ + /* CLONE_CHILD_SETTID: */ + int __user *set_child_tid; - u64 utime, stime; + /* CLONE_CHILD_CLEARTID: */ + int __user *clear_child_tid; + + u64 utime; + u64 stime; #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME - u64 utimescaled, stimescaled; + u64 utimescaled; + u64 stimescaled; #endif - u64 gtime; - struct prev_cputime prev_cputime; + u64 gtime; + struct prev_cputime prev_cputime; #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN - seqcount_t vtime_seqcount; - unsigned long long vtime_snap; + seqcount_t vtime_seqcount; + unsigned long long vtime_snap; enum { - /* Task is sleeping or running in a CPU with VTIME inactive */ + /* Task is sleeping or running in a CPU with VTIME inactive: */ VTIME_INACTIVE = 0, - /* Task runs in userspace in a CPU with VTIME active */ + /* Task runs in userspace in a CPU with VTIME active: */ VTIME_USER, - /* Task runs in kernelspace in a CPU with VTIME active */ + /* Task runs in kernelspace in a CPU with VTIME active: */ VTIME_SYS, } vtime_snap_whence; #endif #ifdef CONFIG_NO_HZ_FULL - atomic_t tick_dep_mask; + atomic_t tick_dep_mask; #endif - unsigned long nvcsw, nivcsw; /* context switch counts */ - u64 start_time; /* monotonic time in nsec */ - u64 real_start_time; /* boot based time in nsec */ -/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */ - unsigned long min_flt, maj_flt; + /* Context switch counts: */ + unsigned long nvcsw; + unsigned long nivcsw; + + /* Monotonic time in nsecs: */ + u64 start_time; + + /* Boot based time in nsecs: */ + u64 real_start_time; + + /* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */ + unsigned long min_flt; + unsigned long maj_flt; #ifdef CONFIG_POSIX_TIMERS - struct task_cputime cputime_expires; - struct list_head cpu_timers[3]; -#endif - -/* process credentials */ - const struct cred __rcu *ptracer_cred; /* Tracer's credentials at attach */ - const struct cred __rcu *real_cred; /* objective and real subjective task - * credentials (COW) */ - const struct cred __rcu *cred; /* effective (overridable) subjective task - * credentials (COW) */ - char comm[TASK_COMM_LEN]; /* executable name excluding path - - access with [gs]et_task_comm (which lock - it with task_lock()) - - initialized normally by setup_new_exec */ -/* file system info */ - struct nameidata *nameidata; + struct task_cputime cputime_expires; + struct list_head cpu_timers[3]; +#endif + + /* Process credentials: */ + + /* Tracer's credentials at attach: */ + const struct cred __rcu *ptracer_cred; + + /* Objective and real subjective task credentials (COW): */ + const struct cred __rcu *real_cred; + + /* Effective (overridable) subjective task credentials (COW): */ + const struct cred __rcu *cred; + + /* + * executable name, excluding path. 
+ *
+ * - normally initialized by setup_new_exec()
+ * - access it with [gs]et_task_comm()
+ * - lock it with task_lock()
+ */
+ char comm[TASK_COMM_LEN];
+
+ struct nameidata *nameidata;
+
 #ifdef CONFIG_SYSVIPC
-/* ipc stuff */
- struct sysv_sem sysvsem;
- struct sysv_shm sysvshm;
+ struct sysv_sem sysvsem;
+ struct sysv_shm sysvshm;
 #endif
 #ifdef CONFIG_DETECT_HUNG_TASK
-/* hung task detection */
- unsigned long last_switch_count;
-#endif
-/* filesystem information */
- struct fs_struct *fs;
-/* open file information */
- struct files_struct *files;
-/* namespaces */
- struct nsproxy *nsproxy;
-/* signal handlers */
- struct signal_struct *signal;
- struct sighand_struct *sighand;
-
- sigset_t blocked, real_blocked;
- sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
- struct sigpending pending;
-
- unsigned long sas_ss_sp;
- size_t sas_ss_size;
- unsigned sas_ss_flags;
-
- struct callback_head *task_works;
-
- struct audit_context *audit_context;
+ unsigned long last_switch_count;
+#endif
+ /* Filesystem information: */
+ struct fs_struct *fs;
+
+ /* Open file information: */
+ struct files_struct *files;
+
+ /* Namespaces: */
+ struct nsproxy *nsproxy;
+
+ /* Signal handlers: */
+ struct signal_struct *signal;
+ struct sighand_struct *sighand;
+ sigset_t blocked;
+ sigset_t real_blocked;
+ /* Restored if set_restore_sigmask() was used: */
+ sigset_t saved_sigmask;
+ struct sigpending pending;
+ unsigned long sas_ss_sp;
+ size_t sas_ss_size;
+ unsigned int sas_ss_flags;
+
+ struct callback_head *task_works;
+
+ struct audit_context *audit_context;
 #ifdef CONFIG_AUDITSYSCALL
- kuid_t loginuid;
- unsigned int sessionid;
+ kuid_t loginuid;
+ unsigned int sessionid;
 #endif
- struct seccomp seccomp;
+ struct seccomp seccomp;
+
+ /* Thread group tracking: */
+ u32 parent_exec_id;
+ u32 self_exec_id;
-/* Thread group tracking */
- u32 parent_exec_id;
- u32 self_exec_id;
-/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
- * mempolicy */
- spinlock_t alloc_lock;
+ /* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */
+ spinlock_t alloc_lock;
 /* Protection of the PI data structures: */
- raw_spinlock_t pi_lock;
+ raw_spinlock_t pi_lock;
- struct wake_q_node wake_q;
+ struct wake_q_node wake_q;
 #ifdef CONFIG_RT_MUTEXES
- /* PI waiters blocked on a rt_mutex held by this task */
- struct rb_root pi_waiters;
- struct rb_node *pi_waiters_leftmost;
- /* Deadlock detection and priority inheritance handling */
- struct rt_mutex_waiter *pi_blocked_on;
+ /* PI waiters blocked on a rt_mutex held by this task: */
+ struct rb_root pi_waiters;
+ struct rb_node *pi_waiters_leftmost;
+ /* Deadlock detection and priority inheritance handling: */
+ struct rt_mutex_waiter *pi_blocked_on;
 #endif
 #ifdef CONFIG_DEBUG_MUTEXES
- /* mutex deadlock detection */
- struct mutex_waiter *blocked_on;
+ /* Mutex deadlock detection: */
+ struct mutex_waiter *blocked_on;
 #endif
+
 #ifdef CONFIG_TRACE_IRQFLAGS
- unsigned int irq_events;
- unsigned long hardirq_enable_ip;
- unsigned long hardirq_disable_ip;
- unsigned int hardirq_enable_event;
- unsigned int hardirq_disable_event;
- int hardirqs_enabled;
- int hardirq_context;
- unsigned long softirq_disable_ip;
- unsigned long softirq_enable_ip;
- unsigned int softirq_disable_event;
- unsigned int softirq_enable_event;
- int softirqs_enabled;
- int softirq_context;
+ unsigned int irq_events;
+ unsigned long hardirq_enable_ip;
+ unsigned long hardirq_disable_ip;
+ unsigned int hardirq_enable_event;
+ unsigned int hardirq_disable_event;
+ int hardirqs_enabled;
+ int hardirq_context;
+ unsigned long softirq_disable_ip;
+ unsigned long softirq_enable_ip;
+ unsigned int softirq_disable_event;
+ unsigned int softirq_enable_event;
+ int softirqs_enabled;
+ int softirq_context;
 #endif
+
 #ifdef CONFIG_LOCKDEP
-# define MAX_LOCK_DEPTH 48UL
- u64 curr_chain_key;
- int lockdep_depth;
- unsigned int lockdep_recursion;
- struct held_lock held_locks[MAX_LOCK_DEPTH];
- gfp_t lockdep_reclaim_gfp;
+# define MAX_LOCK_DEPTH 48UL
+ u64 curr_chain_key;
+ int lockdep_depth;
+ unsigned int lockdep_recursion;
+ struct held_lock held_locks[MAX_LOCK_DEPTH];
+ gfp_t lockdep_reclaim_gfp;
 #endif
+
 #ifdef CONFIG_UBSAN
- unsigned int in_ubsan;
+ unsigned int in_ubsan;
 #endif
-/* journalling filesystem info */
- void *journal_info;
+ /* Journalling filesystem info: */
+ void *journal_info;
-/* stacked block device info */
- struct bio_list *bio_list;
+ /* Stacked block device info: */
+ struct bio_list *bio_list;
 #ifdef CONFIG_BLOCK
-/* stack plugging */
- struct blk_plug *plug;
+ /* Stack plugging: */
+ struct blk_plug *plug;
 #endif
-/* VM state */
- struct reclaim_state *reclaim_state;
+ /* VM state: */
+ struct reclaim_state *reclaim_state;
+
+ struct backing_dev_info *backing_dev_info;
- struct backing_dev_info *backing_dev_info;
+ struct io_context *io_context;
- struct io_context *io_context;
+ /* Ptrace state: */
+ unsigned long ptrace_message;
+ siginfo_t *last_siginfo;
- unsigned long ptrace_message;
- siginfo_t *last_siginfo; /* For ptrace use. */
- struct task_io_accounting ioac;
-#if defined(CONFIG_TASK_XACCT)
- u64 acct_rss_mem1; /* accumulated rss usage */
- u64 acct_vm_mem1; /* accumulated virtual memory usage */
- u64 acct_timexpd; /* stime + utime since last update */
+ struct task_io_accounting ioac;
+#ifdef CONFIG_TASK_XACCT
+ /* Accumulated RSS usage: */
+ u64 acct_rss_mem1;
+ /* Accumulated virtual memory usage: */
+ u64 acct_vm_mem1;
+ /* stime + utime since last update: */
+ u64 acct_timexpd;
 #endif
 #ifdef CONFIG_CPUSETS
- nodemask_t mems_allowed; /* Protected by alloc_lock */
- seqcount_t mems_allowed_seq; /* Seqence no to catch updates */
- int cpuset_mem_spread_rotor;
- int cpuset_slab_spread_rotor;
+ /* Protected by ->alloc_lock: */
+ nodemask_t mems_allowed;
+ /* Sequence number to catch updates: */
+ seqcount_t mems_allowed_seq;
+ int cpuset_mem_spread_rotor;
+ int cpuset_slab_spread_rotor;
 #endif
 #ifdef CONFIG_CGROUPS
- /* Control Group info protected by css_set_lock */
- struct css_set __rcu *cgroups;
- /* cg_list protected by css_set_lock and tsk->alloc_lock */
- struct list_head cg_list;
+ /* Control Group info protected by css_set_lock: */
+ struct css_set __rcu *cgroups;
+ /* cg_list protected by css_set_lock and tsk->alloc_lock: */
+ struct list_head cg_list;
 #endif
 #ifdef CONFIG_INTEL_RDT_A
- int closid;
+ int closid;
 #endif
 #ifdef CONFIG_FUTEX
- struct robust_list_head __user *robust_list;
+ struct robust_list_head __user *robust_list;
 #ifdef CONFIG_COMPAT
 struct compat_robust_list_head __user *compat_robust_list;
 #endif
- struct list_head pi_state_list;
- struct futex_pi_state *pi_state_cache;
+ struct list_head pi_state_list;
+ struct futex_pi_state *pi_state_cache;
 #endif
 #ifdef CONFIG_PERF_EVENTS
- struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
- struct mutex perf_event_mutex;
- struct list_head perf_event_list;
+ struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
+ struct mutex perf_event_mutex;
+ struct list_head 
perf_event_list; #endif #ifdef CONFIG_DEBUG_PREEMPT - unsigned long preempt_disable_ip; + unsigned long preempt_disable_ip; #endif #ifdef CONFIG_NUMA - struct mempolicy *mempolicy; /* Protected by alloc_lock */ - short il_next; - short pref_node_fork; + /* Protected by alloc_lock: */ + struct mempolicy *mempolicy; + short il_next; + short pref_node_fork; #endif #ifdef CONFIG_NUMA_BALANCING - int numa_scan_seq; - unsigned int numa_scan_period; - unsigned int numa_scan_period_max; - int numa_preferred_nid; - unsigned long numa_migrate_retry; - u64 node_stamp; /* migration stamp */ - u64 last_task_numa_placement; - u64 last_sum_exec_runtime; - struct callback_head numa_work; - - struct list_head numa_entry; - struct numa_group *numa_group; + int numa_scan_seq; + unsigned int numa_scan_period; + unsigned int numa_scan_period_max; + int numa_preferred_nid; + unsigned long numa_migrate_retry; + /* Migration stamp: */ + u64 node_stamp; + u64 last_task_numa_placement; + u64 last_sum_exec_runtime; + struct callback_head numa_work; + + struct list_head numa_entry; + struct numa_group *numa_group; /* * numa_faults is an array split into four regions: @@ -1859,8 +912,8 @@ struct task_struct { * during the current scan window. When the scan completes, the counts * in faults_memory and faults_cpu decay and these values are copied. */ - unsigned long *numa_faults; - unsigned long total_numa_faults; + unsigned long *numa_faults; + unsigned long total_numa_faults; /* * numa_faults_locality tracks if faults recorded during the last @@ -1868,208 +921,133 @@ struct task_struct { * period is adapted based on the locality of the faults with different * weights depending on whether they were shared or private faults */ - unsigned long numa_faults_locality[3]; + unsigned long numa_faults_locality[3]; - unsigned long numa_pages_migrated; + unsigned long numa_pages_migrated; #endif /* CONFIG_NUMA_BALANCING */ -#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH - struct tlbflush_unmap_batch tlb_ubc; -#endif + struct tlbflush_unmap_batch tlb_ubc; - struct rcu_head rcu; + struct rcu_head rcu; - /* - * cache last used pipe for splice - */ - struct pipe_inode_info *splice_pipe; + /* Cache last used pipe for splice(): */ + struct pipe_inode_info *splice_pipe; - struct page_frag task_frag; + struct page_frag task_frag; -#ifdef CONFIG_TASK_DELAY_ACCT - struct task_delay_info *delays; +#ifdef CONFIG_TASK_DELAY_ACCT + struct task_delay_info *delays; #endif + #ifdef CONFIG_FAULT_INJECTION - int make_it_fail; + int make_it_fail; #endif /* - * when (nr_dirtied >= nr_dirtied_pause), it's time to call - * balance_dirty_pages() for some dirty throttling pause + * When (nr_dirtied >= nr_dirtied_pause), it's time to call + * balance_dirty_pages() for a dirty throttling pause: */ - int nr_dirtied; - int nr_dirtied_pause; - unsigned long dirty_paused_when; /* start of a write-and-pause period */ + int nr_dirtied; + int nr_dirtied_pause; + /* Start of a write-and-pause period: */ + unsigned long dirty_paused_when; #ifdef CONFIG_LATENCYTOP - int latency_record_count; - struct latency_record latency_record[LT_SAVECOUNT]; + int latency_record_count; + struct latency_record latency_record[LT_SAVECOUNT]; #endif /* - * time slack values; these are used to round up poll() and + * Time slack values; these are used to round up poll() and * select() etc timeout values. These are in nanoseconds. 
*/ - u64 timer_slack_ns; - u64 default_timer_slack_ns; + u64 timer_slack_ns; + u64 default_timer_slack_ns; #ifdef CONFIG_KASAN - unsigned int kasan_depth; + unsigned int kasan_depth; #endif + #ifdef CONFIG_FUNCTION_GRAPH_TRACER - /* Index of current stored address in ret_stack */ - int curr_ret_stack; - /* Stack of return addresses for return function tracing */ - struct ftrace_ret_stack *ret_stack; - /* time stamp for last schedule */ - unsigned long long ftrace_timestamp; + /* Index of current stored address in ret_stack: */ + int curr_ret_stack; + + /* Stack of return addresses for return function tracing: */ + struct ftrace_ret_stack *ret_stack; + + /* Timestamp for last schedule: */ + unsigned long long ftrace_timestamp; + /* * Number of functions that haven't been traced - * because of depth overrun. + * because of depth overrun: */ - atomic_t trace_overrun; - /* Pause for the tracing */ - atomic_t tracing_graph_pause; + atomic_t trace_overrun; + + /* Pause tracing: */ + atomic_t tracing_graph_pause; #endif + #ifdef CONFIG_TRACING - /* state flags for use by tracers */ - unsigned long trace; - /* bitmask and counter of trace recursion */ - unsigned long trace_recursion; + /* State flags for use by tracers: */ + unsigned long trace; + + /* Bitmask and counter of trace recursion: */ + unsigned long trace_recursion; #endif /* CONFIG_TRACING */ + #ifdef CONFIG_KCOV - /* Coverage collection mode enabled for this task (0 if disabled). */ - enum kcov_mode kcov_mode; - /* Size of the kcov_area. */ - unsigned kcov_size; - /* Buffer for coverage collection. */ - void *kcov_area; - /* kcov desciptor wired with this task or NULL. */ - struct kcov *kcov; + /* Coverage collection mode enabled for this task (0 if disabled): */ + enum kcov_mode kcov_mode; + + /* Size of the kcov_area: */ + unsigned int kcov_size; + + /* Buffer for coverage collection: */ + void *kcov_area; + + /* KCOV descriptor wired with this task or NULL: */ + struct kcov *kcov; #endif + #ifdef CONFIG_MEMCG - struct mem_cgroup *memcg_in_oom; - gfp_t memcg_oom_gfp_mask; - int memcg_oom_order; + struct mem_cgroup *memcg_in_oom; + gfp_t memcg_oom_gfp_mask; + int memcg_oom_order; - /* number of pages to reclaim on returning to userland */ - unsigned int memcg_nr_pages_over_high; + /* Number of pages to reclaim on returning to userland: */ + unsigned int memcg_nr_pages_over_high; #endif + #ifdef CONFIG_UPROBES - struct uprobe_task *utask; + struct uprobe_task *utask; #endif #if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE) - unsigned int sequential_io; - unsigned int sequential_io_avg; + unsigned int sequential_io; + unsigned int sequential_io_avg; #endif #ifdef CONFIG_DEBUG_ATOMIC_SLEEP - unsigned long task_state_change; + unsigned long task_state_change; #endif - int pagefault_disabled; + int pagefault_disabled; #ifdef CONFIG_MMU - struct task_struct *oom_reaper_list; + struct task_struct *oom_reaper_list; #endif #ifdef CONFIG_VMAP_STACK - struct vm_struct *stack_vm_area; + struct vm_struct *stack_vm_area; #endif #ifdef CONFIG_THREAD_INFO_IN_TASK - /* A live task holds one reference. */ - atomic_t stack_refcount; + /* A live task holds one reference: */ + atomic_t stack_refcount; #endif -/* CPU-specific state of this task */ - struct thread_struct thread; -/* - * WARNING: on x86, 'thread_struct' contains a variable-sized - * structure. It *MUST* be at the end of 'task_struct'. - * - * Do not put anything below here! 
- */ -}; - -#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT -extern int arch_task_struct_size __read_mostly; -#else -# define arch_task_struct_size (sizeof(struct task_struct)) -#endif - -#ifdef CONFIG_VMAP_STACK -static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t) -{ - return t->stack_vm_area; -} -#else -static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t) -{ - return NULL; -} -#endif - -/* Future-safe accessor for struct task_struct's cpus_allowed. */ -#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed) - -static inline int tsk_nr_cpus_allowed(struct task_struct *p) -{ - return p->nr_cpus_allowed; -} - -#define TNF_MIGRATED 0x01 -#define TNF_NO_GROUP 0x02 -#define TNF_SHARED 0x04 -#define TNF_FAULT_LOCAL 0x08 -#define TNF_MIGRATE_FAIL 0x10 - -static inline bool in_vfork(struct task_struct *tsk) -{ - bool ret; + /* CPU-specific state of this task: */ + struct thread_struct thread; /* - * need RCU to access ->real_parent if CLONE_VM was used along with - * CLONE_PARENT. - * - * We check real_parent->mm == tsk->mm because CLONE_VFORK does not - * imply CLONE_VM - * - * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus - * ->real_parent is not necessarily the task doing vfork(), so in - * theory we can't rely on task_lock() if we want to dereference it. + * WARNING: on x86, 'thread_struct' contains a variable-sized + * structure. It *MUST* be at the end of 'task_struct'. * - * And in this case we can't trust the real_parent->mm == tsk->mm - * check, it can be false negative. But we do not care, if init or - * another oom-unkillable task does this it should blame itself. + * Do not put anything below here! */ - rcu_read_lock(); - ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm; - rcu_read_unlock(); - - return ret; -} - -#ifdef CONFIG_NUMA_BALANCING -extern void task_numa_fault(int last_node, int node, int pages, int flags); -extern pid_t task_numa_group_id(struct task_struct *p); -extern void set_numabalancing_state(bool enabled); -extern void task_numa_free(struct task_struct *p); -extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page, - int src_nid, int dst_cpu); -#else -static inline void task_numa_fault(int last_node, int node, int pages, - int flags) -{ -} -static inline pid_t task_numa_group_id(struct task_struct *p) -{ - return 0; -} -static inline void set_numabalancing_state(bool enabled) -{ -} -static inline void task_numa_free(struct task_struct *p) -{ -} -static inline bool should_numa_migrate_memory(struct task_struct *p, - struct page *page, int src_nid, int dst_cpu) -{ - return true; -} -#endif +}; static inline struct pid *task_pid(struct task_struct *task) { @@ -2082,7 +1060,7 @@ static inline struct pid *task_tgid(struct task_struct *task) } /* - * Without tasklist or rcu lock it is not safe to dereference + * Without tasklist or RCU lock it is not safe to dereference * the result of task_pgrp/task_session even if task == current, * we can race with another thread doing sys_setsid/sys_setpgid. 
*/ @@ -2096,8 +1074,6 @@ static inline struct pid *task_session(struct task_struct *task) return task->group_leader->pids[PIDTYPE_SID].pid; } -struct pid_namespace; - /* * the helpers to get the task's different pids as they are seen * from various namespaces @@ -2111,16 +1087,14 @@ struct pid_namespace; * * see also pid_nr() etc in include/linux/pid.h */ -pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, - struct pid_namespace *ns); +pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns); static inline pid_t task_pid_nr(struct task_struct *tsk) { return tsk->pid; } -static inline pid_t task_pid_nr_ns(struct task_struct *tsk, - struct pid_namespace *ns) +static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) { return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns); } @@ -2136,15 +1110,28 @@ static inline pid_t task_tgid_nr(struct task_struct *tsk) return tsk->tgid; } -pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns); +extern pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns); static inline pid_t task_tgid_vnr(struct task_struct *tsk) { return pid_vnr(task_tgid(tsk)); } +/** + * pid_alive - check that a task structure is not stale + * @p: Task structure to be checked. + * + * Test if a process is not yet dead (at most zombie state) + * If pid_alive fails, then pointers within the task structure + * can be stale and must not be dereferenced. + * + * Return: 1 if the process is alive. 0 otherwise. + */ +static inline int pid_alive(const struct task_struct *p) +{ + return p->pids[PIDTYPE_PID].pid != NULL; +} -static inline int pid_alive(const struct task_struct *p); static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns) { pid_t pid = 0; @@ -2162,8 +1149,7 @@ static inline pid_t task_ppid_nr(const struct task_struct *tsk) return task_ppid_nr_ns(tsk, &init_pid_ns); } -static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, - struct pid_namespace *ns) +static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) { return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns); } @@ -2174,8 +1160,7 @@ static inline pid_t task_pgrp_vnr(struct task_struct *tsk) } -static inline pid_t task_session_nr_ns(struct task_struct *tsk, - struct pid_namespace *ns) +static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) { return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns); } @@ -2185,28 +1170,13 @@ static inline pid_t task_session_vnr(struct task_struct *tsk) return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL); } -/* obsolete, do not use */ +/* Obsolete, do not use: */ static inline pid_t task_pgrp_nr(struct task_struct *tsk) { return task_pgrp_nr_ns(tsk, &init_pid_ns); } /** - * pid_alive - check that a task structure is not stale - * @p: Task structure to be checked. - * - * Test if a process is not yet dead (at most zombie state) - * If pid_alive fails, then pointers within the task structure - * can be stale and must not be dereferenced. - * - * Return: 1 if the process is alive. 0 otherwise. - */ -static inline int pid_alive(const struct task_struct *p) -{ - return p->pids[PIDTYPE_PID].pid != NULL; -} - -/** * is_global_init - check if a task structure is init. Since init * is free to have sub-threads we need to check tgid. * @tsk: Task structure to be checked. 
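The pid accessors above all reduce to __task_pid_nr_ns() with a different pid_type and namespace argument, and pid_alive() is the guard that makes the pid pointers safe to dereference. A minimal sketch of a caller, assuming kernel context (the helper name and pr_info() format are illustrative only; task_pid_vnr() is the PIDTYPE_PID counterpart of the *_vnr() helpers shown above, and the RCU read lock satisfies the "tasklist or RCU lock" rule noted in the comment above):

	/* Hypothetical helper: report a task's global and namespace-local IDs. */
	static void report_task_ids(struct task_struct *tsk)
	{
		rcu_read_lock();
		if (pid_alive(tsk))
			pr_info("%s: pid=%d vpid=%d tgid=%d vtgid=%d ppid=%d\n",
				tsk->comm,
				task_pid_nr(tsk), task_pid_vnr(tsk),
				task_tgid_nr(tsk), task_tgid_vnr(tsk),
				task_ppid_nr(tsk));
		rcu_read_unlock();
	}
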
@@ -2222,89 +1192,37 @@ static inline int is_global_init(struct task_struct *tsk) extern struct pid *cad_pid; -extern void free_task(struct task_struct *tsk); -#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0) - -extern void __put_task_struct(struct task_struct *t); - -static inline void put_task_struct(struct task_struct *t) -{ - if (atomic_dec_and_test(&t->usage)) - __put_task_struct(t); -} - -struct task_struct *task_rcu_dereference(struct task_struct **ptask); -struct task_struct *try_get_task_struct(struct task_struct **ptask); - -#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN -extern void task_cputime(struct task_struct *t, - u64 *utime, u64 *stime); -extern u64 task_gtime(struct task_struct *t); -#else -static inline void task_cputime(struct task_struct *t, - u64 *utime, u64 *stime) -{ - *utime = t->utime; - *stime = t->stime; -} - -static inline u64 task_gtime(struct task_struct *t) -{ - return t->gtime; -} -#endif - -#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME -static inline void task_cputime_scaled(struct task_struct *t, - u64 *utimescaled, - u64 *stimescaled) -{ - *utimescaled = t->utimescaled; - *stimescaled = t->stimescaled; -} -#else -static inline void task_cputime_scaled(struct task_struct *t, - u64 *utimescaled, - u64 *stimescaled) -{ - task_cputime(t, utimescaled, stimescaled); -} -#endif - -extern void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st); -extern void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st); - /* * Per process flags */ -#define PF_IDLE 0x00000002 /* I am an IDLE thread */ -#define PF_EXITING 0x00000004 /* getting shut down */ -#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ -#define PF_VCPU 0x00000010 /* I'm a virtual CPU */ -#define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */ -#define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */ -#define PF_MCE_PROCESS 0x00000080 /* process policy on mce errors */ -#define PF_SUPERPRIV 0x00000100 /* used super-user privileges */ -#define PF_DUMPCORE 0x00000200 /* dumped core */ -#define PF_SIGNALED 0x00000400 /* killed by a signal */ -#define PF_MEMALLOC 0x00000800 /* Allocating memory */ -#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */ -#define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */ -#define PF_USED_ASYNC 0x00004000 /* used async_schedule*(), used by module init */ -#define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */ -#define PF_FROZEN 0x00010000 /* frozen for system suspend */ -#define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */ -#define PF_KSWAPD 0x00040000 /* I am kswapd */ -#define PF_MEMALLOC_NOIO 0x00080000 /* Allocating memory without IO involved */ -#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */ -#define PF_KTHREAD 0x00200000 /* I am a kernel thread */ -#define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */ -#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ -#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */ -#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ -#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ -#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ -#define PF_SUSPEND_TASK 0x80000000 /* this thread called freeze_processes and should not be frozen */ +#define PF_IDLE 0x00000002 /* I am an IDLE thread */ +#define PF_EXITING 
0x00000004 /* Getting shut down */ +#define PF_EXITPIDONE 0x00000008 /* PI exit done on shut down */ +#define PF_VCPU 0x00000010 /* I'm a virtual CPU */ +#define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */ +#define PF_FORKNOEXEC 0x00000040 /* Forked but didn't exec */ +#define PF_MCE_PROCESS 0x00000080 /* Process policy on mce errors */ +#define PF_SUPERPRIV 0x00000100 /* Used super-user privileges */ +#define PF_DUMPCORE 0x00000200 /* Dumped core */ +#define PF_SIGNALED 0x00000400 /* Killed by a signal */ +#define PF_MEMALLOC 0x00000800 /* Allocating memory */ +#define PF_NPROC_EXCEEDED 0x00001000 /* set_user() noticed that RLIMIT_NPROC was exceeded */ +#define PF_USED_MATH 0x00002000 /* If unset the fpu must be initialized before use */ +#define PF_USED_ASYNC 0x00004000 /* Used async_schedule*(), used by module init */ +#define PF_NOFREEZE 0x00008000 /* This thread should not be frozen */ +#define PF_FROZEN 0x00010000 /* Frozen for system suspend */ +#define PF_FSTRANS 0x00020000 /* Inside a filesystem transaction */ +#define PF_KSWAPD 0x00040000 /* I am kswapd */ +#define PF_MEMALLOC_NOIO 0x00080000 /* Allocating memory without IO involved */ +#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */ +#define PF_KTHREAD 0x00200000 /* I am a kernel thread */ +#define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */ +#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ +#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */ +#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ +#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ +#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ +#define PF_SUSPEND_TASK 0x80000000 /* This thread called freeze_processes() and should not be frozen */ /* * Only the _current_ task can read/write to tsk->flags, but other @@ -2317,55 +1235,38 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *s * child is not running and in turn not changing child->flags * at the same time the parent does it. */ -#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0) -#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0) -#define clear_used_math() clear_stopped_child_used_math(current) -#define set_used_math() set_stopped_child_used_math(current) +#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0) +#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0) +#define clear_used_math() clear_stopped_child_used_math(current) +#define set_used_math() set_stopped_child_used_math(current) + #define conditional_stopped_child_used_math(condition, child) \ do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0) -#define conditional_used_math(condition) \ - conditional_stopped_child_used_math(condition, current) -#define copy_to_stopped_child_used_math(child) \ - do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0) -/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */ -#define tsk_used_math(p) ((p)->flags & PF_USED_MATH) -#define used_math() tsk_used_math(current) -/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags - * __GFP_FS is also cleared as it implies __GFP_IO. 
- */ -static inline gfp_t memalloc_noio_flags(gfp_t flags) -{ - if (unlikely(current->flags & PF_MEMALLOC_NOIO)) - flags &= ~(__GFP_IO | __GFP_FS); - return flags; -} +#define conditional_used_math(condition) conditional_stopped_child_used_math(condition, current) -static inline unsigned int memalloc_noio_save(void) -{ - unsigned int flags = current->flags & PF_MEMALLOC_NOIO; - current->flags |= PF_MEMALLOC_NOIO; - return flags; -} +#define copy_to_stopped_child_used_math(child) \ + do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0) -static inline void memalloc_noio_restore(unsigned int flags) -{ - current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags; -} +/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */ +#define tsk_used_math(p) ((p)->flags & PF_USED_MATH) +#define used_math() tsk_used_math(current) /* Per-process atomic flags. */ -#define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */ -#define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */ -#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */ -#define PFA_LMK_WAITING 3 /* Lowmemorykiller is waiting */ +#define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */ +#define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */ +#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */ +#define PFA_LMK_WAITING 3 /* Lowmemorykiller is waiting */ #define TASK_PFA_TEST(name, func) \ static inline bool task_##func(struct task_struct *p) \ { return test_bit(PFA_##name, &p->atomic_flags); } + #define TASK_PFA_SET(name, func) \ static inline void task_set_##func(struct task_struct *p) \ { set_bit(PFA_##name, &p->atomic_flags); } + #define TASK_PFA_CLEAR(name, func) \ static inline void task_clear_##func(struct task_struct *p) \ { clear_bit(PFA_##name, &p->atomic_flags); } @@ -2384,75 +1285,23 @@ TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab) TASK_PFA_TEST(LMK_WAITING, lmk_waiting) TASK_PFA_SET(LMK_WAITING, lmk_waiting) -/* - * task->jobctl flags - */ -#define JOBCTL_STOP_SIGMASK 0xffff /* signr of the last group stop */ - -#define JOBCTL_STOP_DEQUEUED_BIT 16 /* stop signal dequeued */ -#define JOBCTL_STOP_PENDING_BIT 17 /* task should stop for group stop */ -#define JOBCTL_STOP_CONSUME_BIT 18 /* consume group stop count */ -#define JOBCTL_TRAP_STOP_BIT 19 /* trap for STOP */ -#define JOBCTL_TRAP_NOTIFY_BIT 20 /* trap for NOTIFY */ -#define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */ -#define JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */ - -#define JOBCTL_STOP_DEQUEUED (1UL << JOBCTL_STOP_DEQUEUED_BIT) -#define JOBCTL_STOP_PENDING (1UL << JOBCTL_STOP_PENDING_BIT) -#define JOBCTL_STOP_CONSUME (1UL << JOBCTL_STOP_CONSUME_BIT) -#define JOBCTL_TRAP_STOP (1UL << JOBCTL_TRAP_STOP_BIT) -#define JOBCTL_TRAP_NOTIFY (1UL << JOBCTL_TRAP_NOTIFY_BIT) -#define JOBCTL_TRAPPING (1UL << JOBCTL_TRAPPING_BIT) -#define JOBCTL_LISTENING (1UL << JOBCTL_LISTENING_BIT) - -#define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY) -#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK) - -extern bool task_set_jobctl_pending(struct task_struct *task, - unsigned long mask); -extern void task_clear_jobctl_trapping(struct task_struct *task); -extern void task_clear_jobctl_pending(struct task_struct *task, - unsigned long mask); - -static inline void rcu_copy_process(struct task_struct *p) -{ -#ifdef CONFIG_PREEMPT_RCU - p->rcu_read_lock_nesting = 0; - p->rcu_read_unlock_special.s = 0; - p->rcu_blocked_node = NULL; - 
INIT_LIST_HEAD(&p->rcu_node_entry); -#endif /* #ifdef CONFIG_PREEMPT_RCU */ -#ifdef CONFIG_TASKS_RCU - p->rcu_tasks_holdout = false; - INIT_LIST_HEAD(&p->rcu_tasks_holdout_list); - p->rcu_tasks_idle_cpu = -1; -#endif /* #ifdef CONFIG_TASKS_RCU */ -} - -static inline void tsk_restore_flags(struct task_struct *task, - unsigned long orig_flags, unsigned long flags) +static inline void +tsk_restore_flags(struct task_struct *task, unsigned long orig_flags, unsigned long flags) { task->flags &= ~flags; task->flags |= orig_flags & flags; } -extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, - const struct cpumask *trial); -extern int task_can_attach(struct task_struct *p, - const struct cpumask *cs_cpus_allowed); +extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial); +extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed); #ifdef CONFIG_SMP -extern void do_set_cpus_allowed(struct task_struct *p, - const struct cpumask *new_mask); - -extern int set_cpus_allowed_ptr(struct task_struct *p, - const struct cpumask *new_mask); +extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask); +extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask); #else -static inline void do_set_cpus_allowed(struct task_struct *p, - const struct cpumask *new_mask) +static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) { } -static inline int set_cpus_allowed_ptr(struct task_struct *p, - const struct cpumask *new_mask) +static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) { if (!cpumask_test_cpu(0, new_mask)) return -EINVAL; @@ -2460,165 +1309,14 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p, } #endif -#ifdef CONFIG_NO_HZ_COMMON -void calc_load_enter_idle(void); -void calc_load_exit_idle(void); -#else -static inline void calc_load_enter_idle(void) { } -static inline void calc_load_exit_idle(void) { } -#endif /* CONFIG_NO_HZ_COMMON */ - #ifndef cpu_relax_yield #define cpu_relax_yield() cpu_relax() #endif -/* - * Do not use outside of architecture code which knows its limitations. - * - * sched_clock() has no promise of monotonicity or bounded drift between - * CPUs, use (which you should not) requires disabling IRQs. - * - * Please use one of the three interfaces below. 
- */ -extern unsigned long long notrace sched_clock(void); -/* - * See the comment in kernel/sched/clock.c - */ -extern u64 running_clock(void); -extern u64 sched_clock_cpu(int cpu); - - -extern void sched_clock_init(void); - -#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK -static inline void sched_clock_init_late(void) -{ -} - -static inline void sched_clock_tick(void) -{ -} - -static inline void clear_sched_clock_stable(void) -{ -} - -static inline void sched_clock_idle_sleep_event(void) -{ -} - -static inline void sched_clock_idle_wakeup_event(u64 delta_ns) -{ -} - -static inline u64 cpu_clock(int cpu) -{ - return sched_clock(); -} - -static inline u64 local_clock(void) -{ - return sched_clock(); -} -#else -extern void sched_clock_init_late(void); -/* - * Architectures can set this to 1 if they have specified - * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig, - * but then during bootup it turns out that sched_clock() - * is reliable after all: - */ -extern int sched_clock_stable(void); -extern void clear_sched_clock_stable(void); - -extern void sched_clock_tick(void); -extern void sched_clock_idle_sleep_event(void); -extern void sched_clock_idle_wakeup_event(u64 delta_ns); - -/* - * As outlined in clock.c, provides a fast, high resolution, nanosecond - * time source that is monotonic per cpu argument and has bounded drift - * between cpus. - * - * ######################### BIG FAT WARNING ########################## - * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can # - * # go backwards !! # - * #################################################################### - */ -static inline u64 cpu_clock(int cpu) -{ - return sched_clock_cpu(cpu); -} - -static inline u64 local_clock(void) -{ - return sched_clock_cpu(raw_smp_processor_id()); -} -#endif - -#ifdef CONFIG_IRQ_TIME_ACCOUNTING -/* - * An i/f to runtime opt-in for irq time accounting based off of sched_clock. - * The reason for this explicit opt-in is not to have perf penalty with - * slow sched_clocks. 
- */ -extern void enable_sched_clock_irqtime(void); -extern void disable_sched_clock_irqtime(void); -#else -static inline void enable_sched_clock_irqtime(void) {} -static inline void disable_sched_clock_irqtime(void) {} -#endif - -extern unsigned long long -task_sched_runtime(struct task_struct *task); - -/* sched_exec is called by processes performing an exec */ -#ifdef CONFIG_SMP -extern void sched_exec(void); -#else -#define sched_exec() {} -#endif - -extern void sched_clock_idle_sleep_event(void); -extern void sched_clock_idle_wakeup_event(u64 delta_ns); - -#ifdef CONFIG_HOTPLUG_CPU -extern void idle_task_exit(void); -#else -static inline void idle_task_exit(void) {} -#endif - -#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP) -extern void wake_up_nohz_cpu(int cpu); -#else -static inline void wake_up_nohz_cpu(int cpu) { } -#endif - -#ifdef CONFIG_NO_HZ_FULL -extern u64 scheduler_tick_max_deferment(void); -#endif - -#ifdef CONFIG_SCHED_AUTOGROUP -extern void sched_autogroup_create_attach(struct task_struct *p); -extern void sched_autogroup_detach(struct task_struct *p); -extern void sched_autogroup_fork(struct signal_struct *sig); -extern void sched_autogroup_exit(struct signal_struct *sig); -extern void sched_autogroup_exit_task(struct task_struct *p); -#ifdef CONFIG_PROC_FS -extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m); -extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice); -#endif -#else -static inline void sched_autogroup_create_attach(struct task_struct *p) { } -static inline void sched_autogroup_detach(struct task_struct *p) { } -static inline void sched_autogroup_fork(struct signal_struct *sig) { } -static inline void sched_autogroup_exit(struct signal_struct *sig) { } -static inline void sched_autogroup_exit_task(struct task_struct *p) { } -#endif - extern int yield_to(struct task_struct *p, bool preempt); extern void set_user_nice(struct task_struct *p, long nice); extern int task_prio(const struct task_struct *p); + /** * task_nice - return the nice value of a given task. * @p: the task in question. @@ -2629,16 +1327,15 @@ static inline int task_nice(const struct task_struct *p) { return PRIO_TO_NICE((p)->static_prio); } + extern int can_nice(const struct task_struct *p, const int nice); extern int task_curr(const struct task_struct *p); extern int idle_cpu(int cpu); -extern int sched_setscheduler(struct task_struct *, int, - const struct sched_param *); -extern int sched_setscheduler_nocheck(struct task_struct *, int, - const struct sched_param *); -extern int sched_setattr(struct task_struct *, - const struct sched_attr *); +extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *); +extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *); +extern int sched_setattr(struct task_struct *, const struct sched_attr *); extern struct task_struct *idle_task(int cpu); + /** * is_idle_task - is the specified task an idle task? * @p: the task in question. 
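The setters declared above (sched_setscheduler(), sched_setscheduler_nocheck(), sched_setattr()) are the kernel-internal interface for changing a task's scheduling policy; the _nocheck() variant skips the capability check, which is the usual choice for in-kernel callers. A minimal sketch under those assumptions (the helper name and the priority value 50 are illustrative only):

	/* Hypothetical helper: switch a kernel thread to SCHED_FIFO. */
	static int make_thread_fifo(struct task_struct *tsk)
	{
		struct sched_param param = { .sched_priority = 50 };

		return sched_setscheduler_nocheck(tsk, SCHED_FIFO, &param);
	}
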
@@ -2649,6 +1346,7 @@ static inline bool is_idle_task(const struct task_struct *p) { return !!(p->flags & PF_IDLE); } + extern struct task_struct *curr_task(int cpu); extern void ia64_set_curr_task(int cpu, struct task_struct *p); @@ -2661,23 +1359,15 @@ union thread_union { unsigned long stack[THREAD_SIZE/sizeof(long)]; }; -#ifndef __HAVE_ARCH_KSTACK_END -static inline int kstack_end(void *addr) +#ifdef CONFIG_THREAD_INFO_IN_TASK +static inline struct thread_info *task_thread_info(struct task_struct *task) { - /* Reliable end of stack detection: * Some APM bios versions misalign the stack */ - return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*))); + return &task->thread_info; } +#elif !defined(__HAVE_THREAD_FUNCTIONS) +# define task_thread_info(task) ((struct thread_info *)(task)->stack) #endif -extern union thread_union init_thread_union; -extern struct task_struct init_task; - -extern struct mm_struct init_mm; - -extern struct pid_namespace init_pid_ns; - /* * find a task by one of its numerical ids * @@ -2690,322 +1380,25 @@ extern struct pid_namespace init_pid_ns; */ extern struct task_struct *find_task_by_vpid(pid_t nr); -extern struct task_struct *find_task_by_pid_ns(pid_t nr, - struct pid_namespace *ns); - -/* per-UID process charging. */ -extern struct user_struct * alloc_uid(kuid_t); -static inline struct user_struct *get_uid(struct user_struct *u) -{ - atomic_inc(&u->__count); - return u; -} -extern void free_uid(struct user_struct *); - -#include <asm/current.h> - -extern void xtime_update(unsigned long ticks); +extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns); extern int wake_up_state(struct task_struct *tsk, unsigned int state); extern int wake_up_process(struct task_struct *tsk); extern void wake_up_new_task(struct task_struct *tsk); -#ifdef CONFIG_SMP - extern void kick_process(struct task_struct *tsk); -#else - static inline void kick_process(struct task_struct *tsk) { } -#endif -extern int sched_fork(unsigned long clone_flags, struct task_struct *p); -extern void sched_dead(struct task_struct *p); - -extern void proc_caches_init(void); -extern void flush_signals(struct task_struct *); -extern void ignore_signals(struct task_struct *); -extern void flush_signal_handlers(struct task_struct *, int force_default); -extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info); - -static inline int kernel_dequeue_signal(siginfo_t *info) -{ - struct task_struct *tsk = current; - siginfo_t __info; - int ret; - - spin_lock_irq(&tsk->sighand->siglock); - ret = dequeue_signal(tsk, &tsk->blocked, info ?: &__info); - spin_unlock_irq(&tsk->sighand->siglock); - - return ret; -} - -static inline void kernel_signal_stop(void) -{ - spin_lock_irq(&current->sighand->siglock); - if (current->jobctl & JOBCTL_STOP_DEQUEUED) - __set_current_state(TASK_STOPPED); - spin_unlock_irq(&current->sighand->siglock); - - schedule(); } - -extern void release_task(struct task_struct * p); -extern int send_sig_info(int, struct siginfo *, struct task_struct *); -extern int force_sigsegv(int, struct task_struct *); -extern int force_sig_info(int, struct siginfo *, struct task_struct *); -extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp); -extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid); -extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *, - const struct cred *, u32); -extern int kill_pgrp(struct pid *pid, int sig, int priv); -extern int kill_pid(struct pid *pid, int 
sig, int priv); -extern int kill_proc_info(int, struct siginfo *, pid_t); -extern __must_check bool do_notify_parent(struct task_struct *, int); -extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent); -extern void force_sig(int, struct task_struct *); -extern int send_sig(int, struct task_struct *, int); -extern int zap_other_threads(struct task_struct *p); -extern struct sigqueue *sigqueue_alloc(void); -extern void sigqueue_free(struct sigqueue *); -extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group); -extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *); - -#ifdef TIF_RESTORE_SIGMASK -/* - * Legacy restore_sigmask accessors. These are inefficient on - * SMP architectures because they require atomic operations. - */ - -/** - * set_restore_sigmask() - make sure saved_sigmask processing gets done - * - * This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code - * will run before returning to user mode, to process the flag. For - * all callers, TIF_SIGPENDING is already set or it's no harm to set - * it. TIF_RESTORE_SIGMASK need not be in the set of bits that the - * arch code will notice on return to user mode, in case those bits - * are scarce. We set TIF_SIGPENDING here to ensure that the arch - * signal code always gets run when TIF_RESTORE_SIGMASK is set. - */ -static inline void set_restore_sigmask(void) -{ - set_thread_flag(TIF_RESTORE_SIGMASK); - WARN_ON(!test_thread_flag(TIF_SIGPENDING)); -} -static inline void clear_restore_sigmask(void) -{ - clear_thread_flag(TIF_RESTORE_SIGMASK); -} -static inline bool test_restore_sigmask(void) -{ - return test_thread_flag(TIF_RESTORE_SIGMASK); -} -static inline bool test_and_clear_restore_sigmask(void) -{ - return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK); -} - -#else /* TIF_RESTORE_SIGMASK */ - -/* Higher-quality implementation, used if TIF_RESTORE_SIGMASK doesn't exist. */ -static inline void set_restore_sigmask(void) -{ - current->restore_sigmask = true; - WARN_ON(!test_thread_flag(TIF_SIGPENDING)); -} -static inline void clear_restore_sigmask(void) -{ - current->restore_sigmask = false; -} -static inline bool test_restore_sigmask(void) -{ - return current->restore_sigmask; -} -static inline bool test_and_clear_restore_sigmask(void) -{ - if (!current->restore_sigmask) - return false; - current->restore_sigmask = false; - return true; -} -#endif - -static inline void restore_saved_sigmask(void) -{ - if (test_and_clear_restore_sigmask()) - __set_current_blocked(&current->saved_sigmask); -} - -static inline sigset_t *sigmask_to_save(void) -{ - sigset_t *res = &current->blocked; - if (unlikely(test_restore_sigmask())) - res = &current->saved_sigmask; - return res; -} - -static inline int kill_cad_pid(int sig, int priv) -{ - return kill_pid(cad_pid, sig, priv); -} - -/* These can be the second arg to send_sig_info/send_group_sig_info. */ -#define SEND_SIG_NOINFO ((struct siginfo *) 0) -#define SEND_SIG_PRIV ((struct siginfo *) 1) -#define SEND_SIG_FORCED ((struct siginfo *) 2) - -/* - * True if we are on the alternate signal stack. - */ -static inline int on_sig_stack(unsigned long sp) -{ - /* - * If the signal stack is SS_AUTODISARM then, by construction, we - * can't be on the signal stack unless user code deliberately set - * SS_AUTODISARM when we were already on it. - * - * This improves reliability: if user state gets corrupted such that - * the stack pointer points very close to the end of the signal stack, - * then this check will enable the signal to be handled anyway. 
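[Editor's sketch, not part of the patch: the restore_sigmask accessors above are easiest to read through their one real use case, syscalls like ppoll()/pselect() that install a temporary mask. This is modeled on the pattern in fs/select.c; do_interruptible_wait() is a hypothetical stand-in for the actual wait.]

    static long wait_with_temp_sigmask(sigset_t *ksigmask)
    {
        sigset_t sigsaved;
        long ret;

        /* SIGKILL and SIGSTOP may never be blocked */
        sigdelsetmask(ksigmask, sigmask(SIGKILL) | sigmask(SIGSTOP));
        sigsaved = current->blocked;
        set_current_blocked(ksigmask);

        ret = do_interruptible_wait();  /* hypothetical helper */

        if (ret == -EINTR) {
            /*
             * Let the signal be delivered with the temporary mask still
             * installed; the arch signal-return path then restores
             * ->saved_sigmask via restore_saved_sigmask().
             */
            current->saved_sigmask = sigsaved;
            set_restore_sigmask();
            ret = -ERESTARTNOHAND;
        } else {
            set_current_blocked(&sigsaved);
        }

        return ret;
    }
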
- */ - if (current->sas_ss_flags & SS_AUTODISARM) - return 0; - -#ifdef CONFIG_STACK_GROWSUP - return sp >= current->sas_ss_sp && - sp - current->sas_ss_sp < current->sas_ss_size; -#else - return sp > current->sas_ss_sp && - sp - current->sas_ss_sp <= current->sas_ss_size; -#endif -} - -static inline int sas_ss_flags(unsigned long sp) -{ - if (!current->sas_ss_size) - return SS_DISABLE; - return on_sig_stack(sp) ? SS_ONSTACK : 0; -} - -static inline void sas_ss_reset(struct task_struct *p) -{ - p->sas_ss_sp = 0; - p->sas_ss_size = 0; - p->sas_ss_flags = SS_DISABLE; -} - -static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig) -{ - if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && ! sas_ss_flags(sp)) -#ifdef CONFIG_STACK_GROWSUP - return current->sas_ss_sp; -#else - return current->sas_ss_sp + current->sas_ss_size; -#endif - return sp; -} - -/* - * Routines for handling mm_structs - */ -extern struct mm_struct * mm_alloc(void); - -/* mmdrop drops the mm and the page tables */ -extern void __mmdrop(struct mm_struct *); -static inline void mmdrop(struct mm_struct *mm) -{ - if (unlikely(atomic_dec_and_test(&mm->mm_count))) - __mmdrop(mm); -} - -static inline void mmdrop_async_fn(struct work_struct *work) -{ - struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work); - __mmdrop(mm); -} - -static inline void mmdrop_async(struct mm_struct *mm) -{ - if (unlikely(atomic_dec_and_test(&mm->mm_count))) { - INIT_WORK(&mm->async_put_work, mmdrop_async_fn); - schedule_work(&mm->async_put_work); - } -} - -static inline bool mmget_not_zero(struct mm_struct *mm) -{ - return atomic_inc_not_zero(&mm->mm_users); -} - -/* mmput gets rid of the mappings and all user-space */ -extern void mmput(struct mm_struct *); -#ifdef CONFIG_MMU -/* same as above but performs the slow path from the async context. Can - * be called from the atomic context as well - */ -extern void mmput_async(struct mm_struct *); -#endif - -/* Grab a reference to a task's mm, if it is not already going away */ -extern struct mm_struct *get_task_mm(struct task_struct *task); -/* - * Grab a reference to a task's mm, if it is not already going away - * and ptrace_may_access with the mode parameter passed to it - * succeeds. - */ -extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode); -/* Remove the current tasks stale references to the old mm_struct */ -extern void mm_release(struct task_struct *, struct mm_struct *); - -#ifdef CONFIG_HAVE_COPY_THREAD_TLS -extern int copy_thread_tls(unsigned long, unsigned long, unsigned long, - struct task_struct *, unsigned long); -#else -extern int copy_thread(unsigned long, unsigned long, unsigned long, - struct task_struct *); - -/* Architectures that haven't opted into copy_thread_tls get the tls argument - * via pt_regs, so ignore the tls argument passed via C. 
*/ -static inline int copy_thread_tls( - unsigned long clone_flags, unsigned long sp, unsigned long arg, - struct task_struct *p, unsigned long tls) -{ - return copy_thread(clone_flags, sp, arg, p); -} -#endif -extern void flush_thread(void); - -#ifdef CONFIG_HAVE_EXIT_THREAD -extern void exit_thread(struct task_struct *tsk); +#ifdef CONFIG_SMP +extern void kick_process(struct task_struct *tsk); #else -static inline void exit_thread(struct task_struct *tsk) -{ -} +static inline void kick_process(struct task_struct *tsk) { } #endif -extern void exit_files(struct task_struct *); -extern void __cleanup_sighand(struct sighand_struct *); - -extern void exit_itimers(struct signal_struct *); -extern void flush_itimer_signals(void); - -extern void do_group_exit(int); - -extern int do_execve(struct filename *, - const char __user * const __user *, - const char __user * const __user *); -extern int do_execveat(int, struct filename *, - const char __user * const __user *, - const char __user * const __user *, - int); -extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *, unsigned long); -extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *); -struct task_struct *fork_idle(int); -extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); - extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec); + static inline void set_task_comm(struct task_struct *tsk, const char *from) { __set_task_comm(tsk, from, false); } + extern char *get_task_comm(char *to, struct task_struct *tsk); #ifdef CONFIG_SMP @@ -3013,260 +1406,15 @@ void scheduler_ipi(void); extern unsigned long wait_task_inactive(struct task_struct *, long match_state); #else static inline void scheduler_ipi(void) { } -static inline unsigned long wait_task_inactive(struct task_struct *p, - long match_state) +static inline unsigned long wait_task_inactive(struct task_struct *p, long match_state) { return 1; } #endif -#define tasklist_empty() \ - list_empty(&init_task.tasks) - -#define next_task(p) \ - list_entry_rcu((p)->tasks.next, struct task_struct, tasks) - -#define for_each_process(p) \ - for (p = &init_task ; (p = next_task(p)) != &init_task ; ) - -extern bool current_is_single_threaded(void); - -/* - * Careful: do_each_thread/while_each_thread is a double loop so - * 'break' will not work as expected - use goto instead. - */ -#define do_each_thread(g, t) \ - for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do - -#define while_each_thread(g, t) \ - while ((t = next_thread(t)) != g) - -#define __for_each_thread(signal, t) \ - list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node) - -#define for_each_thread(p, t) \ - __for_each_thread((p)->signal, t) - -/* Careful: this is a double loop, 'break' won't work as expected. */ -#define for_each_process_thread(p, t) \ - for_each_process(p) for_each_thread(p, t) - -static inline int get_nr_threads(struct task_struct *tsk) -{ - return tsk->signal->nr_threads; -} - -static inline bool thread_group_leader(struct task_struct *p) -{ - return p->exit_signal >= 0; -} - -/* Do to the insanities of de_thread it is possible for a process - * to have the pid of the thread group leader without actually being - * the thread group leader. For iteration through the pids in proc - * all we care about is that we have a task with the appropriate - * pid, we don't actually care if we have the right task. 
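[Editor's sketch, not part of the patch: the iteration macros above walk RCU-protected lists, so callers hold rcu_read_lock(), and because for_each_process_thread() is a double loop, break does not work as expected.]

    static void log_all_threads(void)
    {
        struct task_struct *p, *t;

        rcu_read_lock();
        for_each_process_thread(p, t)
            pr_info("tgid %d pid %d comm %s\n",
                    task_tgid_nr(p), task_pid_nr(t), t->comm);
        rcu_read_unlock();
    }
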
- */ -static inline bool has_group_leader_pid(struct task_struct *p) -{ - return task_pid(p) == p->signal->leader_pid; -} - -static inline -bool same_thread_group(struct task_struct *p1, struct task_struct *p2) -{ - return p1->signal == p2->signal; -} - -static inline struct task_struct *next_thread(const struct task_struct *p) -{ - return list_entry_rcu(p->thread_group.next, - struct task_struct, thread_group); -} - -static inline int thread_group_empty(struct task_struct *p) -{ - return list_empty(&p->thread_group); -} - -#define delay_group_leader(p) \ - (thread_group_leader(p) && !thread_group_empty(p)) - -/* - * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring - * subscriptions and synchronises with wait4(). Also used in procfs. Also - * pins the final release of task.io_context. Also protects ->cpuset and - * ->cgroup.subsys[]. And ->vfork_done. - * - * Nests both inside and outside of read_lock(&tasklist_lock). - * It must not be nested with write_lock_irq(&tasklist_lock), - * neither inside nor outside. - */ -static inline void task_lock(struct task_struct *p) -{ - spin_lock(&p->alloc_lock); -} - -static inline void task_unlock(struct task_struct *p) -{ - spin_unlock(&p->alloc_lock); -} - -extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, - unsigned long *flags); - -static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk, - unsigned long *flags) -{ - struct sighand_struct *ret; - - ret = __lock_task_sighand(tsk, flags); - (void)__cond_lock(&tsk->sighand->siglock, ret); - return ret; -} - -static inline void unlock_task_sighand(struct task_struct *tsk, - unsigned long *flags) -{ - spin_unlock_irqrestore(&tsk->sighand->siglock, *flags); -} - -/** - * threadgroup_change_begin - mark the beginning of changes to a threadgroup - * @tsk: task causing the changes - * - * All operations which modify a threadgroup - a new thread joining the - * group, death of a member thread (the assertion of PF_EXITING) and - * exec(2) dethreading the process and replacing the leader - are wrapped - * by threadgroup_change_{begin|end}(). This is to provide a place which - * subsystems needing threadgroup stability can hook into for - * synchronization. - */ -static inline void threadgroup_change_begin(struct task_struct *tsk) -{ - might_sleep(); - cgroup_threadgroup_change_begin(tsk); -} - -/** - * threadgroup_change_end - mark the end of changes to a threadgroup - * @tsk: task causing the changes - * - * See threadgroup_change_begin(). - */ -static inline void threadgroup_change_end(struct task_struct *tsk) -{ - cgroup_threadgroup_change_end(tsk); -} - -#ifdef CONFIG_THREAD_INFO_IN_TASK - -static inline struct thread_info *task_thread_info(struct task_struct *task) -{ - return &task->thread_info; -} - -/* - * When accessing the stack of a non-current task that might exit, use - * try_get_task_stack() instead. task_stack_page will return a pointer - * that could get freed out from under you. 
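[Editor's sketch, not part of the patch: task_lock() above is the documented guard for ->mm, ->comm and friends; the usual pattern is a short probe under the lock.]

    static bool task_shares_mm(struct task_struct *tsk, struct mm_struct *mm)
    {
        bool ret;

        task_lock(tsk);         /* stabilizes tsk->mm against exec/exit */
        ret = (tsk->mm == mm);
        task_unlock(tsk);

        return ret;
    }
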
- */ -static inline void *task_stack_page(const struct task_struct *task) -{ - return task->stack; -} - -#define setup_thread_stack(new,old) do { } while(0) - -static inline unsigned long *end_of_stack(const struct task_struct *task) -{ - return task->stack; -} - -#elif !defined(__HAVE_THREAD_FUNCTIONS) - -#define task_thread_info(task) ((struct thread_info *)(task)->stack) -#define task_stack_page(task) ((void *)(task)->stack) - -static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org) -{ - *task_thread_info(p) = *task_thread_info(org); - task_thread_info(p)->task = p; -} - /* - * Return the address of the last usable long on the stack. - * - * When the stack grows down, this is just above the thread - * info struct. Going any lower will corrupt the threadinfo. - * - * When the stack grows up, this is the highest address. - * Beyond that position, we corrupt data on the next page. - */ -static inline unsigned long *end_of_stack(struct task_struct *p) -{ -#ifdef CONFIG_STACK_GROWSUP - return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1; -#else - return (unsigned long *)(task_thread_info(p) + 1); -#endif -} - -#endif - -#ifdef CONFIG_THREAD_INFO_IN_TASK -static inline void *try_get_task_stack(struct task_struct *tsk) -{ - return atomic_inc_not_zero(&tsk->stack_refcount) ? - task_stack_page(tsk) : NULL; -} - -extern void put_task_stack(struct task_struct *tsk); -#else -static inline void *try_get_task_stack(struct task_struct *tsk) -{ - return task_stack_page(tsk); -} - -static inline void put_task_stack(struct task_struct *tsk) {} -#endif - -#define task_stack_end_corrupted(task) \ - (*(end_of_stack(task)) != STACK_END_MAGIC) - -static inline int object_is_on_stack(void *obj) -{ - void *stack = task_stack_page(current); - - return (obj >= stack) && (obj < (stack + THREAD_SIZE)); -} - -extern void thread_stack_cache_init(void); - -#ifdef CONFIG_DEBUG_STACK_USAGE -static inline unsigned long stack_not_used(struct task_struct *p) -{ - unsigned long *n = end_of_stack(p); - - do { /* Skip over canary */ -# ifdef CONFIG_STACK_GROWSUP - n--; -# else - n++; -# endif - } while (!*n); - -# ifdef CONFIG_STACK_GROWSUP - return (unsigned long)end_of_stack(p) - (unsigned long)n; -# else - return (unsigned long)n - (unsigned long)end_of_stack(p); -# endif -} -#endif -extern void set_task_stack_end_magic(struct task_struct *tsk); - -/* set thread flags in other task's structures - * - see asm/thread_info.h for TIF_xxxx flags available + * Set thread flags in other task's structures. 
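[Editor's sketch, not part of the patch: try_get_task_stack()/put_task_stack() above exist precisely so a check like the stack canary can be done on a task that may be exiting underneath the caller.]

    static bool remote_stack_canary_ok(struct task_struct *tsk)
    {
        bool ok = true;

        if (try_get_task_stack(tsk)) {   /* pins the stack, or fails */
            ok = !task_stack_end_corrupted(tsk);
            put_task_stack(tsk);
        }

        return ok;
    }
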
+ * See asm/thread_info.h for TIF_xxxx flags available: */ static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag) { @@ -3308,37 +1456,6 @@ static inline int test_tsk_need_resched(struct task_struct *tsk) return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED)); } -static inline int restart_syscall(void) -{ - set_tsk_thread_flag(current, TIF_SIGPENDING); - return -ERESTARTNOINTR; -} - -static inline int signal_pending(struct task_struct *p) -{ - return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING)); -} - -static inline int __fatal_signal_pending(struct task_struct *p) -{ - return unlikely(sigismember(&p->pending.signal, SIGKILL)); -} - -static inline int fatal_signal_pending(struct task_struct *p) -{ - return signal_pending(p) && __fatal_signal_pending(p); -} - -static inline int signal_pending_state(long state, struct task_struct *p) -{ - if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL))) - return 0; - if (!signal_pending(p)) - return 0; - - return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p); -} - /* * cond_resched() and cond_resched_lock(): latency reduction via * explicit rescheduling in places that are safe. The return @@ -3380,15 +1497,6 @@ static inline void cond_resched_rcu(void) #endif } -static inline unsigned long get_preempt_disable_ip(struct task_struct *p) -{ -#ifdef CONFIG_DEBUG_PREEMPT - return p->preempt_disable_ip; -#else - return 0; -#endif -} - /* * Does a critical section need to be broken due to another * task waiting?: (technically does not depend on CONFIG_PREEMPT, @@ -3403,114 +1511,12 @@ static inline int spin_needbreak(spinlock_t *lock) #endif } -/* - * Idle thread specific functions to determine the need_resched - * polling state. - */ -#ifdef TIF_POLLING_NRFLAG -static inline int tsk_is_polling(struct task_struct *p) -{ - return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG); -} - -static inline void __current_set_polling(void) -{ - set_thread_flag(TIF_POLLING_NRFLAG); -} - -static inline bool __must_check current_set_polling_and_test(void) -{ - __current_set_polling(); - - /* - * Polling state must be visible before we test NEED_RESCHED, - * paired by resched_curr() - */ - smp_mb__after_atomic(); - - return unlikely(tif_need_resched()); -} - -static inline void __current_clr_polling(void) -{ - clear_thread_flag(TIF_POLLING_NRFLAG); -} - -static inline bool __must_check current_clr_polling_and_test(void) -{ - __current_clr_polling(); - - /* - * Polling state must be visible before we test NEED_RESCHED, - * paired by resched_curr() - */ - smp_mb__after_atomic(); - - return unlikely(tif_need_resched()); -} - -#else -static inline int tsk_is_polling(struct task_struct *p) { return 0; } -static inline void __current_set_polling(void) { } -static inline void __current_clr_polling(void) { } - -static inline bool __must_check current_set_polling_and_test(void) -{ - return unlikely(tif_need_resched()); -} -static inline bool __must_check current_clr_polling_and_test(void) -{ - return unlikely(tif_need_resched()); -} -#endif - -static inline void current_clr_polling(void) -{ - __current_clr_polling(); - - /* - * Ensure we check TIF_NEED_RESCHED after we clear the polling bit. - * Once the bit is cleared, we'll get IPIs with every new - * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also - * fold. - */ - smp_mb(); /* paired with resched_curr() */ - - preempt_fold_need_resched(); -} - static __always_inline bool need_resched(void) { return unlikely(tif_need_resched()); } /* - * Thread group CPU time accounting. 
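[Editor's sketch, not part of the patch: cond_resched(), kept above, is the canonical way to bound latency in long kernel loops under voluntary preemption; the function and stride below are illustrative.]

    static void zero_many_words(unsigned long *buf, size_t nwords)
    {
        size_t i;

        for (i = 0; i < nwords; i++) {
            buf[i] = 0;
            /* yield occasionally if a higher-priority task is waiting */
            if ((i & 1023) == 0)
                cond_resched();
        }
    }
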
- */ -void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times); -void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times); - -/* - * Reevaluate whether the task has signals pending delivery. - * Wake the task if so. - * This is required every time the blocked sigset_t changes. - * callers must hold sighand->siglock. - */ -extern void recalc_sigpending_and_wake(struct task_struct *t); -extern void recalc_sigpending(void); - -extern void signal_wake_up_state(struct task_struct *t, unsigned int state); - -static inline void signal_wake_up(struct task_struct *t, bool resume) -{ - signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0); -} -static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume) -{ - signal_wake_up_state(t, resume ? __TASK_TRACED : 0); -} - -/* * Wrappers for p->thread_info->cpu access. No-op on UP. */ #ifdef CONFIG_SMP @@ -3524,11 +1530,6 @@ static inline unsigned int task_cpu(const struct task_struct *p) #endif } -static inline int task_node(const struct task_struct *p) -{ - return cpu_to_node(task_cpu(p)); -} - extern void set_task_cpu(struct task_struct *p, unsigned int cpu); #else @@ -3559,100 +1560,8 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); extern long sched_getaffinity(pid_t pid, struct cpumask *mask); -#ifdef CONFIG_CGROUP_SCHED -extern struct task_group root_task_group; -#endif /* CONFIG_CGROUP_SCHED */ - -extern int task_can_switch_user(struct user_struct *up, - struct task_struct *tsk); - -#ifdef CONFIG_TASK_XACCT -static inline void add_rchar(struct task_struct *tsk, ssize_t amt) -{ - tsk->ioac.rchar += amt; -} - -static inline void add_wchar(struct task_struct *tsk, ssize_t amt) -{ - tsk->ioac.wchar += amt; -} - -static inline void inc_syscr(struct task_struct *tsk) -{ - tsk->ioac.syscr++; -} - -static inline void inc_syscw(struct task_struct *tsk) -{ - tsk->ioac.syscw++; -} -#else -static inline void add_rchar(struct task_struct *tsk, ssize_t amt) -{ -} - -static inline void add_wchar(struct task_struct *tsk, ssize_t amt) -{ -} - -static inline void inc_syscr(struct task_struct *tsk) -{ -} - -static inline void inc_syscw(struct task_struct *tsk) -{ -} -#endif - #ifndef TASK_SIZE_OF #define TASK_SIZE_OF(tsk) TASK_SIZE #endif -#ifdef CONFIG_MEMCG -extern void mm_update_next_owner(struct mm_struct *mm); -#else -static inline void mm_update_next_owner(struct mm_struct *mm) -{ -} -#endif /* CONFIG_MEMCG */ - -static inline unsigned long task_rlimit(const struct task_struct *tsk, - unsigned int limit) -{ - return READ_ONCE(tsk->signal->rlim[limit].rlim_cur); -} - -static inline unsigned long task_rlimit_max(const struct task_struct *tsk, - unsigned int limit) -{ - return READ_ONCE(tsk->signal->rlim[limit].rlim_max); -} - -static inline unsigned long rlimit(unsigned int limit) -{ - return task_rlimit(current, limit); -} - -static inline unsigned long rlimit_max(unsigned int limit) -{ - return task_rlimit_max(current, limit); -} - -#define SCHED_CPUFREQ_RT (1U << 0) -#define SCHED_CPUFREQ_DL (1U << 1) -#define SCHED_CPUFREQ_IOWAIT (1U << 2) - -#define SCHED_CPUFREQ_RT_DL (SCHED_CPUFREQ_RT | SCHED_CPUFREQ_DL) - -#ifdef CONFIG_CPU_FREQ -struct update_util_data { - void (*func)(struct update_util_data *data, u64 time, unsigned int flags); -}; - -void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data, - void (*func)(struct update_util_data *data, u64 time, - unsigned int flags)); -void 
cpufreq_remove_update_util_hook(int cpu); -#endif /* CONFIG_CPU_FREQ */ - #endif diff --git a/include/linux/sched/autogroup.h b/include/linux/sched/autogroup.h new file mode 100644 index 000000000000..55cd496df884 --- /dev/null +++ b/include/linux/sched/autogroup.h @@ -0,0 +1,31 @@ +#ifndef _LINUX_SCHED_AUTOGROUP_H +#define _LINUX_SCHED_AUTOGROUP_H + +struct signal_struct; +struct task_struct; +struct task_group; +struct seq_file; + +#ifdef CONFIG_SCHED_AUTOGROUP +extern void sched_autogroup_create_attach(struct task_struct *p); +extern void sched_autogroup_detach(struct task_struct *p); +extern void sched_autogroup_fork(struct signal_struct *sig); +extern void sched_autogroup_exit(struct signal_struct *sig); +extern void sched_autogroup_exit_task(struct task_struct *p); +#ifdef CONFIG_PROC_FS +extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m); +extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice); +#endif +#else +static inline void sched_autogroup_create_attach(struct task_struct *p) { } +static inline void sched_autogroup_detach(struct task_struct *p) { } +static inline void sched_autogroup_fork(struct signal_struct *sig) { } +static inline void sched_autogroup_exit(struct signal_struct *sig) { } +static inline void sched_autogroup_exit_task(struct task_struct *p) { } +#endif + +#ifdef CONFIG_CGROUP_SCHED +extern struct task_group root_task_group; +#endif /* CONFIG_CGROUP_SCHED */ + +#endif /* _LINUX_SCHED_AUTOGROUP_H */ diff --git a/include/linux/sched/clock.h b/include/linux/sched/clock.h new file mode 100644 index 000000000000..4a68c6791207 --- /dev/null +++ b/include/linux/sched/clock.h @@ -0,0 +1,104 @@ +#ifndef _LINUX_SCHED_CLOCK_H +#define _LINUX_SCHED_CLOCK_H + +#include <linux/smp.h> + +/* + * Do not use outside of architecture code which knows its limitations. + * + * sched_clock() has no promise of monotonicity or bounded drift between + * CPUs, use (which you should not) requires disabling IRQs. + * + * Please use one of the three interfaces below. + */ +extern unsigned long long notrace sched_clock(void); + +/* + * See the comment in kernel/sched/clock.c + */ +extern u64 running_clock(void); +extern u64 sched_clock_cpu(int cpu); + + +extern void sched_clock_init(void); + +#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK +static inline void sched_clock_init_late(void) +{ +} + +static inline void sched_clock_tick(void) +{ +} + +static inline void clear_sched_clock_stable(void) +{ +} + +static inline void sched_clock_idle_sleep_event(void) +{ +} + +static inline void sched_clock_idle_wakeup_event(u64 delta_ns) +{ +} + +static inline u64 cpu_clock(int cpu) +{ + return sched_clock(); +} + +static inline u64 local_clock(void) +{ + return sched_clock(); +} +#else +extern void sched_clock_init_late(void); +/* + * Architectures can set this to 1 if they have specified + * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig, + * but then during bootup it turns out that sched_clock() + * is reliable after all: + */ +extern int sched_clock_stable(void); +extern void clear_sched_clock_stable(void); + +extern void sched_clock_tick(void); +extern void sched_clock_idle_sleep_event(void); +extern void sched_clock_idle_wakeup_event(u64 delta_ns); + +/* + * As outlined in clock.c, provides a fast, high resolution, nanosecond + * time source that is monotonic per cpu argument and has bounded drift + * between cpus. 
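[Editor's sketch, not part of the patch: local_clock(), exported by this new header, is only monotonic per CPU, so a simple way to time a short section is to keep both reads on one CPU.]

    static u64 time_section_ns(void (*fn)(void))
    {
        u64 t0, t1;

        preempt_disable();      /* stay on one CPU between the two reads */
        t0 = local_clock();
        fn();
        t1 = local_clock();
        preempt_enable();

        return t1 - t0;         /* nanoseconds, monotonic on this CPU */
    }
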
+ * + * ######################### BIG FAT WARNING ########################## + * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can # + * # go backwards !! # + * #################################################################### + */ +static inline u64 cpu_clock(int cpu) +{ + return sched_clock_cpu(cpu); +} + +static inline u64 local_clock(void) +{ + return sched_clock_cpu(raw_smp_processor_id()); +} +#endif + +#ifdef CONFIG_IRQ_TIME_ACCOUNTING +/* + * An i/f to runtime opt-in for irq time accounting based off of sched_clock. + * The reason for this explicit opt-in is not to have perf penalty with + * slow sched_clocks. + */ +extern void enable_sched_clock_irqtime(void); +extern void disable_sched_clock_irqtime(void); +#else +static inline void enable_sched_clock_irqtime(void) {} +static inline void disable_sched_clock_irqtime(void) {} +#endif + +#endif /* _LINUX_SCHED_CLOCK_H */ diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h new file mode 100644 index 000000000000..69eedcef8f03 --- /dev/null +++ b/include/linux/sched/coredump.h @@ -0,0 +1,74 @@ +#ifndef _LINUX_SCHED_COREDUMP_H +#define _LINUX_SCHED_COREDUMP_H + +#include <linux/mm_types.h> + +#define SUID_DUMP_DISABLE 0 /* No setuid dumping */ +#define SUID_DUMP_USER 1 /* Dump as user of process */ +#define SUID_DUMP_ROOT 2 /* Dump as root */ + +/* mm flags */ + +/* for SUID_DUMP_* above */ +#define MMF_DUMPABLE_BITS 2 +#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1) + +extern void set_dumpable(struct mm_struct *mm, int value); +/* + * This returns the actual value of the suid_dumpable flag. For things + * that are using this for checking for privilege transitions, it must + * test against SUID_DUMP_USER rather than treating it as a boolean + * value. 
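[Editor's sketch, not part of the patch: as the comment above warns, dumpable is a tri-state, so privilege-transition checks must compare against SUID_DUMP_USER exactly rather than treating the value as a boolean.]

    static bool transition_is_safe(struct mm_struct *mm)
    {
        /*
         * A boolean test would wrongly accept SUID_DUMP_ROOT (2);
         * only an exact match with SUID_DUMP_USER (1) is "normal".
         */
        return get_dumpable(mm) == SUID_DUMP_USER;
    }
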
+ */ +static inline int __get_dumpable(unsigned long mm_flags) +{ + return mm_flags & MMF_DUMPABLE_MASK; +} + +static inline int get_dumpable(struct mm_struct *mm) +{ + return __get_dumpable(mm->flags); +} + +/* coredump filter bits */ +#define MMF_DUMP_ANON_PRIVATE 2 +#define MMF_DUMP_ANON_SHARED 3 +#define MMF_DUMP_MAPPED_PRIVATE 4 +#define MMF_DUMP_MAPPED_SHARED 5 +#define MMF_DUMP_ELF_HEADERS 6 +#define MMF_DUMP_HUGETLB_PRIVATE 7 +#define MMF_DUMP_HUGETLB_SHARED 8 +#define MMF_DUMP_DAX_PRIVATE 9 +#define MMF_DUMP_DAX_SHARED 10 + +#define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS +#define MMF_DUMP_FILTER_BITS 9 +#define MMF_DUMP_FILTER_MASK \ + (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT) +#define MMF_DUMP_FILTER_DEFAULT \ + ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\ + (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF) + +#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS +# define MMF_DUMP_MASK_DEFAULT_ELF (1 << MMF_DUMP_ELF_HEADERS) +#else +# define MMF_DUMP_MASK_DEFAULT_ELF 0 +#endif + /* leave room for more dump flags */ +#define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */ +#define MMF_VM_HUGEPAGE 17 /* set when VM_HUGEPAGE is set on vma */ +/* + * This one-shot flag is dropped due to necessity of changing exe once again + * on NFS restore + */ +//#define MMF_EXE_FILE_CHANGED 18 /* see prctl_set_mm_exe_file() */ + +#define MMF_HAS_UPROBES 19 /* has uprobes */ +#define MMF_RECALC_UPROBES 20 /* MMF_HAS_UPROBES can be wrong */ +#define MMF_OOM_SKIP 21 /* mm is of no interest for the OOM killer */ +#define MMF_UNSTABLE 22 /* mm is unstable for copy_from_user */ +#define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */ + +#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK) + +#endif /* _LINUX_SCHED_COREDUMP_H */ diff --git a/include/linux/sched/cpufreq.h b/include/linux/sched/cpufreq.h new file mode 100644 index 000000000000..d2be2ccbb372 --- /dev/null +++ b/include/linux/sched/cpufreq.h @@ -0,0 +1,27 @@ +#ifndef _LINUX_SCHED_CPUFREQ_H +#define _LINUX_SCHED_CPUFREQ_H + +#include <linux/types.h> + +/* + * Interface between cpufreq drivers and the scheduler: + */ + +#define SCHED_CPUFREQ_RT (1U << 0) +#define SCHED_CPUFREQ_DL (1U << 1) +#define SCHED_CPUFREQ_IOWAIT (1U << 2) + +#define SCHED_CPUFREQ_RT_DL (SCHED_CPUFREQ_RT | SCHED_CPUFREQ_DL) + +#ifdef CONFIG_CPU_FREQ +struct update_util_data { + void (*func)(struct update_util_data *data, u64 time, unsigned int flags); +}; + +void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data, + void (*func)(struct update_util_data *data, u64 time, + unsigned int flags)); +void cpufreq_remove_update_util_hook(int cpu); +#endif /* CONFIG_CPU_FREQ */ + +#endif /* _LINUX_SCHED_CPUFREQ_H */ diff --git a/include/linux/sched/cputime.h b/include/linux/sched/cputime.h new file mode 100644 index 000000000000..4c5b9735c1ae --- /dev/null +++ b/include/linux/sched/cputime.h @@ -0,0 +1,187 @@ +#ifndef _LINUX_SCHED_CPUTIME_H +#define _LINUX_SCHED_CPUTIME_H + +#include <linux/sched/signal.h> + +/* + * cputime accounting APIs: + */ + +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE +#include <asm/cputime.h> + +#ifndef cputime_to_nsecs +# define cputime_to_nsecs(__ct) \ + (cputime_to_usecs(__ct) * NSEC_PER_USEC) +#endif +#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ + +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN +extern void task_cputime(struct task_struct *t, + u64 *utime, u64 *stime); +extern u64 task_gtime(struct task_struct *t); +#else +static inline void task_cputime(struct 
task_struct *t, + u64 *utime, u64 *stime) +{ + *utime = t->utime; + *stime = t->stime; +} + +static inline u64 task_gtime(struct task_struct *t) +{ + return t->gtime; +} +#endif + +#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME +static inline void task_cputime_scaled(struct task_struct *t, + u64 *utimescaled, + u64 *stimescaled) +{ + *utimescaled = t->utimescaled; + *stimescaled = t->stimescaled; +} +#else +static inline void task_cputime_scaled(struct task_struct *t, + u64 *utimescaled, + u64 *stimescaled) +{ + task_cputime(t, utimescaled, stimescaled); +} +#endif + +extern void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st); +extern void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st); + + +/* + * Thread group CPU time accounting. + */ +void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times); +void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times); + + +/* + * The following are functions that support scheduler-internal time accounting. + * These functions are generally called at the timer tick. None of this depends + * on CONFIG_SCHEDSTATS. + */ + +/** + * get_running_cputimer - return &tsk->signal->cputimer if cputimer is running + * + * @tsk: Pointer to target task. + */ +#ifdef CONFIG_POSIX_TIMERS +static inline +struct thread_group_cputimer *get_running_cputimer(struct task_struct *tsk) +{ + struct thread_group_cputimer *cputimer = &tsk->signal->cputimer; + + /* Check if cputimer isn't running. This is accessed without locking. */ + if (!READ_ONCE(cputimer->running)) + return NULL; + + /* + * After we flush the task's sum_exec_runtime to sig->sum_sched_runtime + * in __exit_signal(), we won't account to the signal struct further + * cputime consumed by that task, even though the task can still be + * ticking after __exit_signal(). + * + * In order to keep a consistent behaviour between thread group cputime + * and thread group cputimer accounting, lets also ignore the cputime + * elapsing after __exit_signal() in any thread group timer running. + * + * This makes sure that POSIX CPU clocks and timers are synchronized, so + * that a POSIX CPU timer won't expire while the corresponding POSIX CPU + * clock delta is behind the expiring timer value. + */ + if (unlikely(!tsk->sighand)) + return NULL; + + return cputimer; +} +#else +static inline +struct thread_group_cputimer *get_running_cputimer(struct task_struct *tsk) +{ + return NULL; +} +#endif + +/** + * account_group_user_time - Maintain utime for a thread group. + * + * @tsk: Pointer to task structure. + * @cputime: Time value by which to increment the utime field of the + * thread_group_cputime structure. + * + * If thread group time is being maintained, get the structure for the + * running CPU and update the utime field there. + */ +static inline void account_group_user_time(struct task_struct *tsk, + u64 cputime) +{ + struct thread_group_cputimer *cputimer = get_running_cputimer(tsk); + + if (!cputimer) + return; + + atomic64_add(cputime, &cputimer->cputime_atomic.utime); +} + +/** + * account_group_system_time - Maintain stime for a thread group. + * + * @tsk: Pointer to task structure. + * @cputime: Time value by which to increment the stime field of the + * thread_group_cputime structure. + * + * If thread group time is being maintained, get the structure for the + * running CPU and update the stime field there. 
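[Editor's sketch, not part of the patch: reading group totals goes through thread_group_cputime(), which folds live threads into the accumulated counters; task_cputime is assumed to be the plain, non-atomic struct with a sum_exec_runtime field.]

    static u64 group_exec_runtime_ns(struct task_struct *tsk)
    {
        struct task_cputime times;

        thread_group_cputime(tsk, &times);  /* sums the whole thread group */
        return times.sum_exec_runtime;      /* total ns consumed */
    }
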
+ */ +static inline void account_group_system_time(struct task_struct *tsk, + u64 cputime) +{ + struct thread_group_cputimer *cputimer = get_running_cputimer(tsk); + + if (!cputimer) + return; + + atomic64_add(cputime, &cputimer->cputime_atomic.stime); +} + +/** + * account_group_exec_runtime - Maintain exec runtime for a thread group. + * + * @tsk: Pointer to task structure. + * @ns: Time value by which to increment the sum_exec_runtime field + * of the thread_group_cputime structure. + * + * If thread group time is being maintained, get the structure for the + * running CPU and update the sum_exec_runtime field there. + */ +static inline void account_group_exec_runtime(struct task_struct *tsk, + unsigned long long ns) +{ + struct thread_group_cputimer *cputimer = get_running_cputimer(tsk); + + if (!cputimer) + return; + + atomic64_add(ns, &cputimer->cputime_atomic.sum_exec_runtime); +} + +static inline void prev_cputime_init(struct prev_cputime *prev) +{ +#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE + prev->utime = prev->stime = 0; + raw_spin_lock_init(&prev->lock); +#endif +} + +extern unsigned long long +task_sched_runtime(struct task_struct *task); + +#endif /* _LINUX_SCHED_CPUTIME_H */ diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h index 9089a2ae913d..975be862e083 100644 --- a/include/linux/sched/deadline.h +++ b/include/linux/sched/deadline.h @@ -1,5 +1,7 @@ -#ifndef _SCHED_DEADLINE_H -#define _SCHED_DEADLINE_H +#ifndef _LINUX_SCHED_DEADLINE_H +#define _LINUX_SCHED_DEADLINE_H + +#include <linux/sched.h> /* * SCHED_DEADLINE tasks has negative priorities, reflecting @@ -26,4 +28,4 @@ static inline bool dl_time_before(u64 a, u64 b) return (s64)(a - b) < 0; } -#endif /* _SCHED_DEADLINE_H */ +#endif /* _LINUX_SCHED_DEADLINE_H */ diff --git a/include/linux/sched/debug.h b/include/linux/sched/debug.h new file mode 100644 index 000000000000..e0eaee54c5a4 --- /dev/null +++ b/include/linux/sched/debug.h @@ -0,0 +1,50 @@ +#ifndef _LINUX_SCHED_DEBUG_H +#define _LINUX_SCHED_DEBUG_H + +/* + * Various scheduler/task debugging interfaces: + */ + +struct task_struct; + +extern void dump_cpu_task(int cpu); + +/* + * Only dump TASK_* tasks. (0 for all tasks) + */ +extern void show_state_filter(unsigned long state_filter); + +static inline void show_state(void) +{ + show_state_filter(0); +} + +struct pt_regs; + +extern void show_regs(struct pt_regs *); + +/* + * TASK is a pointer to the task whose backtrace we want to see (or NULL for current + * task), SP is the stack pointer of the first frame that should be shown in the back + * trace (or NULL if the entire call-chain of the task should be shown). + */ +extern void show_stack(struct task_struct *task, unsigned long *sp); + +extern void sched_show_task(struct task_struct *p); + +#ifdef CONFIG_SCHED_DEBUG +struct seq_file; +extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m); +extern void proc_sched_set_task(struct task_struct *p); +#endif + +/* Attach to any functions which should be ignored in wchan output. */ +#define __sched __attribute__((__section__(".sched.text"))) + +/* Linker adds these: start and end of __sched functions */ +extern char __sched_text_start[], __sched_text_end[]; + +/* Is this address in the __sched functions? 
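[Editor's note, not part of the patch: the dl_time_before() hunk above uses the signed-difference idiom, which stays correct even if the u64 clock wraps. A worked example:]

    static bool before(u64 a, u64 b)
    {
        return (s64)(a - b) < 0;
    }

    /*
     * With a = U64_MAX - 5 (just before wraparound) and b = 10 (just
     * after), a - b underflows to a huge unsigned value whose top bit
     * is set, so the s64 cast is negative and a still orders before b.
     */
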
*/ +extern int in_sched_functions(unsigned long addr); + +#endif /* _LINUX_SCHED_DEBUG_H */ diff --git a/include/linux/sched/hotplug.h b/include/linux/sched/hotplug.h new file mode 100644 index 000000000000..752ac7e628d7 --- /dev/null +++ b/include/linux/sched/hotplug.h @@ -0,0 +1,24 @@ +#ifndef _LINUX_SCHED_HOTPLUG_H +#define _LINUX_SCHED_HOTPLUG_H + +/* + * Scheduler interfaces for hotplug CPU support: + */ + +extern int sched_cpu_starting(unsigned int cpu); +extern int sched_cpu_activate(unsigned int cpu); +extern int sched_cpu_deactivate(unsigned int cpu); + +#ifdef CONFIG_HOTPLUG_CPU +extern int sched_cpu_dying(unsigned int cpu); +#else +# define sched_cpu_dying NULL +#endif + +#ifdef CONFIG_HOTPLUG_CPU +extern void idle_task_exit(void); +#else +static inline void idle_task_exit(void) {} +#endif + +#endif /* _LINUX_SCHED_HOTPLUG_H */ diff --git a/include/linux/sched/idle.h b/include/linux/sched/idle.h new file mode 100644 index 000000000000..5ca63ebad6b4 --- /dev/null +++ b/include/linux/sched/idle.h @@ -0,0 +1,86 @@ +#ifndef _LINUX_SCHED_IDLE_H +#define _LINUX_SCHED_IDLE_H + +#include <linux/sched.h> + +enum cpu_idle_type { + CPU_IDLE, + CPU_NOT_IDLE, + CPU_NEWLY_IDLE, + CPU_MAX_IDLE_TYPES +}; + +extern void wake_up_if_idle(int cpu); + +/* + * Idle thread specific functions to determine the need_resched + * polling state. + */ +#ifdef TIF_POLLING_NRFLAG + +static inline void __current_set_polling(void) +{ + set_thread_flag(TIF_POLLING_NRFLAG); +} + +static inline bool __must_check current_set_polling_and_test(void) +{ + __current_set_polling(); + + /* + * Polling state must be visible before we test NEED_RESCHED, + * paired by resched_curr() + */ + smp_mb__after_atomic(); + + return unlikely(tif_need_resched()); +} + +static inline void __current_clr_polling(void) +{ + clear_thread_flag(TIF_POLLING_NRFLAG); +} + +static inline bool __must_check current_clr_polling_and_test(void) +{ + __current_clr_polling(); + + /* + * Polling state must be visible before we test NEED_RESCHED, + * paired by resched_curr() + */ + smp_mb__after_atomic(); + + return unlikely(tif_need_resched()); +} + +#else +static inline void __current_set_polling(void) { } +static inline void __current_clr_polling(void) { } + +static inline bool __must_check current_set_polling_and_test(void) +{ + return unlikely(tif_need_resched()); +} +static inline bool __must_check current_clr_polling_and_test(void) +{ + return unlikely(tif_need_resched()); +} +#endif + +static inline void current_clr_polling(void) +{ + __current_clr_polling(); + + /* + * Ensure we check TIF_NEED_RESCHED after we clear the polling bit. + * Once the bit is cleared, we'll get IPIs with every new + * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also + * fold. 
+ */ + smp_mb(); /* paired with resched_curr() */ + + preempt_fold_need_resched(); +} + +#endif /* _LINUX_SCHED_IDLE_H */ diff --git a/include/linux/sched/init.h b/include/linux/sched/init.h new file mode 100644 index 000000000000..127215045285 --- /dev/null +++ b/include/linux/sched/init.h @@ -0,0 +1,11 @@ +#ifndef _LINUX_SCHED_INIT_H +#define _LINUX_SCHED_INIT_H + +/* + * Scheduler init related prototypes: + */ + +extern void sched_init(void); +extern void sched_init_smp(void); + +#endif /* _LINUX_SCHED_INIT_H */ diff --git a/include/linux/sched/jobctl.h b/include/linux/sched/jobctl.h new file mode 100644 index 000000000000..016afa0fb3bb --- /dev/null +++ b/include/linux/sched/jobctl.h @@ -0,0 +1,36 @@ +#ifndef _LINUX_SCHED_JOBCTL_H +#define _LINUX_SCHED_JOBCTL_H + +#include <linux/types.h> + +struct task_struct; + +/* + * task->jobctl flags + */ +#define JOBCTL_STOP_SIGMASK 0xffff /* signr of the last group stop */ + +#define JOBCTL_STOP_DEQUEUED_BIT 16 /* stop signal dequeued */ +#define JOBCTL_STOP_PENDING_BIT 17 /* task should stop for group stop */ +#define JOBCTL_STOP_CONSUME_BIT 18 /* consume group stop count */ +#define JOBCTL_TRAP_STOP_BIT 19 /* trap for STOP */ +#define JOBCTL_TRAP_NOTIFY_BIT 20 /* trap for NOTIFY */ +#define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */ +#define JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */ + +#define JOBCTL_STOP_DEQUEUED (1UL << JOBCTL_STOP_DEQUEUED_BIT) +#define JOBCTL_STOP_PENDING (1UL << JOBCTL_STOP_PENDING_BIT) +#define JOBCTL_STOP_CONSUME (1UL << JOBCTL_STOP_CONSUME_BIT) +#define JOBCTL_TRAP_STOP (1UL << JOBCTL_TRAP_STOP_BIT) +#define JOBCTL_TRAP_NOTIFY (1UL << JOBCTL_TRAP_NOTIFY_BIT) +#define JOBCTL_TRAPPING (1UL << JOBCTL_TRAPPING_BIT) +#define JOBCTL_LISTENING (1UL << JOBCTL_LISTENING_BIT) + +#define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY) +#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK) + +extern bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask); +extern void task_clear_jobctl_trapping(struct task_struct *task); +extern void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask); + +#endif /* _LINUX_SCHED_JOBCTL_H */ diff --git a/include/linux/sched/loadavg.h b/include/linux/sched/loadavg.h new file mode 100644 index 000000000000..4264bc6b2c27 --- /dev/null +++ b/include/linux/sched/loadavg.h @@ -0,0 +1,31 @@ +#ifndef _LINUX_SCHED_LOADAVG_H +#define _LINUX_SCHED_LOADAVG_H + +/* + * These are the constant used to fake the fixed-point load-average + * counting. Some notes: + * - 11 bit fractions expand to 22 bits by the multiplies: this gives + * a load-average precision of 10 bits integer + 11 bits fractional + * - if you want to count load-averages more often, you need more + * precision, or rounding will get you. With 2-second counting freq, + * the EXP_n values would be 1981, 2034 and 2043 if still using only + * 11 bit fractions. 
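[Editor's sketch, not part of the patch: the fixed-point scheme described in the loadavg comment above, using the FSHIFT/FIXED_1/EXP_1 constants defined just below, can be demonstrated standalone in userspace. The tick count and load of two runnable tasks are illustrative.]

    #include <stdio.h>

    #define FSHIFT  11                   /* bits of fraction */
    #define FIXED_1 (1 << FSHIFT)        /* 1.0 in fixed point */
    #define EXP_1   1884                 /* 1/exp(5sec/1min), fixed point */

    static unsigned long calc_load(unsigned long load, unsigned long exp,
                                   unsigned long n)
    {
        load *= exp;
        load += n * (FIXED_1 - exp);
        return load >> FSHIFT;
    }

    int main(void)
    {
        unsigned long avg = 0;
        int i;

        /* 24 five-second ticks (~2 minutes) with 2 runnable tasks */
        for (i = 0; i < 24; i++)
            avg = calc_load(avg, EXP_1, 2 * FIXED_1);

        /* the 1-minute average converges toward 2.00 from below */
        printf("load1 = %lu.%02lu\n", avg >> FSHIFT,
               ((avg & (FIXED_1 - 1)) * 100) >> FSHIFT);
        return 0;
    }
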
+ */ +extern unsigned long avenrun[]; /* Load averages */ +extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift); + +#define FSHIFT 11 /* nr of bits of precision */ +#define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */ +#define LOAD_FREQ (5*HZ+1) /* 5 sec intervals */ +#define EXP_1 1884 /* 1/exp(5sec/1min) as fixed-point */ +#define EXP_5 2014 /* 1/exp(5sec/5min) */ +#define EXP_15 2037 /* 1/exp(5sec/15min) */ + +#define CALC_LOAD(load,exp,n) \ + load *= exp; \ + load += n*(FIXED_1-exp); \ + load >>= FSHIFT; + +extern void calc_global_load(unsigned long ticks); + +#endif /* _LINUX_SCHED_LOADAVG_H */ diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h new file mode 100644 index 000000000000..830953ebb391 --- /dev/null +++ b/include/linux/sched/mm.h @@ -0,0 +1,174 @@ +#ifndef _LINUX_SCHED_MM_H +#define _LINUX_SCHED_MM_H + +#include <linux/kernel.h> +#include <linux/atomic.h> +#include <linux/sched.h> +#include <linux/mm_types.h> +#include <linux/gfp.h> + +/* + * Routines for handling mm_structs + */ +extern struct mm_struct * mm_alloc(void); + +/** + * mmgrab() - Pin a &struct mm_struct. + * @mm: The &struct mm_struct to pin. + * + * Make sure that @mm will not get freed even after the owning task + * exits. This doesn't guarantee that the associated address space + * will still exist later on and mmget_not_zero() has to be used before + * accessing it. + * + * This is a preferred way to to pin @mm for a longer/unbounded amount + * of time. + * + * Use mmdrop() to release the reference acquired by mmgrab(). + * + * See also <Documentation/vm/active_mm.txt> for an in-depth explanation + * of &mm_struct.mm_count vs &mm_struct.mm_users. + */ +static inline void mmgrab(struct mm_struct *mm) +{ + atomic_inc(&mm->mm_count); +} + +/* mmdrop drops the mm and the page tables */ +extern void __mmdrop(struct mm_struct *); +static inline void mmdrop(struct mm_struct *mm) +{ + if (unlikely(atomic_dec_and_test(&mm->mm_count))) + __mmdrop(mm); +} + +static inline void mmdrop_async_fn(struct work_struct *work) +{ + struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work); + __mmdrop(mm); +} + +static inline void mmdrop_async(struct mm_struct *mm) +{ + if (unlikely(atomic_dec_and_test(&mm->mm_count))) { + INIT_WORK(&mm->async_put_work, mmdrop_async_fn); + schedule_work(&mm->async_put_work); + } +} + +/** + * mmget() - Pin the address space associated with a &struct mm_struct. + * @mm: The address space to pin. + * + * Make sure that the address space of the given &struct mm_struct doesn't + * go away. This does not protect against parts of the address space being + * modified or freed, however. + * + * Never use this function to pin this address space for an + * unbounded/indefinite amount of time. + * + * Use mmput() to release the reference acquired by mmget(). + * + * See also <Documentation/vm/active_mm.txt> for an in-depth explanation + * of &mm_struct.mm_count vs &mm_struct.mm_users. + */ +static inline void mmget(struct mm_struct *mm) +{ + atomic_inc(&mm->mm_users); +} + +static inline bool mmget_not_zero(struct mm_struct *mm) +{ + return atomic_inc_not_zero(&mm->mm_users); +} + +/* mmput gets rid of the mappings and all user-space */ +extern void mmput(struct mm_struct *); +#ifdef CONFIG_MMU +/* same as above but performs the slow path from the async context. 
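[Editor's sketch, not part of the patch: the two pinning flavors introduced above differ in what they keep alive; mmgrab() (mm_count) keeps the struct mm_struct itself, mmget() (mm_users) keeps the address space usable. The helper names below are illustrative.]

    /* Long-lived bookkeeping: only the struct must survive */
    static void remember_mm(struct mm_struct *mm, struct mm_struct **slot)
    {
        mmgrab(mm);                     /* paired with mmdrop() later */
        *slot = mm;
    }

    /* Short-lived access to the mappings: the address space must survive */
    static bool touch_mm(struct mm_struct *mm)
    {
        if (!mmget_not_zero(mm))        /* fails if already exiting */
            return false;
        /* ... operate on mm's mappings ... */
        mmput(mm);                      /* may sleep on the last reference */
        return true;
    }
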
Can + * be called from the atomic context as well + */ +extern void mmput_async(struct mm_struct *); +#endif + +/* Grab a reference to a task's mm, if it is not already going away */ +extern struct mm_struct *get_task_mm(struct task_struct *task); +/* + * Grab a reference to a task's mm, if it is not already going away + * and ptrace_may_access with the mode parameter passed to it + * succeeds. + */ +extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode); +/* Remove the current tasks stale references to the old mm_struct */ +extern void mm_release(struct task_struct *, struct mm_struct *); + +#ifdef CONFIG_MEMCG +extern void mm_update_next_owner(struct mm_struct *mm); +#else +static inline void mm_update_next_owner(struct mm_struct *mm) +{ +} +#endif /* CONFIG_MEMCG */ + +#ifdef CONFIG_MMU +extern void arch_pick_mmap_layout(struct mm_struct *mm); +extern unsigned long +arch_get_unmapped_area(struct file *, unsigned long, unsigned long, + unsigned long, unsigned long); +extern unsigned long +arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, + unsigned long flags); +#else +static inline void arch_pick_mmap_layout(struct mm_struct *mm) {} +#endif + +static inline bool in_vfork(struct task_struct *tsk) +{ + bool ret; + + /* + * need RCU to access ->real_parent if CLONE_VM was used along with + * CLONE_PARENT. + * + * We check real_parent->mm == tsk->mm because CLONE_VFORK does not + * imply CLONE_VM + * + * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus + * ->real_parent is not necessarily the task doing vfork(), so in + * theory we can't rely on task_lock() if we want to dereference it. + * + * And in this case we can't trust the real_parent->mm == tsk->mm + * check, it can be false negative. But we do not care, if init or + * another oom-unkillable task does this it should blame itself. + */ + rcu_read_lock(); + ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm; + rcu_read_unlock(); + + return ret; +} + +/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags + * __GFP_FS is also cleared as it implies __GFP_IO. 
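[Editor's sketch, not part of the patch: get_task_mm() above returns an mm_users reference, or NULL for kernel threads and exiting tasks, and must be balanced with mmput().]

    static unsigned long task_total_vm(struct task_struct *tsk)
    {
        struct mm_struct *mm = get_task_mm(tsk);
        unsigned long pages = 0;

        if (mm) {
            pages = mm->total_vm;   /* mapped pages */
            mmput(mm);              /* may sleep; see mmput_async() above */
        }

        return pages;
    }
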
+ */ +static inline gfp_t memalloc_noio_flags(gfp_t flags) +{ + if (unlikely(current->flags & PF_MEMALLOC_NOIO)) + flags &= ~(__GFP_IO | __GFP_FS); + return flags; +} + +static inline unsigned int memalloc_noio_save(void) +{ + unsigned int flags = current->flags & PF_MEMALLOC_NOIO; + current->flags |= PF_MEMALLOC_NOIO; + return flags; +} + +static inline void memalloc_noio_restore(unsigned int flags) +{ + current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags; +} + +#endif /* _LINUX_SCHED_MM_H */ diff --git a/include/linux/sched/nohz.h b/include/linux/sched/nohz.h new file mode 100644 index 000000000000..4995b717500b --- /dev/null +++ b/include/linux/sched/nohz.h @@ -0,0 +1,43 @@ +#ifndef _LINUX_SCHED_NOHZ_H +#define _LINUX_SCHED_NOHZ_H + +/* + * This is the interface between the scheduler and nohz/dyntics: + */ + +#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) +extern void cpu_load_update_nohz_start(void); +extern void cpu_load_update_nohz_stop(void); +#else +static inline void cpu_load_update_nohz_start(void) { } +static inline void cpu_load_update_nohz_stop(void) { } +#endif + +#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) +extern void nohz_balance_enter_idle(int cpu); +extern void set_cpu_sd_state_idle(void); +extern int get_nohz_timer_target(void); +#else +static inline void nohz_balance_enter_idle(int cpu) { } +static inline void set_cpu_sd_state_idle(void) { } +#endif + +#ifdef CONFIG_NO_HZ_COMMON +void calc_load_enter_idle(void); +void calc_load_exit_idle(void); +#else +static inline void calc_load_enter_idle(void) { } +static inline void calc_load_exit_idle(void) { } +#endif /* CONFIG_NO_HZ_COMMON */ + +#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP) +extern void wake_up_nohz_cpu(int cpu); +#else +static inline void wake_up_nohz_cpu(int cpu) { } +#endif + +#ifdef CONFIG_NO_HZ_FULL +extern u64 scheduler_tick_max_deferment(void); +#endif + +#endif /* _LINUX_SCHED_NOHZ_H */ diff --git a/include/linux/sched/numa_balancing.h b/include/linux/sched/numa_balancing.h new file mode 100644 index 000000000000..35d5fc77b4be --- /dev/null +++ b/include/linux/sched/numa_balancing.h @@ -0,0 +1,46 @@ +#ifndef _LINUX_SCHED_NUMA_BALANCING_H +#define _LINUX_SCHED_NUMA_BALANCING_H + +/* + * This is the interface between the scheduler and the MM that + * implements memory access pattern based NUMA-balancing: + */ + +#include <linux/sched.h> + +#define TNF_MIGRATED 0x01 +#define TNF_NO_GROUP 0x02 +#define TNF_SHARED 0x04 +#define TNF_FAULT_LOCAL 0x08 +#define TNF_MIGRATE_FAIL 0x10 + +#ifdef CONFIG_NUMA_BALANCING +extern void task_numa_fault(int last_node, int node, int pages, int flags); +extern pid_t task_numa_group_id(struct task_struct *p); +extern void set_numabalancing_state(bool enabled); +extern void task_numa_free(struct task_struct *p); +extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page, + int src_nid, int dst_cpu); +#else +static inline void task_numa_fault(int last_node, int node, int pages, + int flags) +{ +} +static inline pid_t task_numa_group_id(struct task_struct *p) +{ + return 0; +} +static inline void set_numabalancing_state(bool enabled) +{ +} +static inline void task_numa_free(struct task_struct *p) +{ +} +static inline bool should_numa_migrate_memory(struct task_struct *p, + struct page *page, int src_nid, int dst_cpu) +{ + return true; +} +#endif + +#endif /* _LINUX_SCHED_NUMA_BALANCING_H */ diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h index d9cf5a5762d9..2cc450f6ec54 100644 --- 
a/include/linux/sched/prio.h +++ b/include/linux/sched/prio.h @@ -1,5 +1,5 @@ -#ifndef _SCHED_PRIO_H -#define _SCHED_PRIO_H +#ifndef _LINUX_SCHED_PRIO_H +#define _LINUX_SCHED_PRIO_H #define MAX_NICE 19 #define MIN_NICE -20 @@ -57,4 +57,4 @@ static inline long rlimit_to_nice(long prio) return (MAX_NICE - prio + 1); } -#endif /* _SCHED_PRIO_H */ +#endif /* _LINUX_SCHED_PRIO_H */ diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h index a30b172df6e1..3bd668414f61 100644 --- a/include/linux/sched/rt.h +++ b/include/linux/sched/rt.h @@ -1,7 +1,9 @@ -#ifndef _SCHED_RT_H -#define _SCHED_RT_H +#ifndef _LINUX_SCHED_RT_H +#define _LINUX_SCHED_RT_H -#include <linux/sched/prio.h> +#include <linux/sched.h> + +struct task_struct; static inline int rt_prio(int prio) { @@ -57,4 +59,4 @@ extern void normalize_rt_tasks(void); */ #define RR_TIMESLICE (100 * HZ / 1000) -#endif /* _SCHED_RT_H */ +#endif /* _LINUX_SCHED_RT_H */ diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h new file mode 100644 index 000000000000..2cf446704cd4 --- /dev/null +++ b/include/linux/sched/signal.h @@ -0,0 +1,613 @@ +#ifndef _LINUX_SCHED_SIGNAL_H +#define _LINUX_SCHED_SIGNAL_H + +#include <linux/rculist.h> +#include <linux/signal.h> +#include <linux/sched.h> +#include <linux/sched/jobctl.h> +#include <linux/sched/task.h> +#include <linux/cred.h> + +/* + * Types defining task->signal and task->sighand and APIs using them: + */ + +struct sighand_struct { + atomic_t count; + struct k_sigaction action[_NSIG]; + spinlock_t siglock; + wait_queue_head_t signalfd_wqh; +}; + +/* + * Per-process accounting stats: + */ +struct pacct_struct { + int ac_flag; + long ac_exitcode; + unsigned long ac_mem; + u64 ac_utime, ac_stime; + unsigned long ac_minflt, ac_majflt; +}; + +struct cpu_itimer { + u64 expires; + u64 incr; +}; + +/* + * This is the atomic variant of task_cputime, which can be used for + * storing and updating task_cputime statistics without locking. + */ +struct task_cputime_atomic { + atomic64_t utime; + atomic64_t stime; + atomic64_t sum_exec_runtime; +}; + +#define INIT_CPUTIME_ATOMIC \ + (struct task_cputime_atomic) { \ + .utime = ATOMIC64_INIT(0), \ + .stime = ATOMIC64_INIT(0), \ + .sum_exec_runtime = ATOMIC64_INIT(0), \ + } +/** + * struct thread_group_cputimer - thread group interval timer counts + * @cputime_atomic: atomic thread group interval timers. + * @running: true when there are timers running and + * @cputime_atomic receives updates. + * @checking_timer: true when a thread in the group is in the + * process of checking for thread group timers. + * + * This structure contains the version of task_cputime, above, that is + * used for thread group CPU timer calculations. + */ +struct thread_group_cputimer { + struct task_cputime_atomic cputime_atomic; + bool running; + bool checking_timer; +}; + +/* + * NOTE! "signal_struct" does not have its own + * locking, because a shared signal_struct always + * implies a shared sighand_struct, so locking + * sighand_struct is always a proper superset of + * the locking of signal_struct. 
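[Editor's sketch, not part of the patch: per the NOTE above, siglock is what stabilizes signal_struct, and lock_task_sighand() is the safe way to take it on a possibly-exiting task. A sketch reading the thread count under the lock:]

    static int group_thread_count(struct task_struct *tsk)
    {
        unsigned long flags;
        int n = 0;

        if (lock_task_sighand(tsk, &flags)) {  /* NULL once tsk has exited */
            n = tsk->signal->nr_threads;
            unlock_task_sighand(tsk, &flags);
        }

        return n;
    }
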
+ */ +struct signal_struct { + atomic_t sigcnt; + atomic_t live; + int nr_threads; + struct list_head thread_head; + + wait_queue_head_t wait_chldexit; /* for wait4() */ + + /* current thread group signal load-balancing target: */ + struct task_struct *curr_target; + + /* shared signal handling: */ + struct sigpending shared_pending; + + /* thread group exit support */ + int group_exit_code; + /* overloaded: + * - notify group_exit_task when ->count is equal to notify_count + * - everyone except group_exit_task is stopped during signal delivery + * of fatal signals, group_exit_task processes the signal. + */ + int notify_count; + struct task_struct *group_exit_task; + + /* thread group stop support, overloads group_exit_code too */ + int group_stop_count; + unsigned int flags; /* see SIGNAL_* flags below */ + + /* + * PR_SET_CHILD_SUBREAPER marks a process, like a service + * manager, to re-parent orphan (double-forking) child processes + * to this process instead of 'init'. The service manager is + * able to receive SIGCHLD signals and is able to investigate + * the process until it calls wait(). All children of this + * process will inherit a flag if they should look for a + * child_subreaper process at exit. + */ + unsigned int is_child_subreaper:1; + unsigned int has_child_subreaper:1; + +#ifdef CONFIG_POSIX_TIMERS + + /* POSIX.1b Interval Timers */ + int posix_timer_id; + struct list_head posix_timers; + + /* ITIMER_REAL timer for the process */ + struct hrtimer real_timer; + ktime_t it_real_incr; + + /* + * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use + * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these + * values are defined to 0 and 1 respectively + */ + struct cpu_itimer it[2]; + + /* + * Thread group totals for process CPU timers. + * See thread_group_cputimer(), et al, for details. + */ + struct thread_group_cputimer cputimer; + + /* Earliest-expiration cache. */ + struct task_cputime cputime_expires; + + struct list_head cpu_timers[3]; + +#endif + + struct pid *leader_pid; + +#ifdef CONFIG_NO_HZ_FULL + atomic_t tick_dep_mask; +#endif + + struct pid *tty_old_pgrp; + + /* boolean value for session group leader */ + int leader; + + struct tty_struct *tty; /* NULL if no tty */ + +#ifdef CONFIG_SCHED_AUTOGROUP + struct autogroup *autogroup; +#endif + /* + * Cumulative resource counters for dead threads in the group, + * and for reaped dead child processes forked by this group. + * Live threads maintain their own counters and add to these + * in __exit_signal, except for the group leader. + */ + seqlock_t stats_lock; + u64 utime, stime, cutime, cstime; + u64 gtime; + u64 cgtime; + struct prev_cputime prev_cputime; + unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw; + unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt; + unsigned long inblock, oublock, cinblock, coublock; + unsigned long maxrss, cmaxrss; + struct task_io_accounting ioac; + + /* + * Cumulative ns of scheduled CPU time of dead threads in the + * group, not including a zombie group leader. (This only differs + * from jiffies_to_ns(utime + stime) if sched_clock uses something + * other than jiffies.) + */ + unsigned long long sum_sched_runtime; + + /* + * We don't bother to synchronize most readers of this at all, + * because there is no reader checking a limit that actually needs + * to get both rlim_cur and rlim_max atomically, and either one + * alone is a single word that can safely be read normally.
+ * getrlimit/setrlimit use task_lock(current->group_leader) to + * protect this instead of the siglock, because they really + * have no need to disable irqs. + */ + struct rlimit rlim[RLIM_NLIMITS]; + +#ifdef CONFIG_BSD_PROCESS_ACCT + struct pacct_struct pacct; /* per-process accounting information */ +#endif +#ifdef CONFIG_TASKSTATS + struct taskstats *stats; +#endif +#ifdef CONFIG_AUDIT + unsigned audit_tty; + struct tty_audit_buf *tty_audit_buf; +#endif + + /* + * Thread is the potential origin of an oom condition; kill first on + * oom + */ + bool oom_flag_origin; + short oom_score_adj; /* OOM kill score adjustment */ + short oom_score_adj_min; /* OOM kill score adjustment min value. + * Only settable by CAP_SYS_RESOURCE. */ + struct mm_struct *oom_mm; /* recorded mm when the thread group got + * killed by the oom killer */ + + struct mutex cred_guard_mutex; /* guard against foreign influences on + * credential calculations + * (notably ptrace) */ +}; + +/* + * Bits in flags field of signal_struct. + */ +#define SIGNAL_STOP_STOPPED 0x00000001 /* job control stop in effect */ +#define SIGNAL_STOP_CONTINUED 0x00000002 /* SIGCONT since WCONTINUED reap */ +#define SIGNAL_GROUP_EXIT 0x00000004 /* group exit in progress */ +#define SIGNAL_GROUP_COREDUMP 0x00000008 /* coredump in progress */ +/* + * Pending notifications to parent. + */ +#define SIGNAL_CLD_STOPPED 0x00000010 +#define SIGNAL_CLD_CONTINUED 0x00000020 +#define SIGNAL_CLD_MASK (SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED) + +#define SIGNAL_UNKILLABLE 0x00000040 /* for init: ignore fatal signals */ + +#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \ + SIGNAL_STOP_CONTINUED) + +static inline void signal_set_stop_flags(struct signal_struct *sig, + unsigned int flags) +{ + WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP)); + sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags; +} + +/* If true, all threads except ->group_exit_task have pending SIGKILL */ +static inline int signal_group_exit(const struct signal_struct *sig) +{ + return (sig->flags & SIGNAL_GROUP_EXIT) || + (sig->group_exit_task != NULL); +} + +extern void flush_signals(struct task_struct *); +extern void ignore_signals(struct task_struct *); +extern void flush_signal_handlers(struct task_struct *, int force_default); +extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info); + +static inline int kernel_dequeue_signal(siginfo_t *info) +{ + struct task_struct *tsk = current; + siginfo_t __info; + int ret; + + spin_lock_irq(&tsk->sighand->siglock); + ret = dequeue_signal(tsk, &tsk->blocked, info ?: &__info); + spin_unlock_irq(&tsk->sighand->siglock); + + return ret; +} + +static inline void kernel_signal_stop(void) +{ + spin_lock_irq(&current->sighand->siglock); + if (current->jobctl & JOBCTL_STOP_DEQUEUED) + __set_current_state(TASK_STOPPED); + spin_unlock_irq(&current->sighand->siglock); + + schedule(); +} +extern int send_sig_info(int, struct siginfo *, struct task_struct *); +extern int force_sigsegv(int, struct task_struct *); +extern int force_sig_info(int, struct siginfo *, struct task_struct *); +extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp); +extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid); +extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *, + const struct cred *, u32); +extern int kill_pgrp(struct pid *pid, int sig, int priv); +extern int kill_pid(struct pid *pid, int sig, int priv); +extern int kill_proc_info(int, struct siginfo *, pid_t);
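A hedged usage sketch for the helpers above (hypothetical kernel-thread code, not part of this patch; assumes <linux/kthread.h>, and signal_pending() as defined further down in this header): a kthread opts in to SIGTERM and consumes it with kernel_dequeue_signal().

static int demo_signal_worker(void *unused)	/* hypothetical name */
{
	allow_signal(SIGTERM);		/* kthreads ignore signals by default */

	while (!kthread_should_stop()) {
		schedule_timeout_interruptible(HZ);
		if (signal_pending(current)) {
			kernel_dequeue_signal(NULL);	/* consume the pending signal */
			break;
		}
	}
	return 0;
}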
+extern __must_check bool do_notify_parent(struct task_struct *, int); +extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent); +extern void force_sig(int, struct task_struct *); +extern int send_sig(int, struct task_struct *, int); +extern int zap_other_threads(struct task_struct *p); +extern struct sigqueue *sigqueue_alloc(void); +extern void sigqueue_free(struct sigqueue *); +extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group); +extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *); + +static inline int restart_syscall(void) +{ + set_tsk_thread_flag(current, TIF_SIGPENDING); + return -ERESTARTNOINTR; +} + +static inline int signal_pending(struct task_struct *p) +{ + return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING)); +} + +static inline int __fatal_signal_pending(struct task_struct *p) +{ + return unlikely(sigismember(&p->pending.signal, SIGKILL)); +} + +static inline int fatal_signal_pending(struct task_struct *p) +{ + return signal_pending(p) && __fatal_signal_pending(p); +} + +static inline int signal_pending_state(long state, struct task_struct *p) +{ + if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL))) + return 0; + if (!signal_pending(p)) + return 0; + + return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p); +} + +/* + * Reevaluate whether the task has signals pending delivery. + * Wake the task if so. + * This is required every time the blocked sigset_t changes. + * callers must hold sighand->siglock. + */ +extern void recalc_sigpending_and_wake(struct task_struct *t); +extern void recalc_sigpending(void); + +extern void signal_wake_up_state(struct task_struct *t, unsigned int state); + +static inline void signal_wake_up(struct task_struct *t, bool resume) +{ + signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0); +} +static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume) +{ + signal_wake_up_state(t, resume ? __TASK_TRACED : 0); +} + +#ifdef TIF_RESTORE_SIGMASK +/* + * Legacy restore_sigmask accessors. These are inefficient on + * SMP architectures because they require atomic operations. + */ + +/** + * set_restore_sigmask() - make sure saved_sigmask processing gets done + * + * This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code + * will run before returning to user mode, to process the flag. For + * all callers, TIF_SIGPENDING is already set or it's no harm to set + * it. TIF_RESTORE_SIGMASK need not be in the set of bits that the + * arch code will notice on return to user mode, in case those bits + * are scarce. We set TIF_SIGPENDING here to ensure that the arch + * signal code always gets run when TIF_RESTORE_SIGMASK is set. + */ +static inline void set_restore_sigmask(void) +{ + set_thread_flag(TIF_RESTORE_SIGMASK); + WARN_ON(!test_thread_flag(TIF_SIGPENDING)); +} +static inline void clear_restore_sigmask(void) +{ + clear_thread_flag(TIF_RESTORE_SIGMASK); +} +static inline bool test_restore_sigmask(void) +{ + return test_thread_flag(TIF_RESTORE_SIGMASK); +} +static inline bool test_and_clear_restore_sigmask(void) +{ + return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK); +} + +#else /* TIF_RESTORE_SIGMASK */ + +/* Higher-quality implementation, used if TIF_RESTORE_SIGMASK doesn't exist. 
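For context, the canonical caller of set_restore_sigmask() is the sigsuspend-style path; the following is a paraphrased sketch of the kernel/signal.c logic, not the literal implementation:

static int sigsuspend_sketch(sigset_t *newset)
{
	current->saved_sigmask = current->blocked;	/* stash the old mask */
	set_current_blocked(newset);			/* apply the caller's mask */

	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	set_restore_sigmask();	/* arch signal code restores saved_sigmask */
	return -ERESTARTNOHAND;
}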
*/ +static inline void set_restore_sigmask(void) +{ + current->restore_sigmask = true; + WARN_ON(!test_thread_flag(TIF_SIGPENDING)); +} +static inline void clear_restore_sigmask(void) +{ + current->restore_sigmask = false; +} +static inline bool test_restore_sigmask(void) +{ + return current->restore_sigmask; +} +static inline bool test_and_clear_restore_sigmask(void) +{ + if (!current->restore_sigmask) + return false; + current->restore_sigmask = false; + return true; +} +#endif + +static inline void restore_saved_sigmask(void) +{ + if (test_and_clear_restore_sigmask()) + __set_current_blocked(&current->saved_sigmask); +} + +static inline sigset_t *sigmask_to_save(void) +{ + sigset_t *res = &current->blocked; + if (unlikely(test_restore_sigmask())) + res = &current->saved_sigmask; + return res; +} + +static inline int kill_cad_pid(int sig, int priv) +{ + return kill_pid(cad_pid, sig, priv); +} + +/* These can be the second arg to send_sig_info/send_group_sig_info. */ +#define SEND_SIG_NOINFO ((struct siginfo *) 0) +#define SEND_SIG_PRIV ((struct siginfo *) 1) +#define SEND_SIG_FORCED ((struct siginfo *) 2) + +/* + * True if we are on the alternate signal stack. + */ +static inline int on_sig_stack(unsigned long sp) +{ + /* + * If the signal stack is SS_AUTODISARM then, by construction, we + * can't be on the signal stack unless user code deliberately set + * SS_AUTODISARM when we were already on it. + * + * This improves reliability: if user state gets corrupted such that + * the stack pointer points very close to the end of the signal stack, + * then this check will enable the signal to be handled anyway. + */ + if (current->sas_ss_flags & SS_AUTODISARM) + return 0; + +#ifdef CONFIG_STACK_GROWSUP + return sp >= current->sas_ss_sp && + sp - current->sas_ss_sp < current->sas_ss_size; +#else + return sp > current->sas_ss_sp && + sp - current->sas_ss_sp <= current->sas_ss_size; +#endif +} + +static inline int sas_ss_flags(unsigned long sp) +{ + if (!current->sas_ss_size) + return SS_DISABLE; + + return on_sig_stack(sp) ? SS_ONSTACK : 0; +} + +static inline void sas_ss_reset(struct task_struct *p) +{ + p->sas_ss_sp = 0; + p->sas_ss_size = 0; + p->sas_ss_flags = SS_DISABLE; +} + +static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig) +{ + if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && ! sas_ss_flags(sp)) +#ifdef CONFIG_STACK_GROWSUP + return current->sas_ss_sp; +#else + return current->sas_ss_sp + current->sas_ss_size; +#endif + return sp; +} + +extern void __cleanup_sighand(struct sighand_struct *); +extern void flush_itimer_signals(void); + +#define tasklist_empty() \ + list_empty(&init_task.tasks) + +#define next_task(p) \ + list_entry_rcu((p)->tasks.next, struct task_struct, tasks) + +#define for_each_process(p) \ + for (p = &init_task ; (p = next_task(p)) != &init_task ; ) + +extern bool current_is_single_threaded(void); + +/* + * Careful: do_each_thread/while_each_thread is a double loop so + * 'break' will not work as expected - use goto instead. + */ +#define do_each_thread(g, t) \ + for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do + +#define while_each_thread(g, t) \ + while ((t = next_thread(t)) != g) + +#define __for_each_thread(signal, t) \ + list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node) + +#define for_each_thread(p, t) \ + __for_each_thread((p)->signal, t) + +/* Careful: this is a double loop, 'break' won't work as expected.
*/ +#define for_each_process_thread(p, t) \ + for_each_process(p) for_each_thread(p, t) + +typedef int (*proc_visitor)(struct task_struct *p, void *data); +void walk_process_tree(struct task_struct *top, proc_visitor, void *); + +static inline int get_nr_threads(struct task_struct *tsk) +{ + return tsk->signal->nr_threads; +} + +static inline bool thread_group_leader(struct task_struct *p) +{ + return p->exit_signal >= 0; +} + +/* Due to the insanities of de_thread it is possible for a process + * to have the pid of the thread group leader without actually being + * the thread group leader. For iteration through the pids in proc + * all we care about is that we have a task with the appropriate + * pid, we don't actually care if we have the right task. + */ +static inline bool has_group_leader_pid(struct task_struct *p) +{ + return task_pid(p) == p->signal->leader_pid; +} + +static inline +bool same_thread_group(struct task_struct *p1, struct task_struct *p2) +{ + return p1->signal == p2->signal; +} + +static inline struct task_struct *next_thread(const struct task_struct *p) +{ + return list_entry_rcu(p->thread_group.next, + struct task_struct, thread_group); +} + +static inline int thread_group_empty(struct task_struct *p) +{ + return list_empty(&p->thread_group); +} + +#define delay_group_leader(p) \ + (thread_group_leader(p) && !thread_group_empty(p)) + +extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, + unsigned long *flags); + +static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk, + unsigned long *flags) +{ + struct sighand_struct *ret; + + ret = __lock_task_sighand(tsk, flags); + (void)__cond_lock(&tsk->sighand->siglock, ret); + return ret; +} + +static inline void unlock_task_sighand(struct task_struct *tsk, + unsigned long *flags) +{ + spin_unlock_irqrestore(&tsk->sighand->siglock, *flags); +} + +static inline unsigned long task_rlimit(const struct task_struct *tsk, + unsigned int limit) +{ + return READ_ONCE(tsk->signal->rlim[limit].rlim_cur); +} + +static inline unsigned long task_rlimit_max(const struct task_struct *tsk, + unsigned int limit) +{ + return READ_ONCE(tsk->signal->rlim[limit].rlim_max); +} + +static inline unsigned long rlimit(unsigned int limit) +{ + return task_rlimit(current, limit); +} + +static inline unsigned long rlimit_max(unsigned int limit) +{ + return task_rlimit_max(current, limit); +} + +#endif /* _LINUX_SCHED_SIGNAL_H */ diff --git a/include/linux/sched/stat.h b/include/linux/sched/stat.h new file mode 100644 index 000000000000..141b74c53fad --- /dev/null +++ b/include/linux/sched/stat.h @@ -0,0 +1,40 @@ +#ifndef _LINUX_SCHED_STAT_H +#define _LINUX_SCHED_STAT_H + +#include <linux/percpu.h> + +/* + * Various counters maintained by the scheduler and fork(), + * exposed via /proc, sys.c or used by drivers via these APIs. + * + * ( Note that all these values are acquired without locking, + * so they can only be relied on in narrow circumstances.
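Because nothing is locked, the counters that follow only ever yield a snapshot; a hypothetical debug helper (names demo-only, not part of the patch) illustrates the intended advisory use:

static void dump_sched_counts(void)
{
	/* Values may already be stale by the time they are printed. */
	pr_info("forks=%lu threads=%d running=%lu iowait=%lu\n",
		total_forks, nr_threads, nr_running(), nr_iowait());
}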
) + */ + +extern unsigned long total_forks; +extern int nr_threads; +DECLARE_PER_CPU(unsigned long, process_counts); +extern int nr_processes(void); +extern unsigned long nr_running(void); +extern bool single_task_running(void); +extern unsigned long nr_iowait(void); +extern unsigned long nr_iowait_cpu(int cpu); +extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load); + +static inline int sched_info_on(void) +{ +#ifdef CONFIG_SCHEDSTATS + return 1; +#elif defined(CONFIG_TASK_DELAY_ACCT) + extern int delayacct_on; + return delayacct_on; +#else + return 0; +#endif +} + +#ifdef CONFIG_SCHEDSTATS +void force_schedstat_enabled(void); +#endif + +#endif /* _LINUX_SCHED_STAT_H */ diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h index 49308e142aae..0f5ecd4d298e 100644 --- a/include/linux/sched/sysctl.h +++ b/include/linux/sched/sysctl.h @@ -1,5 +1,9 @@ -#ifndef _SCHED_SYSCTL_H -#define _SCHED_SYSCTL_H +#ifndef _LINUX_SCHED_SYSCTL_H +#define _LINUX_SCHED_SYSCTL_H + +#include <linux/types.h> + +struct ctl_table; #ifdef CONFIG_DETECT_HUNG_TASK extern int sysctl_hung_task_check_count; @@ -78,4 +82,4 @@ extern int sysctl_schedstats(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); -#endif /* _SCHED_SYSCTL_H */ +#endif /* _LINUX_SCHED_SYSCTL_H */ diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h new file mode 100644 index 000000000000..a978d7189cfd --- /dev/null +++ b/include/linux/sched/task.h @@ -0,0 +1,139 @@ +#ifndef _LINUX_SCHED_TASK_H +#define _LINUX_SCHED_TASK_H + +/* + * Interface between the scheduler and various task lifetime (fork()/exit()) + * functionality: + */ + +#include <linux/sched.h> + +struct task_struct; +union thread_union; + +/* + * This serializes "schedule()" and also protects + * the run-queue from deletions/modifications (but + * _adding_ to the beginning of the run-queue has + * a separate lock). + */ +extern rwlock_t tasklist_lock; +extern spinlock_t mmlist_lock; + +extern union thread_union init_thread_union; +extern struct task_struct init_task; + +#ifdef CONFIG_PROVE_RCU +extern int lockdep_tasklist_lock_is_held(void); +#endif /* #ifdef CONFIG_PROVE_RCU */ + +extern asmlinkage void schedule_tail(struct task_struct *prev); +extern void init_idle(struct task_struct *idle, int cpu); +extern void init_idle_bootup_task(struct task_struct *idle); + +extern int sched_fork(unsigned long clone_flags, struct task_struct *p); +extern void sched_dead(struct task_struct *p); + +void __noreturn do_task_dead(void); + +extern void proc_caches_init(void); + +extern void release_task(struct task_struct * p); + +#ifdef CONFIG_HAVE_COPY_THREAD_TLS +extern int copy_thread_tls(unsigned long, unsigned long, unsigned long, + struct task_struct *, unsigned long); +#else +extern int copy_thread(unsigned long, unsigned long, unsigned long, + struct task_struct *); + +/* Architectures that haven't opted into copy_thread_tls get the tls argument + * via pt_regs, so ignore the tls argument passed via C. 
*/ +static inline int copy_thread_tls( + unsigned long clone_flags, unsigned long sp, unsigned long arg, + struct task_struct *p, unsigned long tls) +{ + return copy_thread(clone_flags, sp, arg, p); +} +#endif +extern void flush_thread(void); + +#ifdef CONFIG_HAVE_EXIT_THREAD +extern void exit_thread(struct task_struct *tsk); +#else +static inline void exit_thread(struct task_struct *tsk) +{ +} +#endif +extern void do_group_exit(int); + +extern void exit_files(struct task_struct *); +extern void exit_itimers(struct signal_struct *); + +extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *, unsigned long); +extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *); +struct task_struct *fork_idle(int); +extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); + +extern void free_task(struct task_struct *tsk); + +/* sched_exec is called by processes performing an exec */ +#ifdef CONFIG_SMP +extern void sched_exec(void); +#else +#define sched_exec() {} +#endif + +#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0) + +extern void __put_task_struct(struct task_struct *t); + +static inline void put_task_struct(struct task_struct *t) +{ + if (atomic_dec_and_test(&t->usage)) + __put_task_struct(t); +} + +struct task_struct *task_rcu_dereference(struct task_struct **ptask); +struct task_struct *try_get_task_struct(struct task_struct **ptask); + + +#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT +extern int arch_task_struct_size __read_mostly; +#else +# define arch_task_struct_size (sizeof(struct task_struct)) +#endif + +#ifdef CONFIG_VMAP_STACK +static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t) +{ + return t->stack_vm_area; +} +#else +static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t) +{ + return NULL; +} +#endif + +/* + * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring + * subscriptions and synchronises with wait4(). Also used in procfs. Also + * pins the final release of task.io_context. Also protects ->cpuset and + * ->cgroup.subsys[]. And ->vfork_done. + * + * Nests both inside and outside of read_lock(&tasklist_lock). + * It must not be nested with write_lock_irq(&tasklist_lock), + * neither inside nor outside. + */ +static inline void task_lock(struct task_struct *p) +{ + spin_lock(&p->alloc_lock); +} + +static inline void task_unlock(struct task_struct *p) +{ + spin_unlock(&p->alloc_lock); +} + +#endif /* _LINUX_SCHED_TASK_H */ diff --git a/include/linux/sched/task_stack.h b/include/linux/sched/task_stack.h new file mode 100644 index 000000000000..df6ea6665b31 --- /dev/null +++ b/include/linux/sched/task_stack.h @@ -0,0 +1,121 @@ +#ifndef _LINUX_SCHED_TASK_STACK_H +#define _LINUX_SCHED_TASK_STACK_H + +/* + * task->stack (kernel stack) handling interfaces: + */ + +#include <linux/sched.h> +#include <linux/magic.h> + +#ifdef CONFIG_THREAD_INFO_IN_TASK + +/* + * When accessing the stack of a non-current task that might exit, use + * try_get_task_stack() instead. task_stack_page will return a pointer + * that could get freed out from under you. 
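Concretely, the safe pattern that note prescribes looks like this (hedged sketch; 'tsk' is some task that may be exiting, and the helper name is hypothetical):

static void inspect_stack(struct task_struct *tsk)
{
	void *stack = try_get_task_stack(tsk);

	if (!stack)
		return;			/* the stack was already freed */
	/* ... safely read the stack contents here ... */
	put_task_stack(tsk);		/* drop the reference we took */
}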
+ */ +static inline void *task_stack_page(const struct task_struct *task) +{ + return task->stack; +} + +#define setup_thread_stack(new,old) do { } while(0) + +static inline unsigned long *end_of_stack(const struct task_struct *task) +{ + return task->stack; +} + +#elif !defined(__HAVE_THREAD_FUNCTIONS) + +#define task_stack_page(task) ((void *)(task)->stack) + +static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org) +{ + *task_thread_info(p) = *task_thread_info(org); + task_thread_info(p)->task = p; +} + +/* + * Return the address of the last usable long on the stack. + * + * When the stack grows down, this is just above the thread + * info struct. Going any lower will corrupt the threadinfo. + * + * When the stack grows up, this is the highest address. + * Beyond that position, we corrupt data on the next page. + */ +static inline unsigned long *end_of_stack(struct task_struct *p) +{ +#ifdef CONFIG_STACK_GROWSUP + return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1; +#else + return (unsigned long *)(task_thread_info(p) + 1); +#endif +} + +#endif + +#ifdef CONFIG_THREAD_INFO_IN_TASK +static inline void *try_get_task_stack(struct task_struct *tsk) +{ + return atomic_inc_not_zero(&tsk->stack_refcount) ? + task_stack_page(tsk) : NULL; +} + +extern void put_task_stack(struct task_struct *tsk); +#else +static inline void *try_get_task_stack(struct task_struct *tsk) +{ + return task_stack_page(tsk); +} + +static inline void put_task_stack(struct task_struct *tsk) {} +#endif + +#define task_stack_end_corrupted(task) \ + (*(end_of_stack(task)) != STACK_END_MAGIC) + +static inline int object_is_on_stack(void *obj) +{ + void *stack = task_stack_page(current); + + return (obj >= stack) && (obj < (stack + THREAD_SIZE)); +} + +extern void thread_stack_cache_init(void); + +#ifdef CONFIG_DEBUG_STACK_USAGE +static inline unsigned long stack_not_used(struct task_struct *p) +{ + unsigned long *n = end_of_stack(p); + + do { /* Skip over canary */ +# ifdef CONFIG_STACK_GROWSUP + n--; +# else + n++; +# endif + } while (!*n); + +# ifdef CONFIG_STACK_GROWSUP + return (unsigned long)end_of_stack(p) - (unsigned long)n; +# else + return (unsigned long)n - (unsigned long)end_of_stack(p); +# endif +} +#endif +extern void set_task_stack_end_magic(struct task_struct *tsk); + +#ifndef __HAVE_ARCH_KSTACK_END +static inline int kstack_end(void *addr) +{ + /* Reliable end of stack detection: + * Some APM bios versions misalign the stack + */ + return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*))); +} +#endif + +#endif /* _LINUX_SCHED_TASK_STACK_H */ diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h new file mode 100644 index 000000000000..7d065abc7a47 --- /dev/null +++ b/include/linux/sched/topology.h @@ -0,0 +1,226 @@ +#ifndef _LINUX_SCHED_TOPOLOGY_H +#define _LINUX_SCHED_TOPOLOGY_H + +#include <linux/topology.h> + +#include <linux/sched/idle.h> + +/* + * sched-domains (multiprocessor balancing) declarations: + */ +#ifdef CONFIG_SMP + +#define SD_LOAD_BALANCE 0x0001 /* Do load balancing on this domain. 
*/ +#define SD_BALANCE_NEWIDLE 0x0002 /* Balance when about to become idle */ +#define SD_BALANCE_EXEC 0x0004 /* Balance on exec */ +#define SD_BALANCE_FORK 0x0008 /* Balance on fork, clone */ +#define SD_BALANCE_WAKE 0x0010 /* Balance on wakeup */ +#define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */ +#define SD_ASYM_CPUCAPACITY 0x0040 /* Groups have different max cpu capacities */ +#define SD_SHARE_CPUCAPACITY 0x0080 /* Domain members share cpu capacity */ +#define SD_SHARE_POWERDOMAIN 0x0100 /* Domain members share power domain */ +#define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */ +#define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */ +#define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */ +#define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */ +#define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */ +#define SD_NUMA 0x4000 /* cross-node balancing */ + +/* + * Increase resolution of cpu_capacity calculations + */ +#define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT +#define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT) + +#ifdef CONFIG_SCHED_SMT +static inline int cpu_smt_flags(void) +{ + return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES; +} +#endif + +#ifdef CONFIG_SCHED_MC +static inline int cpu_core_flags(void) +{ + return SD_SHARE_PKG_RESOURCES; +} +#endif + +#ifdef CONFIG_NUMA +static inline int cpu_numa_flags(void) +{ + return SD_NUMA; +} +#endif + +extern int arch_asym_cpu_priority(int cpu); + +struct sched_domain_attr { + int relax_domain_level; +}; + +#define SD_ATTR_INIT (struct sched_domain_attr) { \ + .relax_domain_level = -1, \ +} + +extern int sched_domain_level_max; + +struct sched_group; + +struct sched_domain_shared { + atomic_t ref; + atomic_t nr_busy_cpus; + int has_idle_cores; +}; + +struct sched_domain { + /* These fields must be setup */ + struct sched_domain *parent; /* top domain must be null terminated */ + struct sched_domain *child; /* bottom domain must be null terminated */ + struct sched_group *groups; /* the balancing groups of the domain */ + unsigned long min_interval; /* Minimum balance interval ms */ + unsigned long max_interval; /* Maximum balance interval ms */ + unsigned int busy_factor; /* less balancing by factor if busy */ + unsigned int imbalance_pct; /* No balance until over watermark */ + unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */ + unsigned int busy_idx; + unsigned int idle_idx; + unsigned int newidle_idx; + unsigned int wake_idx; + unsigned int forkexec_idx; + unsigned int smt_gain; + + int nohz_idle; /* NOHZ IDLE status */ + int flags; /* See SD_* */ + int level; + + /* Runtime fields. */ + unsigned long last_balance; /* init to jiffies. units in jiffies */ + unsigned int balance_interval; /* initialise to 1. units in ms. 
*/ + unsigned int nr_balance_failed; /* initialise to 0 */ + + /* idle_balance() stats */ + u64 max_newidle_lb_cost; + unsigned long next_decay_max_lb_cost; + + u64 avg_scan_cost; /* select_idle_sibling */ + +#ifdef CONFIG_SCHEDSTATS + /* load_balance() stats */ + unsigned int lb_count[CPU_MAX_IDLE_TYPES]; + unsigned int lb_failed[CPU_MAX_IDLE_TYPES]; + unsigned int lb_balanced[CPU_MAX_IDLE_TYPES]; + unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES]; + unsigned int lb_gained[CPU_MAX_IDLE_TYPES]; + unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES]; + unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES]; + unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES]; + + /* Active load balancing */ + unsigned int alb_count; + unsigned int alb_failed; + unsigned int alb_pushed; + + /* SD_BALANCE_EXEC stats */ + unsigned int sbe_count; + unsigned int sbe_balanced; + unsigned int sbe_pushed; + + /* SD_BALANCE_FORK stats */ + unsigned int sbf_count; + unsigned int sbf_balanced; + unsigned int sbf_pushed; + + /* try_to_wake_up() stats */ + unsigned int ttwu_wake_remote; + unsigned int ttwu_move_affine; + unsigned int ttwu_move_balance; +#endif +#ifdef CONFIG_SCHED_DEBUG + char *name; +#endif + union { + void *private; /* used during construction */ + struct rcu_head rcu; /* used during destruction */ + }; + struct sched_domain_shared *shared; + + unsigned int span_weight; + /* + * Span of all CPUs in this domain. + * + * NOTE: this field is variable length. (Allocated dynamically + * by attaching extra space to the end of the structure, + * depending on how many CPUs the kernel has booted up with) + */ + unsigned long span[0]; +}; + +static inline struct cpumask *sched_domain_span(struct sched_domain *sd) +{ + return to_cpumask(sd->span); +} + +extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], + struct sched_domain_attr *dattr_new); + +/* Allocate an array of sched domains, for partition_sched_domains(). 
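The allocator declared just below pairs with partition_sched_domains(); a sketch modeled on the cpuset usage, under a single-partition assumption and with hypothetical naming:

static void rebuild_single_domain(void)
{
	cpumask_var_t *doms = alloc_sched_domains(1);

	if (!doms)
		return;
	cpumask_copy(doms[0], cpu_active_mask);
	/* The scheduler takes ownership of 'doms' and frees the old set. */
	partition_sched_domains(1, doms, NULL);
}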
*/ +cpumask_var_t *alloc_sched_domains(unsigned int ndoms); +void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms); + +bool cpus_share_cache(int this_cpu, int that_cpu); + +typedef const struct cpumask *(*sched_domain_mask_f)(int cpu); +typedef int (*sched_domain_flags_f)(void); + +#define SDTL_OVERLAP 0x01 + +struct sd_data { + struct sched_domain **__percpu sd; + struct sched_domain_shared **__percpu sds; + struct sched_group **__percpu sg; + struct sched_group_capacity **__percpu sgc; +}; + +struct sched_domain_topology_level { + sched_domain_mask_f mask; + sched_domain_flags_f sd_flags; + int flags; + int numa_level; + struct sd_data data; +#ifdef CONFIG_SCHED_DEBUG + char *name; +#endif +}; + +extern void set_sched_topology(struct sched_domain_topology_level *tl); + +#ifdef CONFIG_SCHED_DEBUG +# define SD_INIT_NAME(type) .name = #type +#else +# define SD_INIT_NAME(type) +#endif + +#else /* CONFIG_SMP */ + +struct sched_domain_attr; + +static inline void +partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], + struct sched_domain_attr *dattr_new) +{ +} + +static inline bool cpus_share_cache(int this_cpu, int that_cpu) +{ + return true; +} + +#endif /* !CONFIG_SMP */ + +static inline int task_node(const struct task_struct *p) +{ + return cpu_to_node(task_cpu(p)); +} + +#endif /* _LINUX_SCHED_TOPOLOGY_H */ diff --git a/include/linux/sched/user.h b/include/linux/sched/user.h new file mode 100644 index 000000000000..5d5415e129d4 --- /dev/null +++ b/include/linux/sched/user.h @@ -0,0 +1,61 @@ +#ifndef _LINUX_SCHED_USER_H +#define _LINUX_SCHED_USER_H + +#include <linux/uidgid.h> +#include <linux/atomic.h> + +struct key; + +/* + * Some day this will be a full-fledged user tracking system.. + */ +struct user_struct { + atomic_t __count; /* reference count */ + atomic_t processes; /* How many processes does this user have? */ + atomic_t sigpending; /* How many pending signals does this user have? */ +#ifdef CONFIG_FANOTIFY + atomic_t fanotify_listeners; +#endif +#ifdef CONFIG_EPOLL + atomic_long_t epoll_watches; /* The number of file descriptors currently watched */ +#endif +#ifdef CONFIG_POSIX_MQUEUE + /* protected by mq_lock */ + unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */ +#endif + unsigned long locked_shm; /* How many pages of mlocked shm ? */ + unsigned long unix_inflight; /* How many files in flight in unix sockets */ + atomic_long_t pipe_bufs; /* how many pages are allocated in pipe buffers */ + +#ifdef CONFIG_KEYS + struct key *uid_keyring; /* UID specific keyring */ + struct key *session_keyring; /* UID's default session keyring */ +#endif + + /* Hash table maintenance information */ + struct hlist_node uidhash_node; + kuid_t uid; + +#if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL) + atomic_long_t locked_vm; +#endif +}; + +extern int uids_sysfs_init(void); + +extern struct user_struct *find_user(kuid_t); + +extern struct user_struct root_user; +#define INIT_USER (&root_user) + + +/* per-UID process charging. 
*/ +extern struct user_struct * alloc_uid(kuid_t); +static inline struct user_struct *get_uid(struct user_struct *u) +{ + atomic_inc(&u->__count); + return u; +} +extern void free_uid(struct user_struct *); + +#endif /* _LINUX_SCHED_USER_H */ diff --git a/include/linux/sched/wake_q.h b/include/linux/sched/wake_q.h new file mode 100644 index 000000000000..d03d8a9047dc --- /dev/null +++ b/include/linux/sched/wake_q.h @@ -0,0 +1,53 @@ +#ifndef _LINUX_SCHED_WAKE_Q_H +#define _LINUX_SCHED_WAKE_Q_H + +/* + * Wake-queues are lists of tasks with a pending wakeup, whose + * callers have already marked the task as woken internally, + * and can thus carry on. A common use case is being able to + * do the wakeups once the corresponding user lock has been + * released. + * + * We hold a reference to each task in the list across the wakeup, + * thus guaranteeing that the memory is still valid by the time + * the actual wakeups are performed in wake_up_q(). + * + * One per task suffices, because there's never a need for a task to be + * in two wake queues simultaneously; it is forbidden to abandon a task + * in a wake queue (a call to wake_up_q() _must_ follow), so if a task is + * already in a wake queue, the wakeup will happen soon and the second + * waker can just skip it. + * + * The DEFINE_WAKE_Q macro declares and initializes the list head. + * wake_up_q() does NOT reinitialize the list; it's expected to be + * called near the end of a function. Otherwise, the list can be + * re-initialized for later re-use by wake_q_init(). + * + * Note that this can cause spurious wakeups. schedule() callers + * must ensure the call is done inside a loop, confirming that the + * wakeup condition has in fact occurred. + */ + +#include <linux/sched.h> + +struct wake_q_head { + struct wake_q_node *first; + struct wake_q_node **lastp; +}; + +#define WAKE_Q_TAIL ((struct wake_q_node *) 0x01) + +#define DEFINE_WAKE_Q(name) \ + struct wake_q_head name = { WAKE_Q_TAIL, &name.first } + +static inline void wake_q_init(struct wake_q_head *head) +{ + head->first = WAKE_Q_TAIL; + head->lastp = &head->first; +} + +extern void wake_q_add(struct wake_q_head *head, + struct task_struct *task); +extern void wake_up_q(struct wake_q_head *head); + +#endif /* _LINUX_SCHED_WAKE_Q_H */ diff --git a/include/linux/sched/xacct.h b/include/linux/sched/xacct.h new file mode 100644 index 000000000000..a28156a0d34a --- /dev/null +++ b/include/linux/sched/xacct.h @@ -0,0 +1,48 @@ +#ifndef _LINUX_SCHED_XACCT_H +#define _LINUX_SCHED_XACCT_H + +/* + * Extended task accounting methods: + */ + +#include <linux/sched.h> + +#ifdef CONFIG_TASK_XACCT +static inline void add_rchar(struct task_struct *tsk, ssize_t amt) +{ + tsk->ioac.rchar += amt; +} + +static inline void add_wchar(struct task_struct *tsk, ssize_t amt) +{ + tsk->ioac.wchar += amt; +} + +static inline void inc_syscr(struct task_struct *tsk) +{ + tsk->ioac.syscr++; +} + +static inline void inc_syscw(struct task_struct *tsk) +{ + tsk->ioac.syscw++; +} +#else +static inline void add_rchar(struct task_struct *tsk, ssize_t amt) +{ +} + +static inline void add_wchar(struct task_struct *tsk, ssize_t amt) +{ +} + +static inline void inc_syscr(struct task_struct *tsk) +{ +} + +static inline void inc_syscw(struct task_struct *tsk) +{ +} +#endif + +#endif /* _LINUX_SCHED_XACCT_H */ diff --git a/include/linux/sctp.h b/include/linux/sctp.h index fcb4c3646173..7a4804c4a593 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -62,7 +62,7 @@ typedef struct sctphdr { __be16 dest; __be32 vtag;
__le32 checksum; -} __packed sctp_sctphdr_t; +} sctp_sctphdr_t; static inline struct sctphdr *sctp_hdr(const struct sk_buff *skb) { @@ -74,7 +74,7 @@ typedef struct sctp_chunkhdr { __u8 type; __u8 flags; __be16 length; -} __packed sctp_chunkhdr_t; +} sctp_chunkhdr_t; /* Section 3.2. Chunk Type Values. @@ -108,6 +108,7 @@ typedef enum { /* Use hex, as defined in ADDIP sec. 3.1 */ SCTP_CID_ASCONF = 0xC1, SCTP_CID_ASCONF_ACK = 0x80, + SCTP_CID_RECONF = 0x82, } sctp_cid_t; /* enum */ @@ -164,7 +165,7 @@ enum { SCTP_CHUNK_FLAG_T = 0x01 }; typedef struct sctp_paramhdr { __be16 type; __be16 length; -} __packed sctp_paramhdr_t; +} sctp_paramhdr_t; typedef enum { @@ -199,6 +200,13 @@ typedef enum { SCTP_PARAM_SUCCESS_REPORT = cpu_to_be16(0xc005), SCTP_PARAM_ADAPTATION_LAYER_IND = cpu_to_be16(0xc006), + /* RE-CONFIG. Section 4 */ + SCTP_PARAM_RESET_OUT_REQUEST = cpu_to_be16(0x000d), + SCTP_PARAM_RESET_IN_REQUEST = cpu_to_be16(0x000e), + SCTP_PARAM_RESET_TSN_REQUEST = cpu_to_be16(0x000f), + SCTP_PARAM_RESET_RESPONSE = cpu_to_be16(0x0010), + SCTP_PARAM_RESET_ADD_OUT_STREAMS = cpu_to_be16(0x0011), + SCTP_PARAM_RESET_ADD_IN_STREAMS = cpu_to_be16(0x0012), } sctp_param_t; /* enum */ @@ -225,12 +233,12 @@ typedef struct sctp_datahdr { __be16 ssn; __be32 ppid; __u8 payload[0]; -} __packed sctp_datahdr_t; +} sctp_datahdr_t; typedef struct sctp_data_chunk { sctp_chunkhdr_t chunk_hdr; sctp_datahdr_t data_hdr; -} __packed sctp_data_chunk_t; +} sctp_data_chunk_t; /* DATA Chunk Specific Flags */ enum { @@ -256,78 +264,78 @@ typedef struct sctp_inithdr { __be16 num_inbound_streams; __be32 initial_tsn; __u8 params[0]; -} __packed sctp_inithdr_t; +} sctp_inithdr_t; typedef struct sctp_init_chunk { sctp_chunkhdr_t chunk_hdr; sctp_inithdr_t init_hdr; -} __packed sctp_init_chunk_t; +} sctp_init_chunk_t; /* Section 3.3.2.1. IPv4 Address Parameter (5) */ typedef struct sctp_ipv4addr_param { sctp_paramhdr_t param_hdr; struct in_addr addr; -} __packed sctp_ipv4addr_param_t; +} sctp_ipv4addr_param_t; /* Section 3.3.2.1. IPv6 Address Parameter (6) */ typedef struct sctp_ipv6addr_param { sctp_paramhdr_t param_hdr; struct in6_addr addr; -} __packed sctp_ipv6addr_param_t; +} sctp_ipv6addr_param_t; /* Section 3.3.2.1 Cookie Preservative (9) */ typedef struct sctp_cookie_preserve_param { sctp_paramhdr_t param_hdr; __be32 lifespan_increment; -} __packed sctp_cookie_preserve_param_t; +} sctp_cookie_preserve_param_t; /* Section 3.3.2.1 Host Name Address (11) */ typedef struct sctp_hostname_param { sctp_paramhdr_t param_hdr; uint8_t hostname[0]; -} __packed sctp_hostname_param_t; +} sctp_hostname_param_t; /* Section 3.3.2.1 Supported Address Types (12) */ typedef struct sctp_supported_addrs_param { sctp_paramhdr_t param_hdr; __be16 types[0]; -} __packed sctp_supported_addrs_param_t; +} sctp_supported_addrs_param_t; /* Appendix A.
ECN Capable (32768) */ typedef struct sctp_ecn_capable_param { sctp_paramhdr_t param_hdr; -} __packed sctp_ecn_capable_param_t; +} sctp_ecn_capable_param_t; /* ADDIP Section 3.2.6 Adaptation Layer Indication */ typedef struct sctp_adaptation_ind_param { struct sctp_paramhdr param_hdr; __be32 adaptation_ind; -} __packed sctp_adaptation_ind_param_t; +} sctp_adaptation_ind_param_t; /* ADDIP Section 4.2.7 Supported Extensions Parameter */ typedef struct sctp_supported_ext_param { struct sctp_paramhdr param_hdr; __u8 chunks[0]; -} __packed sctp_supported_ext_param_t; +} sctp_supported_ext_param_t; /* AUTH Section 3.1 Random */ typedef struct sctp_random_param { sctp_paramhdr_t param_hdr; __u8 random_val[0]; -} __packed sctp_random_param_t; +} sctp_random_param_t; /* AUTH Section 3.2 Chunk List */ typedef struct sctp_chunks_param { sctp_paramhdr_t param_hdr; __u8 chunks[0]; -} __packed sctp_chunks_param_t; +} sctp_chunks_param_t; /* AUTH Section 3.3 HMAC Algorithm */ typedef struct sctp_hmac_algo_param { sctp_paramhdr_t param_hdr; __be16 hmac_ids[0]; -} __packed sctp_hmac_algo_param_t; +} sctp_hmac_algo_param_t; /* RFC 2960. Section 3.3.3 Initiation Acknowledgement (INIT ACK) (2): * The INIT ACK chunk is used to acknowledge the initiation of an SCTP @@ -339,13 +347,13 @@ typedef sctp_init_chunk_t sctp_initack_chunk_t; typedef struct sctp_cookie_param { sctp_paramhdr_t p; __u8 body[0]; -} __packed sctp_cookie_param_t; +} sctp_cookie_param_t; /* Section 3.3.3.1 Unrecognized Parameters (8) */ typedef struct sctp_unrecognized_param { sctp_paramhdr_t param_hdr; sctp_paramhdr_t unrecognized; -} __packed sctp_unrecognized_param_t; +} sctp_unrecognized_param_t; @@ -360,7 +368,7 @@ typedef struct sctp_unrecognized_param { typedef struct sctp_gap_ack_block { __be16 start; __be16 end; -} __packed sctp_gap_ack_block_t; +} sctp_gap_ack_block_t; typedef __be32 sctp_dup_tsn_t; @@ -375,12 +383,12 @@ typedef struct sctp_sackhdr { __be16 num_gap_ack_blocks; __be16 num_dup_tsns; sctp_sack_variable_t variable[0]; -} __packed sctp_sackhdr_t; +} sctp_sackhdr_t; typedef struct sctp_sack_chunk { sctp_chunkhdr_t chunk_hdr; sctp_sackhdr_t sack_hdr; -} __packed sctp_sack_chunk_t; +} sctp_sack_chunk_t; /* RFC 2960. Section 3.3.5 Heartbeat Request (HEARTBEAT) (4): @@ -392,12 +400,12 @@ typedef struct sctp_sack_chunk { typedef struct sctp_heartbeathdr { sctp_paramhdr_t info; -} __packed sctp_heartbeathdr_t; +} sctp_heartbeathdr_t; typedef struct sctp_heartbeat_chunk { sctp_chunkhdr_t chunk_hdr; sctp_heartbeathdr_t hb_hdr; -} __packed sctp_heartbeat_chunk_t; +} sctp_heartbeat_chunk_t; /* For the abort and shutdown ACK we must carry the init tag in the @@ -406,7 +414,7 @@ typedef struct sctp_heartbeat_chunk { */ typedef struct sctp_abort_chunk { sctp_chunkhdr_t uh; -} __packed sctp_abort_chunk_t; +} sctp_abort_chunk_t; /* For the graceful shutdown we must carry the tag (in common header) @@ -414,12 +422,12 @@ typedef struct sctp_abort_chunk { */ typedef struct sctp_shutdownhdr { __be32 cum_tsn_ack; -} __packed sctp_shutdownhdr_t; +} sctp_shutdownhdr_t; struct sctp_shutdown_chunk_t { sctp_chunkhdr_t chunk_hdr; sctp_shutdownhdr_t shutdown_hdr; -} __packed; +}; /* RFC 2960. 
Section 3.3.10 Operation Error (ERROR) (9) */ @@ -427,12 +435,12 @@ typedef struct sctp_errhdr { __be16 cause; __be16 length; __u8 variable[0]; -} __packed sctp_errhdr_t; +} sctp_errhdr_t; typedef struct sctp_operr_chunk { sctp_chunkhdr_t chunk_hdr; sctp_errhdr_t err_hdr; -} __packed sctp_operr_chunk_t; +} sctp_operr_chunk_t; /* RFC 2960 3.3.10 - Operation Error * @@ -522,7 +530,7 @@ typedef struct sctp_ecnehdr { typedef struct sctp_ecne_chunk { sctp_chunkhdr_t chunk_hdr; sctp_ecnehdr_t ence_hdr; -} __packed sctp_ecne_chunk_t; +} sctp_ecne_chunk_t; /* RFC 2960. Appendix A. Explicit Congestion Notification. * Congestion Window Reduced (CWR) (13) @@ -534,7 +542,7 @@ typedef struct sctp_cwrhdr { typedef struct sctp_cwr_chunk { sctp_chunkhdr_t chunk_hdr; sctp_cwrhdr_t cwr_hdr; -} __packed sctp_cwr_chunk_t; +} sctp_cwr_chunk_t; /* PR-SCTP * 3.2 Forward Cumulative TSN Chunk Definition (FORWARD TSN) @@ -585,17 +593,17 @@ typedef struct sctp_cwr_chunk { struct sctp_fwdtsn_skip { __be16 stream; __be16 ssn; -} __packed; +}; struct sctp_fwdtsn_hdr { __be32 new_cum_tsn; struct sctp_fwdtsn_skip skip[0]; -} __packed; +}; struct sctp_fwdtsn_chunk { struct sctp_chunkhdr chunk_hdr; struct sctp_fwdtsn_hdr fwdtsn_hdr; -} __packed; +}; /* ADDIP @@ -633,17 +641,17 @@ struct sctp_fwdtsn_chunk { typedef struct sctp_addip_param { sctp_paramhdr_t param_hdr; __be32 crr_id; -} __packed sctp_addip_param_t; +} sctp_addip_param_t; typedef struct sctp_addiphdr { __be32 serial; __u8 params[0]; -} __packed sctp_addiphdr_t; +} sctp_addiphdr_t; typedef struct sctp_addip_chunk { sctp_chunkhdr_t chunk_hdr; sctp_addiphdr_t addip_hdr; -} __packed sctp_addip_chunk_t; +} sctp_addip_chunk_t; /* AUTH * Section 4.1 Authentication Chunk (AUTH) @@ -698,16 +706,71 @@ typedef struct sctp_authhdr { __be16 shkey_id; __be16 hmac_id; __u8 hmac[0]; -} __packed sctp_authhdr_t; +} sctp_authhdr_t; typedef struct sctp_auth_chunk { sctp_chunkhdr_t chunk_hdr; sctp_authhdr_t auth_hdr; -} __packed sctp_auth_chunk_t; +} sctp_auth_chunk_t; struct sctp_infox { struct sctp_info *sctpinfo; struct sctp_association *asoc; }; +struct sctp_reconf_chunk { + sctp_chunkhdr_t chunk_hdr; + __u8 params[0]; +}; + +struct sctp_strreset_outreq { + sctp_paramhdr_t param_hdr; + __u32 request_seq; + __u32 response_seq; + __u32 send_reset_at_tsn; + __u16 list_of_streams[0]; +}; + +struct sctp_strreset_inreq { + sctp_paramhdr_t param_hdr; + __u32 request_seq; + __u16 list_of_streams[0]; +}; + +struct sctp_strreset_tsnreq { + sctp_paramhdr_t param_hdr; + __u32 request_seq; +}; + +struct sctp_strreset_addstrm { + sctp_paramhdr_t param_hdr; + __u32 request_seq; + __u16 number_of_streams; + __u16 reserved; +}; + +enum { + SCTP_STRRESET_NOTHING_TO_DO = 0x00, + SCTP_STRRESET_PERFORMED = 0x01, + SCTP_STRRESET_DENIED = 0x02, + SCTP_STRRESET_ERR_WRONG_SSN = 0x03, + SCTP_STRRESET_ERR_IN_PROGRESS = 0x04, + SCTP_STRRESET_ERR_BAD_SEQNO = 0x05, + SCTP_STRRESET_IN_PROGRESS = 0x06, +}; + +struct sctp_strreset_resp { + sctp_paramhdr_t param_hdr; + __u32 response_seq; + __u32 result; +}; + +struct sctp_strreset_resptsn { + sctp_paramhdr_t param_hdr; + __u32 response_seq; + __u32 result; + __u32 senders_next_tsn; + __u32 receivers_next_tsn; +}; + #endif /* __LINUX_SCTP_H__ */ diff --git a/include/linux/security.h b/include/linux/security.h index d3868f2ebada..96899fad7016 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -140,8 +140,7 @@ struct request_sock; /* bprm->unsafe reasons */ #define LSM_UNSAFE_SHARE 1 #define LSM_UNSAFE_PTRACE 2 -#define LSM_UNSAFE_PTRACE_CAP 
4 -#define LSM_UNSAFE_NO_NEW_PRIVS 8 +#define LSM_UNSAFE_NO_NEW_PRIVS 4 #ifdef CONFIG_MMU extern int mmap_min_addr_handler(struct ctl_table *table, int write, diff --git a/include/linux/sed-opal.h b/include/linux/sed-opal.h index deee23d012e7..04b124fca51e 100644 --- a/include/linux/sed-opal.h +++ b/include/linux/sed-opal.h @@ -27,6 +27,7 @@ typedef int (sec_send_recv)(void *data, u16 spsp, u8 secp, void *buffer, size_t len, bool send); #ifdef CONFIG_BLK_SED_OPAL +void free_opal_dev(struct opal_dev *dev); bool opal_unlock_from_suspend(struct opal_dev *dev); struct opal_dev *init_opal_dev(void *data, sec_send_recv *send_recv); int sed_ioctl(struct opal_dev *dev, unsigned int cmd, void __user *ioctl_ptr); @@ -51,6 +52,10 @@ static inline bool is_sed_ioctl(unsigned int cmd) return false; } #else +static inline void free_opal_dev(struct opal_dev *dev) +{ +} + static inline bool is_sed_ioctl(unsigned int cmd) { return false; diff --git a/include/linux/sem.h b/include/linux/sem.h index d0efd6e6c20a..4fc222f8755d 100644 --- a/include/linux/sem.h +++ b/include/linux/sem.h @@ -21,7 +21,7 @@ struct sem_array { struct list_head list_id; /* undo requests on this array */ int sem_nsems; /* no. of semaphores in array */ int complex_count; /* pending complex operations */ - bool complex_mode; /* no parallel simple ops */ + unsigned int use_global_lock;/* >0: global lock required */ }; #ifdef CONFIG_SYSVIPC diff --git a/include/linux/serdev.h b/include/linux/serdev.h new file mode 100644 index 000000000000..9519da6253a8 --- /dev/null +++ b/include/linux/serdev.h @@ -0,0 +1,262 @@ +/* + * Copyright (C) 2016-2017 Linaro Ltd., Rob Herring <robh@kernel.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef _LINUX_SERDEV_H +#define _LINUX_SERDEV_H + +#include <linux/types.h> +#include <linux/device.h> + +struct serdev_controller; +struct serdev_device; + +/* + * serdev device structures + */ + +/** + * struct serdev_device_ops - Callback operations for a serdev device + * @receive_buf: Function called with data received from device. + * @write_wakeup: Function called when ready to transmit more data. + */ +struct serdev_device_ops { + int (*receive_buf)(struct serdev_device *, const unsigned char *, size_t); + void (*write_wakeup)(struct serdev_device *); +}; + +/** + * struct serdev_device - Basic representation of a serdev device + * @dev: Driver model representation of the device. + * @nr: Device number on serdev bus. + * @ctrl: serdev controller managing this device. + * @ops: Device operations. + */ +struct serdev_device { + struct device dev; + int nr; + struct serdev_controller *ctrl; + const struct serdev_device_ops *ops; +}; + +static inline struct serdev_device *to_serdev_device(struct device *d) +{ + return container_of(d, struct serdev_device, dev); +} + +/** + * struct serdev_device_driver - serdev slave device driver + * @driver: serdev device drivers should initialize name field of this + * structure. + * @probe: binds this driver to a serdev device. + * @remove: unbinds this driver from the serdev device.
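A skeleton client driver against the structure defined just below might look like the following sketch (hypothetical names; the calls used are the ones this header declares further down):

static int demo_probe(struct serdev_device *serdev)
{
	int ret = serdev_device_open(serdev);

	if (ret)
		return ret;
	serdev_device_set_baudrate(serdev, 115200);
	return 0;
}

static void demo_remove(struct serdev_device *serdev)
{
	serdev_device_close(serdev);
}

static struct serdev_device_driver demo_driver = {
	.driver	= { .name = "serdev-demo" },
	.probe	= demo_probe,
	.remove	= demo_remove,
};
module_serdev_device_driver(demo_driver);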
+ */ +struct serdev_device_driver { + struct device_driver driver; + int (*probe)(struct serdev_device *); + void (*remove)(struct serdev_device *); +}; + +static inline struct serdev_device_driver *to_serdev_device_driver(struct device_driver *d) +{ + return container_of(d, struct serdev_device_driver, driver); +} + +/* + * serdev controller structures + */ +struct serdev_controller_ops { + int (*write_buf)(struct serdev_controller *, const unsigned char *, size_t); + void (*write_flush)(struct serdev_controller *); + int (*write_room)(struct serdev_controller *); + int (*open)(struct serdev_controller *); + void (*close)(struct serdev_controller *); + void (*set_flow_control)(struct serdev_controller *, bool); + unsigned int (*set_baudrate)(struct serdev_controller *, unsigned int); +}; + +/** + * struct serdev_controller - interface to the serdev controller + * @dev: Driver model representation of the device. + * @nr: number identifier for this controller/bus. + * @serdev: Pointer to slave device for this controller. + * @ops: Controller operations. + */ +struct serdev_controller { + struct device dev; + unsigned int nr; + struct serdev_device *serdev; + const struct serdev_controller_ops *ops; +}; + +static inline struct serdev_controller *to_serdev_controller(struct device *d) +{ + return container_of(d, struct serdev_controller, dev); +} + +static inline void *serdev_device_get_drvdata(const struct serdev_device *serdev) +{ + return dev_get_drvdata(&serdev->dev); +} + +static inline void serdev_device_set_drvdata(struct serdev_device *serdev, void *data) +{ + dev_set_drvdata(&serdev->dev, data); +} + +/** + * serdev_device_put() - decrement serdev device refcount + * @serdev: serdev device. + */ +static inline void serdev_device_put(struct serdev_device *serdev) +{ + if (serdev) + put_device(&serdev->dev); +} + +static inline void serdev_device_set_client_ops(struct serdev_device *serdev, + const struct serdev_device_ops *ops) +{ + serdev->ops = ops; +} + +static inline +void *serdev_controller_get_drvdata(const struct serdev_controller *ctrl) +{ + return ctrl ? dev_get_drvdata(&ctrl->dev) : NULL; +} + +static inline void serdev_controller_set_drvdata(struct serdev_controller *ctrl, + void *data) +{ + dev_set_drvdata(&ctrl->dev, data); +} + +/** + * serdev_controller_put() - decrement controller refcount + * @ctrl: serdev controller.
+ */ +static inline void serdev_controller_put(struct serdev_controller *ctrl) +{ + if (ctrl) + put_device(&ctrl->dev); +} + +struct serdev_device *serdev_device_alloc(struct serdev_controller *); +int serdev_device_add(struct serdev_device *); +void serdev_device_remove(struct serdev_device *); + +struct serdev_controller *serdev_controller_alloc(struct device *, size_t); +int serdev_controller_add(struct serdev_controller *); +void serdev_controller_remove(struct serdev_controller *); + +static inline void serdev_controller_write_wakeup(struct serdev_controller *ctrl) +{ + struct serdev_device *serdev = ctrl->serdev; + + if (!serdev || !serdev->ops->write_wakeup) + return; + + serdev->ops->write_wakeup(ctrl->serdev); +} + +static inline int serdev_controller_receive_buf(struct serdev_controller *ctrl, + const unsigned char *data, + size_t count) +{ + struct serdev_device *serdev = ctrl->serdev; + + if (!serdev || !serdev->ops->receive_buf) + return -EINVAL; + + return serdev->ops->receive_buf(ctrl->serdev, data, count); +} + +#if IS_ENABLED(CONFIG_SERIAL_DEV_BUS) + +int serdev_device_open(struct serdev_device *); +void serdev_device_close(struct serdev_device *); +unsigned int serdev_device_set_baudrate(struct serdev_device *, unsigned int); +void serdev_device_set_flow_control(struct serdev_device *, bool); +int serdev_device_write_buf(struct serdev_device *, const unsigned char *, size_t); +void serdev_device_write_flush(struct serdev_device *); +int serdev_device_write_room(struct serdev_device *); + +/* + * serdev device driver functions + */ +int __serdev_device_driver_register(struct serdev_device_driver *, struct module *); +#define serdev_device_driver_register(sdrv) \ + __serdev_device_driver_register(sdrv, THIS_MODULE) + +/** + * serdev_device_driver_unregister() - unregister a serdev client driver + * @sdrv: the driver to unregister + */ +static inline void serdev_device_driver_unregister(struct serdev_device_driver *sdrv) +{ + if (sdrv) + driver_unregister(&sdrv->driver); +} + +#define module_serdev_device_driver(__serdev_device_driver) \ + module_driver(__serdev_device_driver, serdev_device_driver_register, \ + serdev_device_driver_unregister) + +#else + +static inline int serdev_device_open(struct serdev_device *sdev) +{ + return -ENODEV; +} +static inline void serdev_device_close(struct serdev_device *sdev) {} +static inline unsigned int serdev_device_set_baudrate(struct serdev_device *sdev, unsigned int baudrate) +{ + return 0; +} +static inline void serdev_device_set_flow_control(struct serdev_device *sdev, bool enable) {} +static inline int serdev_device_write_buf(struct serdev_device *sdev, const unsigned char *buf, size_t count) +{ + return -ENODEV; +} +static inline void serdev_device_write_flush(struct serdev_device *sdev) {} +static inline int serdev_device_write_room(struct serdev_device *sdev) +{ + return 0; +} + +#define serdev_device_driver_register(x) +#define serdev_device_driver_unregister(x) + +#endif /* CONFIG_SERIAL_DEV_BUS */ + +/* + * serdev hooks into TTY core + */ +struct tty_port; +struct tty_driver; + +#ifdef CONFIG_SERIAL_DEV_CTRL_TTYPORT +struct device *serdev_tty_port_register(struct tty_port *port, + struct device *parent, + struct tty_driver *drv, int idx); +void serdev_tty_port_unregister(struct tty_port *port); +#else +static inline struct device *serdev_tty_port_register(struct tty_port *port, + struct device *parent, + struct tty_driver *drv, int idx) +{ + return ERR_PTR(-ENODEV); +} +static inline void serdev_tty_port_unregister(struct
tty_port *port) {} +#endif /* CONFIG_SERIAL_DEV_CTRL_TTYPORT */ + +#endif /*_LINUX_SERDEV_H */ diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 5def8e830fb0..58484fb35cc8 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -450,7 +450,7 @@ extern void uart_handle_cts_change(struct uart_port *uport, extern void uart_insert_char(struct uart_port *port, unsigned int status, unsigned int overrun, unsigned int ch, unsigned int flag); -#ifdef SUPPORT_SYSRQ +#if defined(SUPPORT_SYSRQ) && defined(CONFIG_MAGIC_SYSRQ_SERIAL) static inline int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch) { diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h index 9f2bfd055742..e598eaef3962 100644 --- a/include/linux/serial_sci.h +++ b/include/linux/serial_sci.h @@ -9,8 +9,6 @@ * Generic header for SuperH (H)SCI(F) (used by sh/sh64 and related parts) */ -#define SCIx_NOT_SUPPORTED (-1) - /* Serial Control Register (@ = not supported by all parts) */ #define SCSCR_TIE BIT(7) /* Transmit Interrupt Enable */ #define SCSCR_RIE BIT(6) /* Receive Interrupt Enable */ @@ -41,24 +39,16 @@ enum { SCIx_NR_REGTYPES, }; -struct device; - struct plat_sci_port_ops { void (*init_pins)(struct uart_port *, unsigned int cflag); }; /* - * Port-specific capabilities - */ -#define SCIx_HAVE_RTSCTS BIT(0) - -/* * Platform device specific platform_data struct */ struct plat_sci_port { unsigned int type; /* SCI / SCIF / IRDA / HSCIF */ upf_t flags; /* UPF_* flags */ - unsigned long capabilities; /* Port features/capabilities */ unsigned int sampling_rate; unsigned int scscr; /* SCSCR initialization */ @@ -66,14 +56,9 @@ struct plat_sci_port { /* * Platform overrides if necessary, defaults otherwise. */ - int port_reg; - unsigned char regshift; unsigned char regtype; struct plat_sci_port_ops *ops; - - unsigned int dma_slave_tx; - unsigned int dma_slave_rx; }; #endif /* __LINUX_SERIAL_SCI_H */ diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index ff078e7043b6..a7d6bd2a918f 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -57,7 +57,14 @@ extern int shmem_zero_setup(struct vm_area_struct *); extern unsigned long shmem_get_unmapped_area(struct file *, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags); extern int shmem_lock(struct file *file, int lock, struct user_struct *user); +#ifdef CONFIG_SHMEM extern bool shmem_mapping(struct address_space *mapping); +#else +static inline bool shmem_mapping(struct address_space *mapping) +{ + return false; +} +#endif /* CONFIG_SHMEM */ extern void shmem_unlock_mapping(struct address_space *mapping); extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping, pgoff_t index, gfp_t gfp_mask); @@ -124,4 +131,15 @@ static inline bool shmem_huge_enabled(struct vm_area_struct *vma) } #endif +#ifdef CONFIG_SHMEM +extern int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd, + struct vm_area_struct *dst_vma, + unsigned long dst_addr, + unsigned long src_addr, + struct page **pagep); +#else +#define shmem_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \ + src_addr, pagep) ({ BUG(); 0; }) +#endif + #endif diff --git a/include/linux/signal.h b/include/linux/signal.h index 5308304993be..94ad6eea9550 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -1,32 +1,13 @@ #ifndef _LINUX_SIGNAL_H #define _LINUX_SIGNAL_H -#include <linux/list.h> #include <linux/bug.h> -#include <uapi/linux/signal.h> +#include 
<linux/signal_types.h> struct task_struct; /* for sysctl */ extern int print_fatal_signals; -/* - * Real Time signals may be queued. - */ - -struct sigqueue { - struct list_head list; - int flags; - siginfo_t info; - struct user_struct *user; -}; - -/* flags values. */ -#define SIGQUEUE_PREALLOC 1 - -struct sigpending { - struct list_head list; - sigset_t signal; -}; #ifndef HAVE_ARCH_COPY_SIGINFO @@ -272,42 +253,6 @@ extern void set_current_blocked(sigset_t *); extern void __set_current_blocked(const sigset_t *); extern int show_unhandled_signals; -struct sigaction { -#ifndef __ARCH_HAS_IRIX_SIGACTION - __sighandler_t sa_handler; - unsigned long sa_flags; -#else - unsigned int sa_flags; - __sighandler_t sa_handler; -#endif -#ifdef __ARCH_HAS_SA_RESTORER - __sigrestore_t sa_restorer; -#endif - sigset_t sa_mask; /* mask last for extensibility */ -}; - -struct k_sigaction { - struct sigaction sa; -#ifdef __ARCH_HAS_KA_RESTORER - __sigrestore_t ka_restorer; -#endif -}; - -#ifdef CONFIG_OLD_SIGACTION -struct old_sigaction { - __sighandler_t sa_handler; - old_sigset_t sa_mask; - unsigned long sa_flags; - __sigrestore_t sa_restorer; -}; -#endif - -struct ksignal { - struct k_sigaction ka; - siginfo_t info; - int sig; -}; - extern int get_signal(struct ksignal *ksig); extern void signal_setup_done(int failed, struct ksignal *ksig, int stepping); extern void exit_signals(struct task_struct *tsk); diff --git a/include/linux/signal_types.h b/include/linux/signal_types.h new file mode 100644 index 000000000000..16d862a3d8f3 --- /dev/null +++ b/include/linux/signal_types.h @@ -0,0 +1,66 @@ +#ifndef _LINUX_SIGNAL_TYPES_H +#define _LINUX_SIGNAL_TYPES_H + +/* + * Basic signal handling related data type definitions: + */ + +#include <linux/list.h> +#include <uapi/linux/signal.h> + +/* + * Real Time signals may be queued. + */ + +struct sigqueue { + struct list_head list; + int flags; + siginfo_t info; + struct user_struct *user; +}; + +/* flags values. */ +#define SIGQUEUE_PREALLOC 1 + +struct sigpending { + struct list_head list; + sigset_t signal; +}; + +struct sigaction { +#ifndef __ARCH_HAS_IRIX_SIGACTION + __sighandler_t sa_handler; + unsigned long sa_flags; +#else + unsigned int sa_flags; + __sighandler_t sa_handler; +#endif +#ifdef __ARCH_HAS_SA_RESTORER + __sigrestore_t sa_restorer; +#endif + sigset_t sa_mask; /* mask last for extensibility */ +}; + +struct k_sigaction { + struct sigaction sa; +#ifdef __ARCH_HAS_KA_RESTORER + __sigrestore_t ka_restorer; +#endif +}; + +#ifdef CONFIG_OLD_SIGACTION +struct old_sigaction { + __sighandler_t sa_handler; + old_sigset_t sa_mask; + unsigned long sa_flags; + __sigrestore_t sa_restorer; +}; +#endif + +struct ksignal { + struct k_sigaction ka; + siginfo_t info; + int sig; +}; + +#endif /* _LINUX_SIGNAL_TYPES_H */ diff --git a/include/linux/signalfd.h b/include/linux/signalfd.h index eadbe227c256..4985048640a7 100644 --- a/include/linux/signalfd.h +++ b/include/linux/signalfd.h @@ -8,7 +8,7 @@ #define _LINUX_SIGNALFD_H #include <uapi/linux/signalfd.h> - +#include <linux/sched/signal.h> #ifdef CONFIG_SIGNALFD diff --git a/include/linux/siphash.h b/include/linux/siphash.h new file mode 100644 index 000000000000..fa7a6b9cedbf --- /dev/null +++ b/include/linux/siphash.h @@ -0,0 +1,140 @@ +/* Copyright (C) 2016 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + * + * This file is provided under a dual BSD/GPLv2 license. 
+ * + * SipHash: a fast short-input PRF + * https://131002.net/siphash/ + * + * This implementation is specifically for SipHash2-4 for a secure PRF + * and HalfSipHash1-3/SipHash1-3 for an insecure PRF only suitable for + * hashtables. + */ + +#ifndef _LINUX_SIPHASH_H +#define _LINUX_SIPHASH_H + +#include <linux/types.h> +#include <linux/kernel.h> + +#define SIPHASH_ALIGNMENT __alignof__(u64) +typedef struct { + u64 key[2]; +} siphash_key_t; + +u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key); +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key); +#endif + +u64 siphash_1u64(const u64 a, const siphash_key_t *key); +u64 siphash_2u64(const u64 a, const u64 b, const siphash_key_t *key); +u64 siphash_3u64(const u64 a, const u64 b, const u64 c, + const siphash_key_t *key); +u64 siphash_4u64(const u64 a, const u64 b, const u64 c, const u64 d, + const siphash_key_t *key); +u64 siphash_1u32(const u32 a, const siphash_key_t *key); +u64 siphash_3u32(const u32 a, const u32 b, const u32 c, + const siphash_key_t *key); + +static inline u64 siphash_2u32(const u32 a, const u32 b, + const siphash_key_t *key) +{ + return siphash_1u64((u64)b << 32 | a, key); +} +static inline u64 siphash_4u32(const u32 a, const u32 b, const u32 c, + const u32 d, const siphash_key_t *key) +{ + return siphash_2u64((u64)b << 32 | a, (u64)d << 32 | c, key); +} + + +static inline u64 ___siphash_aligned(const __le64 *data, size_t len, + const siphash_key_t *key) +{ + if (__builtin_constant_p(len) && len == 4) + return siphash_1u32(le32_to_cpup((const __le32 *)data), key); + if (__builtin_constant_p(len) && len == 8) + return siphash_1u64(le64_to_cpu(data[0]), key); + if (__builtin_constant_p(len) && len == 16) + return siphash_2u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]), + key); + if (__builtin_constant_p(len) && len == 24) + return siphash_3u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]), + le64_to_cpu(data[2]), key); + if (__builtin_constant_p(len) && len == 32) + return siphash_4u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]), + le64_to_cpu(data[2]), le64_to_cpu(data[3]), + key); + return __siphash_aligned(data, len, key); +} + +/** + * siphash - compute 64-bit siphash PRF value + * @data: buffer to hash + * @len: size of @data + * @key: the siphash key + */ +static inline u64 siphash(const void *data, size_t len, + const siphash_key_t *key) +{ +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + if (!IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT)) + return __siphash_unaligned(data, len, key); +#endif + return ___siphash_aligned(data, len, key); +} + +#define HSIPHASH_ALIGNMENT __alignof__(unsigned long) +typedef struct { + unsigned long key[2]; +} hsiphash_key_t; + +u32 __hsiphash_aligned(const void *data, size_t len, + const hsiphash_key_t *key); +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +u32 __hsiphash_unaligned(const void *data, size_t len, + const hsiphash_key_t *key); +#endif + +u32 hsiphash_1u32(const u32 a, const hsiphash_key_t *key); +u32 hsiphash_2u32(const u32 a, const u32 b, const hsiphash_key_t *key); +u32 hsiphash_3u32(const u32 a, const u32 b, const u32 c, + const hsiphash_key_t *key); +u32 hsiphash_4u32(const u32 a, const u32 b, const u32 c, const u32 d, + const hsiphash_key_t *key); + +static inline u32 ___hsiphash_aligned(const __le32 *data, size_t len, + const hsiphash_key_t *key) +{ + if (__builtin_constant_p(len) && len == 4) + return hsiphash_1u32(le32_to_cpu(data[0]), key); + if (__builtin_constant_p(len) && len == 8) + return hsiphash_2u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]), + key); + if (__builtin_constant_p(len) && len == 12) + return hsiphash_3u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]), + le32_to_cpu(data[2]), key); + if (__builtin_constant_p(len) && len == 16) + return hsiphash_4u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]), + le32_to_cpu(data[2]), le32_to_cpu(data[3]), + key); + return __hsiphash_aligned(data, len, key); +} + +/** + * hsiphash - compute 32-bit hsiphash PRF value + * @data: buffer to hash + * @len: size of @data + * @key: the hsiphash key + */ +static inline u32 hsiphash(const void *data, size_t len, + const hsiphash_key_t *key) +{ +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + if (!IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT)) + return __hsiphash_unaligned(data, len, key); +#endif + return ___hsiphash_aligned(data, len, key); +} + +#endif /* _LINUX_SIPHASH_H */
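[Editor's note: a short sketch of how the siphash API above is meant to be used for keyed hashtable bucketing; the flow_* names are hypothetical. The key must come from get_random_bytes() and stay secret, since the PRF's guarantees rest entirely on it. On 32-bit machines the hsiphash() variants are the cheaper choice for the same pattern.]

#include <linux/siphash.h>
#include <linux/random.h>
#include <linux/cache.h>

static siphash_key_t flow_key __read_mostly;

static void flow_table_init(void)
{
	/* Generate the secret key once, at table setup time. */
	get_random_bytes(&flow_key, sizeof(flow_key));
}

static u32 flow_bucket(u64 addrs, u32 ports, u32 nbuckets)
{
	/* Fixed-width inputs can use the siphash_Nu{32,64}() helpers
	 * directly and skip the length dispatch in ___siphash_aligned(). */
	u64 h = siphash_3u32((u32)(addrs >> 32), (u32)addrs, ports,
			     &flow_key);

	return (u32)h % nbuckets;
}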
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index a410715bbef8..c776abd86937 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -34,6 +34,7 @@ #include <linux/dma-mapping.h> #include <linux/netdev_features.h> #include <linux/sched.h> +#include <linux/sched/clock.h> #include <net/flow_dissector.h> #include <linux/splice.h> #include <linux/in6.h> @@ -585,20 +586,22 @@ static inline bool skb_mstamp_after(const struct skb_mstamp *t1, * @cloned: Head may be cloned (check refcnt to be sure) * @ip_summed: Driver fed us an IP checksum * @nohdr: Payload reference only, must not modify header - * @nfctinfo: Relationship of this skb to the connection * @pkt_type: Packet class * @fclone: skbuff clone status * @ipvs_property: skbuff is owned by ipvs + * @tc_skip_classify: do not classify packet. set by IFB device + * @tc_at_ingress: used within tc_classify to distinguish in/egress + * @tc_redirected: packet was redirected by a tc action + * @tc_from_ingress: if tc_redirected, tc_at_ingress at time of redirect * @peeked: this packet has been seen already, so stats have been * done for it, don't do them again * @nf_trace: netfilter packet trace flag * @protocol: Packet protocol from driver * @destructor: Destruct function - * @nfct: Associated connection, if any + * @_nfct: Associated connection, if any (with nfctinfo bits) * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c * @skb_iif: ifindex of device we arrived on * @tc_index: Traffic control index - * @tc_verd: traffic control verdict * @hash: the packet hash * @queue_mapping: Queue mapping for multiqueue devices * @xmit_more: More SKBs are pending for this queue @@ -610,6 +613,7 @@ static inline bool skb_mstamp_after(const struct skb_mstamp *t1, * @wifi_acked_valid: wifi_acked was set * @wifi_acked: whether frame was acked on wifi or not * @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS + * @dst_pending_confirm: need to confirm neighbour * @napi_id: id of the NAPI struct this skb came from * @secmark: security marking * @mark: Generic packet mark @@ -668,7 +672,7 @@ struct sk_buff { struct sec_path *sp; #endif #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) - struct nf_conntrack *nfct; + unsigned long _nfct; #endif #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) struct nf_bridge_info *nf_bridge; #endif @@ -721,7 +725,6 @@ struct sk_buff { __u8 pkt_type:3; __u8 pfmemalloc:1; __u8 ignore_df:1; - __u8 nfctinfo:3; __u8 nf_trace:1; __u8 ip_summed:2; @@ -740,6 +743,7 @@ struct sk_buff { __u8 csum_level:2; __u8 csum_bad:1; + __u8 dst_pending_confirm:1; 
#ifdef CONFIG_IPV6_NDISC_NODETYPE __u8 ndisc_nodetype:2; #endif @@ -749,13 +753,15 @@ struct sk_buff { #ifdef CONFIG_NET_SWITCHDEV __u8 offload_fwd_mark:1; #endif - /* 2, 4 or 5 bit hole */ +#ifdef CONFIG_NET_CLS_ACT + __u8 tc_skip_classify:1; + __u8 tc_at_ingress:1; + __u8 tc_redirected:1; + __u8 tc_from_ingress:1; +#endif #ifdef CONFIG_NET_SCHED __u16 tc_index; /* traffic control index */ -#ifdef CONFIG_NET_CLS_ACT - __u16 tc_verd; /* traffic control verdict */ -#endif #endif union { @@ -836,6 +842,7 @@ static inline bool skb_pfmemalloc(const struct sk_buff *skb) #define SKB_DST_NOREF 1UL #define SKB_DST_PTRMASK ~(SKB_DST_NOREF) +#define SKB_NFCT_PTRMASK ~(7UL) /** * skb_dst - returns skb dst_entry * @skb: buffer @@ -2178,6 +2185,11 @@ static inline unsigned char *skb_mac_header(const struct sk_buff *skb) return skb->head + skb->mac_header; } +static inline int skb_mac_offset(const struct sk_buff *skb) +{ + return skb_mac_header(skb) - skb->data; +} + static inline int skb_mac_header_was_set(const struct sk_buff *skb) { return skb->mac_header != (typeof(skb->mac_header))~0U; @@ -3553,6 +3565,15 @@ static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr, skb->csum = csum_add(skb->csum, delta); } +static inline struct nf_conntrack *skb_nfct(const struct sk_buff *skb) +{ +#if IS_ENABLED(CONFIG_NF_CONNTRACK) + return (void *)(skb->_nfct & SKB_NFCT_PTRMASK); +#else + return NULL; +#endif +} + #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) void nf_conntrack_destroy(struct nf_conntrack *nfct); static inline void nf_conntrack_put(struct nf_conntrack *nfct) @@ -3581,8 +3602,8 @@ static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge) static inline void nf_reset(struct sk_buff *skb) { #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) - nf_conntrack_put(skb->nfct); - skb->nfct = NULL; + nf_conntrack_put(skb_nfct(skb)); + skb->_nfct = 0; #endif #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) nf_bridge_put(skb->nf_bridge); @@ -3602,10 +3623,8 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src, bool copy) { #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) - dst->nfct = src->nfct; - nf_conntrack_get(src->nfct); - if (copy) - dst->nfctinfo = src->nfctinfo; + dst->_nfct = src->_nfct; + nf_conntrack_get(skb_nfct(src)); #endif #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) dst->nf_bridge = src->nf_bridge; @@ -3620,7 +3639,7 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src, static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src) { #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) - nf_conntrack_put(dst->nfct); + nf_conntrack_put(skb_nfct(dst)); #endif #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) nf_bridge_put(dst->nf_bridge); @@ -3652,9 +3671,7 @@ static inline bool skb_irq_freeable(const struct sk_buff *skb) #if IS_ENABLED(CONFIG_XFRM) !skb->sp && #endif -#if IS_ENABLED(CONFIG_NF_CONNTRACK) - !skb->nfct && -#endif + !skb_nfct(skb) && !skb->_skb_refdst && !skb_has_frag_list(skb); } @@ -3689,6 +3706,16 @@ static inline bool skb_rx_queue_recorded(const struct sk_buff *skb) return skb->queue_mapping != 0; } +static inline void skb_set_dst_pending_confirm(struct sk_buff *skb, u32 val) +{ + skb->dst_pending_confirm = val; +} + +static inline bool skb_get_dst_pending_confirm(const struct sk_buff *skb) +{ + return skb->dst_pending_confirm != 0; +} + static inline struct sec_path *skb_sec_path(struct sk_buff *skb) { #ifdef CONFIG_XFRM diff 
--git a/include/linux/slab.h b/include/linux/slab.h index 4c5363566815..3c37a8c51921 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -545,22 +545,49 @@ struct memcg_cache_array { * array to be accessed without taking any locks, on relocation we free the old * version only after a grace period. * - * Child caches will hold extra metadata needed for its operation. Fields are: + * Root and child caches hold different metadata. * - * @memcg: pointer to the memcg this cache belongs to - * @root_cache: pointer to the global, root cache, this cache was derived from + * @root_cache: Common to root and child caches. NULL for root, pointer to + * the root cache for children. * - * Both root and child caches of the same kind are linked into a list chained - * through @list. + * The following fields are specific to root caches. + * + * @memcg_caches: kmemcg ID indexed table of child caches. This table is + * used to index child caches during allocation and cleared + * early during shutdown. + * + * @root_caches_node: List node for slab_root_caches list. + * + * @children: List of all child caches. While the child caches are also + * reachable through @memcg_caches, a child cache remains on + * this list until it is actually destroyed. + * + * The following fields are specific to child caches. + * + * @memcg: Pointer to the memcg this cache belongs to. + * + * @children_node: List node for @root_cache->children list. + * + * @kmem_caches_node: List node for @memcg->kmem_caches list. */ struct memcg_cache_params { - bool is_root_cache; - struct list_head list; + struct kmem_cache *root_cache; union { - struct memcg_cache_array __rcu *memcg_caches; + struct { + struct memcg_cache_array __rcu *memcg_caches; + struct list_head __root_caches_node; + struct list_head children; + }; struct { struct mem_cgroup *memcg; - struct kmem_cache *root_cache; + struct list_head children_node; + struct list_head kmem_caches_node; + + void (*deact_fn)(struct kmem_cache *); + union { + struct rcu_head deact_rcu_head; + struct work_struct deact_work; + }; }; }; }; diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index 75f56c2ef2d4..07ef550c6627 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -113,9 +113,9 @@ struct kmem_cache { #ifdef CONFIG_SYSFS #define SLAB_SUPPORTS_SYSFS -void sysfs_slab_remove(struct kmem_cache *); +void sysfs_slab_release(struct kmem_cache *); #else -static inline void sysfs_slab_remove(struct kmem_cache *s) +static inline void sysfs_slab_release(struct kmem_cache *s) { } #endif diff --git a/include/linux/soc/qcom/mdt_loader.h b/include/linux/soc/qcom/mdt_loader.h new file mode 100644 index 000000000000..f423001db3a9 --- /dev/null +++ b/include/linux/soc/qcom/mdt_loader.h @@ -0,0 +1,18 @@ +#ifndef __QCOM_MDT_LOADER_H__ +#define __QCOM_MDT_LOADER_H__ + +#include <linux/types.h> + +#define QCOM_MDT_TYPE_MASK (7 << 24) +#define QCOM_MDT_TYPE_HASH (2 << 24) +#define QCOM_MDT_RELOCATABLE BIT(27) + +struct device; +struct firmware; + +ssize_t qcom_mdt_get_size(const struct firmware *fw); +int qcom_mdt_load(struct device *dev, const struct firmware *fw, + const char *fw_name, int pas_id, void *mem_region, + phys_addr_t mem_phys, size_t mem_size); + +#endif diff --git a/include/linux/soc/qcom/smem_state.h b/include/linux/soc/qcom/smem_state.h index 7b88697929e9..b8478ee7a71f 100644 --- a/include/linux/soc/qcom/smem_state.h +++ b/include/linux/soc/qcom/smem_state.h @@ -1,7 +1,7 @@ #ifndef __QCOM_SMEM_STATE__ #define __QCOM_SMEM_STATE__ -#include 
<linux/errno.h> +#include <linux/err.h> struct device_node; struct qcom_smem_state; diff --git a/include/linux/soc/samsung/exynos-regs-pmu.h b/include/linux/soc/samsung/exynos-regs-pmu.h index d30186e2b609..49df0a01a2cc 100644 --- a/include/linux/soc/samsung/exynos-regs-pmu.h +++ b/include/linux/soc/samsung/exynos-regs-pmu.h @@ -7,7 +7,13 @@ * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. -*/ + * + * + * Notice: + * This is not a list of all Exynos Power Management Unit SFRs. + * There are too many of them, not mentioning subtle differences + * between SoCs. For now, put here only the used registers. + */ #ifndef __LINUX_SOC_EXYNOS_REGS_PMU_H #define __LINUX_SOC_EXYNOS_REGS_PMU_H __FILE__ @@ -38,7 +44,6 @@ #define EXYNOS_CORE_PO_RESET(n) ((1 << 4) << n) #define EXYNOS_WAKEUP_FROM_LOWPWR (1 << 28) #define EXYNOS_SWRESET 0x0400 -#define EXYNOS5440_SWRESET 0x00C4 #define S5P_WAKEUP_STAT 0x0600 #define S5P_EINT_WAKEUP_MASK 0x0604 @@ -136,12 +141,6 @@ #define EXYNOS_COMMON_OPTION(_nr) \ (EXYNOS_COMMON_CONFIGURATION(_nr) + 0x8) -#define EXYNOS_CORE_LOCAL_PWR_EN 0x3 - -#define EXYNOS_ARM_COMMON_STATUS 0x2504 -#define EXYNOS_COMMON_OPTION(_nr) \ - (EXYNOS_COMMON_CONFIGURATION(_nr) + 0x8) - #define EXYNOS_ARM_L2_CONFIGURATION 0x2600 #define EXYNOS_L2_CONFIGURATION(_nr) \ (EXYNOS_ARM_L2_CONFIGURATION + ((_nr) * 0x80)) @@ -149,17 +148,8 @@ (EXYNOS_L2_CONFIGURATION(_nr) + 0x4) #define EXYNOS_L2_OPTION(_nr) \ (EXYNOS_L2_CONFIGURATION(_nr) + 0x8) -#define EXYNOS_L2_COMMON_PWR_EN 0x3 -#define EXYNOS_ARM_CORE_X_STATUS_OFFSET 0x4 - -#define EXYNOS5_APLL_SYSCLK_CONFIGURATION 0x2A00 -#define EXYNOS5_APLL_SYSCLK_STATUS 0x2A04 - -#define EXYNOS5_ARM_L2_OPTION 0x2608 -#define EXYNOS5_USE_RETENTION BIT(4) - -#define EXYNOS5_L2RSTDISABLE_VALUE BIT(3) +#define EXYNOS_L2_USE_RETENTION BIT(4) #define S5P_PAD_RET_MAUDIO_OPTION 0x3028 #define S5P_PAD_RET_MMC2_OPTION 0x30c8 @@ -411,7 +401,6 @@ #define EXYNOS5_SATA_MEM_SYS_PWR_REG 0x11FC #define EXYNOS5_PAD_RETENTION_DRAM_SYS_PWR_REG 0x1200 #define EXYNOS5_PAD_RETENTION_MAU_SYS_PWR_REG 0x1204 -#define EXYNOS5_PAD_RETENTION_EFNAND_SYS_PWR_REG 0x1208 #define EXYNOS5_PAD_RETENTION_GPIO_SYS_PWR_REG 0x1220 #define EXYNOS5_PAD_RETENTION_UART_SYS_PWR_REG 0x1224 #define EXYNOS5_PAD_RETENTION_MMCA_SYS_PWR_REG 0x1228 @@ -485,7 +474,6 @@ #define EXYNOS5420_SWRESET_KFC_SEL 0x3 /* Only for EXYNOS5420 */ -#define EXYNOS5420_ISP_ARM_OPTION 0x2488 #define EXYNOS5420_L2RSTDISABLE_VALUE BIT(3) #define EXYNOS5420_LPI_MASK 0x0004 @@ -494,9 +482,6 @@ #define EXYNOS5420_ATB_KFC BIT(13) #define EXYNOS5420_ATB_ISP_ARM BIT(19) #define EXYNOS5420_EMULATION BIT(31) -#define ATB_ISP_ARM BIT(12) -#define ATB_KFC BIT(13) -#define ATB_NOC BIT(14) #define EXYNOS5420_ARM_INTR_SPREAD_ENABLE 0x0100 #define EXYNOS5420_ARM_INTR_SPREAD_USE_STANDBYWFI 0x0104 @@ -510,11 +495,6 @@ #define EXYNOS5420_KFC_CORE_RESET(_nr) \ ((EXYNOS5420_KFC_CORE_RESET0 | EXYNOS5420_KFC_ETM_RESET0) << (_nr)) -#define EXYNOS5420_BB_CON1 0x0784 -#define EXYNOS5420_BB_SEL_EN BIT(31) -#define EXYNOS5420_BB_PMOS_EN BIT(7) -#define EXYNOS5420_BB_1300X 0XF - #define EXYNOS5420_ARM_CORE2_SYS_PWR_REG 0x1020 #define EXYNOS5420_DIS_IRQ_ARM_CORE2_LOCAL_SYS_PWR_REG 0x1024 #define EXYNOS5420_DIS_IRQ_ARM_CORE2_CENTRAL_SYS_PWR_REG 0x1028 @@ -546,15 +526,6 @@ #define EXYNOS5420_SPLL_SYSCLK_SYS_PWR_REG 0x1178 #define EXYNOS5420_INTRAM_MEM_SYS_PWR_REG 0x11B8 #define EXYNOS5420_INTROM_MEM_SYS_PWR_REG 0x11BC -#define 
EXYNOS5420_ONENANDXL_MEM_SYS_PWR 0x11C0 -#define EXYNOS5420_USBDEV_MEM_SYS_PWR 0x11CC -#define EXYNOS5420_USBDEV1_MEM_SYS_PWR 0x11D0 -#define EXYNOS5420_SDMMC_MEM_SYS_PWR 0x11D4 -#define EXYNOS5420_CSSYS_MEM_SYS_PWR 0x11D8 -#define EXYNOS5420_SECSS_MEM_SYS_PWR 0x11DC -#define EXYNOS5420_ROTATOR_MEM_SYS_PWR 0x11E0 -#define EXYNOS5420_INTRAM_MEM_SYS_PWR 0x11E4 -#define EXYNOS5420_INTROM_MEM_SYS_PWR 0x11E8 #define EXYNOS5420_PAD_RETENTION_JTAG_SYS_PWR_REG 0x1208 #define EXYNOS5420_PAD_RETENTION_DRAM_SYS_PWR_REG 0x1210 #define EXYNOS5420_PAD_RETENTION_UART_SYS_PWR_REG 0x1214 @@ -605,13 +576,7 @@ #define EXYNOS5420_CMU_RESET_MSC_SYS_PWR_REG 0x159C #define EXYNOS5420_CMU_RESET_FSYS_SYS_PWR_REG 0x15A0 #define EXYNOS5420_SFR_AXI_CGDIS1 0x15E4 -#define EXYNOS_ARM_CORE2_CONFIGURATION 0x2100 -#define EXYNOS5420_ARM_CORE2_OPTION 0x2108 -#define EXYNOS_ARM_CORE3_CONFIGURATION 0x2180 -#define EXYNOS5420_ARM_CORE3_OPTION 0x2188 -#define EXYNOS5420_ARM_COMMON_STATUS 0x2504 #define EXYNOS5420_ARM_COMMON_OPTION 0x2508 -#define EXYNOS5420_KFC_COMMON_STATUS 0x2584 #define EXYNOS5420_KFC_COMMON_OPTION 0x2588 #define EXYNOS5420_LOGIC_RESET_DURATION3 0x2D1C @@ -626,33 +591,9 @@ #define EXYNOS_PAD_RET_DRAM_OPTION 0x3008 #define EXYNOS_PAD_RET_MAUDIO_OPTION 0x3028 #define EXYNOS_PAD_RET_JTAG_OPTION 0x3048 -#define EXYNOS_PAD_RET_GPIO_OPTION 0x3108 -#define EXYNOS_PAD_RET_UART_OPTION 0x3128 -#define EXYNOS_PAD_RET_MMCA_OPTION 0x3148 -#define EXYNOS_PAD_RET_MMCB_OPTION 0x3168 #define EXYNOS_PAD_RET_EBIA_OPTION 0x3188 #define EXYNOS_PAD_RET_EBIB_OPTION 0x31A8 -#define EXYNOS_PS_HOLD_CONTROL 0x330C - -/* For SYS_PWR_REG */ -#define EXYNOS_SYS_PWR_CFG BIT(0) - -#define EXYNOS5420_MFC_CONFIGURATION 0x4060 -#define EXYNOS5420_MFC_STATUS 0x4064 -#define EXYNOS5420_MFC_OPTION 0x4068 -#define EXYNOS5420_G3D_CONFIGURATION 0x4080 -#define EXYNOS5420_G3D_STATUS 0x4084 -#define EXYNOS5420_G3D_OPTION 0x4088 -#define EXYNOS5420_DISP0_CONFIGURATION 0x40A0 -#define EXYNOS5420_DISP0_STATUS 0x40A4 -#define EXYNOS5420_DISP0_OPTION 0x40A8 -#define EXYNOS5420_DISP1_CONFIGURATION 0x40C0 -#define EXYNOS5420_DISP1_STATUS 0x40C4 -#define EXYNOS5420_DISP1_OPTION 0x40C8 -#define EXYNOS5420_MAU_CONFIGURATION 0x40E0 -#define EXYNOS5420_MAU_STATUS 0x40E4 -#define EXYNOS5420_MAU_OPTION 0x40E8 #define EXYNOS5420_FSYS2_OPTION 0x4168 #define EXYNOS5420_PSGEN_OPTION 0x4188 @@ -690,4 +631,20 @@ | EXYNOS5420_KFC_USE_STANDBY_WFI2 \ | EXYNOS5420_KFC_USE_STANDBY_WFI3) +/* For EXYNOS5433 */ +#define EXYNOS5433_PAD_RETENTION_AUD_OPTION (0x3028) +#define EXYNOS5433_PAD_RETENTION_MMC2_OPTION (0x30C8) +#define EXYNOS5433_PAD_RETENTION_TOP_OPTION (0x3108) +#define EXYNOS5433_PAD_RETENTION_UART_OPTION (0x3128) +#define EXYNOS5433_PAD_RETENTION_MMC0_OPTION (0x3148) +#define EXYNOS5433_PAD_RETENTION_MMC1_OPTION (0x3168) +#define EXYNOS5433_PAD_RETENTION_EBIA_OPTION (0x3188) +#define EXYNOS5433_PAD_RETENTION_EBIB_OPTION (0x31A8) +#define EXYNOS5433_PAD_RETENTION_SPI_OPTION (0x31C8) +#define EXYNOS5433_PAD_RETENTION_MIF_OPTION (0x31E8) +#define EXYNOS5433_PAD_RETENTION_USBXTI_OPTION (0x3228) +#define EXYNOS5433_PAD_RETENTION_BOOTLDO_OPTION (0x3248) +#define EXYNOS5433_PAD_RETENTION_UFS_OPTION (0x3268) +#define EXYNOS5433_PAD_RETENTION_FSYSGENIO_OPTION (0x32A8) + #endif /* __LINUX_SOC_EXYNOS_REGS_PMU_H */ diff --git a/include/linux/soc/ti/knav_dma.h b/include/linux/soc/ti/knav_dma.h index 35cb9264e0d5..2b7882666ef6 100644 --- a/include/linux/soc/ti/knav_dma.h +++ b/include/linux/soc/ti/knav_dma.h @@ -41,6 +41,8 @@ #define KNAV_DMA_DESC_RETQ_SHIFT 0 #define 
KNAV_DMA_DESC_RETQ_MASK MASK(14) #define KNAV_DMA_DESC_BUF_LEN_MASK MASK(22) +#define KNAV_DMA_DESC_EFLAGS_MASK MASK(4) +#define KNAV_DMA_DESC_EFLAGS_SHIFT 20 #define KNAV_DMA_NUM_EPIB_WORDS 4 #define KNAV_DMA_NUM_PS_WORDS 16 diff --git a/include/linux/socket.h b/include/linux/socket.h index b5cc5a6d7011..082027457825 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -92,9 +92,9 @@ struct cmsghdr { #define CMSG_ALIGN(len) ( ((len)+sizeof(long)-1) & ~(sizeof(long)-1) ) -#define CMSG_DATA(cmsg) ((void *)((char *)(cmsg) + CMSG_ALIGN(sizeof(struct cmsghdr)))) -#define CMSG_SPACE(len) (CMSG_ALIGN(sizeof(struct cmsghdr)) + CMSG_ALIGN(len)) -#define CMSG_LEN(len) (CMSG_ALIGN(sizeof(struct cmsghdr)) + (len)) +#define CMSG_DATA(cmsg) ((void *)((char *)(cmsg) + sizeof(struct cmsghdr))) +#define CMSG_SPACE(len) (sizeof(struct cmsghdr) + CMSG_ALIGN(len)) +#define CMSG_LEN(len) (sizeof(struct cmsghdr) + (len)) #define __CMSG_FIRSTHDR(ctl,len) ((len) >= sizeof(struct cmsghdr) ? \ (struct cmsghdr *)(ctl) : \ @@ -202,8 +202,12 @@ struct ucred { #define AF_VSOCK 40 /* vSockets */ #define AF_KCM 41 /* Kernel Connection Multiplexor*/ #define AF_QIPCRTR 42 /* Qualcomm IPC Router */ +#define AF_SMC 43 /* smc sockets: reserve number for + * PF_SMC protocol family that + * reuses AF_INET address family + */ -#define AF_MAX 43 /* For now.. */ +#define AF_MAX 44 /* For now.. */ /* Protocol families, same as address families. */ #define PF_UNSPEC AF_UNSPEC @@ -251,6 +255,7 @@ struct ucred { #define PF_VSOCK AF_VSOCK #define PF_KCM AF_KCM #define PF_QIPCRTR AF_QIPCRTR +#define PF_SMC AF_SMC #define PF_MAX AF_MAX /* Maximum queue length specifiable by listen. */ diff --git a/include/linux/spi/flash.h b/include/linux/spi/flash.h index 3f22932e67a4..f4199e758f97 100644 --- a/include/linux/spi/flash.h +++ b/include/linux/spi/flash.h @@ -7,7 +7,7 @@ struct mtd_partition; * struct flash_platform_data: board-specific flash data * @name: optional flash device name (eg, as used with mtdparts=) * @parts: optional array of mtd_partitions for static partitioning - * @nr_parts: number of mtd_partitions for static partitoning + * @nr_parts: number of mtd_partitions for static partitioning * @type: optional flash device type (e.g. m25p80 vs m25p64), for use * with chips that can't be queried for JEDEC or other IDs * diff --git a/include/linux/spi/tsc2005.h b/include/linux/spi/tsc2005.h deleted file mode 100644 index 563b3b1799a8..000000000000 --- a/include/linux/spi/tsc2005.h +++ /dev/null @@ -1,34 +0,0 @@ -/* - * This file is part of TSC2005 touchscreen driver - * - * Copyright (C) 2009-2010 Nokia Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#ifndef _LINUX_SPI_TSC2005_H -#define _LINUX_SPI_TSC2005_H - -#include <linux/types.h> - -struct tsc2005_platform_data { - int ts_pressure_max; - int ts_pressure_fudge; - int ts_x_max; - int ts_x_fudge; - int ts_y_max; - int ts_y_fudge; - int ts_x_plate_ohm; - unsigned int esd_timeout_ms; - void (*set_reset)(bool enable); -}; - -#endif diff --git a/include/linux/sram.h b/include/linux/sram.h new file mode 100644 index 000000000000..c97dcbe8ce25 --- /dev/null +++ b/include/linux/sram.h @@ -0,0 +1,27 @@ +/* + * Generic SRAM Driver Interface + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef __LINUX_SRAM_H__ +#define __LINUX_SRAM_H__ + +struct gen_pool; + +#ifdef CONFIG_SRAM_EXEC +int sram_exec_copy(struct gen_pool *pool, void *dst, void *src, size_t size); +#else +static inline int sram_exec_copy(struct gen_pool *pool, void *dst, void *src, + size_t size) +{ + return -ENODEV; +} +#endif /* CONFIG_SRAM_EXEC */ +#endif /* __LINUX_SRAM_H__ */ diff --git a/include/linux/stat.h b/include/linux/stat.h index 075cb0c7eb2a..c76e524fb34b 100644 --- a/include/linux/stat.h +++ b/include/linux/stat.h @@ -18,20 +18,32 @@ #include <linux/time.h> #include <linux/uidgid.h> +#define KSTAT_QUERY_FLAGS (AT_STATX_SYNC_TYPE) + struct kstat { - u64 ino; - dev_t dev; + u32 result_mask; /* What fields the user got */ umode_t mode; unsigned int nlink; + uint32_t blksize; /* Preferred I/O size */ + u64 attributes; +#define KSTAT_ATTR_FS_IOC_FLAGS \ + (STATX_ATTR_COMPRESSED | \ + STATX_ATTR_IMMUTABLE | \ + STATX_ATTR_APPEND | \ + STATX_ATTR_NODUMP | \ + STATX_ATTR_ENCRYPTED \ + )/* Attrs corresponding to FS_*_FL flags */ + u64 ino; + dev_t dev; + dev_t rdev; kuid_t uid; kgid_t gid; - dev_t rdev; loff_t size; - struct timespec atime; + struct timespec atime; struct timespec mtime; struct timespec ctime; - unsigned long blksize; - unsigned long long blocks; + struct timespec btime; /* File creation time */ + u64 blocks; }; #endif diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index 266dab9ad782..fc273e9d5f67 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -103,7 +103,6 @@ struct stmmac_axi { u32 axi_wr_osr_lmt; u32 axi_rd_osr_lmt; bool axi_kbbe; - bool axi_axi_all; u32 axi_blen[AXI_BLEN]; bool axi_fb; bool axi_mb; @@ -135,13 +134,18 @@ struct plat_stmmacenet_data { int tx_fifo_size; int rx_fifo_size; void (*fix_mac_speed)(void *priv, unsigned int speed); - void (*bus_setup)(void __iomem *ioaddr); int (*init)(struct platform_device *pdev, void *priv); void (*exit)(struct platform_device *pdev, void *priv); void *bsp_priv; + struct clk *stmmac_clk; + struct clk *pclk; + struct clk *clk_ptp_ref; + unsigned int clk_ptp_rate; + struct reset_control *stmmac_rst; struct stmmac_axi *axi; int has_gmac4; bool tso_en; int mac_port_sel_speed; + bool en_tx_lpi_clockgating; }; #endif diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h index b1bc62ba20a2..8fd3504946ad 100644 --- a/include/linux/sunrpc/auth.h +++ b/include/linux/sunrpc/auth.h @@ -32,6 +32,7 @@ */ #define UNX_MAXNODENAME __NEW_UTS_LEN #define UNX_CALLSLACK (21 + XDR_QUADLEN(UNX_MAXNODENAME)) +#define 
UNX_NGROUPS 16 struct rpcsec_gss_info; @@ -63,9 +64,6 @@ struct rpc_cred { struct rcu_head cr_rcu; struct rpc_auth * cr_auth; const struct rpc_credops *cr_ops; -#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) - unsigned long cr_magic; /* 0x0f4aa4f0 */ -#endif unsigned long cr_expire; /* when to gc */ unsigned long cr_flags; /* various flags */ atomic_t cr_count; /* ref count */ @@ -79,8 +77,6 @@ struct rpc_cred { #define RPCAUTH_CRED_HASHED 2 #define RPCAUTH_CRED_NEGATIVE 3 -#define RPCAUTH_CRED_MAGIC 0x0f4aa4f0 - /* rpc_auth au_flags */ #define RPCAUTH_AUTH_NO_CRKEY_TIMEOUT 0x0001 /* underlying cred has no key timeout */ diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h index 8a511c0985aa..270bad0e1bed 100644 --- a/include/linux/sunrpc/cache.h +++ b/include/linux/sunrpc/cache.h @@ -63,15 +63,6 @@ struct cache_head { #define CACHE_NEW_EXPIRY 120 /* keep new things pending confirmation for 120 seconds */ -struct cache_detail_procfs { - struct proc_dir_entry *proc_ent; - struct proc_dir_entry *flush_ent, *channel_ent, *content_ent; -}; - -struct cache_detail_pipefs { - struct dentry *dir; -}; - struct cache_detail { struct module * owner; int hash_size; @@ -123,9 +114,9 @@ struct cache_detail { time_t last_warn; /* when we last warned about no readers */ union { - struct cache_detail_procfs procfs; - struct cache_detail_pipefs pipefs; - } u; + struct proc_dir_entry *procfs; + struct dentry *pipefs; + }; struct net *net; }; @@ -204,8 +195,11 @@ static inline void cache_put(struct cache_head *h, struct cache_detail *cd) kref_put(&h->ref, cd->cache_put); } -static inline int cache_is_expired(struct cache_detail *detail, struct cache_head *h) +static inline bool cache_is_expired(struct cache_detail *detail, struct cache_head *h) { + if (!test_bit(CACHE_VALID, &h->flags)) + return false; + return (h->expiry_time < seconds_since_boot()) || (detail->flush_time >= h->last_refresh); } @@ -227,6 +221,7 @@ extern void sunrpc_destroy_cache_detail(struct cache_detail *cd); extern int sunrpc_cache_register_pipefs(struct dentry *parent, const char *, umode_t, struct cache_detail *); extern void sunrpc_cache_unregister_pipefs(struct cache_detail *); +extern void sunrpc_cache_unhash(struct cache_detail *, struct cache_head *); /* Must store cache_detail in seq_file->private if using next three functions */ extern void *cache_seq_start(struct seq_file *file, loff_t *pos); diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index 333ad11b3dd9..6095ecba0dde 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -182,7 +182,6 @@ int rpc_protocol(struct rpc_clnt *); struct net * rpc_net_ns(struct rpc_clnt *); size_t rpc_max_payload(struct rpc_clnt *); size_t rpc_max_bc_payload(struct rpc_clnt *); -unsigned long rpc_get_timeout(struct rpc_clnt *clnt); void rpc_force_rebind(struct rpc_clnt *); size_t rpc_peeraddr(struct rpc_clnt *, struct sockaddr *, size_t); const char *rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t); @@ -202,8 +201,9 @@ int rpc_clnt_add_xprt(struct rpc_clnt *, struct xprt_create *, struct rpc_xprt *, void *), void *data); -void rpc_cap_max_reconnect_timeout(struct rpc_clnt *clnt, - unsigned long timeo); +void rpc_set_connect_timeout(struct rpc_clnt *clnt, + unsigned long connect_timeout, + unsigned long reconnect_timeout); int rpc_clnt_setup_test_and_add_xprt(struct rpc_clnt *, struct rpc_xprt_switch *, diff --git a/include/linux/sunrpc/debug.h b/include/linux/sunrpc/debug.h index 59a7889e15db..8da0f37f3bdc 100644 --- 
a/include/linux/sunrpc/debug.h +++ b/include/linux/sunrpc/debug.h @@ -20,33 +20,55 @@ extern unsigned int nfsd_debug; extern unsigned int nlm_debug; #endif -#define dprintk(args...) dfprintk(FACILITY, ## args) -#define dprintk_rcu(args...) dfprintk_rcu(FACILITY, ## args) +#define dprintk(fmt, ...) \ + dfprintk(FACILITY, fmt, ##__VA_ARGS__) +#define dprintk_cont(fmt, ...) \ + dfprintk_cont(FACILITY, fmt, ##__VA_ARGS__) +#define dprintk_rcu(fmt, ...) \ + dfprintk_rcu(FACILITY, fmt, ##__VA_ARGS__) +#define dprintk_rcu_cont(fmt, ...) \ + dfprintk_rcu_cont(FACILITY, fmt, ##__VA_ARGS__) #undef ifdebug #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) # define ifdebug(fac) if (unlikely(rpc_debug & RPCDBG_##fac)) -# define dfprintk(fac, args...) \ - do { \ - ifdebug(fac) \ - printk(KERN_DEFAULT args); \ - } while (0) - -# define dfprintk_rcu(fac, args...) \ - do { \ - ifdebug(fac) { \ - rcu_read_lock(); \ - printk(KERN_DEFAULT args); \ - rcu_read_unlock(); \ - } \ - } while (0) +# define dfprintk(fac, fmt, ...) \ +do { \ + ifdebug(fac) \ + printk(KERN_DEFAULT fmt, ##__VA_ARGS__); \ +} while (0) + +# define dfprintk_cont(fac, fmt, ...) \ +do { \ + ifdebug(fac) \ + printk(KERN_CONT fmt, ##__VA_ARGS__); \ +} while (0) + +# define dfprintk_rcu(fac, fmt, ...) \ +do { \ + ifdebug(fac) { \ + rcu_read_lock(); \ + printk(KERN_DEFAULT fmt, ##__VA_ARGS__); \ + rcu_read_unlock(); \ + } \ +} while (0) + +# define dfprintk_rcu_cont(fac, fmt, ...) \ +do { \ + ifdebug(fac) { \ + rcu_read_lock(); \ + printk(KERN_CONT fmt, ##__VA_ARGS__); \ + rcu_read_unlock(); \ + } \ +} while (0) # define RPC_IFDEBUG(x) x #else # define ifdebug(fac) if (0) -# define dfprintk(fac, args...) do {} while (0) -# define dfprintk_rcu(fac, args...) do {} while (0) +# define dfprintk(fac, fmt, ...) do {} while (0) +# define dfprintk_cont(fac, fmt, ...) do {} while (0) +# define dfprintk_rcu(fac, fmt, ...) do {} while (0) # define RPC_IFDEBUG(x) #endif diff --git a/include/linux/sunrpc/rpc_rdma.h b/include/linux/sunrpc/rpc_rdma.h index cfda6adcf33c..245fc59b7324 100644 --- a/include/linux/sunrpc/rpc_rdma.h +++ b/include/linux/sunrpc/rpc_rdma.h @@ -110,6 +110,15 @@ struct rpcrdma_msg { }; /* + * XDR sizes, in quads + */ +enum { + rpcrdma_fixed_maxsz = 4, + rpcrdma_segment_maxsz = 4, + rpcrdma_readchunk_maxsz = 2 + rpcrdma_segment_maxsz, +}; + +/* * Smallest RPC/RDMA header: rm_xid through rm_type, then rm_nochunks */ #define RPCRDMA_HDRLEN_MIN (sizeof(__be32) * 7) diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 7321ae933867..e770abeed32d 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -400,10 +400,14 @@ struct svc_version { struct svc_procedure * vs_proc; /* per-procedure info */ u32 vs_xdrsize; /* xdrsize needed for this version */ - unsigned int vs_hidden : 1, /* Don't register with portmapper. - * Only used for nfsacl so far. */ - vs_rpcb_optnl:1;/* Don't care the result of register. - * Only used for nfsv4. */ + /* Don't register with rpcbind */ + bool vs_hidden; + + /* Don't care if the rpcbind registration fails */ + bool vs_rpcb_optnl; + + /* Need xprt with congestion control */ + bool vs_need_cong_ctrl; /* Override dispatch function (e.g. when caching replies). * A return value of 0 means drop the request. diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index 757fb963696c..b105f73e3ca2 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -70,7 +70,7 @@ extern atomic_t rdma_stat_sq_prod; * completes. 
*/ struct svc_rdma_op_ctxt { - struct list_head free; + struct list_head list; struct svc_rdma_op_ctxt *read_hdr; struct svc_rdma_fastreg_mr *frmr; int hdr_count; @@ -78,7 +78,6 @@ struct svc_rdma_op_ctxt { struct ib_cqe cqe; struct ib_cqe reg_cqe; struct ib_cqe inv_cqe; - struct list_head dto_q; u32 byte_len; u32 position; struct svcxprt_rdma *xprt; @@ -141,7 +140,8 @@ struct svcxprt_rdma { atomic_t sc_sq_avail; /* SQEs ready to be consumed */ unsigned int sc_sq_depth; /* Depth of SQ */ unsigned int sc_rq_depth; /* Depth of RQ */ - u32 sc_max_requests; /* Forward credits */ + __be32 sc_fc_credits; /* Forward credits */ + u32 sc_max_requests; /* Max requests */ u32 sc_max_bc_requests;/* Backward credits */ int sc_max_req_size; /* Size of each RQ WR buf */ @@ -171,7 +171,6 @@ struct svcxprt_rdma { wait_queue_head_t sc_send_wait; /* SQ exhaustion waitlist */ unsigned long sc_flags; - struct list_head sc_dto_q; /* DTO tasklet I/O pending Q */ struct list_head sc_read_complete_q; struct work_struct sc_work; }; @@ -214,11 +213,7 @@ extern void svc_rdma_xdr_encode_write_list(struct rpcrdma_msg *, int); extern void svc_rdma_xdr_encode_reply_array(struct rpcrdma_write_array *, int); extern void svc_rdma_xdr_encode_array_chunk(struct rpcrdma_write_array *, int, __be32, __be64, u32); -extern void svc_rdma_xdr_encode_reply_header(struct svcxprt_rdma *, - struct rpcrdma_msg *, - struct rpcrdma_msg *, - enum rpcrdma_proc); -extern int svc_rdma_xdr_get_reply_hdr_len(struct rpcrdma_msg *); +extern unsigned int svc_rdma_xdr_get_reply_hdr_len(__be32 *rdma_resp); /* svc_rdma_recvfrom.c */ extern int svc_rdma_recvfrom(struct svc_rqst *); diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index 7440290f64ac..ddb7f94a9d06 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h @@ -67,6 +67,7 @@ struct svc_xprt { #define XPT_CACHE_AUTH 11 /* cache auth info */ #define XPT_LOCAL 12 /* connection from loopback interface */ #define XPT_KILL_TEMP 13 /* call xpo_kill_temp_xprt before closing */ +#define XPT_CONG_CTRL 14 /* has congestion control */ struct svc_serv *xpt_server; /* service for transport */ atomic_t xpt_reserved; /* space on outq that is rsvd */ diff --git a/include/linux/sunrpc/types.h b/include/linux/sunrpc/types.h index d222f47550af..11a7536c0fd2 100644 --- a/include/linux/sunrpc/types.h +++ b/include/linux/sunrpc/types.h @@ -10,6 +10,7 @@ #define _LINUX_SUNRPC_TYPES_H_ #include <linux/timer.h> +#include <linux/sched/signal.h> #include <linux/workqueue.h> #include <linux/sunrpc/debug.h> #include <linux/list.h> diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index 56c48c884a24..054c8cde18f3 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -242,6 +242,185 @@ extern unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len); extern void xdr_enter_page(struct xdr_stream *xdr, unsigned int len); extern int xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len, int (*actor)(struct scatterlist *, void *), void *data); +ssize_t xdr_stream_decode_string_dup(struct xdr_stream *xdr, char **str, + size_t maxlen, gfp_t gfp_flags); +/** + * xdr_align_size - Calculate padded size of an object + * @n: Size of an object being XDR encoded (in bytes) + * + * Return value: + * Size (in bytes) of the object including xdr padding + */ +static inline size_t +xdr_align_size(size_t n) +{ + const size_t mask = sizeof(__u32) - 1; + + return (n + mask) & ~mask; +} + +/** + * 
xdr_stream_encode_u32 - Encode a 32-bit integer + * @xdr: pointer to xdr_stream + * @n: integer to encode + * + * Return values: + * On success, returns length in bytes of XDR buffer consumed + * %-EMSGSIZE on XDR buffer overflow + */ +static inline ssize_t +xdr_stream_encode_u32(struct xdr_stream *xdr, __u32 n) +{ + const size_t len = sizeof(n); + __be32 *p = xdr_reserve_space(xdr, len); + + if (unlikely(!p)) + return -EMSGSIZE; + *p = cpu_to_be32(n); + return len; +} + +/** + * xdr_stream_encode_u64 - Encode a 64-bit integer + * @xdr: pointer to xdr_stream + * @n: 64-bit integer to encode + * + * Return values: + * On success, returns length in bytes of XDR buffer consumed + * %-EMSGSIZE on XDR buffer overflow + */ +static inline ssize_t +xdr_stream_encode_u64(struct xdr_stream *xdr, __u64 n) +{ + const size_t len = sizeof(n); + __be32 *p = xdr_reserve_space(xdr, len); + + if (unlikely(!p)) + return -EMSGSIZE; + xdr_encode_hyper(p, n); + return len; +} + +/** + * xdr_stream_encode_opaque_fixed - Encode fixed length opaque xdr data + * @xdr: pointer to xdr_stream + * @ptr: pointer to opaque data object + * @len: size of object pointed to by @ptr + * + * Return values: + * On success, returns length in bytes of XDR buffer consumed + * %-EMSGSIZE on XDR buffer overflow + */ +static inline ssize_t +xdr_stream_encode_opaque_fixed(struct xdr_stream *xdr, const void *ptr, size_t len) +{ + __be32 *p = xdr_reserve_space(xdr, len); + + if (unlikely(!p)) + return -EMSGSIZE; + xdr_encode_opaque_fixed(p, ptr, len); + return xdr_align_size(len); +} + +/** + * xdr_stream_encode_opaque - Encode variable length opaque xdr data + * @xdr: pointer to xdr_stream + * @ptr: pointer to opaque data object + * @len: size of object pointed to by @ptr + * + * Return values: + * On success, returns length in bytes of XDR buffer consumed + * %-EMSGSIZE on XDR buffer overflow + */ +static inline ssize_t +xdr_stream_encode_opaque(struct xdr_stream *xdr, const void *ptr, size_t len) +{ + size_t count = sizeof(__u32) + xdr_align_size(len); + __be32 *p = xdr_reserve_space(xdr, count); + + if (unlikely(!p)) + return -EMSGSIZE; + xdr_encode_opaque(p, ptr, len); + return count; +} + +/** + * xdr_stream_decode_u32 - Decode a 32-bit integer + * @xdr: pointer to xdr_stream + * @ptr: location to store integer + * + * Return values: + * %0 on success + * %-EBADMSG on XDR buffer overflow + */ +static inline ssize_t +xdr_stream_decode_u32(struct xdr_stream *xdr, __u32 *ptr) +{ + const size_t count = sizeof(*ptr); + __be32 *p = xdr_inline_decode(xdr, count); + + if (unlikely(!p)) + return -EBADMSG; + *ptr = be32_to_cpup(p); + return 0; +} + +/** + * xdr_stream_decode_opaque_fixed - Decode fixed length opaque xdr data + * @xdr: pointer to xdr_stream + * @ptr: location to store data + * @len: size of buffer pointed to by @ptr + * + * Return values: + * On success, returns size of object stored in @ptr + * %-EBADMSG on XDR buffer overflow + */ +static inline ssize_t +xdr_stream_decode_opaque_fixed(struct xdr_stream *xdr, void *ptr, size_t len) +{ + __be32 *p = xdr_inline_decode(xdr, len); + + if (unlikely(!p)) + return -EBADMSG; + xdr_decode_opaque_fixed(p, ptr, len); + return len; +} + +/** + * xdr_stream_decode_opaque_inline - Decode variable length opaque xdr data + * @xdr: pointer to xdr_stream + * @ptr: location to store pointer to opaque data + * @maxlen: maximum acceptable object size + * + * Note: the pointer stored in @ptr cannot be assumed valid after the XDR + * buffer has been destroyed, or even after calling xdr_inline_decode() + * on @xdr. It is therefore expected that the object it points to should + * be processed immediately. + * + * Return values: + * On success, returns size of object stored in *@ptr + * %-EBADMSG on XDR buffer overflow + * %-EMSGSIZE if the size of the object would exceed @maxlen + */ +static inline ssize_t +xdr_stream_decode_opaque_inline(struct xdr_stream *xdr, void **ptr, size_t maxlen) +{ + __be32 *p; + __u32 len; + + *ptr = NULL; + if (unlikely(xdr_stream_decode_u32(xdr, &len) < 0)) + return -EBADMSG; + if (len != 0) { + p = xdr_inline_decode(xdr, len); + if (unlikely(!p)) + return -EBADMSG; + if (unlikely(len > maxlen)) + return -EMSGSIZE; + *ptr = p; + } + return len; +} #endif /* __KERNEL__ */ #endif /* _SUNRPC_XDR_H_ */
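[Editor's note: the new xdr_stream_* inline helpers above compose as in the sketch below. The "foo" wire object (a u32 flag word followed by a variable-length opaque name) and the FOO_NAME_MAXLEN limit are hypothetical; only the xdr_stream_* calls and kstrndup() are existing kernel APIs.]

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sunrpc/xdr.h>

#define FOO_NAME_MAXLEN 128	/* assumed protocol limit */

static int foo_encode(struct xdr_stream *xdr, u32 flags,
		      const char *name, size_t namelen)
{
	if (xdr_stream_encode_u32(xdr, flags) < 0)
		return -EMSGSIZE;
	if (xdr_stream_encode_opaque(xdr, name, namelen) < 0)
		return -EMSGSIZE;
	return 0;
}

static int foo_decode(struct xdr_stream *xdr, u32 *flags, char **name)
{
	void *p;
	ssize_t len;

	if (xdr_stream_decode_u32(xdr, flags) < 0)
		return -EBADMSG;
	len = xdr_stream_decode_opaque_inline(xdr, &p, FOO_NAME_MAXLEN);
	if (len < 0)
		return len;
	/* p points into the receive buffer, so copy the name out before
	 * the buffer is reused (see the Note in the kernel-doc above). */
	*name = kstrndup(p, len, GFP_KERNEL);
	return *name ? 0 : -ENOMEM;
}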
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index a5da60b24d83..eab1c749e192 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -137,6 +137,9 @@ struct rpc_xprt_ops { void (*release_request)(struct rpc_task *task); void (*close)(struct rpc_xprt *xprt); void (*destroy)(struct rpc_xprt *xprt); + void (*set_connect_timeout)(struct rpc_xprt *xprt, + unsigned long connect_timeout, + unsigned long reconnect_timeout); void (*print_stats)(struct rpc_xprt *xprt, struct seq_file *seq); int (*enable_swap)(struct rpc_xprt *xprt); void (*disable_swap)(struct rpc_xprt *xprt); @@ -221,6 +224,7 @@ struct rpc_xprt { struct timer_list timer; unsigned long last_used, idle_timeout, + connect_timeout, max_reconnect_timeout; /* diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h index bef3fb0abb8f..c9959d7e3579 100644 --- a/include/linux/sunrpc/xprtsock.h +++ b/include/linux/sunrpc/xprtsock.h @@ -55,6 +55,8 @@ struct sock_xprt { size_t rcvsize, sndsize; + struct rpc_timeout tcp_timeout; + /* * Saved socket callback addresses */ @@ -81,6 +83,7 @@ struct sock_xprt { #define XPRT_SOCK_CONNECTING 1U #define XPRT_SOCK_DATA_READY (2) +#define XPRT_SOCK_UPD_TIMEOUT (3) #endif /* __KERNEL__ */ diff --git a/include/linux/swap.h b/include/linux/swap.h index 7f47b7098b1b..45e91dd6716d 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -27,6 +27,7 @@ struct bio; #define SWAP_FLAGS_VALID (SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \ SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_ONCE | \ SWAP_FLAG_DISCARD_PAGES) +#define SWAP_BATCH 64 static inline int current_is_kswapd(void) { @@ -176,6 +177,12 @@ enum { * protected by swap_info_struct.lock. 
*/ struct swap_cluster_info { + spinlock_t lock; /* + * Protect swap_cluster_info fields + * and swap_info_struct->swap_map + * elements correspond to the swap + * cluster + */ unsigned int data:24; unsigned int flags:8; }; @@ -337,8 +344,13 @@ int generic_swapfile_activate(struct swap_info_struct *, struct file *, sector_t *); /* linux/mm/swap_state.c */ -extern struct address_space swapper_spaces[]; -#define swap_address_space(entry) (&swapper_spaces[swp_type(entry)]) +/* One swap address space for each 64M swap space */ +#define SWAP_ADDRESS_SPACE_SHIFT 14 +#define SWAP_ADDRESS_SPACE_PAGES (1 << SWAP_ADDRESS_SPACE_SHIFT) +extern struct address_space *swapper_spaces[]; +#define swap_address_space(entry) \ + (&swapper_spaces[swp_type(entry)][swp_offset(entry) \ + >> SWAP_ADDRESS_SPACE_SHIFT]) extern unsigned long total_swapcache_pages(void); extern void show_swap_cache_info(void); extern int add_to_swap(struct page *, struct list_head *list); @@ -360,6 +372,7 @@ extern struct page *swapin_readahead(swp_entry_t, gfp_t, /* linux/mm/swapfile.c */ extern atomic_long_t nr_swap_pages; extern long total_swap_pages; +extern bool has_usable_swap(void); /* Swap 50% full? Release swapcache more aggressively.. */ static inline bool vm_swap_full(void) @@ -375,23 +388,31 @@ static inline long get_nr_swap_pages(void) extern void si_swapinfo(struct sysinfo *); extern swp_entry_t get_swap_page(void); extern swp_entry_t get_swap_page_of_type(int); +extern int get_swap_pages(int n, swp_entry_t swp_entries[]); extern int add_swap_count_continuation(swp_entry_t, gfp_t); extern void swap_shmem_alloc(swp_entry_t); extern int swap_duplicate(swp_entry_t); extern int swapcache_prepare(swp_entry_t); extern void swap_free(swp_entry_t); extern void swapcache_free(swp_entry_t); +extern void swapcache_free_entries(swp_entry_t *entries, int n); extern int free_swap_and_cache(swp_entry_t); extern int swap_type_of(dev_t, sector_t, struct block_device **); extern unsigned int count_swap_pages(int, int); extern sector_t map_swap_page(struct page *, struct block_device **); extern sector_t swapdev_block(int, pgoff_t); extern int page_swapcount(struct page *); +extern int __swp_swapcount(swp_entry_t entry); extern int swp_swapcount(swp_entry_t entry); extern struct swap_info_struct *page_swap_info(struct page *); extern bool reuse_swap_page(struct page *, int *); extern int try_to_free_swap(struct page *); struct backing_dev_info; +extern int init_swap_address_space(unsigned int type, unsigned long nr_pages); +extern void exit_swap_address_space(unsigned int type); + +extern int get_swap_slots(int n, swp_entry_t *slots); +extern void swapcache_free_batch(swp_entry_t *entries, int n); #else /* CONFIG_SWAP */ @@ -479,6 +500,11 @@ static inline int page_swapcount(struct page *page) return 0; } +static inline int __swp_swapcount(swp_entry_t entry) +{ + return 0; +} + static inline int swp_swapcount(swp_entry_t entry) { return 0; diff --git a/include/linux/swap_slots.h b/include/linux/swap_slots.h new file mode 100644 index 000000000000..6ef92d17633d --- /dev/null +++ b/include/linux/swap_slots.h @@ -0,0 +1,30 @@ +#ifndef _LINUX_SWAP_SLOTS_H +#define _LINUX_SWAP_SLOTS_H + +#include <linux/swap.h> +#include <linux/spinlock.h> +#include <linux/mutex.h> + +#define SWAP_SLOTS_CACHE_SIZE SWAP_BATCH +#define THRESHOLD_ACTIVATE_SWAP_SLOTS_CACHE (5*SWAP_SLOTS_CACHE_SIZE) +#define THRESHOLD_DEACTIVATE_SWAP_SLOTS_CACHE (2*SWAP_SLOTS_CACHE_SIZE) + +struct swap_slots_cache { + bool lock_initialized; + struct mutex alloc_lock; /* protects 
slots, nr, cur */ + swp_entry_t *slots; + int nr; + int cur; + spinlock_t free_lock; /* protects slots_ret, n_ret */ + swp_entry_t *slots_ret; + int n_ret; +}; + +void disable_swap_slots_cache_lock(void); +void reenable_swap_slots_cache_unlock(void); +int enable_swap_slots_cache(void); +int free_swap_slot(swp_entry_t entry); + +extern bool swap_slot_cache_enabled; + +#endif /* _LINUX_SWAP_SLOTS_H */ diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 91a740f6b884..980c3c9b06f8 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -48,6 +48,7 @@ struct stat; struct stat64; struct statfs; struct statfs64; +struct statx; struct __sysctl_args; struct sysinfo; struct timespec; @@ -902,5 +903,7 @@ asmlinkage long sys_pkey_mprotect(unsigned long start, size_t len, unsigned long prot, int pkey); asmlinkage long sys_pkey_alloc(unsigned long flags, unsigned long init_val); asmlinkage long sys_pkey_free(int pkey); +asmlinkage long sys_statx(int dfd, const char __user *path, unsigned flags, + unsigned mask, struct statx __user *buffer); #endif diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index adf4e51cf597..b7e82049fec7 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -143,6 +143,7 @@ struct ctl_table_header struct ctl_table_set *set; struct ctl_dir *parent; struct ctl_node *node; + struct list_head inodes; /* head for proc_inode->sysctl_inodes */ }; struct ctl_dir { diff --git a/include/linux/taskstats_kern.h b/include/linux/taskstats_kern.h index 58de6edf751f..e2a5daf8d14f 100644 --- a/include/linux/taskstats_kern.h +++ b/include/linux/taskstats_kern.h @@ -8,7 +8,7 @@ #define _LINUX_TASKSTATS_KERN_H #include <linux/taskstats.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/slab.h> #ifdef CONFIG_TASKSTATS diff --git a/include/linux/tcp.h b/include/linux/tcp.h index c93f4b3a59cb..cfc2d9506ce8 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -212,6 +212,8 @@ struct tcp_sock { /* Information of the most recently (s)acked skb */ struct tcp_rack { struct skb_mstamp mstamp; /* (Re)sent time of the skb */ + u32 rtt_us; /* Associated RTT */ + u32 end_seq; /* Ending TCP sequence of the skb */ u8 advanced; /* mstamp advanced since last lost marking */ u8 reord; /* reordering detected */ } rack; @@ -220,15 +222,15 @@ struct tcp_sock { u32 chrono_stat[3]; /* Time in jiffies for chrono_stat stats */ u8 chrono_type:2, /* current chronograph type */ rate_app_limited:1, /* rate_{delivered,interval_us} limited? */ - unused:5; + fastopen_connect:1, /* FASTOPEN_CONNECT sockopt */ + unused:4; u8 nonagle : 4,/* Disable Nagle algorithm? */ thin_lto : 1,/* Use linear timeouts for thin streams */ - thin_dupack : 1,/* Fast retransmit on first dupack */ + unused1 : 1, repair : 1, frto : 1;/* F-RTO (RFC5682) activated in CA_Loss */ u8 repair_queue; - u8 do_early_retrans:1,/* Enable RFC5827 early-retransmit */ - syn_data:1, /* SYN includes data */ + u8 syn_data:1, /* SYN includes data */ syn_fastopen:1, /* SYN includes Fast Open option */ syn_fastopen_exp:1,/* SYN includes Fast Open exp. 
option */ syn_data_acked:1,/* data in SYN is acked by SYN-ACK */ @@ -310,7 +312,6 @@ struct tcp_sock { */ int lost_cnt_hint; - u32 retransmit_high; /* L-bits may be on up to this seqno */ u32 prior_ssthresh; /* ssthresh saved at recovery start */ u32 high_seq; /* snd_nxt at onset of congestion */ @@ -444,4 +445,13 @@ static inline void tcp_saved_syn_free(struct tcp_sock *tp) struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk); +static inline u16 tcp_mss_clamp(const struct tcp_sock *tp, u16 mss) +{ + /* We use READ_ONCE() here because socket might not be locked. + * This happens for listeners. + */ + u16 user_mss = READ_ONCE(tp->rx_opt.user_mss); + + return (user_mss && user_mss < mss) ? user_mss : mss; +} #endif /* _LINUX_TCP_H */ diff --git a/include/linux/thermal.h b/include/linux/thermal.h index e275e98bdceb..dab11f97e1c6 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -194,7 +194,7 @@ struct thermal_attr { * @governor: pointer to the governor for this thermal zone * @governor_data: private pointer for governor data * @thermal_instances: list of &struct thermal_instance of this thermal zone - * @idr: &struct idr to generate unique id for this zone's cooling + * @ida: &struct ida to generate unique id for this zone's cooling * devices * @lock: lock to protect thermal_instances list * @node: node in thermal_tz_list (in thermal_core.c) @@ -227,7 +227,7 @@ struct thermal_zone_device { struct thermal_governor *governor; void *governor_data; struct list_head thermal_instances; - struct idr idr; + struct ida ida; struct mutex lock; struct list_head node; struct delayed_work poll_queue; diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h index d2e804e15c3e..b598cbc7b576 100644 --- a/include/linux/timekeeping.h +++ b/include/linux/timekeeping.h @@ -8,6 +8,10 @@ void timekeeping_init(void); extern int timekeeping_suspended; +/* Architecture timer tick functions: */ +extern void update_process_times(int user); +extern void xtime_update(unsigned long ticks); + /* * Get and set timeofday */ diff --git a/include/linux/timer.h b/include/linux/timer.h index 5a209b84fd9e..e6789b8757d5 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -61,6 +61,8 @@ struct timer_list { #define TIMER_ARRAYSHIFT 22 #define TIMER_ARRAYMASK 0xFFC00000 +#define TIMER_TRACE_FLAGMASK (TIMER_MIGRATING | TIMER_DEFERRABLE | TIMER_PINNED | TIMER_IRQSAFE) + #define __TIMER_INITIALIZER(_function, _expires, _data, _flags) { \ .entry = { .next = TIMER_ENTRY_STATIC }, \ .function = (_function), \ @@ -210,7 +212,7 @@ struct hrtimer; extern enum hrtimer_restart it_real_fn(struct hrtimer *); #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) -#include <linux/sysctl.h> +struct ctl_table; extern unsigned int sysctl_timer_migration; int timer_migration_handler(struct ctl_table *table, int write, diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index be007610ceb0..0af63c4381b9 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -23,6 +23,10 @@ const char *trace_print_symbols_seq(struct trace_seq *p, unsigned long val, const struct trace_print_flags *symbol_array); #if BITS_PER_LONG == 32 +const char *trace_print_flags_seq_u64(struct trace_seq *p, const char *delim, + unsigned long long flags, + const struct trace_print_flags_u64 *flag_array); + const char *trace_print_symbols_seq_u64(struct trace_seq *p, unsigned long long val, const struct trace_print_flags_u64 @@ -33,7 +37,8 @@ const char 
*trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr, unsigned int bitmask_size); const char *trace_print_hex_seq(struct trace_seq *p, - const unsigned char *buf, int len); + const unsigned char *buf, int len, + bool concatenate); const char *trace_print_array_seq(struct trace_seq *p, const void *buf, int count, diff --git a/include/linux/tty.h b/include/linux/tty.h index 40144f382516..1017e904c0a3 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -217,12 +217,18 @@ struct tty_port_operations { /* Called on the final put of a port */ void (*destruct)(struct tty_port *port); }; - + +struct tty_port_client_operations { + int (*receive_buf)(struct tty_port *port, const unsigned char *, const unsigned char *, size_t); + void (*write_wakeup)(struct tty_port *port); +}; + struct tty_port { struct tty_bufhead buf; /* Locked internally */ struct tty_struct *tty; /* Back pointer */ struct tty_struct *itty; /* internal back ptr */ const struct tty_port_operations *ops; /* Port operations */ + const struct tty_port_client_operations *client_ops; /* Port client operations */ spinlock_t lock; /* Lock protecting tty field */ int blocked_open; /* Waiting to open */ int count; /* Usage count */ @@ -241,6 +247,7 @@ struct tty_port { based drain is needed else set to size of fifo */ struct kref kref; /* Ref counter */ + void *client_data; }; /* tty_port::iflags bits -- use atomic bit ops */ @@ -528,6 +535,7 @@ extern int tty_alloc_file(struct file *file); extern void tty_add_file(struct tty_struct *tty, struct file *file); extern void tty_free_file(struct file *file); extern struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx); +extern void tty_release_struct(struct tty_struct *tty, int idx); extern int tty_release(struct inode *inode, struct file *filp); extern void tty_init_termios(struct tty_struct *tty); extern int tty_standard_install(struct tty_driver *driver, @@ -656,7 +664,7 @@ extern int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty); extern void tty_ldisc_release(struct tty_struct *tty); extern void tty_ldisc_init(struct tty_struct *tty); extern void tty_ldisc_deinit(struct tty_struct *tty); -extern int tty_ldisc_receive_buf(struct tty_ldisc *ld, unsigned char *p, +extern int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p, char *f, int count); /* n_tty.c */ diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h index 5dd75fa47dd8..c5fdfcf99828 100644 --- a/include/linux/usb/chipidea.h +++ b/include/linux/usb/chipidea.h @@ -12,16 +12,18 @@ struct ci_hdrc; /** * struct ci_hdrc_cable - structure for external connector cable state tracking - * @state: current state of the line + * @connected: true if cable is connected, false otherwise * @changed: set to true when an extcon event happens + * @enabled: set to true if we've enabled the vbus or id interrupt * @edev: device which generates events * @ci: driver state of the chipidea device * @nb: hold event notification callback * @conn: used for notification registration */ struct ci_hdrc_cable { - bool state; + bool connected; bool changed; + bool enabled; struct extcon_dev *edev; struct ci_hdrc *ci; struct notifier_block nb; @@ -55,10 +57,11 @@ struct ci_hdrc_platform_data { #define CI_HDRC_OVERRIDE_AHB_BURST BIT(9) #define CI_HDRC_OVERRIDE_TX_BURST BIT(10) #define CI_HDRC_OVERRIDE_RX_BURST BIT(11) +#define CI_HDRC_OVERRIDE_PHY_CONTROL BIT(12) /* Glue layer manages phy */ enum usb_dr_mode dr_mode; #define CI_HDRC_CONTROLLER_RESET_EVENT 0 #define 
CI_HDRC_CONTROLLER_STOPPED_EVENT 1 - void (*notify_event) (struct ci_hdrc *ci, unsigned event); + int (*notify_event) (struct ci_hdrc *ci, unsigned event); struct regulator *reg_vbus; struct usb_otg_caps ci_otg_caps; bool tpl_support; diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h index eb209d4523f5..32354b4b4b2b 100644 --- a/include/linux/user_namespace.h +++ b/include/linux/user_namespace.h @@ -5,6 +5,9 @@ #include <linux/nsproxy.h> #include <linux/ns_common.h> #include <linux/sched.h> +#include <linux/workqueue.h> +#include <linux/rwsem.h> +#include <linux/sysctl.h> #include <linux/err.h> #define UID_GID_MAP_MAX_EXTENTS 5 @@ -32,6 +35,10 @@ enum ucount_type { UCOUNT_NET_NAMESPACES, UCOUNT_MNT_NAMESPACES, UCOUNT_CGROUP_NAMESPACES, +#ifdef CONFIG_INOTIFY_USER + UCOUNT_INOTIFY_INSTANCES, + UCOUNT_INOTIFY_WATCHES, +#endif UCOUNT_COUNTS, }; @@ -65,7 +72,7 @@ struct ucounts { struct hlist_node node; struct user_namespace *ns; kuid_t uid; - atomic_t count; + int count; atomic_t ucount[UCOUNT_COUNTS]; }; diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h index 11b92b047a1e..48a3483dccb1 100644 --- a/include/linux/userfaultfd_k.h +++ b/include/linux/userfaultfd_k.h @@ -52,6 +52,25 @@ static inline bool userfaultfd_armed(struct vm_area_struct *vma) return vma->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP); } +extern int dup_userfaultfd(struct vm_area_struct *, struct list_head *); +extern void dup_userfaultfd_complete(struct list_head *); + +extern void mremap_userfaultfd_prep(struct vm_area_struct *, + struct vm_userfaultfd_ctx *); +extern void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *, + unsigned long from, unsigned long to, + unsigned long len); + +extern bool userfaultfd_remove(struct vm_area_struct *vma, + unsigned long start, + unsigned long end); + +extern int userfaultfd_unmap_prep(struct vm_area_struct *vma, + unsigned long start, unsigned long end, + struct list_head *uf); +extern void userfaultfd_unmap_complete(struct mm_struct *mm, + struct list_head *uf); + #else /* CONFIG_USERFAULTFD */ /* mm helpers */ @@ -76,6 +95,47 @@ static inline bool userfaultfd_armed(struct vm_area_struct *vma) return false; } +static inline int dup_userfaultfd(struct vm_area_struct *vma, + struct list_head *l) +{ + return 0; +} + +static inline void dup_userfaultfd_complete(struct list_head *l) +{ +} + +static inline void mremap_userfaultfd_prep(struct vm_area_struct *vma, + struct vm_userfaultfd_ctx *ctx) +{ +} + +static inline void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *ctx, + unsigned long from, + unsigned long to, + unsigned long len) +{ +} + +static inline bool userfaultfd_remove(struct vm_area_struct *vma, + unsigned long start, + unsigned long end) +{ + return true; +} + +static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma, + unsigned long start, unsigned long end, + struct list_head *uf) +{ + return 0; +} + +static inline void userfaultfd_unmap_complete(struct mm_struct *mm, + struct list_head *uf) +{ +} + #endif /* CONFIG_USERFAULTFD */ #endif /* _LINUX_USERFAULTFD_K_H */ diff --git a/include/linux/uuid.h b/include/linux/uuid.h index 2d095fc60204..4dff73a89758 100644 --- a/include/linux/uuid.h +++ b/include/linux/uuid.h @@ -19,6 +19,30 @@ #include <uapi/linux/uuid.h> /* + * V1 (time-based) UUID definition [RFC 4122]. 
+ * - the timestamp is a 60-bit value, split 32/16/12, and goes in 100ns + * increments since midnight 15th October 1582 + * - add UUID_TO_UNIX_TIME to convert unix time in 100ns units to UUID + * time + * - the clock sequence is a 14-bit counter to avoid duplicate times + */ +struct uuid_v1 { + __be32 time_low; /* low part of timestamp */ + __be16 time_mid; /* mid part of timestamp */ + __be16 time_hi_and_version; /* high part of timestamp and version */ +#define UUID_TO_UNIX_TIME 0x01b21dd213814000ULL +#define UUID_TIMEHI_MASK 0x0fff +#define UUID_VERSION_TIME 0x1000 /* time-based UUID */ +#define UUID_VERSION_NAME 0x3000 /* name-based UUID */ +#define UUID_VERSION_RANDOM 0x4000 /* (pseudo-)random generated UUID */ + u8 clock_seq_hi_and_reserved; /* clock seq hi and variant */ +#define UUID_CLOCKHI_MASK 0x3f +#define UUID_VARIANT_STD 0x80 + u8 clock_seq_low; /* clock seq low */ + u8 node[6]; /* spatially unique node ID (MAC addr) */ +}; + +/* * The length of a UUID string ("aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee") * not including trailing NUL. */ diff --git a/include/linux/virtio.h b/include/linux/virtio.h index d5eb5479a425..04b0d3f95043 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -132,12 +132,16 @@ static inline struct virtio_device *dev_to_virtio(struct device *_dev) return container_of(_dev, struct virtio_device, dev); } +void virtio_add_status(struct virtio_device *dev, unsigned int status); int register_virtio_device(struct virtio_device *dev); void unregister_virtio_device(struct virtio_device *dev); void virtio_break_device(struct virtio_device *dev); void virtio_config_changed(struct virtio_device *dev); +void virtio_config_disable(struct virtio_device *dev); +void virtio_config_enable(struct virtio_device *dev); +int virtio_finalize_features(struct virtio_device *dev); #ifdef CONFIG_PM_SLEEP int virtio_device_freeze(struct virtio_device *dev); int virtio_device_restore(struct virtio_device *dev); diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index 26c155bb639b..8355bab175e1 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -7,6 +7,8 @@ #include <linux/virtio_byteorder.h> #include <uapi/linux/virtio_config.h> +struct irq_affinity; + /** * virtio_config_ops - operations for configuring a virtio device * @get: read the value of a configuration field @@ -56,6 +58,7 @@ * This returns a pointer to the bus name a la pci_name from which * the caller can then copy. * @set_vq_affinity: set the affinity for a virtqueue. + * @get_vq_affinity: get the affinity for a virtqueue (optional). */ typedef void vq_callback_t(struct virtqueue *); struct virtio_config_ops { @@ -68,14 +71,15 @@ struct virtio_config_ops { void (*set_status)(struct virtio_device *vdev, u8 status); void (*reset)(struct virtio_device *vdev); int (*find_vqs)(struct virtio_device *, unsigned nvqs, - struct virtqueue *vqs[], - vq_callback_t *callbacks[], - const char * const names[]); + struct virtqueue *vqs[], vq_callback_t *callbacks[], + const char * const names[], struct irq_affinity *desc); void (*del_vqs)(struct virtio_device *); u64 (*get_features)(struct virtio_device *vdev); int (*finalize_features)(struct virtio_device *vdev); const char *(*bus_name)(struct virtio_device *vdev); int (*set_vq_affinity)(struct virtqueue *vq, int cpu); + const struct cpumask *(*get_vq_affinity)(struct virtio_device *vdev, + int index); }; /* If driver didn't advertise the feature, it will never appear. 
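An aside on the virtio_config_ops change just above: find_vqs() now threads an optional struct irq_affinity descriptor through to the transport. A minimal sketch of a caller that requests two virtqueues and passes NULL to skip affinity spreading (the helper and queue names are illustrative, not part of this patch):

static int example_setup_vqs(struct virtio_device *vdev,
			     struct virtqueue *vqs[2],
			     vq_callback_t *rx_cb, vq_callback_t *tx_cb)
{
	vq_callback_t *callbacks[] = { rx_cb, tx_cb };
	static const char * const names[] = { "rx", "tx" };

	/* NULL irq_affinity descriptor: no special MSI-X vector spreading */
	return vdev->config->find_vqs(vdev, 2, vqs, callbacks, names, NULL);
}

Callers that never cared about affinity, such as virtio_find_single_vq() in the next hunk, simply grow a trailing NULL argument.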
*/ @@ -169,7 +173,7 @@ struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev, vq_callback_t *callbacks[] = { c }; const char *names[] = { n }; struct virtqueue *vq; - int err = vdev->config->find_vqs(vdev, 1, &vq, callbacks, names); + int err = vdev->config->find_vqs(vdev, 1, &vq, callbacks, names, NULL); if (err < 0) return ERR_PTR(err); return vq; diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index 4d6ec58a8d45..a80b7b59cf33 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -56,6 +56,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, COMPACTISOLATED, COMPACTSTALL, COMPACTFAIL, COMPACTSUCCESS, KCOMPACTD_WAKE, + KCOMPACTD_MIGRATE_SCANNED, KCOMPACTD_FREE_SCANNED, #endif #ifdef CONFIG_HUGETLB_PAGE HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL, @@ -78,6 +79,9 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, THP_SPLIT_PAGE_FAILED, THP_DEFERRED_SPLIT_PAGE, THP_SPLIT_PMD, +#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD + THP_SPLIT_PUD, +#endif THP_ZERO_PAGE_ALLOC, THP_ZERO_PAGE_ALLOC_FAILED, #endif diff --git a/include/linux/vmacache.h b/include/linux/vmacache.h index c3fa0fd43949..1081db987391 100644 --- a/include/linux/vmacache.h +++ b/include/linux/vmacache.h @@ -12,7 +12,7 @@ static inline void vmacache_flush(struct task_struct *tsk) { - memset(tsk->vmacache, 0, sizeof(tsk->vmacache)); + memset(tsk->vmacache.vmas, 0, sizeof(tsk->vmacache.vmas)); } extern void vmacache_flush_all(struct mm_struct *mm); diff --git a/include/linux/vme.h b/include/linux/vme.h index 8c589176c2f8..ec5e8bf6118e 100644 --- a/include/linux/vme.h +++ b/include/linux/vme.h @@ -108,7 +108,6 @@ struct vme_dev { }; struct vme_driver { - struct list_head node; const char *name; int (*match)(struct vme_dev *); int (*probe)(struct vme_dev *); diff --git a/include/linux/vmw_vmci_defs.h b/include/linux/vmw_vmci_defs.h index 1bd31a38c51e..b724ef7005de 100644 --- a/include/linux/vmw_vmci_defs.h +++ b/include/linux/vmw_vmci_defs.h @@ -54,13 +54,6 @@ #define VMCI_IMR_DATAGRAM 0x1 #define VMCI_IMR_NOTIFICATION 0x2 -/* Interrupt type. */ -enum { - VMCI_INTR_TYPE_INTX = 0, - VMCI_INTR_TYPE_MSI = 1, - VMCI_INTR_TYPE_MSIX = 2, -}; - /* Maximum MSI/MSI-X interrupt vectors in the device. 
*/ #define VMCI_MAX_INTRS 2 diff --git a/include/linux/wait.h b/include/linux/wait.h index 1421132e9086..db076ca7f11d 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -6,6 +6,7 @@ #include <linux/list.h> #include <linux/stddef.h> #include <linux/spinlock.h> + #include <asm/current.h> #include <uapi/linux/wait.h> @@ -619,30 +620,19 @@ do { \ __ret; \ }) +extern int do_wait_intr(wait_queue_head_t *, wait_queue_t *); +extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_t *); -#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \ +#define __wait_event_interruptible_locked(wq, condition, exclusive, fn) \ ({ \ - int __ret = 0; \ + int __ret; \ DEFINE_WAIT(__wait); \ if (exclusive) \ __wait.flags |= WQ_FLAG_EXCLUSIVE; \ do { \ - if (likely(list_empty(&__wait.task_list))) \ - __add_wait_queue_tail(&(wq), &__wait); \ - set_current_state(TASK_INTERRUPTIBLE); \ - if (signal_pending(current)) { \ - __ret = -ERESTARTSYS; \ + __ret = fn(&(wq), &__wait); \ + if (__ret) \ break; \ - } \ - if (irq) \ - spin_unlock_irq(&(wq).lock); \ - else \ - spin_unlock(&(wq).lock); \ - schedule(); \ - if (irq) \ - spin_lock_irq(&(wq).lock); \ - else \ - spin_lock(&(wq).lock); \ } while (!(condition)); \ __remove_wait_queue(&(wq), &__wait); \ __set_current_state(TASK_RUNNING); \ @@ -675,7 +665,7 @@ do { \ */ #define wait_event_interruptible_locked(wq, condition) \ ((condition) \ - ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0)) + ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr)) /** * wait_event_interruptible_locked_irq - sleep until a condition gets true @@ -702,7 +692,7 @@ do { \ */ #define wait_event_interruptible_locked_irq(wq, condition) \ ((condition) \ - ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1)) + ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr_irq)) /** * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true @@ -733,7 +723,7 @@ do { \ */ #define wait_event_interruptible_exclusive_locked(wq, condition) \ ((condition) \ - ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0)) + ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr)) /** * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true @@ -764,7 +754,7 @@ do { \ */ #define wait_event_interruptible_exclusive_locked_irq(wq, condition) \ ((condition) \ - ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1)) + ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr_irq)) #define __wait_event_killable(wq, condition) \ diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h index 35a4d8185b51..a786e5e8973b 100644 --- a/include/linux/watchdog.h +++ b/include/linux/watchdog.h @@ -117,6 +117,7 @@ struct watchdog_device { #define WDOG_NO_WAY_OUT 1 /* Is 'nowayout' feature set ? 
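Returning to the wait.h hunk above: the open-coded sleep loop moves out of __wait_event_interruptible_locked() into do_wait_intr() and do_wait_intr_irq(), selected per call site. Reconstructed from the deleted macro body, the non-irq helper plausibly looks as follows (a sketch, not necessarily the exact kernel implementation):

int do_wait_intr(wait_queue_head_t *wq, wait_queue_t *wait)
{
	if (likely(list_empty(&wait->task_list)))
		__add_wait_queue_tail(wq, wait);
	set_current_state(TASK_INTERRUPTIBLE);

	if (signal_pending(current))
		return -ERESTARTSYS;

	/* wq->lock is held by the caller across the whole wait loop */
	spin_unlock(&wq->lock);
	schedule();
	spin_lock(&wq->lock);
	return 0;
}

The irq variant would differ only in using spin_unlock_irq()/spin_lock_irq(), mirroring the old irq flag the macro used to take.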
*/ #define WDOG_STOP_ON_REBOOT 2 /* Should be stopped on reboot */ #define WDOG_HW_RUNNING 3 /* True if HW watchdog running */ +#define WDOG_STOP_ON_UNREGISTER 4 /* Should be stopped on unregister */ struct list_head deferred; }; @@ -151,6 +152,12 @@ static inline void watchdog_stop_on_reboot(struct watchdog_device *wdd) set_bit(WDOG_STOP_ON_REBOOT, &wdd->status); } +/* Use the following function to stop the watchdog when unregistering it */ +static inline void watchdog_stop_on_unregister(struct watchdog_device *wdd) +{ + set_bit(WDOG_STOP_ON_UNREGISTER, &wdd->status); +} + /* Use the following function to check if a timeout value is invalid */ static inline bool watchdog_timeout_invalid(struct watchdog_device *wdd, unsigned int t) { diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index a26cc437293c..bde063cefd04 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -106,9 +106,9 @@ struct work_struct { #endif }; -#define WORK_DATA_INIT() ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL) +#define WORK_DATA_INIT() ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL) #define WORK_DATA_STATIC_INIT() \ - ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC) + ATOMIC_LONG_INIT((unsigned long)(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC)) struct delayed_work { struct work_struct work; diff --git a/include/linux/writeback.h b/include/linux/writeback.h index 5527d910ba3d..a3c0cbd7c888 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -46,7 +46,7 @@ enum writeback_sync_modes { */ enum wb_reason { WB_REASON_BACKGROUND, - WB_REASON_TRY_TO_FREE_PAGES, + WB_REASON_VMSCAN, WB_REASON_SYNC, WB_REASON_PERIODIC, WB_REASON_LAPTOP_TIMER, diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h index e1006b391cdc..bee1404391dd 100644 --- a/include/media/v4l2-ctrls.h +++ b/include/media/v4l2-ctrls.h @@ -174,10 +174,10 @@ typedef void (*v4l2_ctrl_notify_fnc)(struct v4l2_ctrl *ctrl, void *priv); * not freed when the control is deleted. Should this be needed * then a new internal bitfield can be added to tell the framework * to free this pointer. - * @p_cur: The control's current value represented via an union with + * @p_cur: The control's current value represented via a union which provides a standard way of accessing control types through a pointer. - * @p_new: The control's new value represented via an union with provides + * @p_new: The control's new value represented via a union which provides * a standard way of accessing control types * through a pointer. 
*/ diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h index 574ff2ae94be..6cd94e5ee113 100644 --- a/include/media/v4l2-ioctl.h +++ b/include/media/v4l2-ioctl.h @@ -12,6 +12,7 @@ #include <linux/poll.h> #include <linux/fs.h> #include <linux/mutex.h> +#include <linux/sched/signal.h> #include <linux/compiler.h> /* need __user */ #include <linux/videodev2.h> diff --git a/include/media/vsp1.h b/include/media/vsp1.h index 458b400373d4..38aac554dbba 100644 --- a/include/media/vsp1.h +++ b/include/media/vsp1.h @@ -20,8 +20,17 @@ struct device; int vsp1_du_init(struct device *dev); -int vsp1_du_setup_lif(struct device *dev, unsigned int width, - unsigned int height); +/** + * struct vsp1_du_lif_config - VSP LIF configuration + * @width: output frame width + * @height: output frame height + */ +struct vsp1_du_lif_config { + unsigned int width; + unsigned int height; +}; + +int vsp1_du_setup_lif(struct device *dev, const struct vsp1_du_lif_config *cfg); struct vsp1_du_atomic_config { u32 pixelformat; diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h index 27dfe85772b1..b8eb51a661e5 100644 --- a/include/net/9p/9p.h +++ b/include/net/9p/9p.h @@ -402,10 +402,10 @@ struct p9_wstat { u32 atime; u32 mtime; u64 length; - char *name; - char *uid; - char *gid; - char *muid; + const char *name; + const char *uid; + const char *gid; + const char *muid; char *extension; /* 9p2000.u extensions */ kuid_t n_uid; /* 9p2000.u extensions */ kgid_t n_gid; /* 9p2000.u extensions */ diff --git a/include/net/9p/client.h b/include/net/9p/client.h index c6b97e58cf84..b582339ccef5 100644 --- a/include/net/9p/client.h +++ b/include/net/9p/client.h @@ -223,16 +223,16 @@ void p9_client_destroy(struct p9_client *clnt); void p9_client_disconnect(struct p9_client *clnt); void p9_client_begin_disconnect(struct p9_client *clnt); struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid, - char *uname, kuid_t n_uname, char *aname); + const char *uname, kuid_t n_uname, const char *aname); struct p9_fid *p9_client_walk(struct p9_fid *oldfid, uint16_t nwname, - char **wnames, int clone); + const unsigned char * const *wnames, int clone); int p9_client_open(struct p9_fid *fid, int mode); -int p9_client_fcreate(struct p9_fid *fid, char *name, u32 perm, int mode, +int p9_client_fcreate(struct p9_fid *fid, const char *name, u32 perm, int mode, char *extension); -int p9_client_link(struct p9_fid *fid, struct p9_fid *oldfid, char *newname); -int p9_client_symlink(struct p9_fid *fid, char *name, char *symname, kgid_t gid, - struct p9_qid *qid); -int p9_client_create_dotl(struct p9_fid *ofid, char *name, u32 flags, u32 mode, +int p9_client_link(struct p9_fid *fid, struct p9_fid *oldfid, const char *newname); +int p9_client_symlink(struct p9_fid *fid, const char *name, const char *symname, + kgid_t gid, struct p9_qid *qid); +int p9_client_create_dotl(struct p9_fid *ofid, const char *name, u32 flags, u32 mode, kgid_t gid, struct p9_qid *qid); int p9_client_clunk(struct p9_fid *fid); int p9_client_fsync(struct p9_fid *fid, int datasync); @@ -250,9 +250,9 @@ int p9_client_setattr(struct p9_fid *fid, struct p9_iattr_dotl *attr); struct p9_stat_dotl *p9_client_getattr_dotl(struct p9_fid *fid, u64 request_mask); -int p9_client_mknod_dotl(struct p9_fid *oldfid, char *name, int mode, +int p9_client_mknod_dotl(struct p9_fid *oldfid, const char *name, int mode, dev_t rdev, kgid_t gid, struct p9_qid *); -int p9_client_mkdir_dotl(struct p9_fid *fid, char *name, int mode, +int p9_client_mkdir_dotl(struct p9_fid *fid, 
const char *name, int mode, kgid_t gid, struct p9_qid *); int p9_client_lock_dotl(struct p9_fid *fid, struct p9_flock *flock, u8 *status); int p9_client_getlock_dotl(struct p9_fid *fid, struct p9_getlock *fl); diff --git a/include/net/act_api.h b/include/net/act_api.h index 1d716449209e..cfa2ae33da9a 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -41,6 +41,7 @@ struct tc_action { struct rcu_head tcfa_rcu; struct gnet_stats_basic_cpu __percpu *cpu_bstats; struct gnet_stats_queue __percpu *cpu_qstats; + struct tc_cookie *act_cookie; }; #define tcf_head common.tcfa_head #define tcf_index common.tcfa_index diff --git a/include/net/addrconf.h b/include/net/addrconf.h index 8f998afc1384..17c6fd84e287 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -88,9 +88,7 @@ int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr, u32 banned_flags); int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr, u32 banned_flags); -int ipv4_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2, - bool match_wildcard); -int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2, +int inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2, bool match_wildcard); void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr); void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr); diff --git a/include/net/arp.h b/include/net/arp.h index 5e0f891d476c..65619a2de6f4 100644 --- a/include/net/arp.h +++ b/include/net/arp.h @@ -35,6 +35,22 @@ static inline struct neighbour *__ipv4_neigh_lookup(struct net_device *dev, u32 return n; } +static inline void __ipv4_confirm_neigh(struct net_device *dev, u32 key) +{ + struct neighbour *n; + + rcu_read_lock_bh(); + n = __ipv4_neigh_lookup_noref(dev, key); + if (n) { + unsigned long now = jiffies; + + /* avoid dirtying neighbour */ + if (n->confirmed != now) + n->confirmed = now; + } + rcu_read_unlock_bh(); +} + void arp_init(void); int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg); void arp_send(int type, int ptype, __be32 dest_ip, diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 90708f68cc02..95ccc1eef558 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -26,6 +26,8 @@ #define __HCI_CORE_H #include <linux/leds.h> +#include <linux/rculist.h> + #include <net/bluetooth/hci.h> #include <net/bluetooth/hci_sock.h> diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h index d73b849e29a6..c0452de83086 100644 --- a/include/net/busy_poll.h +++ b/include/net/busy_poll.h @@ -25,6 +25,8 @@ #define _LINUX_NET_BUSY_POLL_H #include <linux/netdevice.h> +#include <linux/sched/clock.h> +#include <linux/sched/signal.h> #include <net/ip.h> #ifdef CONFIG_NET_RX_BUSY_POLL @@ -33,10 +35,6 @@ struct napi_struct; extern unsigned int sysctl_net_busy_read __read_mostly; extern unsigned int sysctl_net_busy_poll __read_mostly; -/* return values from ndo_ll_poll */ -#define LL_FLUSH_FAILED -1 -#define LL_FLUSH_BUSY -2 - static inline bool net_busy_loop_on(void) { return sysctl_net_busy_poll; diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 814be4b4200c..ead1aa6d003e 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -5,7 +5,7 @@ * * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net> * Copyright 2013-2014 Intel Mobile Communications GmbH - * Copyright 2015-2016 Intel Deutschland GmbH + * Copyright 2015-2017 Intel Deutschland GmbH * * This 
program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -311,6 +311,34 @@ struct ieee80211_supported_band { struct ieee80211_sta_vht_cap vht_cap; }; +/** + * wiphy_read_of_freq_limits - read frequency limits from device tree + * + * @wiphy: the wireless device to get extra limits for + * + * Some devices may have extra limitations specified in DT. This may be useful + * for chipsets that normally support more bands but are limited due to board + * design (e.g. by antennas or external power amplifier). + * + * This function reads info from DT and uses it to *modify* channels (disable + * unavailable ones). It's usually a *bad* idea to use it in drivers with + * shared channel data as DT limitations are device specific. You should make + * sure to call it only if channels in wiphy are copied and can be modified + * without affecting other devices. + * + * As this function accesses the device node, it has to be called after set_wiphy_dev. + * It also modifies channels so they have to be set first. + * If using this helper, call it before wiphy_register(). + */ +#ifdef CONFIG_OF +void wiphy_read_of_freq_limits(struct wiphy *wiphy); +#else /* CONFIG_OF */ +static inline void wiphy_read_of_freq_limits(struct wiphy *wiphy) +{ +} +#endif /* !CONFIG_OF */ + + /* * Wireless hardware/device configuration structures and methods */ @@ -720,6 +748,10 @@ struct cfg80211_bitrate_mask { * @pbss: If set, start as a PCP instead of AP. Relevant for DMG * networks. * @beacon_rate: bitrate to be used for beacons + * @ht_cap: HT capabilities (or %NULL if HT isn't enabled) + * @vht_cap: VHT capabilities (or %NULL if VHT isn't enabled) + * @ht_required: stations must support HT + * @vht_required: stations must support VHT */ struct cfg80211_ap_settings { struct cfg80211_chan_def chandef; @@ -740,6 +772,10 @@ struct cfg80211_ap_settings { const struct cfg80211_acl_data *acl; bool pbss; struct cfg80211_bitrate_mask beacon_rate; + + const struct ieee80211_ht_cap *ht_cap; + const struct ieee80211_vht_cap *vht_cap; + bool ht_required, vht_required; }; /** @@ -1592,6 +1628,17 @@ struct cfg80211_sched_scan_plan { }; /** + * struct cfg80211_bss_select_adjust - BSS selection with RSSI adjustment. + * + * @band: band of BSS which should match for RSSI level adjustment. + * @delta: value of RSSI level adjustment. + */ +struct cfg80211_bss_select_adjust { + enum nl80211_band band; + s8 delta; +}; + +/** * struct cfg80211_sched_scan_request - scheduled scan request description * * @ssids: SSIDs to scan for (passed in the probe_reqs in active scans) @@ -1626,6 +1673,16 @@ struct cfg80211_sched_scan_plan { * cycle. The driver may ignore this parameter and start * immediately (or at any other time), if this feature is not * supported. + * @relative_rssi_set: Indicates whether @relative_rssi is set or not. + * @relative_rssi: Relative RSSI threshold in dB to restrict scan result + * reporting in connected state to cases where a matching BSS is determined + * to have better or slightly worse RSSI than the current connected BSS. + * The relative RSSI threshold values are ignored in disconnected state. + * @rssi_adjust: delta dB of RSSI preference to be given to the BSSs that belong + * to the specified band while deciding whether a better BSS is reported + * using @relative_rssi. If delta is a negative number, the BSSs that + * belong to the specified band will be penalized by delta dB in relative + * comparisons. 
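To make the @relative_rssi and @rssi_adjust interaction concrete, a driver or firmware filter along these lines is implied (an illustrative helper; the name and exact comparison are not part of this patch):

static bool example_report_bss(const struct cfg80211_sched_scan_request *req,
			       enum nl80211_band band, s8 bss_rssi,
			       s8 cur_bss_rssi)
{
	s32 rssi = bss_rssi;

	if (!req->relative_rssi_set)
		return true;

	/* Favour (delta > 0) or penalize (delta < 0) the selected band */
	if (band == req->rssi_adjust.band)
		rssi += req->rssi_adjust.delta;

	/* Report only BSSes at most relative_rssi dB below the current one */
	return rssi >= cur_bss_rssi - req->relative_rssi;
}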
*/ struct cfg80211_sched_scan_request { struct cfg80211_ssid *ssids; @@ -1645,6 +1702,10 @@ struct cfg80211_sched_scan_request { u8 mac_addr[ETH_ALEN] __aligned(2); u8 mac_addr_mask[ETH_ALEN] __aligned(2); + bool relative_rssi_set; + s8 relative_rssi; + struct cfg80211_bss_select_adjust rssi_adjust; + /* internal */ struct wiphy *wiphy; struct net_device *dev; @@ -1887,7 +1948,7 @@ struct cfg80211_deauth_request { * struct cfg80211_disassoc_request - Disassociation request data * * This structure provides information needed to complete IEEE 802.11 - * disassocation. + * disassociation. * * @bss: the BSS to disassociate from * @ie: Extra IEs to add to Disassociation frame or %NULL @@ -1953,17 +2014,6 @@ struct cfg80211_ibss_params { }; /** - * struct cfg80211_bss_select_adjust - BSS selection with RSSI adjustment. - * - * @band: band of BSS which should match for RSSI level adjustment. - * @delta: value of RSSI level adjustment. - */ -struct cfg80211_bss_select_adjust { - enum nl80211_band band; - s8 delta; -}; - -/** * struct cfg80211_bss_selection - connection parameters for BSS selection. * * @behaviour: requested BSS selection behaviour. @@ -2366,11 +2416,13 @@ struct cfg80211_qos_map { * This struct defines NAN configuration parameters * * @master_pref: master preference (1 - 255) - * @dual: dual band operation mode, see &enum nl80211_nan_dual_band_conf + * @bands: operating bands, a bitmap of &enum nl80211_band values. + * For instance, for NL80211_BAND_2GHZ, bit 0 would be set + * (i.e. BIT(NL80211_BAND_2GHZ)). */ struct cfg80211_nan_conf { u8 master_pref; - u8 dual; + u8 bands; }; /** @@ -2378,11 +2430,11 @@ struct cfg80211_nan_conf { * configuration * * @CFG80211_NAN_CONF_CHANGED_PREF: master preference - * @CFG80211_NAN_CONF_CHANGED_DUAL: dual band operation + * @CFG80211_NAN_CONF_CHANGED_BANDS: operating bands */ enum cfg80211_nan_conf_changes { CFG80211_NAN_CONF_CHANGED_PREF = BIT(0), - CFG80211_NAN_CONF_CHANGED_DUAL = BIT(1), + CFG80211_NAN_CONF_CHANGED_BANDS = BIT(1), }; /** @@ -3136,22 +3188,6 @@ struct ieee80211_iface_limit { /** * struct ieee80211_iface_combination - possible interface combination - * @limits: limits for the given interface types - * @n_limits: number of limitations - * @num_different_channels: can use up to this many different channels - * @max_interfaces: maximum number of interfaces in total allowed in this - * group - * @beacon_int_infra_match: In this combination, the beacon intervals - * between infrastructure and AP types must match. This is required - * only in special cases. - * @radar_detect_widths: bitmap of channel widths supported for radar detection - * @radar_detect_regions: bitmap of regions supported for radar detection - * @beacon_int_min_gcd: This interface combination supports different - * beacon intervals. - * = 0 - all beacon intervals for different interface must be same. - * > 0 - any beacon interval for the interface part of this combination AND - * *GCD* of all beacon intervals from beaconing interfaces of this - * combination must be greater or equal to this value. * * With this structure the driver can describe which interface * combinations it supports concurrently. 
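Before the member-by-member documentation that the next hunk introduces, a typical instantiation of this structure for reference: one station plus one AP interface on a single channel (example values only, not from this patch):

static const struct ieee80211_iface_limit example_limits[] = {
	{ .max = 1, .types = BIT(NL80211_IFTYPE_STATION) },
	{ .max = 1, .types = BIT(NL80211_IFTYPE_AP) },
};

static const struct ieee80211_iface_combination example_combination = {
	.limits			= example_limits,
	.n_limits		= ARRAY_SIZE(example_limits),
	.num_different_channels	= 1,
	.max_interfaces		= 2,
	.beacon_int_infra_match	= true,
};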
@@ -3210,13 +3246,60 @@ struct ieee80211_iface_limit { * */ struct ieee80211_iface_combination { + /** + * @limits: + * limits for the given interface types + */ const struct ieee80211_iface_limit *limits; + + /** + * @num_different_channels: + * can use up to this many different channels + */ u32 num_different_channels; + + /** + * @max_interfaces: + * maximum number of interfaces in total allowed in this group + */ u16 max_interfaces; + + /** + * @n_limits: + * number of limitations + */ u8 n_limits; + + /** + * @beacon_int_infra_match: + * In this combination, the beacon intervals between infrastructure + * and AP types must match. This is required only in special cases. + */ bool beacon_int_infra_match; + + /** + * @radar_detect_widths: + * bitmap of channel widths supported for radar detection + */ u8 radar_detect_widths; + + /** + * @radar_detect_regions: + * bitmap of regions supported for radar detection + */ u8 radar_detect_regions; + + /** + * @beacon_int_min_gcd: + * This interface combination supports different beacon intervals. + * + * = 0 + * all beacon intervals for different interfaces must be the same. + * > 0 + * any beacon interval for the interface part of this combination AND + * GCD of all beacon intervals from beaconing interfaces of this + * combination must be greater than or equal to this value. + */ u32 beacon_int_min_gcd; }; @@ -3515,6 +3598,10 @@ struct wiphy_iftype_ext_capab { * attribute indices defined in &enum nl80211_bss_select_attr. * * @cookie_counter: unique generic cookie counter, used to identify objects. + * @nan_supported_bands: bands supported by the device in NAN mode, a + * bitmap of &enum nl80211_band values. For instance, for + * NL80211_BAND_2GHZ, bit 0 would be set + * (i.e. BIT(NL80211_BAND_2GHZ)). */ struct wiphy { /* assign these fields before you register the wiphy */ @@ -3646,6 +3733,8 @@ struct wiphy { u64 cookie_counter; + u8 nan_supported_bands; + char priv[0] __aligned(NETDEV_ALIGN); }; @@ -3837,6 +3926,9 @@ struct cfg80211_cached_keys; * @conn: (private) cfg80211 software SME connection state machine data * @connect_keys: (private) keys to set after connection is established * @conn_bss_type: connecting/connected BSS type + * @conn_owner_nlportid: (private) connection owner socket port ID + * @disconnect_wk: (private) auto-disconnect work + * @disconnect_bssid: (private) the BSSID to use for auto-disconnect * @ibss_fixed: (private) IBSS is using fixed BSSID * @ibss_dfs_possible: (private) IBSS may change to a DFS channel * @event_list: (private) list for internal event processing @@ -3868,6 +3960,10 @@ struct wireless_dev { struct cfg80211_conn *conn; struct cfg80211_cached_keys *connect_keys; enum ieee80211_bss_type conn_bss_type; + u32 conn_owner_nlportid; + + struct work_struct disconnect_wk; + u8 disconnect_bssid[ETH_ALEN]; struct list_head event_list; spinlock_t event_lock; @@ -3955,26 +4051,15 @@ int ieee80211_channel_to_frequency(int chan, enum nl80211_band band); */ int ieee80211_frequency_to_channel(int freq); -/* - * Name indirection necessary because the ieee80211 code also has - * a function named "ieee80211_get_channel", so if you include - * cfg80211's header file you get cfg80211's version, if you try - * to include both header files you'll (rightfully!) get a symbol - * clash. 
- */ -struct ieee80211_channel *__ieee80211_get_channel(struct wiphy *wiphy, - int freq); /** * ieee80211_get_channel - get channel struct from wiphy for specified frequency + * * @wiphy: the struct wiphy to get the channel for * @freq: the center frequency of the channel + * * Return: The channel struct from @wiphy at @freq. */ -static inline struct ieee80211_channel * -ieee80211_get_channel(struct wiphy *wiphy, int freq) -{ - return __ieee80211_get_channel(wiphy, freq); -} +struct ieee80211_channel *ieee80211_get_channel(struct wiphy *wiphy, int freq); /** * ieee80211_get_response_rate - get basic rate for a given rate @@ -5048,20 +5133,32 @@ static inline void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp) * @req_ie_len: association request IEs length * @resp_ie: association response IEs (may be %NULL) * @resp_ie_len: assoc response IEs length - * @status: status code, 0 for successful connection, use - * %WLAN_STATUS_UNSPECIFIED_FAILURE if your device cannot give you - * the real status code for failures. + * @status: status code, %WLAN_STATUS_SUCCESS for successful connection, use + * %WLAN_STATUS_UNSPECIFIED_FAILURE if your device cannot give you + * the real status code for failures. If this call is used to report a + * failure due to a timeout (e.g., not receiving an Authentication frame + * from the AP) instead of an explicit rejection by the AP, -1 is used to + * indicate that this is a failure, but without a status code. + * @timeout_reason is used to report the reason for the timeout in that + * case. * @gfp: allocation flags - * - * It should be called by the underlying driver whenever connect() has - * succeeded. This is similar to cfg80211_connect_result(), but with the - * option of identifying the exact bss entry for the connection. Only one of - * these functions should be called. + * @timeout_reason: reason for connection timeout. This is used when the + * connection fails due to a timeout instead of an explicit rejection from + * the AP. %NL80211_TIMEOUT_UNSPECIFIED is used when the timeout reason is + * not known. This value is used only if @status < 0 to indicate that the + * failure is due to a timeout and not due to explicit rejection by the AP. + * This value is ignored in other cases (@status >= 0). + * + * It should be called by the underlying driver once execution of the connection + * request from connect() has been completed. This is similar to + * cfg80211_connect_result(), but with the option of identifying the exact bss + * entry for the connection. Only one of these functions should be called. */ void cfg80211_connect_bss(struct net_device *dev, const u8 *bssid, struct cfg80211_bss *bss, const u8 *req_ie, size_t req_ie_len, const u8 *resp_ie, - size_t resp_ie_len, int status, gfp_t gfp); + size_t resp_ie_len, int status, gfp_t gfp, + enum nl80211_timeout_reason timeout_reason); /** * cfg80211_connect_result - notify cfg80211 of connection result @@ -5072,13 +5169,15 @@ void cfg80211_connect_bss(struct net_device *dev, const u8 *bssid, * @req_ie_len: association request IEs length * @resp_ie: association response IEs (may be %NULL) * @resp_ie_len: assoc response IEs length - * @status: status code, 0 for successful connection, use + * @status: status code, %WLAN_STATUS_SUCCESS for successful connection, use * %WLAN_STATUS_UNSPECIFIED_FAILURE if your device cannot give you * the real status code for failures. * @gfp: allocation flags * - * It should be called by the underlying driver whenever connect() has - * succeeded. 
+ * It should be called by the underlying driver once execution of the connection + * request from connect() has been completed. This is similar to + * cfg80211_connect_bss() which allows the exact bss entry to be specified. Only + * one of these functions should be called. */ static inline void cfg80211_connect_result(struct net_device *dev, const u8 *bssid, @@ -5087,7 +5186,8 @@ cfg80211_connect_result(struct net_device *dev, const u8 *bssid, u16 status, gfp_t gfp) { cfg80211_connect_bss(dev, bssid, NULL, req_ie, req_ie_len, resp_ie, - resp_ie_len, status, gfp); + resp_ie_len, status, gfp, + NL80211_TIMEOUT_UNSPECIFIED); } /** @@ -5098,6 +5198,7 @@ cfg80211_connect_result(struct net_device *dev, const u8 *bssid, * @req_ie: association request IEs (may be %NULL) * @req_ie_len: association request IEs length * @gfp: allocation flags + * @timeout_reason: reason for connection timeout. * * It should be called by the underlying driver whenever connect() has failed * in a sequence where no explicit authentication/association rejection was @@ -5107,10 +5208,11 @@ cfg80211_connect_result(struct net_device *dev, const u8 *bssid, */ static inline void cfg80211_connect_timeout(struct net_device *dev, const u8 *bssid, - const u8 *req_ie, size_t req_ie_len, gfp_t gfp) + const u8 *req_ie, size_t req_ie_len, gfp_t gfp, + enum nl80211_timeout_reason timeout_reason) { cfg80211_connect_bss(dev, bssid, NULL, req_ie, req_ie_len, NULL, 0, -1, - gfp); + gfp, timeout_reason); } /** @@ -5296,6 +5398,7 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie, * cfg80211_cqm_rssi_notify - connection quality monitoring rssi event * @dev: network device * @rssi_event: the triggered RSSI event + * @rssi_level: new RSSI level value or 0 if not available * @gfp: context flags * * This function is called when a configured connection quality monitoring @@ -5303,7 +5406,7 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie, */ void cfg80211_cqm_rssi_notify(struct net_device *dev, enum nl80211_cqm_rssi_threshold_event rssi_event, - gfp_t gfp); + s32 rssi_level, gfp_t gfp); /** * cfg80211_cqm_pktloss_notify - notify userspace about packetloss to peer diff --git a/include/net/checksum.h b/include/net/checksum.h index 35d0fabd2782..aef2b2bb6603 100644 --- a/include/net/checksum.h +++ b/include/net/checksum.h @@ -179,7 +179,7 @@ static inline __wsum remcsum_adjust(void *ptr, __wsum csum, static inline void remcsum_unadjust(__sum16 *psum, __wsum delta) { - *psum = csum_fold(csum_sub(delta, *psum)); + *psum = csum_fold(csum_sub(delta, (__force __wsum)*psum)); } #endif diff --git a/include/net/dsa.h b/include/net/dsa.h index b122196d5a1f..4e13e695f025 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -11,15 +11,19 @@ #ifndef __LINUX_NET_DSA_H #define __LINUX_NET_DSA_H +#include <linux/if.h> #include <linux/if_ether.h> #include <linux/list.h> +#include <linux/notifier.h> #include <linux/timer.h> #include <linux/workqueue.h> #include <linux/of.h> -#include <linux/phy.h> -#include <linux/phy_fixed.h> #include <linux/ethtool.h> +struct tc_action; +struct phy_device; +struct fixed_phy_status; + enum dsa_tag_protocol { DSA_TAG_PROTO_NONE = 0, DSA_TAG_PROTO_DSA, @@ -42,6 +46,11 @@ struct dsa_chip_data { struct device *host_dev; int sw_addr; + /* + * Reference to network devices + */ + struct device *netdev[DSA_MAX_PORTS]; + /* set to size of eeprom if supported by the switch */ int eeprom_len; @@ -90,6 +99,9 @@ struct packet_type; struct dsa_switch_tree { struct list_head list; + /* Notifier 
chain for switch-wide events */ + struct raw_notifier_head nh; + /* Tree identifier */ u32 tree; @@ -124,7 +136,7 @@ struct dsa_switch_tree { /* * The switch and port to which the CPU is attached. */ - s8 cpu_switch; + struct dsa_switch *cpu_switch; s8 cpu_port; /* @@ -139,11 +151,37 @@ struct dsa_switch_tree { const struct dsa_device_ops *tag_ops; }; +/* TC matchall action types, only mirroring for now */ +enum dsa_port_mall_action_type { + DSA_PORT_MALL_MIRROR, +}; + +/* TC mirroring entry */ +struct dsa_mall_mirror_tc_entry { + u8 to_local_port; + bool ingress; +}; + +/* TC matchall entry */ +struct dsa_mall_tc_entry { + struct list_head list; + unsigned long cookie; + enum dsa_port_mall_action_type type; + union { + struct dsa_mall_mirror_tc_entry mirror; + }; +}; + + struct dsa_port { + struct dsa_switch *ds; + unsigned int index; + const char *name; struct net_device *netdev; struct device_node *dn; unsigned int ageing_time; u8 stp_state; + struct net_device *bridge_dev; }; struct dsa_switch { @@ -155,6 +193,9 @@ struct dsa_switch { struct dsa_switch_tree *dst; int index; + /* Listener for switch fabric events */ + struct notifier_block nb; + /* * Give the switch driver somewhere to hang its private data * structure. @@ -169,7 +210,7 @@ struct dsa_switch { /* * The switch operations. */ - struct dsa_switch_ops *ops; + const struct dsa_switch_ops *ops; /* * An array of which element [a] indicates which port on this @@ -178,14 +219,6 @@ struct dsa_switch { */ s8 rtable[DSA_MAX_SWITCHES]; -#ifdef CONFIG_NET_DSA_HWMON - /* - * Hardware monitoring information - */ - char hwmon_name[IFNAMSIZ + 8]; - struct device *hwmon_dev; -#endif - /* * The lower device this switch uses to talk to the host */ @@ -198,13 +231,16 @@ struct dsa_switch { u32 cpu_port_mask; u32 enabled_port_mask; u32 phys_mii_mask; - struct dsa_port ports[DSA_MAX_PORTS]; struct mii_bus *slave_mii_bus; + + /* Dynamically allocated ports, keep last */ + size_t num_ports; + struct dsa_port ports[]; }; static inline bool dsa_is_cpu_port(struct dsa_switch *ds, int p) { - return !!(ds->index == ds->dst->cpu_switch && p == ds->dst->cpu_port); + return !!(ds == ds->dst->cpu_switch && p == ds->dst->cpu_port); } static inline bool dsa_is_dsa_port(struct dsa_switch *ds, int p) @@ -227,10 +263,10 @@ static inline u8 dsa_upstream_port(struct dsa_switch *ds) * Else return the (DSA) port number that connects to the * switch that is one hop closer to the cpu. */ - if (dst->cpu_switch == ds->index) + if (dst->cpu_switch == ds) return dst->cpu_port; else - return ds->rtable[dst->cpu_switch]; + return ds->rtable[dst->cpu_switch->index]; } struct switchdev_trans; @@ -239,9 +275,17 @@ struct switchdev_obj_port_fdb; struct switchdev_obj_port_mdb; struct switchdev_obj_port_vlan; -struct dsa_switch_ops { - struct list_head list; +#define DSA_NOTIFIER_BRIDGE_JOIN 1 +#define DSA_NOTIFIER_BRIDGE_LEAVE 2 + +/* DSA_NOTIFIER_BRIDGE_* */ +struct dsa_notifier_bridge_info { + struct net_device *br; + int sw_index; + int port; +}; +struct dsa_switch_ops { /* * Probing and setup. 
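A sketch of how a switch driver might consume the new DSA_NOTIFIER_BRIDGE_* events through the nb member added to struct dsa_switch above (the handler name is invented for illustration):

static int example_switch_event(struct notifier_block *nb,
				unsigned long event, void *info)
{
	struct dsa_notifier_bridge_info *bi = info;

	switch (event) {
	case DSA_NOTIFIER_BRIDGE_JOIN:
		/* port bi->port on switch bi->sw_index joined bridge bi->br */
		return NOTIFY_OK;
	case DSA_NOTIFIER_BRIDGE_LEAVE:
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}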
*/ @@ -309,14 +353,6 @@ struct dsa_switch_ops { int (*get_eee)(struct dsa_switch *ds, int port, struct ethtool_eee *e); -#ifdef CONFIG_NET_DSA_HWMON - /* Hardware monitoring */ - int (*get_temp)(struct dsa_switch *ds, int *temp); - int (*get_temp_limit)(struct dsa_switch *ds, int *temp); - int (*set_temp_limit)(struct dsa_switch *ds, int temp); - int (*get_temp_alarm)(struct dsa_switch *ds, bool *alarm); -#endif - /* EEPROM access */ int (*get_eeprom_len)(struct dsa_switch *ds); int (*get_eeprom)(struct dsa_switch *ds, @@ -337,7 +373,8 @@ struct dsa_switch_ops { int (*set_ageing_time)(struct dsa_switch *ds, unsigned int msecs); int (*port_bridge_join)(struct dsa_switch *ds, int port, struct net_device *bridge); - void (*port_bridge_leave)(struct dsa_switch *ds, int port); + void (*port_bridge_leave)(struct dsa_switch *ds, int port, + struct net_device *bridge); void (*port_stp_state_set)(struct dsa_switch *ds, int port, u8 state); void (*port_fast_age)(struct dsa_switch *ds, int port); @@ -388,19 +425,43 @@ struct dsa_switch_ops { int (*port_mdb_dump)(struct dsa_switch *ds, int port, struct switchdev_obj_port_mdb *mdb, int (*cb)(struct switchdev_obj *obj)); + + /* + * RXNFC + */ + int (*get_rxnfc)(struct dsa_switch *ds, int port, + struct ethtool_rxnfc *nfc, u32 *rule_locs); + int (*set_rxnfc)(struct dsa_switch *ds, int port, + struct ethtool_rxnfc *nfc); + + /* + * TC integration + */ + int (*port_mirror_add)(struct dsa_switch *ds, int port, + struct dsa_mall_mirror_tc_entry *mirror, + bool ingress); + void (*port_mirror_del)(struct dsa_switch *ds, int port, + struct dsa_mall_mirror_tc_entry *mirror); +}; + +struct dsa_switch_driver { + struct list_head list; + const struct dsa_switch_ops *ops; }; -void register_switch_driver(struct dsa_switch_ops *type); -void unregister_switch_driver(struct dsa_switch_ops *type); +void register_switch_driver(struct dsa_switch_driver *type); +void unregister_switch_driver(struct dsa_switch_driver *type); struct mii_bus *dsa_host_dev_to_mii_bus(struct device *dev); +struct net_device *dsa_dev_to_net_device(struct device *dev); static inline bool dsa_uses_tagged_protocol(struct dsa_switch_tree *dst) { return dst->rcv != NULL; } +struct dsa_switch *dsa_switch_alloc(struct device *dev, size_t n); void dsa_unregister_switch(struct dsa_switch *ds); -int dsa_register_switch(struct dsa_switch *ds, struct device_node *np); +int dsa_register_switch(struct dsa_switch *ds, struct device *dev); #ifdef CONFIG_PM_SLEEP int dsa_switch_suspend(struct dsa_switch *ds); int dsa_switch_resume(struct dsa_switch *ds); diff --git a/include/net/dst.h b/include/net/dst.h index 6835d224d47b..049af33da3b6 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -59,8 +59,6 @@ struct dst_entry { #define DST_XFRM_QUEUE 0x0100 #define DST_METADATA 0x0200 - unsigned short pending_confirm; - short error; /* A non-zero value of dst->obsolete forces by-hand validation @@ -78,6 +76,8 @@ struct dst_entry { #define DST_OBSOLETE_KILL -2 unsigned short header_len; /* more space at head required */ unsigned short trailer_len; /* space to reserve at tail */ + unsigned short __pad3; + #ifdef CONFIG_IP_ROUTE_CLASSID __u32 tclassid; #else @@ -440,28 +440,6 @@ static inline void dst_rcu_free(struct rcu_head *head) static inline void dst_confirm(struct dst_entry *dst) { - dst->pending_confirm = 1; -} - -static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n, - struct sk_buff *skb) -{ - const struct hh_cache *hh; - - if (dst->pending_confirm) { - unsigned long now = jiffies; - 
- dst->pending_confirm = 0; - /* avoid dirtying neighbour */ - if (n->confirmed != now) - n->confirmed = now; - } - - hh = &n->hh; - if ((n->nud_state & NUD_CONNECTED) && hh->hh_len) - return neigh_hh_output(hh, skb); - else - return n->output(n, skb); } static inline struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, const void *daddr) @@ -477,6 +455,13 @@ static inline struct neighbour *dst_neigh_lookup_skb(const struct dst_entry *dst return IS_ERR(n) ? NULL : n; } +static inline void dst_confirm_neigh(const struct dst_entry *dst, + const void *daddr) +{ + if (dst->ops->confirm_neigh) + dst->ops->confirm_neigh(dst, daddr); +} + static inline void dst_link_failure(struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h index a0d443ca16fc..c84b3287e38b 100644 --- a/include/net/dst_ops.h +++ b/include/net/dst_ops.h @@ -33,6 +33,8 @@ struct dst_ops { struct neighbour * (*neigh_lookup)(const struct dst_entry *dst, struct sk_buff *skb, const void *daddr); + void (*confirm_neigh)(const struct dst_entry *dst, + const void *daddr); struct kmem_cache *kmem_cachep; @@ -46,19 +48,12 @@ static inline int dst_entries_get_fast(struct dst_ops *dst) static inline int dst_entries_get_slow(struct dst_ops *dst) { - int res; - - local_bh_disable(); - res = percpu_counter_sum_positive(&dst->pcpuc_entries); - local_bh_enable(); - return res; + return percpu_counter_sum_positive(&dst->pcpuc_entries); } static inline void dst_entries_add(struct dst_ops *dst, int val) { - local_bh_disable(); percpu_counter_add(&dst->pcpuc_entries, val); - local_bh_enable(); } static inline int dst_entries_init(struct dst_ops *dst) diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h index d896a33e00d4..ac9703018a3a 100644 --- a/include/net/flow_dissector.h +++ b/include/net/flow_dissector.h @@ -89,6 +89,24 @@ struct flow_dissector_key_addrs { }; /** + * flow_dissector_key_arp: + * @ports: Operation, source and target addresses for an ARP header + * for Ethernet hardware addresses and IPv4 protocol addresses + * sip: Sender IP address + * tip: Target IP address + * op: Operation + * sha: Sender hardware address + * tha: Target hardware address + */ +struct flow_dissector_key_arp { + __u32 sip; + __u32 tip; + __u8 op; + unsigned char sha[ETH_ALEN]; + unsigned char tha[ETH_ALEN]; +}; + +/** * flow_dissector_key_tp_ports: * @ports: port numbers of Transport header * src: source port number @@ -141,6 +159,7 @@ enum flow_dissector_key_id { FLOW_DISSECTOR_KEY_ICMP, /* struct flow_dissector_key_icmp */ FLOW_DISSECTOR_KEY_ETH_ADDRS, /* struct flow_dissector_key_eth_addrs */ FLOW_DISSECTOR_KEY_TIPC_ADDRS, /* struct flow_dissector_key_tipc_addrs */ + FLOW_DISSECTOR_KEY_ARP, /* struct flow_dissector_key_arp */ FLOW_DISSECTOR_KEY_VLAN, /* struct flow_dissector_key_flow_vlan */ FLOW_DISSECTOR_KEY_FLOW_LABEL, /* struct flow_dissector_key_flow_tags */ FLOW_DISSECTOR_KEY_GRE_KEYID, /* struct flow_dissector_key_keyid */ diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h index 2a1abbf8da74..fcaf8f479130 100644 --- a/include/net/gro_cells.h +++ b/include/net/gro_cells.h @@ -5,92 +5,14 @@ #include <linux/slab.h> #include <linux/netdevice.h> -struct gro_cell { - struct sk_buff_head napi_skbs; - struct napi_struct napi; -}; +struct gro_cell; struct gro_cells { struct gro_cell __percpu *cells; }; -static inline int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb) -{ - struct gro_cell *cell; - struct net_device *dev = skb->dev; - - 
if (!gcells->cells || skb_cloned(skb) || !(dev->features & NETIF_F_GRO)) - return netif_rx(skb); - - cell = this_cpu_ptr(gcells->cells); - - if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) { - atomic_long_inc(&dev->rx_dropped); - kfree_skb(skb); - return NET_RX_DROP; - } - - __skb_queue_tail(&cell->napi_skbs, skb); - if (skb_queue_len(&cell->napi_skbs) == 1) - napi_schedule(&cell->napi); - return NET_RX_SUCCESS; -} - -/* called under BH context */ -static inline int gro_cell_poll(struct napi_struct *napi, int budget) -{ - struct gro_cell *cell = container_of(napi, struct gro_cell, napi); - struct sk_buff *skb; - int work_done = 0; - - while (work_done < budget) { - skb = __skb_dequeue(&cell->napi_skbs); - if (!skb) - break; - napi_gro_receive(napi, skb); - work_done++; - } - - if (work_done < budget) - napi_complete_done(napi, work_done); - return work_done; -} - -static inline int gro_cells_init(struct gro_cells *gcells, struct net_device *dev) -{ - int i; - - gcells->cells = alloc_percpu(struct gro_cell); - if (!gcells->cells) - return -ENOMEM; - - for_each_possible_cpu(i) { - struct gro_cell *cell = per_cpu_ptr(gcells->cells, i); - - __skb_queue_head_init(&cell->napi_skbs); - - set_bit(NAPI_STATE_NO_BUSY_POLL, &cell->napi.state); - - netif_napi_add(dev, &cell->napi, gro_cell_poll, 64); - napi_enable(&cell->napi); - } - return 0; -} - -static inline void gro_cells_destroy(struct gro_cells *gcells) -{ - int i; - - if (!gcells->cells) - return; - for_each_possible_cpu(i) { - struct gro_cell *cell = per_cpu_ptr(gcells->cells, i); - - netif_napi_del(&cell->napi); - __skb_queue_purge(&cell->napi_skbs); - } - free_percpu(gcells->cells); - gcells->cells = NULL; -} +int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb); +int gro_cells_init(struct gro_cells *gcells, struct net_device *dev); +void gro_cells_destroy(struct gro_cells *gcells); #endif diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h index d0e7e3f8e67a..d91f9e7f4d71 100644 --- a/include/net/ieee80211_radiotap.h +++ b/include/net/ieee80211_radiotap.h @@ -1,201 +1,54 @@ /* - * Copyright (c) 2003, 2004 David Young. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of David Young may not be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY DAVID YOUNG ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, - * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A - * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL DAVID - * YOUNG BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED - * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, - * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY - * OF SUCH DAMAGE. - */ - -/* - * Modifications to fit into the linux IEEE 802.11 stack, - * Mike Kershaw (dragorn@kismetwireless.net) + * Copyright (c) 2017 Intel Deutschland GmbH + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ +#ifndef __RADIOTAP_H +#define __RADIOTAP_H -#ifndef IEEE80211RADIOTAP_H -#define IEEE80211RADIOTAP_H - -#include <linux/if_ether.h> #include <linux/kernel.h> #include <asm/unaligned.h> -/* Base version of the radiotap packet header data */ -#define PKTHDR_RADIOTAP_VERSION 0 - -/* A generic radio capture format is desirable. There is one for - * Linux, but it is neither rigidly defined (there were not even - * units given for some fields) nor easily extensible. - * - * I suggest the following extensible radio capture format. It is - * based on a bitmap indicating which fields are present. - * - * I am trying to describe precisely what the application programmer - * should expect in the following, and for that reason I tell the - * units and origin of each measurement (where it applies), or else I - * use sufficiently weaselly language ("is a monotonically nondecreasing - * function of...") that I cannot set false expectations for lawyerly - * readers. - */ - -/* - * The radio capture header precedes the 802.11 header. - * All data in the header is little endian on all platforms. +/** + * struct ieee80211_radiotap_header - base radiotap header */ struct ieee80211_radiotap_header { - u8 it_version; /* Version 0. Only increases - * for drastic changes, - * introduction of compatible - * new fields does not count. - */ - u8 it_pad; - __le16 it_len; /* length of the whole - * header in bytes, including - * it_version, it_pad, - * it_len, and data fields. - */ - __le32 it_present; /* A bitmap telling which - * fields are present. Set bit 31 - * (0x80000000) to extend the - * bitmap by another 32 bits. - * Additional extensions are made - * by setting bit 31. 
- */ + /** + * @it_version: radiotap version, always 0 + */ + uint8_t it_version; + + /** + * @it_pad: padding (or alignment) + */ + uint8_t it_pad; + + /** + * @it_len: overall radiotap header length + */ + __le16 it_len; + + /** + * @it_present: (first) present word + */ + __le32 it_present; } __packed; -/* Name Data type Units - * ---- --------- ----- - * - * IEEE80211_RADIOTAP_TSFT __le64 microseconds - * - * Value in microseconds of the MAC's 64-bit 802.11 Time - * Synchronization Function timer when the first bit of the - * MPDU arrived at the MAC. For received frames, only. - * - * IEEE80211_RADIOTAP_CHANNEL 2 x __le16 MHz, bitmap - * - * Tx/Rx frequency in MHz, followed by flags (see below). - * - * IEEE80211_RADIOTAP_FHSS __le16 see below - * - * For frequency-hopping radios, the hop set (first byte) - * and pattern (second byte). - * - * IEEE80211_RADIOTAP_RATE u8 500kb/s - * - * Tx/Rx data rate - * - * IEEE80211_RADIOTAP_DBM_ANTSIGNAL s8 decibels from - * one milliwatt (dBm) - * - * RF signal power at the antenna, decibel difference from - * one milliwatt. - * - * IEEE80211_RADIOTAP_DBM_ANTNOISE s8 decibels from - * one milliwatt (dBm) - * - * RF noise power at the antenna, decibel difference from one - * milliwatt. - * - * IEEE80211_RADIOTAP_DB_ANTSIGNAL u8 decibel (dB) - * - * RF signal power at the antenna, decibel difference from an - * arbitrary, fixed reference. - * - * IEEE80211_RADIOTAP_DB_ANTNOISE u8 decibel (dB) - * - * RF noise power at the antenna, decibel difference from an - * arbitrary, fixed reference point. - * - * IEEE80211_RADIOTAP_LOCK_QUALITY __le16 unitless - * - * Quality of Barker code lock. Unitless. Monotonically - * nondecreasing with "better" lock strength. Called "Signal - * Quality" in datasheets. (Is there a standard way to measure - * this?) - * - * IEEE80211_RADIOTAP_TX_ATTENUATION __le16 unitless - * - * Transmit power expressed as unitless distance from max - * power set at factory calibration. 0 is max power. - * Monotonically nondecreasing with lower power levels. - * - * IEEE80211_RADIOTAP_DB_TX_ATTENUATION __le16 decibels (dB) - * - * Transmit power expressed as decibel distance from max power - * set at factory calibration. 0 is max power. Monotonically - * nondecreasing with lower power levels. - * - * IEEE80211_RADIOTAP_DBM_TX_POWER s8 decibels from - * one milliwatt (dBm) - * - * Transmit power expressed as dBm (decibels from a 1 milliwatt - * reference). This is the absolute power level measured at - * the antenna port. - * - * IEEE80211_RADIOTAP_FLAGS u8 bitmap - * - * Properties of transmitted and received frames. See flags - * defined below. - * - * IEEE80211_RADIOTAP_ANTENNA u8 antenna index - * - * Unitless indication of the Rx/Tx antenna for this packet. - * The first antenna is antenna 0. - * - * IEEE80211_RADIOTAP_RX_FLAGS __le16 bitmap - * - * Properties of received frames. See flags defined below. - * - * IEEE80211_RADIOTAP_TX_FLAGS __le16 bitmap - * - * Properties of transmitted frames. See flags defined below. - * - * IEEE80211_RADIOTAP_RTS_RETRIES u8 data - * - * Number of rts retries a transmitted frame used. - * - * IEEE80211_RADIOTAP_DATA_RETRIES u8 data - * - * Number of unicast retries a transmitted frame used. - * - * IEEE80211_RADIOTAP_MCS u8, u8, u8 unitless - * - * Contains a bitmap of known fields/flags, the flags, and - * the MCS index. - * - * IEEE80211_RADIOTAP_AMPDU_STATUS u32, u16, u8, u8 unitless - * - * Contains the AMPDU information for the subframe. 
- * - * IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 - * - * Contains VHT information about this frame. - * - * IEEE80211_RADIOTAP_TIMESTAMP u64, u16, u8, u8 variable - * - * Contains timestamp information for this frame. - */ -enum ieee80211_radiotap_type { +/* version is always 0 */ +#define PKTHDR_RADIOTAP_VERSION 0 + +/* see the radiotap website for the descriptions */ +enum ieee80211_radiotap_presence { IEEE80211_RADIOTAP_TSFT = 0, IEEE80211_RADIOTAP_FLAGS = 1, IEEE80211_RADIOTAP_RATE = 2, @@ -214,7 +67,7 @@ enum ieee80211_radiotap_type { IEEE80211_RADIOTAP_TX_FLAGS = 15, IEEE80211_RADIOTAP_RTS_RETRIES = 16, IEEE80211_RADIOTAP_DATA_RETRIES = 17, - + /* 18 is XChannel, but it's not defined yet */ IEEE80211_RADIOTAP_MCS = 19, IEEE80211_RADIOTAP_AMPDU_STATUS = 20, IEEE80211_RADIOTAP_VHT = 21, @@ -226,129 +79,135 @@ enum ieee80211_radiotap_type { IEEE80211_RADIOTAP_EXT = 31 }; -/* Channel flags. */ -#define IEEE80211_CHAN_TURBO 0x0010 /* Turbo channel */ -#define IEEE80211_CHAN_CCK 0x0020 /* CCK channel */ -#define IEEE80211_CHAN_OFDM 0x0040 /* OFDM channel */ -#define IEEE80211_CHAN_2GHZ 0x0080 /* 2 GHz spectrum channel. */ -#define IEEE80211_CHAN_5GHZ 0x0100 /* 5 GHz spectrum channel */ -#define IEEE80211_CHAN_PASSIVE 0x0200 /* Only passive scan allowed */ -#define IEEE80211_CHAN_DYN 0x0400 /* Dynamic CCK-OFDM channel */ -#define IEEE80211_CHAN_GFSK 0x0800 /* GFSK channel (FHSS PHY) */ -#define IEEE80211_CHAN_GSM 0x1000 /* GSM (900 MHz) */ -#define IEEE80211_CHAN_STURBO 0x2000 /* Static Turbo */ -#define IEEE80211_CHAN_HALF 0x4000 /* Half channel (10 MHz wide) */ -#define IEEE80211_CHAN_QUARTER 0x8000 /* Quarter channel (5 MHz wide) */ - -/* For IEEE80211_RADIOTAP_FLAGS */ -#define IEEE80211_RADIOTAP_F_CFP 0x01 /* sent/received - * during CFP - */ -#define IEEE80211_RADIOTAP_F_SHORTPRE 0x02 /* sent/received - * with short - * preamble - */ -#define IEEE80211_RADIOTAP_F_WEP 0x04 /* sent/received - * with WEP encryption - */ -#define IEEE80211_RADIOTAP_F_FRAG 0x08 /* sent/received - * with fragmentation - */ -#define IEEE80211_RADIOTAP_F_FCS 0x10 /* frame includes FCS */ -#define IEEE80211_RADIOTAP_F_DATAPAD 0x20 /* frame has padding between - * 802.11 header and payload - * (to 32-bit boundary) - */ -#define IEEE80211_RADIOTAP_F_BADFCS 0x40 /* bad FCS */ - -/* For IEEE80211_RADIOTAP_RX_FLAGS */ -#define IEEE80211_RADIOTAP_F_RX_BADPLCP 0x0002 /* frame has bad PLCP */ +/* for IEEE80211_RADIOTAP_FLAGS */ +enum ieee80211_radiotap_flags { + IEEE80211_RADIOTAP_F_CFP = 0x01, + IEEE80211_RADIOTAP_F_SHORTPRE = 0x02, + IEEE80211_RADIOTAP_F_WEP = 0x04, + IEEE80211_RADIOTAP_F_FRAG = 0x08, + IEEE80211_RADIOTAP_F_FCS = 0x10, + IEEE80211_RADIOTAP_F_DATAPAD = 0x20, + IEEE80211_RADIOTAP_F_BADFCS = 0x40, +}; -/* For IEEE80211_RADIOTAP_TX_FLAGS */ -#define IEEE80211_RADIOTAP_F_TX_FAIL 0x0001 /* failed due to excessive - * retries */ -#define IEEE80211_RADIOTAP_F_TX_CTS 0x0002 /* used cts 'protection' */ -#define IEEE80211_RADIOTAP_F_TX_RTS 0x0004 /* used rts/cts handshake */ -#define IEEE80211_RADIOTAP_F_TX_NOACK 0x0008 /* don't expect an ack */ +/* for IEEE80211_RADIOTAP_CHANNEL */ +enum ieee80211_radiotap_channel_flags { + IEEE80211_CHAN_CCK = 0x0020, + IEEE80211_CHAN_OFDM = 0x0040, + IEEE80211_CHAN_2GHZ = 0x0080, + IEEE80211_CHAN_5GHZ = 0x0100, + IEEE80211_CHAN_DYN = 0x0400, + IEEE80211_CHAN_HALF = 0x4000, + IEEE80211_CHAN_QUARTER = 0x8000, +}; +/* for IEEE80211_RADIOTAP_RX_FLAGS */ +enum ieee80211_radiotap_rx_flags { + IEEE80211_RADIOTAP_F_RX_BADPLCP = 0x0002, +}; -/* For 
IEEE80211_RADIOTAP_MCS */ -#define IEEE80211_RADIOTAP_MCS_HAVE_BW 0x01 -#define IEEE80211_RADIOTAP_MCS_HAVE_MCS 0x02 -#define IEEE80211_RADIOTAP_MCS_HAVE_GI 0x04 -#define IEEE80211_RADIOTAP_MCS_HAVE_FMT 0x08 -#define IEEE80211_RADIOTAP_MCS_HAVE_FEC 0x10 -#define IEEE80211_RADIOTAP_MCS_HAVE_STBC 0x20 +/* for IEEE80211_RADIOTAP_TX_FLAGS */ +enum ieee80211_radiotap_tx_flags { + IEEE80211_RADIOTAP_F_TX_FAIL = 0x0001, + IEEE80211_RADIOTAP_F_TX_CTS = 0x0002, + IEEE80211_RADIOTAP_F_TX_RTS = 0x0004, + IEEE80211_RADIOTAP_F_TX_NOACK = 0x0008, +}; -#define IEEE80211_RADIOTAP_MCS_BW_MASK 0x03 -#define IEEE80211_RADIOTAP_MCS_BW_20 0 -#define IEEE80211_RADIOTAP_MCS_BW_40 1 -#define IEEE80211_RADIOTAP_MCS_BW_20L 2 -#define IEEE80211_RADIOTAP_MCS_BW_20U 3 -#define IEEE80211_RADIOTAP_MCS_SGI 0x04 -#define IEEE80211_RADIOTAP_MCS_FMT_GF 0x08 -#define IEEE80211_RADIOTAP_MCS_FEC_LDPC 0x10 -#define IEEE80211_RADIOTAP_MCS_STBC_MASK 0x60 -#define IEEE80211_RADIOTAP_MCS_STBC_1 1 -#define IEEE80211_RADIOTAP_MCS_STBC_2 2 -#define IEEE80211_RADIOTAP_MCS_STBC_3 3 +/* for IEEE80211_RADIOTAP_MCS "have" flags */ +enum ieee80211_radiotap_mcs_have { + IEEE80211_RADIOTAP_MCS_HAVE_BW = 0x01, + IEEE80211_RADIOTAP_MCS_HAVE_MCS = 0x02, + IEEE80211_RADIOTAP_MCS_HAVE_GI = 0x04, + IEEE80211_RADIOTAP_MCS_HAVE_FMT = 0x08, + IEEE80211_RADIOTAP_MCS_HAVE_FEC = 0x10, + IEEE80211_RADIOTAP_MCS_HAVE_STBC = 0x20, +}; -#define IEEE80211_RADIOTAP_MCS_STBC_SHIFT 5 +enum ieee80211_radiotap_mcs_flags { + IEEE80211_RADIOTAP_MCS_BW_MASK = 0x03, + IEEE80211_RADIOTAP_MCS_BW_20 = 0, + IEEE80211_RADIOTAP_MCS_BW_40 = 1, + IEEE80211_RADIOTAP_MCS_BW_20L = 2, + IEEE80211_RADIOTAP_MCS_BW_20U = 3, + + IEEE80211_RADIOTAP_MCS_SGI = 0x04, + IEEE80211_RADIOTAP_MCS_FMT_GF = 0x08, + IEEE80211_RADIOTAP_MCS_FEC_LDPC = 0x10, + IEEE80211_RADIOTAP_MCS_STBC_MASK = 0x60, + IEEE80211_RADIOTAP_MCS_STBC_1 = 1, + IEEE80211_RADIOTAP_MCS_STBC_2 = 2, + IEEE80211_RADIOTAP_MCS_STBC_3 = 3, + IEEE80211_RADIOTAP_MCS_STBC_SHIFT = 5, +}; -/* For IEEE80211_RADIOTAP_AMPDU_STATUS */ -#define IEEE80211_RADIOTAP_AMPDU_REPORT_ZEROLEN 0x0001 -#define IEEE80211_RADIOTAP_AMPDU_IS_ZEROLEN 0x0002 -#define IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN 0x0004 -#define IEEE80211_RADIOTAP_AMPDU_IS_LAST 0x0008 -#define IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR 0x0010 -#define IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN 0x0020 +/* for IEEE80211_RADIOTAP_AMPDU_STATUS */ +enum ieee80211_radiotap_ampdu_flags { + IEEE80211_RADIOTAP_AMPDU_REPORT_ZEROLEN = 0x0001, + IEEE80211_RADIOTAP_AMPDU_IS_ZEROLEN = 0x0002, + IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN = 0x0004, + IEEE80211_RADIOTAP_AMPDU_IS_LAST = 0x0008, + IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR = 0x0010, + IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN = 0x0020, +}; -/* For IEEE80211_RADIOTAP_VHT */ -#define IEEE80211_RADIOTAP_VHT_KNOWN_STBC 0x0001 -#define IEEE80211_RADIOTAP_VHT_KNOWN_TXOP_PS_NA 0x0002 -#define IEEE80211_RADIOTAP_VHT_KNOWN_GI 0x0004 -#define IEEE80211_RADIOTAP_VHT_KNOWN_SGI_NSYM_DIS 0x0008 -#define IEEE80211_RADIOTAP_VHT_KNOWN_LDPC_EXTRA_OFDM_SYM 0x0010 -#define IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED 0x0020 -#define IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH 0x0040 -#define IEEE80211_RADIOTAP_VHT_KNOWN_GROUP_ID 0x0080 -#define IEEE80211_RADIOTAP_VHT_KNOWN_PARTIAL_AID 0x0100 +/* for IEEE80211_RADIOTAP_VHT */ +enum ieee80211_radiotap_vht_known { + IEEE80211_RADIOTAP_VHT_KNOWN_STBC = 0x0001, + IEEE80211_RADIOTAP_VHT_KNOWN_TXOP_PS_NA = 0x0002, + IEEE80211_RADIOTAP_VHT_KNOWN_GI = 0x0004, + IEEE80211_RADIOTAP_VHT_KNOWN_SGI_NSYM_DIS = 0x0008, + 
IEEE80211_RADIOTAP_VHT_KNOWN_LDPC_EXTRA_OFDM_SYM = 0x0010, + IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED = 0x0020, + IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH = 0x0040, + IEEE80211_RADIOTAP_VHT_KNOWN_GROUP_ID = 0x0080, + IEEE80211_RADIOTAP_VHT_KNOWN_PARTIAL_AID = 0x0100, +}; -#define IEEE80211_RADIOTAP_VHT_FLAG_STBC 0x01 -#define IEEE80211_RADIOTAP_VHT_FLAG_TXOP_PS_NA 0x02 -#define IEEE80211_RADIOTAP_VHT_FLAG_SGI 0x04 -#define IEEE80211_RADIOTAP_VHT_FLAG_SGI_NSYM_M10_9 0x08 -#define IEEE80211_RADIOTAP_VHT_FLAG_LDPC_EXTRA_OFDM_SYM 0x10 -#define IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED 0x20 +enum ieee80211_radiotap_vht_flags { + IEEE80211_RADIOTAP_VHT_FLAG_STBC = 0x01, + IEEE80211_RADIOTAP_VHT_FLAG_TXOP_PS_NA = 0x02, + IEEE80211_RADIOTAP_VHT_FLAG_SGI = 0x04, + IEEE80211_RADIOTAP_VHT_FLAG_SGI_NSYM_M10_9 = 0x08, + IEEE80211_RADIOTAP_VHT_FLAG_LDPC_EXTRA_OFDM_SYM = 0x10, + IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED = 0x20, +}; -#define IEEE80211_RADIOTAP_CODING_LDPC_USER0 0x01 -#define IEEE80211_RADIOTAP_CODING_LDPC_USER1 0x02 -#define IEEE80211_RADIOTAP_CODING_LDPC_USER2 0x04 -#define IEEE80211_RADIOTAP_CODING_LDPC_USER3 0x08 +enum ieee80211_radiotap_vht_coding { + IEEE80211_RADIOTAP_CODING_LDPC_USER0 = 0x01, + IEEE80211_RADIOTAP_CODING_LDPC_USER1 = 0x02, + IEEE80211_RADIOTAP_CODING_LDPC_USER2 = 0x04, + IEEE80211_RADIOTAP_CODING_LDPC_USER3 = 0x08, +}; -/* For IEEE80211_RADIOTAP_TIMESTAMP */ -#define IEEE80211_RADIOTAP_TIMESTAMP_UNIT_MASK 0x000F -#define IEEE80211_RADIOTAP_TIMESTAMP_UNIT_MS 0x0000 -#define IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US 0x0001 -#define IEEE80211_RADIOTAP_TIMESTAMP_UNIT_NS 0x0003 -#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_MASK 0x00F0 -#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_BEGIN_MDPU 0x0000 -#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_PLCP_SIG_ACQ 0x0010 -#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_EO_PPDU 0x0020 -#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_EO_MPDU 0x0030 -#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_UNKNOWN 0x00F0 +/* for IEEE80211_RADIOTAP_TIMESTAMP */ +enum ieee80211_radiotap_timestamp_unit_spos { + IEEE80211_RADIOTAP_TIMESTAMP_UNIT_MASK = 0x000F, + IEEE80211_RADIOTAP_TIMESTAMP_UNIT_MS = 0x0000, + IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US = 0x0001, + IEEE80211_RADIOTAP_TIMESTAMP_UNIT_NS = 0x0003, + IEEE80211_RADIOTAP_TIMESTAMP_SPOS_MASK = 0x00F0, + IEEE80211_RADIOTAP_TIMESTAMP_SPOS_BEGIN_MDPU = 0x0000, + IEEE80211_RADIOTAP_TIMESTAMP_SPOS_PLCP_SIG_ACQ = 0x0010, + IEEE80211_RADIOTAP_TIMESTAMP_SPOS_EO_PPDU = 0x0020, + IEEE80211_RADIOTAP_TIMESTAMP_SPOS_EO_MPDU = 0x0030, + IEEE80211_RADIOTAP_TIMESTAMP_SPOS_UNKNOWN = 0x00F0, +}; -#define IEEE80211_RADIOTAP_TIMESTAMP_FLAG_64BIT 0x00 -#define IEEE80211_RADIOTAP_TIMESTAMP_FLAG_32BIT 0x01 -#define IEEE80211_RADIOTAP_TIMESTAMP_FLAG_ACCURACY 0x02 +enum ieee80211_radiotap_timestamp_flags { + IEEE80211_RADIOTAP_TIMESTAMP_FLAG_64BIT = 0x00, + IEEE80211_RADIOTAP_TIMESTAMP_FLAG_32BIT = 0x01, + IEEE80211_RADIOTAP_TIMESTAMP_FLAG_ACCURACY = 0x02, +}; -/* helpers */ -static inline int ieee80211_get_radiotap_len(unsigned char *data) +/** + * ieee80211_get_radiotap_len - get radiotap header length + */ +static inline u16 ieee80211_get_radiotap_len(const char *data) { - struct ieee80211_radiotap_header *hdr = - (struct ieee80211_radiotap_header *)data; + struct ieee80211_radiotap_header *hdr = (void *)data; return get_unaligned_le16(&hdr->it_len); } -#endif /* IEEE80211_RADIOTAP_H */ +#endif /* __RADIOTAP_H */ diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h index 0fa4c324b713..f656f9051aca 100644 --- a/include/net/if_inet6.h +++ 
b/include/net/if_inet6.h @@ -205,7 +205,6 @@ struct inet6_dev { __s32 rs_interval; /* in jiffies */ __u8 rs_probes; - __u8 addr_gen_mode; unsigned long tstamp; /* ipv6InterfaceTable update timestamp */ struct rcu_head rcu; }; diff --git a/include/net/ife.h b/include/net/ife.h new file mode 100644 index 000000000000..2d87d6898b0a --- /dev/null +++ b/include/net/ife.h @@ -0,0 +1,51 @@ +#ifndef __NET_IFE_H +#define __NET_IFE_H + +#include <linux/etherdevice.h> +#include <linux/rtnetlink.h> +#include <linux/module.h> +#include <uapi/linux/ife.h> + +#if IS_ENABLED(CONFIG_NET_IFE) + +void *ife_encode(struct sk_buff *skb, u16 metalen); +void *ife_decode(struct sk_buff *skb, u16 *metalen); + +void *ife_tlv_meta_decode(void *skbdata, u16 *attrtype, u16 *dlen, u16 *totlen); +int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen, + const void *dval); + +void *ife_tlv_meta_next(void *skbdata); + +#else + +static inline void *ife_encode(struct sk_buff *skb, u16 metalen) +{ + return NULL; +} + +static inline void *ife_decode(struct sk_buff *skb, u16 *metalen) +{ + return NULL; +} + +static inline void *ife_tlv_meta_decode(void *skbdata, u16 *attrtype, u16 *dlen, + u16 *totlen) +{ + return NULL; +} + +static inline int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen, + const void *dval) +{ + return 0; +} + +static inline void *ife_tlv_meta_next(void *skbdata) +{ + return NULL; +} + +#endif + +#endif /* __NET_IFE_H */ diff --git a/include/net/inet6_connection_sock.h b/include/net/inet6_connection_sock.h index 3212b39b5bfc..8ec87b62257b 100644 --- a/include/net/inet6_connection_sock.h +++ b/include/net/inet6_connection_sock.h @@ -15,16 +15,11 @@ #include <linux/types.h> -struct inet_bind_bucket; struct request_sock; struct sk_buff; struct sock; struct sockaddr; -int inet6_csk_bind_conflict(const struct sock *sk, - const struct inet_bind_bucket *tb, bool relax, - bool soreuseport_ok); - struct dst_entry *inet6_csk_route_req(const struct sock *sk, struct flowi6 *fl6, const struct request_sock *req, u8 proto); diff --git a/include/net/inet_common.h b/include/net/inet_common.h index 5d683428fced..f39ae697347f 100644 --- a/include/net/inet_common.h +++ b/include/net/inet_common.h @@ -17,10 +17,11 @@ int inet_release(struct socket *sock); int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags); int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr, - int addr_len, int flags); + int addr_len, int flags, int is_sendmsg); int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags); -int inet_accept(struct socket *sock, struct socket *newsock, int flags); +int inet_accept(struct socket *sock, struct socket *newsock, int flags, + bool kern); int inet_sendmsg(struct socket *sock, struct msghdr *msg, size_t size); ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags); diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index 85ee3879499e..c7a577976bec 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -62,9 +62,6 @@ struct inet_connection_sock_af_ops { char __user *optval, int __user *optlen); #endif void (*addr2sockaddr)(struct sock *sk, struct sockaddr *); - int (*bind_conflict)(const struct sock *sk, - const struct inet_bind_bucket *tb, - bool relax, bool soreuseport_ok); void (*mtu_reduced)(struct sock *sk); }; @@ -144,6 +141,7 @@ struct inet_connection_sock { #define ICSK_TIME_PROBE0 3 /* Zero 
window probe timer */ #define ICSK_TIME_EARLY_RETRANS 4 /* Early retransmit timer */ #define ICSK_TIME_LOSS_PROBE 5 /* Tail loss probe timer */ +#define ICSK_TIME_REO_TIMEOUT 6 /* Reordering timer */ static inline struct inet_connection_sock *inet_csk(const struct sock *sk) { @@ -234,7 +232,8 @@ static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what, } if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0 || - what == ICSK_TIME_EARLY_RETRANS || what == ICSK_TIME_LOSS_PROBE) { + what == ICSK_TIME_EARLY_RETRANS || what == ICSK_TIME_LOSS_PROBE || + what == ICSK_TIME_REO_TIMEOUT) { icsk->icsk_pending = what; icsk->icsk_timeout = jiffies + when; sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout); @@ -259,11 +258,8 @@ inet_csk_rto_backoff(const struct inet_connection_sock *icsk, return (unsigned long)min_t(u64, when, max_when); } -struct sock *inet_csk_accept(struct sock *sk, int flags, int *err); +struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern); -int inet_csk_bind_conflict(const struct sock *sk, - const struct inet_bind_bucket *tb, bool relax, - bool soreuseport_ok); int inet_csk_get_port(struct sock *sk, unsigned short snum); struct dst_entry *inet_csk_route_req(const struct sock *sk, struct flowi4 *fl4, diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h index 909972aa3acd..5894730ec82a 100644 --- a/include/net/inet_frag.h +++ b/include/net/inet_frag.h @@ -164,13 +164,7 @@ static inline void add_frag_mem_limit(struct netns_frags *nf, int i) static inline unsigned int sum_frag_mem_limit(struct netns_frags *nf) { - unsigned int res; - - local_bh_disable(); - res = percpu_counter_sum_positive(&nf->mem); - local_bh_enable(); - - return res; + return percpu_counter_sum_positive(&nf->mem); } /* RFC 3168 support : diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h index 0574493e3899..1178931288cb 100644 --- a/include/net/inet_hashtables.h +++ b/include/net/inet_hashtables.h @@ -74,13 +74,21 @@ struct inet_ehash_bucket { * users logged onto your box, isn't it nice to know that new data * ports are created in O(1) time? I thought so. 
;-) -DaveM */ +#define FASTREUSEPORT_ANY 1 +#define FASTREUSEPORT_STRICT 2 + struct inet_bind_bucket { possible_net_t ib_net; unsigned short port; signed char fastreuse; signed char fastreuseport; kuid_t fastuid; - int num_owners; +#if IS_ENABLED(CONFIG_IPV6) + struct in6_addr fast_v6_rcv_saddr; +#endif + __be32 fast_rcv_saddr; + unsigned short fast_sk_family; + bool fast_ipv6_only; struct hlist_node node; struct hlist_head owners; }; @@ -203,10 +211,7 @@ void inet_hashinfo_init(struct inet_hashinfo *h); bool inet_ehash_insert(struct sock *sk, struct sock *osk); bool inet_ehash_nolisten(struct sock *sk, struct sock *osk); -int __inet_hash(struct sock *sk, struct sock *osk, - int (*saddr_same)(const struct sock *sk1, - const struct sock *sk2, - bool match_wildcard)); +int __inet_hash(struct sock *sk, struct sock *osk); int inet_hash(struct sock *sk); void inet_unhash(struct sock *sk); diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index c9cff977a7fb..aa95053dfc78 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -206,7 +206,11 @@ struct inet_sock { transparent:1, mc_all:1, nodefrag:1; - __u8 bind_address_no_port:1; + __u8 bind_address_no_port:1, + defer_connect:1; /* Indicates that fastopen_connect is set + * and cookie exists so we defer connect + * until first data frame is written + */ __u8 rcv_tos; __u8 convert_csum; int uc_index; diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h index c9b3eb70f340..6a75d67a30fd 100644 --- a/include/net/inet_timewait_sock.h +++ b/include/net/inet_timewait_sock.h @@ -29,16 +29,6 @@ #include <linux/atomic.h> -struct inet_hashinfo; - -struct inet_timewait_death_row { - atomic_t tw_count; - - struct inet_hashinfo *hashinfo ____cacheline_aligned_in_smp; - int sysctl_tw_recycle; - int sysctl_max_tw_buckets; -}; - struct inet_bind_bucket; /* @@ -125,8 +115,7 @@ static inline void inet_twsk_reschedule(struct inet_timewait_sock *tw, int timeo void inet_twsk_deschedule_put(struct inet_timewait_sock *tw); -void inet_twsk_purge(struct inet_hashinfo *hashinfo, - struct inet_timewait_death_row *twdr, int family); +void inet_twsk_purge(struct inet_hashinfo *hashinfo, int family); static inline struct net *twsk_net(const struct inet_timewait_sock *twsk) diff --git a/include/net/ip.h b/include/net/ip.h index ab6761a7c883..bf264a8db1ce 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -263,11 +263,21 @@ static inline bool sysctl_dev_name_is_allowed(const char *name) return strcmp(name, "default") != 0 && strcmp(name, "all") != 0; } +static inline int inet_prot_sock(struct net *net) +{ + return net->ipv4.sysctl_ip_prot_sock; +} + #else static inline int inet_is_local_reserved_port(struct net *net, int port) { return 0; } + +static inline int inet_prot_sock(struct net *net) +{ + return PROT_SOCK; +} #endif __be32 inet_current_timestamp(void); diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index a74e2aa40ef4..c979c878df1c 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -37,7 +37,9 @@ struct fib6_config { int fc_ifindex; u32 fc_flags; u32 fc_protocol; - u32 fc_type; /* only 8 bits are used */ + u16 fc_type; /* only 8 bits are used */ + u16 fc_delete_all_nh : 1, + __unused : 15; struct in6_addr fc_dst; struct in6_addr fc_src; diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index 5f376af377c7..368bb4024b78 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -211,14 +211,22 @@ struct fib_entry_notifier_info { u8 tos; u8 type; u32 tb_id; - u32 nlflags; +}; 
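
The notifier-info wrapper just closed above (and its sibling fib_nh_notifier_info plus the expanded fib_event_type enum that follow) feed the FIB notifier chain that switchdev-style drivers subscribe to. A minimal sketch of such a consumer is below; the example_* names are hypothetical, the dst/dst_len/fi fields come from the full struct definition not shown in this hunk, and the container_of() cast is valid only because 'info' is the first member, as the comment requires:

/* Hypothetical consumer of the FIB notifier chain. */
static int example_fib_event(struct notifier_block *nb,
			     unsigned long event, void *ptr)
{
	struct fib_notifier_info *info = ptr;
	struct fib_entry_notifier_info *fen_info;

	switch (event) {
	case FIB_EVENT_ENTRY_ADD:
	case FIB_EVENT_ENTRY_DEL:
		fen_info = container_of(info,
					struct fib_entry_notifier_info, info);
		/* program fen_info->dst/dst_len/fi into hardware here */
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_fib_nb = {
	.notifier_call = example_fib_event,
};

Registration then goes through register_fib_notifier(&example_fib_nb, ...), declared right after the enum.
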
+ +struct fib_nh_notifier_info { + struct fib_notifier_info info; /* must be first */ + struct fib_nh *fib_nh; }; enum fib_event_type { + FIB_EVENT_ENTRY_REPLACE, + FIB_EVENT_ENTRY_APPEND, FIB_EVENT_ENTRY_ADD, FIB_EVENT_ENTRY_DEL, FIB_EVENT_RULE_ADD, FIB_EVENT_RULE_DEL, + FIB_EVENT_NH_ADD, + FIB_EVENT_NH_DEL, }; int register_fib_notifier(struct notifier_block *nb, @@ -344,7 +352,6 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb); int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, u8 tos, int oif, struct net_device *dev, struct in_device *idev, u32 *itag); -void fib_select_default(const struct flowi4 *flp, struct fib_result *res); #ifdef CONFIG_IP_ROUTE_CLASSID static inline int fib_num_tclassid_users(struct net *net) { diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index e893fe43dd13..95056796657c 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -58,6 +58,7 @@ struct ip_tunnel_key { /* Flags for ip_tunnel_info mode. */ #define IP_TUNNEL_INFO_TX 0x01 /* represents tx tunnel parameters */ #define IP_TUNNEL_INFO_IPV6 0x02 /* key contains IPv6 addresses */ +#define IP_TUNNEL_INFO_BRIDGE 0x04 /* represents a bridged tunnel id */ /* Maximum tunnel options length. */ #define IP_TUNNEL_OPTS_MAX \ @@ -261,8 +262,8 @@ int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd); int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict); int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu); -struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *tot); +void ip_tunnel_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *tot); struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn, int link, __be16 flags, __be32 remote, __be32 local, diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index cd6018a9ee24..7bdfa7d78363 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -1421,7 +1421,7 @@ static inline void ip_vs_dest_put(struct ip_vs_dest *dest) static inline void ip_vs_dest_put_and_free(struct ip_vs_dest *dest) { - if (atomic_dec_return(&dest->refcnt) < 0) + if (atomic_dec_and_test(&dest->refcnt)) kfree(dest); } @@ -1554,10 +1554,12 @@ static inline void ip_vs_notrack(struct sk_buff *skb) struct nf_conn *ct = nf_ct_get(skb, &ctinfo); if (!ct || !nf_ct_is_untracked(ct)) { - nf_conntrack_put(skb->nfct); - skb->nfct = &nf_ct_untracked_get()->ct_general; - skb->nfctinfo = IP_CT_NEW; - nf_conntrack_get(skb->nfct); + struct nf_conn *untracked; + + nf_conntrack_put(&ct->ct_general); + untracked = nf_ct_untracked_get(); + nf_conntrack_get(&untracked->ct_general); + nf_ct_set(skb, untracked, IP_CT_NEW); } #endif } diff --git a/include/net/irda/timer.h b/include/net/irda/timer.h index cb2615ccf761..d784f242cf7b 100644 --- a/include/net/irda/timer.h +++ b/include/net/irda/timer.h @@ -59,7 +59,7 @@ struct lap_cb; * Slot timer must never exceed 85 ms, and must always be at least 25 ms, * suggested to 75-85 msec by IrDA lite. 
This doesn't work with a lot of * devices, and other stackes uses a lot more, so it's best we do it as well - * (Note : this is the default value and sysctl overides it - Jean II) + * (Note : this is the default value and sysctl overrides it - Jean II) */ #define SLOT_TIMEOUT (90*HZ/1000) diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h index e0f4109e64c6..2509728650bd 100644 --- a/include/net/iw_handler.h +++ b/include/net/iw_handler.h @@ -505,25 +505,8 @@ static inline int iwe_stream_event_len_adjust(struct iw_request_info *info, /* * Wrapper to add an Wireless Event to a stream of events. */ -static inline char * -iwe_stream_add_event(struct iw_request_info *info, char *stream, char *ends, - struct iw_event *iwe, int event_len) -{ - int lcp_len = iwe_stream_lcp_len(info); - - event_len = iwe_stream_event_len_adjust(info, event_len); - - /* Check if it's possible */ - if(likely((stream + event_len) < ends)) { - iwe->len = event_len; - /* Beware of alignement issues on 64 bits */ - memcpy(stream, (char *) iwe, IW_EV_LCP_PK_LEN); - memcpy(stream + lcp_len, &iwe->u, - event_len - lcp_len); - stream += event_len; - } - return stream; -} +char *iwe_stream_add_event(struct iw_request_info *info, char *stream, + char *ends, struct iw_event *iwe, int event_len); static inline char * iwe_stream_add_event_check(struct iw_request_info *info, char *stream, @@ -541,26 +524,8 @@ iwe_stream_add_event_check(struct iw_request_info *info, char *stream, * Wrapper to add an short Wireless Event containing a pointer to a * stream of events. */ -static inline char * -iwe_stream_add_point(struct iw_request_info *info, char *stream, char *ends, - struct iw_event *iwe, char *extra) -{ - int event_len = iwe_stream_point_len(info) + iwe->u.data.length; - int point_len = iwe_stream_point_len(info); - int lcp_len = iwe_stream_lcp_len(info); - - /* Check if it's possible */ - if(likely((stream + event_len) < ends)) { - iwe->len = event_len; - memcpy(stream, (char *) iwe, IW_EV_LCP_PK_LEN); - memcpy(stream + lcp_len, - ((char *) &iwe->u) + IW_EV_POINT_OFF, - IW_EV_POINT_PK_LEN - IW_EV_LCP_PK_LEN); - memcpy(stream + point_len, extra, iwe->u.data.length); - stream += event_len; - } - return stream; -} +char *iwe_stream_add_point(struct iw_request_info *info, char *stream, + char *ends, struct iw_event *iwe, char *extra); static inline char * iwe_stream_add_point_check(struct iw_request_info *info, char *stream, @@ -579,25 +544,8 @@ iwe_stream_add_point_check(struct iw_request_info *info, char *stream, * Be careful, this one is tricky to use properly : * At the first run, you need to have (value = event + IW_EV_LCP_LEN). 
*/ -static inline char * -iwe_stream_add_value(struct iw_request_info *info, char *event, char *value, - char *ends, struct iw_event *iwe, int event_len) -{ - int lcp_len = iwe_stream_lcp_len(info); - - /* Don't duplicate LCP */ - event_len -= IW_EV_LCP_LEN; - - /* Check if it's possible */ - if(likely((value + event_len) < ends)) { - /* Add new value */ - memcpy(value, &iwe->u, event_len); - value += event_len; - /* Patch LCP */ - iwe->len = value - event; - memcpy(event, (char *) iwe, lcp_len); - } - return value; -} +char *iwe_stream_add_value(struct iw_request_info *info, char *event, + char *value, char *ends, struct iw_event *iwe, + int event_len); #endif /* _IW_HANDLER_H */ diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h index 0388b9c5f5e2..ebfe237aad7e 100644 --- a/include/net/lwtunnel.h +++ b/include/net/lwtunnel.h @@ -33,7 +33,7 @@ struct lwtunnel_state { }; struct lwtunnel_encap_ops { - int (*build_state)(struct net_device *dev, struct nlattr *encap, + int (*build_state)(struct nlattr *encap, unsigned int family, const void *cfg, struct lwtunnel_state **ts); void (*destroy_state)(struct lwtunnel_state *lws); @@ -109,7 +109,7 @@ int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op, unsigned int num); int lwtunnel_valid_encap_type(u16 encap_type); int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len); -int lwtunnel_build_state(struct net_device *dev, u16 encap_type, +int lwtunnel_build_state(u16 encap_type, struct nlattr *encap, unsigned int family, const void *cfg, struct lwtunnel_state **lws); @@ -184,7 +184,7 @@ static inline int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len) return 0; } -static inline int lwtunnel_build_state(struct net_device *dev, u16 encap_type, +static inline int lwtunnel_build_state(u16 encap_type, struct nlattr *encap, unsigned int family, const void *cfg, struct lwtunnel_state **lws) diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 5345d358a510..a3bab3c5ecfb 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -147,7 +147,6 @@ enum ieee80211_ac_numbers { IEEE80211_AC_BE = 2, IEEE80211_AC_BK = 3, }; -#define IEEE80211_NUM_ACS 4 /** * struct ieee80211_tx_queue_params - transmit queue configuration @@ -1018,7 +1017,7 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info) * @RX_FLAG_DECRYPTED: This frame was decrypted in hardware. * @RX_FLAG_MMIC_STRIPPED: the Michael MIC is stripped off this frame, * verification has been done by the hardware. - * @RX_FLAG_IV_STRIPPED: The IV/ICV are stripped from this frame. + * @RX_FLAG_IV_STRIPPED: The IV and ICV are stripped from this frame. * If this flag is set, the stack cannot do any replay detection * hence the driver or hardware will have to do that. * @RX_FLAG_PN_VALIDATED: Currently only valid for CCMP/GCMP frames, this @@ -1089,6 +1088,8 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info) * @RX_FLAG_ALLOW_SAME_PN: Allow the same PN as same packet before. * This is used for AMSDU subframes which can have the same PN as * the first subframe. + * @RX_FLAG_ICV_STRIPPED: The ICV is stripped from this frame. CRC checking must + * be done in the hardware. 
*/ enum mac80211_rx_flags { RX_FLAG_MMIC_ERROR = BIT(0), @@ -1124,6 +1125,7 @@ enum mac80211_rx_flags { RX_FLAG_RADIOTAP_VENDOR_DATA = BIT(31), RX_FLAG_MIC_STRIPPED = BIT_ULL(32), RX_FLAG_ALLOW_SAME_PN = BIT_ULL(33), + RX_FLAG_ICV_STRIPPED = BIT_ULL(34), }; #define RX_FLAG_STBC_SHIFT 26 @@ -1766,15 +1768,6 @@ struct ieee80211_sta_rates { * @max_amsdu_subframes: indicates the maximal number of MSDUs in a single * A-MSDU. Taken from the Extended Capabilities element. 0 means * unlimited. - * @max_amsdu_len: indicates the maximal length of an A-MSDU in bytes. This - * field is always valid for packets with a VHT preamble. For packets - * with a HT preamble, additional limits apply: - * + If the skb is transmitted as part of a BA agreement, the - * A-MSDU maximal size is min(max_amsdu_len, 4065) bytes. - * + If the skb is not part of a BA aggreement, the A-MSDU maximal - * size is min(max_amsdu_len, 7935) bytes. - * Both additional HT limits must be enforced by the low level driver. - * This is defined by the spec (IEEE 802.11-2012 section 8.3.2.2 NOTE 2). * @support_p2p_ps: indicates whether the STA supports P2P PS mechanism or not. * @max_rc_amsdu_len: Maximum A-MSDU size in bytes recommended by rate control. * @txq: per-TID data TX queues (if driver uses the TXQ abstraction) @@ -1797,6 +1790,22 @@ struct ieee80211_sta { bool tdls_initiator; bool mfp; u8 max_amsdu_subframes; + + /** + * @max_amsdu_len: + * indicates the maximal length of an A-MSDU in bytes. + * This field is always valid for packets with a VHT preamble. + * For packets with a HT preamble, additional limits apply: + * + * * If the skb is transmitted as part of a BA agreement, the + * A-MSDU maximal size is min(max_amsdu_len, 4065) bytes. + * * If the skb is not part of a BA agreement, the A-MSDU maximal + * size is min(max_amsdu_len, 7935) bytes. + * + * Both additional HT limits must be enforced by the low level + * driver. This is defined by the spec (IEEE 802.11-2012 section + * 8.3.2.2 NOTE 2). + */ u16 max_amsdu_len; bool support_p2p_ps; u16 max_rc_amsdu_len; @@ -3201,26 +3210,6 @@ enum ieee80211_reconfig_type { * Returns non-zero if this device sent the last beacon. * The callback can sleep. * - * @ampdu_action: Perform a certain A-MPDU action - * The RA/TID combination determines the destination and TID we want - * the ampdu action to be performed for. The action is defined through - * ieee80211_ampdu_mlme_action. - * When the action is set to %IEEE80211_AMPDU_TX_OPERATIONAL the driver - * may neither send aggregates containing more subframes than @buf_size - * nor send aggregates in a way that lost frames would exceed the - * buffer size. If just limiting the aggregate size, this would be - * possible with a buf_size of 8: - * - TX: 1.....7 - * - RX: 2....7 (lost frame #1) - * - TX: 8..1... - * which is invalid since #1 was now re-transmitted well past the - * buffer size of 8. Correct ways to retransmit #1 would be: - * - TX: 1 or 18 or 81 - * Even "189" would be wrong since 1 could be lost again. - * - * Returns a negative error code on failure. - * The callback can sleep. - * - * @get_survey: Return per-channel survey information * * @rfkill_poll: Poll rfkill hardware state. If you need this, you also @@ -3403,7 +3392,7 @@ enum ieee80211_reconfig_type { * since there won't be any time to beacon before the switch anyway. * @pre_channel_switch: This is an optional callback that is called * before a channel switch procedure is started (ie. 
when a STA - * gets a CSA or an userspace initiated channel-switch), allowing + * gets a CSA or a userspace initiated channel-switch), allowing * the driver to prepare for the channel switch. * @post_channel_switch: This is an optional callback that is called * after a channel switch procedure is completed, allowing the @@ -3573,6 +3562,35 @@ struct ieee80211_ops { s64 offset); void (*reset_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); int (*tx_last_beacon)(struct ieee80211_hw *hw); + + /** + * @ampdu_action: + * Perform a certain A-MPDU action. + * The RA/TID combination determines the destination and TID we want + * the ampdu action to be performed for. The action is defined through + * ieee80211_ampdu_mlme_action. + * When the action is set to %IEEE80211_AMPDU_TX_OPERATIONAL the driver + * may neither send aggregates containing more subframes than @buf_size + * nor send aggregates in a way that lost frames would exceed the + * buffer size. If just limiting the aggregate size, this would be + * possible with a buf_size of 8: + * + * - ``TX: 1.....7`` + * - ``RX: 2....7`` (lost frame #1) + * - ``TX: 8..1...`` + * + * which is invalid since #1 was now re-transmitted well past the + * buffer size of 8. Correct ways to retransmit #1 would be: + * + * - ``TX: 1 or`` + * - ``TX: 18 or`` + * - ``TX: 81`` + * + * Even ``189`` would be wrong since 1 could be lost again. + * + * Returns a negative error code on failure. + * The callback can sleep. + */ int (*ampdu_action)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_ampdu_params *params); @@ -5260,6 +5278,7 @@ void ieee80211_resume_disconnect(struct ieee80211_vif *vif); * * @vif: &struct ieee80211_vif pointer from the add_interface callback. * @rssi_event: the RSSI trigger event type + * @rssi_level: new RSSI level value or 0 if not available * @gfp: context flags * * When the %IEEE80211_VIF_SUPPORTS_CQM_RSSI is set, and a connection quality @@ -5268,6 +5287,7 @@ void ieee80211_resume_disconnect(struct ieee80211_vif *vif); */ void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif, enum nl80211_cqm_rssi_threshold_event rssi_event, + s32 rssi_level, gfp_t gfp); /** diff --git a/include/net/ndisc.h b/include/net/ndisc.h index d562a2fe4860..8a0214654b6b 100644 --- a/include/net/ndisc.h +++ b/include/net/ndisc.h @@ -391,6 +391,23 @@ static inline struct neighbour *__ipv6_neigh_lookup(struct net_device *dev, cons return n; } +static inline void __ipv6_confirm_neigh(struct net_device *dev, + const void *pkey) +{ + struct neighbour *n; + + rcu_read_lock_bh(); + n = __ipv6_neigh_lookup_noref(dev, pkey); + if (n) { + unsigned long now = jiffies; + + /* avoid dirtying neighbour */ + if (n->confirmed != now) + n->confirmed = now; + } + rcu_read_unlock_bh(); +} + int ndisc_init(void); int ndisc_late_init(void); diff --git a/include/net/neighbour.h b/include/net/neighbour.h index 8b683841e574..5ebf69491160 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -468,6 +468,16 @@ static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb return dev_queue_xmit(skb); } +static inline int neigh_output(struct neighbour *n, struct sk_buff *skb) +{ + const struct hh_cache *hh = &n->hh; + + if ((n->nud_state & NUD_CONNECTED) && hh->hh_len) + return neigh_hh_output(hh, skb); + else + return n->output(n, skb); +} + static inline struct neighbour * __neigh_lookup(struct neigh_table *tbl, const void *pkey, struct net_device *dev, int creat) { diff --git 
a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h index 919e4e8af327..6ff32815641b 100644 --- a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h +++ b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h @@ -14,6 +14,7 @@ extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4; extern struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4; extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4; +extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4; extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp; #ifdef CONFIG_NF_CT_PROTO_DCCP extern struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4; diff --git a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h index eaea968f8657..c59b82456f89 100644 --- a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h +++ b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h @@ -5,6 +5,7 @@ extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6; extern struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6; extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6; +extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6; extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6; #ifdef CONFIG_NF_CT_PROTO_DCCP extern struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6; diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index 5916aa9ab3f0..f540f9ad2af4 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -34,6 +34,7 @@ union nf_conntrack_proto { struct ip_ct_sctp sctp; struct ip_ct_tcp tcp; struct nf_ct_gre gre; + unsigned int tmpl_padto; }; union nf_conntrack_expect_proto { @@ -75,7 +76,7 @@ struct nf_conn { /* Usage count in here is 1 for hash table, 1 per skb, * plus 1 for any connection(s) we are `master' for * - * Hint, SKB address this struct and refcnt via skb->nfct and + * Hint, SKB address this struct and refcnt via skb->_nfct and * helpers nf_conntrack_get() and nf_conntrack_put(). * Helper nf_ct_put() equals nf_conntrack_put() by dec refcnt, * beware nf_ct_get() is different and don't inc refcnt. @@ -162,12 +163,16 @@ void nf_conntrack_alter_reply(struct nf_conn *ct, int nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple, const struct nf_conn *ignored_conntrack); +#define NFCT_INFOMASK 7UL +#define NFCT_PTRMASK ~(NFCT_INFOMASK) + /* Return conntrack_info and tuple hash for given skb. 
*/ static inline struct nf_conn * nf_ct_get(const struct sk_buff *skb, enum ip_conntrack_info *ctinfo) { - *ctinfo = skb->nfctinfo; - return (struct nf_conn *)skb->nfct; + *ctinfo = skb->_nfct & NFCT_INFOMASK; + + return (struct nf_conn *)(skb->_nfct & NFCT_PTRMASK); } /* decrement reference count on a conntrack */ @@ -341,6 +346,12 @@ struct nf_conn *nf_ct_tmpl_alloc(struct net *net, gfp_t flags); void nf_ct_tmpl_free(struct nf_conn *tmpl); +static inline void +nf_ct_set(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info info) +{ + skb->_nfct = (unsigned long)ct | info; +} + #define NF_CT_STAT_INC(net, count) __this_cpu_inc((net)->ct.stat->count) #define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count) #define NF_CT_STAT_ADD_ATOMIC(net, count, v) this_cpu_add((net)->ct.stat->count, (v)) diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h index 62e17d1319ff..84ec7ca5f195 100644 --- a/include/net/netfilter/nf_conntrack_core.h +++ b/include/net/netfilter/nf_conntrack_core.h @@ -62,7 +62,7 @@ int __nf_conntrack_confirm(struct sk_buff *skb); /* Confirm a connection: returns NF_DROP if packet must be dropped. */ static inline int nf_conntrack_confirm(struct sk_buff *skb) { - struct nf_conn *ct = (struct nf_conn *)skb->nfct; + struct nf_conn *ct = (struct nf_conn *)skb_nfct(skb); int ret = NF_ACCEPT; if (ct && !nf_ct_is_untracked(ct)) { diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h index e7b836590f0b..85e993e278d5 100644 --- a/include/net/netfilter/nf_conntrack_l4proto.h +++ b/include/net/netfilter/nf_conntrack_l4proto.h @@ -55,7 +55,7 @@ struct nf_conntrack_l4proto { void (*destroy)(struct nf_conn *ct); int (*error)(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, - unsigned int dataoff, enum ip_conntrack_info *ctinfo, + unsigned int dataoff, u_int8_t pf, unsigned int hooknum); /* Print out the per-protocol part of the tuple. Return like seq_* */ diff --git a/include/net/netfilter/nf_log.h b/include/net/netfilter/nf_log.h index 450f87f95415..42e0696f38d8 100644 --- a/include/net/netfilter/nf_log.h +++ b/include/net/netfilter/nf_log.h @@ -51,6 +51,9 @@ struct nf_logger { struct module *me; }; +/* sysctl_nf_log_all_netns - allow LOG target in all network namespaces */ +extern int sysctl_nf_log_all_netns; + /* Function to register/unregister log function. 
*/ int nf_log_register(u_int8_t pf, struct nf_logger *logger); void nf_log_unregister(struct nf_logger *logger); diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 7dfdb517f0be..2aa8a9d80fbe 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -203,6 +203,7 @@ struct nft_set_elem { struct nft_set; struct nft_set_iter { u8 genmask; + bool flush; unsigned int count; unsigned int skip; int err; @@ -243,11 +244,13 @@ enum nft_set_class { * characteristics * * @size: required memory - * @class: lookup performance class + * @lookup: lookup performance class + * @space: memory class */ struct nft_set_estimate { unsigned int size; - enum nft_set_class class; + enum nft_set_class lookup; + enum nft_set_class space; }; struct nft_set_ext; @@ -260,7 +263,7 @@ struct nft_expr; * @insert: insert new element into set * @activate: activate new element in the next generation * @deactivate: lookup for element and deactivate it in the next generation - * @deactivate_one: deactivate element in the next generation + * @flush: deactivate element in the next generation * @remove: remove element from set * @walk: iterate over all set elemeennts * @privsize: function to return size of set private data @@ -295,10 +298,11 @@ struct nft_set_ops { void * (*deactivate)(const struct net *net, const struct nft_set *set, const struct nft_set_elem *elem); - bool (*deactivate_one)(const struct net *net, - const struct nft_set *set, - void *priv); - void (*remove)(const struct nft_set *set, + bool (*flush)(const struct net *net, + const struct nft_set *set, + void *priv); + void (*remove)(const struct net *net, + const struct nft_set *set, const struct nft_set_elem *elem); void (*walk)(const struct nft_ctx *ctx, struct nft_set *set, @@ -984,9 +988,9 @@ struct nft_object *nf_tables_obj_lookup(const struct nft_table *table, const struct nlattr *nla, u32 objtype, u8 genmask); -int nft_obj_notify(struct net *net, struct nft_table *table, - struct nft_object *obj, u32 portid, u32 seq, - int event, int family, int report, gfp_t gfp); +void nft_obj_notify(struct net *net, struct nft_table *table, + struct nft_object *obj, u32 portid, u32 seq, + int event, int family, int report, gfp_t gfp); /** * struct nft_object_type - stateful object type @@ -1198,10 +1202,13 @@ struct nft_trans { struct nft_trans_rule { struct nft_rule *rule; + u32 rule_id; }; #define nft_trans_rule(trans) \ (((struct nft_trans_rule *)trans->data)->rule) +#define nft_trans_rule_id(trans) \ + (((struct nft_trans_rule *)trans->data)->rule_id) struct nft_trans_set { struct nft_set *set; diff --git a/include/net/netlink.h b/include/net/netlink.h index d3938f11ae52..b239fcd33d80 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -229,6 +229,7 @@ struct nl_info { struct nlmsghdr *nlh; struct net *nl_net; u32 portid; + bool skip_notify; }; int netlink_rcv_skb(struct sk_buff *skb, diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h index cf799fc3fdec..17724c62de97 100644 --- a/include/net/netns/conntrack.h +++ b/include/net/netns/conntrack.h @@ -69,19 +69,6 @@ struct nf_sctp_net { }; #endif -#ifdef CONFIG_NF_CT_PROTO_UDPLITE -enum udplite_conntrack { - UDPLITE_CT_UNREPLIED, - UDPLITE_CT_REPLIED, - UDPLITE_CT_MAX -}; - -struct nf_udplite_net { - struct nf_proto_net pn; - unsigned int timeouts[UDPLITE_CT_MAX]; -}; -#endif - struct nf_ip_net { struct nf_generic_net generic; struct nf_tcp_net tcp; @@ -94,9 +81,6 @@ struct nf_ip_net { #ifdef 
CONFIG_NF_CT_PROTO_SCTP struct nf_sctp_net sctp; #endif -#ifdef CONFIG_NF_CT_PROTO_UDPLITE - struct nf_udplite_net udplite; -#endif }; struct ct_pcpu { diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index 0378e88f6fd3..622d2da27135 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -27,6 +27,16 @@ struct ping_group_range { kgid_t range[2]; }; +struct inet_hashinfo; + +struct inet_timewait_death_row { + atomic_t tw_count; + + struct inet_hashinfo *hashinfo ____cacheline_aligned_in_smp; + int sysctl_tw_recycle; + int sysctl_max_tw_buckets; +}; + struct netns_ipv4 { #ifdef CONFIG_SYSCTL struct ctl_table_header *forw_hdr; @@ -111,6 +121,12 @@ struct netns_ipv4 { int sysctl_tcp_fin_timeout; unsigned int sysctl_tcp_notsent_lowat; int sysctl_tcp_tw_reuse; + struct inet_timewait_death_row tcp_death_row; + int sysctl_max_syn_backlog; + +#ifdef CONFIG_NET_L3_MASTER_DEV + int sysctl_udp_l3mdev_accept; +#endif int sysctl_igmp_max_memberships; int sysctl_igmp_max_msf; @@ -123,6 +139,7 @@ struct netns_ipv4 { #ifdef CONFIG_SYSCTL unsigned long *sysctl_local_reserved_ports; + int sysctl_ip_prot_sock; #endif #ifdef CONFIG_IP_MROUTE diff --git a/include/net/netns/sctp.h b/include/net/netns/sctp.h index c501d67172b1..b7871d018354 100644 --- a/include/net/netns/sctp.h +++ b/include/net/netns/sctp.h @@ -118,6 +118,9 @@ struct netns_sctp { /* Flag to indicate if PR-SCTP is enabled. */ int prsctp_enable; + /* Flag to indicate if PR-CONFIG is enabled. */ + int reconf_enable; + /* Flag to idicate if SCTP-AUTH is enabled */ int auth_enable; diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index f0a051480c6c..269fd78bb0ae 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -17,6 +17,14 @@ struct tcf_walker { int register_tcf_proto_ops(struct tcf_proto_ops *ops); int unregister_tcf_proto_ops(struct tcf_proto_ops *ops); +#ifdef CONFIG_NET_CLS +void tcf_destroy_chain(struct tcf_proto __rcu **fl); +#else +static inline void tcf_destroy_chain(struct tcf_proto __rcu **fl) +{ +} +#endif + static inline unsigned long __cls_set_class(unsigned long *clp, unsigned long cl) { @@ -473,6 +481,11 @@ static inline bool tc_flags_valid(u32 flags) return true; } +static inline bool tc_in_hw(u32 flags) +{ + return (flags & TCA_CLS_FLAGS_IN_HW) ? 
true : false; +} + enum tc_fl_command { TC_CLSFLOWER_REPLACE, TC_CLSFLOWER_DESTROY, @@ -481,6 +494,7 @@ enum tc_fl_command { struct tc_cls_flower_offload { enum tc_fl_command command; + u32 prio; unsigned long cookie; struct flow_dissector *dissector; struct fl_flow_key *mask; @@ -515,4 +529,12 @@ struct tc_cls_bpf_offload { u32 gen_flags; }; + +/* This structure holds cookie structure that is passed from user + * to the kernel for actions and classifiers + */ +struct tc_cookie { + u8 *data; + u32 len; +}; #endif diff --git a/include/net/psample.h b/include/net/psample.h new file mode 100644 index 000000000000..8888b0e1a82e --- /dev/null +++ b/include/net/psample.h @@ -0,0 +1,36 @@ +#ifndef __NET_PSAMPLE_H +#define __NET_PSAMPLE_H + +#include <uapi/linux/psample.h> +#include <linux/module.h> +#include <linux/list.h> + +struct psample_group { + struct list_head list; + struct net *net; + u32 group_num; + u32 refcount; + u32 seq; +}; + +struct psample_group *psample_group_get(struct net *net, u32 group_num); +void psample_group_put(struct psample_group *group); + +#if IS_ENABLED(CONFIG_PSAMPLE) + +void psample_sample_packet(struct psample_group *group, struct sk_buff *skb, + u32 trunc_size, int in_ifindex, int out_ifindex, + u32 sample_rate); + +#else + +static inline void psample_sample_packet(struct psample_group *group, + struct sk_buff *skb, u32 trunc_size, + int in_ifindex, int out_ifindex, + u32 sample_rate) +{ +} + +#endif + +#endif /* __NET_PSAMPLE_H */ diff --git a/include/net/request_sock.h b/include/net/request_sock.h index 6ebe13eb1c4c..a12a5d25b27e 100644 --- a/include/net/request_sock.h +++ b/include/net/request_sock.h @@ -1,7 +1,7 @@ /* * NET Generic infrastructure for Network protocols. * - * Definitions for request_sock + * Definitions for request_sock * * Authors: Arnaldo Carvalho de Melo <acme@conectiva.com.br> * @@ -123,8 +123,6 @@ static inline void reqsk_put(struct request_sock *req) reqsk_free(req); } -extern int sysctl_max_syn_backlog; - /* * For a TCP Fast Open listener - * lock - protects the access to all the reqsk, which is co-owned by diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h index 4113916cc1bb..106de5f7bf06 100644 --- a/include/net/rtnetlink.h +++ b/include/net/rtnetlink.h @@ -139,6 +139,10 @@ struct rtnl_af_ops { const struct nlattr *attr); int (*set_link_af)(struct net_device *dev, const struct nlattr *attr); + + int (*fill_stats_af)(struct sk_buff *skb, + const struct net_device *dev); + size_t (*get_stats_af_size)(const struct net_device *dev); }; void __rtnl_af_unregister(struct rtnl_af_ops *ops); diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 498f81b229a4..aeec4086afb2 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -405,19 +405,35 @@ struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue, const struct Qdisc_ops *ops, u32 parentid); void __qdisc_calculate_pkt_len(struct sk_buff *skb, const struct qdisc_size_table *stab); -bool tcf_destroy(struct tcf_proto *tp, bool force); -void tcf_destroy_chain(struct tcf_proto __rcu **fl); int skb_do_redirect(struct sk_buff *); +static inline void skb_reset_tc(struct sk_buff *skb) +{ +#ifdef CONFIG_NET_CLS_ACT + skb->tc_redirected = 0; +#endif +} + static inline bool skb_at_tc_ingress(const struct sk_buff *skb) { #ifdef CONFIG_NET_CLS_ACT - return G_TC_AT(skb->tc_verd) & AT_INGRESS; + return skb->tc_at_ingress; #else return false; #endif } +static inline bool skb_skip_tc_classify(struct sk_buff *skb) +{ +#ifdef CONFIG_NET_CLS_ACT + if 
(skb->tc_skip_classify) { + skb->tc_skip_classify = 0; + return true; + } +#endif + return false; +} + /* Reset all TX qdiscs greater then index of a device. */ static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i) { diff --git a/include/net/scm.h b/include/net/scm.h index 59fa93c01d2a..142ea9e7a6d0 100644 --- a/include/net/scm.h +++ b/include/net/scm.h @@ -3,6 +3,7 @@ #include <linux/limits.h> #include <linux/net.h> +#include <linux/cred.h> #include <linux/security.h> #include <linux/pid.h> #include <linux/nsproxy.h> diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h index 5b847e49f7e9..b07a745ab69f 100644 --- a/include/net/sctp/constants.h +++ b/include/net/sctp/constants.h @@ -60,11 +60,14 @@ enum { SCTP_DEFAULT_INSTREAMS = SCTP_MAX_STREAM }; #define SCTP_NUM_PRSCTP_CHUNK_TYPES 1 +#define SCTP_NUM_RECONF_CHUNK_TYPES 1 + #define SCTP_NUM_AUTH_CHUNK_TYPES 1 #define SCTP_NUM_CHUNK_TYPES (SCTP_NUM_BASE_CHUNK_TYPES + \ SCTP_NUM_ADDIP_CHUNK_TYPES +\ SCTP_NUM_PRSCTP_CHUNK_TYPES +\ + SCTP_NUM_RECONF_CHUNK_TYPES +\ SCTP_NUM_AUTH_CHUNK_TYPES) /* These are the different flavours of event. */ @@ -90,6 +93,7 @@ typedef enum { SCTP_EVENT_TIMEOUT_T4_RTO, SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD, SCTP_EVENT_TIMEOUT_HEARTBEAT, + SCTP_EVENT_TIMEOUT_RECONF, SCTP_EVENT_TIMEOUT_SACK, SCTP_EVENT_TIMEOUT_AUTOCLOSE, } sctp_event_timeout_t; @@ -113,9 +117,10 @@ typedef enum { SCTP_PRIMITIVE_SEND, SCTP_PRIMITIVE_REQUESTHEARTBEAT, SCTP_PRIMITIVE_ASCONF, + SCTP_PRIMITIVE_RECONF, } sctp_event_primitive_t; -#define SCTP_EVENT_PRIMITIVE_MAX SCTP_PRIMITIVE_ASCONF +#define SCTP_EVENT_PRIMITIVE_MAX SCTP_PRIMITIVE_RECONF #define SCTP_NUM_PRIMITIVE_TYPES (SCTP_EVENT_PRIMITIVE_MAX + 1) /* We define here a utility type for manipulating subtypes. 
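
The new SCTP_EVENT_TIMEOUT_RECONF slot added above pairs with the sctp_generate_reconf_event() timer handler and the sctp_sf_send_reconf state function declared in sm.h further down. As a hedged sketch of how such a per-transport timer would funnel into the state machine (the example_ name is hypothetical, and a real handler must additionally take the socket lock and rearm the timer when the socket is owned by the user):

/* Sketch: expire a per-transport reconf timer into sctp_do_sm(). */
static void example_reconf_timeout(unsigned long data)
{
	struct sctp_transport *t = (struct sctp_transport *)data;
	struct sctp_association *asoc = t->asoc;

	sctp_do_sm(sock_net(asoc->base.sk), SCTP_EVENT_T_TIMEOUT,
		   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_RECONF),
		   asoc->state, asoc->ep, asoc, t, GFP_ATOMIC);
}
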
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index d8833a86cd7e..1f71ee5ab518 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -141,6 +141,8 @@ int sctp_primitive_ABORT(struct net *, struct sctp_association *, void *arg); int sctp_primitive_SEND(struct net *, struct sctp_association *, void *arg); int sctp_primitive_REQUESTHEARTBEAT(struct net *, struct sctp_association *, void *arg); int sctp_primitive_ASCONF(struct net *, struct sctp_association *, void *arg); +int sctp_primitive_RECONF(struct net *net, struct sctp_association *asoc, + void *arg); /* * sctp/input.c @@ -192,6 +194,15 @@ void sctp_remaddr_proc_exit(struct net *net); int sctp_offload_init(void); /* + * sctp/stream.c + */ +int sctp_send_reset_streams(struct sctp_association *asoc, + struct sctp_reset_streams *params); +int sctp_send_reset_assoc(struct sctp_association *asoc); +int sctp_send_add_streams(struct sctp_association *asoc, + struct sctp_add_streams *params); + +/* * Module global variables */ @@ -283,7 +294,6 @@ extern atomic_t sctp_dbg_objcnt_chunk; extern atomic_t sctp_dbg_objcnt_bind_addr; extern atomic_t sctp_dbg_objcnt_bind_bucket; extern atomic_t sctp_dbg_objcnt_addr; -extern atomic_t sctp_dbg_objcnt_ssnmap; extern atomic_t sctp_dbg_objcnt_datamsg; extern atomic_t sctp_dbg_objcnt_keys; @@ -586,10 +596,10 @@ static inline void sctp_v4_map_v6(union sctp_addr *addr) */ static inline struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t) { - if (t->dst && !dst_check(t->dst, t->dst_cookie)) { - dst_release(t->dst); - t->dst = NULL; - } + if (t->dst && (!dst_check(t->dst, t->dst_cookie) || + t->pathmtu != max_t(size_t, SCTP_TRUNC4(dst_mtu(t->dst)), + SCTP_DEFAULT_MINSEGMENT))) + sctp_transport_dst_release(t); return t->dst; } diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index ca6c971dd74a..b6f682ec184a 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h @@ -135,6 +135,7 @@ sctp_state_fn_t sctp_sf_do_8_5_1_E_sa; sctp_state_fn_t sctp_sf_cookie_echoed_err; sctp_state_fn_t sctp_sf_do_asconf; sctp_state_fn_t sctp_sf_do_asconf_ack; +sctp_state_fn_t sctp_sf_do_reconf; sctp_state_fn_t sctp_sf_do_9_2_reshutack; sctp_state_fn_t sctp_sf_eat_fwd_tsn; sctp_state_fn_t sctp_sf_eat_fwd_tsn_fast; @@ -157,6 +158,7 @@ sctp_state_fn_t sctp_sf_error_shutdown; sctp_state_fn_t sctp_sf_ignore_primitive; sctp_state_fn_t sctp_sf_do_prm_requestheartbeat; sctp_state_fn_t sctp_sf_do_prm_asconf; +sctp_state_fn_t sctp_sf_do_prm_reconf; /* Prototypes for other event state functions. */ sctp_state_fn_t sctp_sf_do_no_pending_tsn; @@ -167,6 +169,7 @@ sctp_state_fn_t sctp_sf_cookie_wait_icmp_abort; /* Prototypes for timeout event state functions. 
*/ sctp_state_fn_t sctp_sf_do_6_3_3_rtx; +sctp_state_fn_t sctp_sf_send_reconf; sctp_state_fn_t sctp_sf_do_6_2_sack; sctp_state_fn_t sctp_sf_autoclose_timer_expire; @@ -259,10 +262,38 @@ struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc, __u32 new_cum_tsn, size_t nstreams, struct sctp_fwdtsn_skip *skiplist); struct sctp_chunk *sctp_make_auth(const struct sctp_association *asoc); - +struct sctp_chunk *sctp_make_strreset_req( + const struct sctp_association *asoc, + __u16 stream_num, __u16 *stream_list, + bool out, bool in); +struct sctp_chunk *sctp_make_strreset_tsnreq( + const struct sctp_association *asoc); +struct sctp_chunk *sctp_make_strreset_addstrm( + const struct sctp_association *asoc, + __u16 out, __u16 in); +struct sctp_chunk *sctp_make_strreset_resp( + const struct sctp_association *asoc, + __u32 result, __u32 sn); +struct sctp_chunk *sctp_make_strreset_tsnresp( + struct sctp_association *asoc, + __u32 result, __u32 sn, + __u32 sender_tsn, __u32 receiver_tsn); +bool sctp_verify_reconf(const struct sctp_association *asoc, + struct sctp_chunk *chunk, + struct sctp_paramhdr **errp); void sctp_chunk_assign_tsn(struct sctp_chunk *); void sctp_chunk_assign_ssn(struct sctp_chunk *); +/* Prototypes for stream-processing functions. */ +struct sctp_chunk *sctp_process_strreset_outreq( + struct sctp_association *asoc, + union sctp_params param, + struct sctp_ulpevent **evp); +struct sctp_chunk *sctp_process_strreset_inreq( + struct sctp_association *asoc, + union sctp_params param, + struct sctp_ulpevent **evp); + /* Prototypes for statetable processing. */ int sctp_do_sm(struct net *net, sctp_event_t event_type, sctp_subtype_t subtype, @@ -275,6 +306,7 @@ int sctp_do_sm(struct net *net, sctp_event_t event_type, sctp_subtype_t subtype, /* 2nd level prototypes */ void sctp_generate_t3_rtx_event(unsigned long peer); void sctp_generate_heartbeat_event(unsigned long peer); +void sctp_generate_reconf_event(unsigned long peer); void sctp_generate_proto_unreach_event(unsigned long peer); void sctp_ootb_pkt_free(struct sctp_packet *); diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 92daabdc007d..07a0b128625a 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -82,7 +82,6 @@ struct sctp_outq; struct sctp_bind_addr; struct sctp_ulpq; struct sctp_ep_common; -struct sctp_ssnmap; struct crypto_shash; @@ -375,56 +374,24 @@ typedef struct sctp_sender_hb_info { union sctp_addr daddr; unsigned long sent_at; __u64 hb_nonce; -} __packed sctp_sender_hb_info_t; +} sctp_sender_hb_info_t; -/* - * RFC 2960 1.3.2 Sequenced Delivery within Streams - * - * The term "stream" is used in SCTP to refer to a sequence of user - * messages that are to be delivered to the upper-layer protocol in - * order with respect to other messages within the same stream. This is - * in contrast to its usage in TCP, where it refers to a sequence of - * bytes (in this document a byte is assumed to be eight bits). - * ... - * - * This is the structure we use to track both our outbound and inbound - * SSN, or Stream Sequence Numbers. 
- */ - -struct sctp_stream { - __u16 *ssn; - unsigned int len; -}; - -struct sctp_ssnmap { - struct sctp_stream in; - struct sctp_stream out; -}; - -struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out, - gfp_t gfp); -void sctp_ssnmap_free(struct sctp_ssnmap *map); -void sctp_ssnmap_clear(struct sctp_ssnmap *map); +struct sctp_stream *sctp_stream_new(__u16 incnt, __u16 outcnt, gfp_t gfp); +void sctp_stream_free(struct sctp_stream *stream); +void sctp_stream_clear(struct sctp_stream *stream); /* What is the current SSN number for this stream? */ -static inline __u16 sctp_ssn_peek(struct sctp_stream *stream, __u16 id) -{ - return stream->ssn[id]; -} +#define sctp_ssn_peek(stream, type, sid) \ + ((stream)->type[sid].ssn) /* Return the next SSN number for this stream. */ -static inline __u16 sctp_ssn_next(struct sctp_stream *stream, __u16 id) -{ - return stream->ssn[id]++; -} +#define sctp_ssn_next(stream, type, sid) \ + ((stream)->type[sid].ssn++) /* Skip over this ssn and all below. */ -static inline void sctp_ssn_skip(struct sctp_stream *stream, __u16 id, - __u16 ssn) -{ - stream->ssn[id] = ssn+1; -} - +#define sctp_ssn_skip(stream, type, sid, ssn) \ + ((stream)->type[sid].ssn = ssn + 1) + /* * Pointers to address related SCTP functions. * (i.e. things that depend on the address family.) @@ -509,7 +476,8 @@ struct sctp_pf { int (*send_verify) (struct sctp_sock *, union sctp_addr *); int (*supported_addrs)(const struct sctp_sock *, __be16 *); struct sock *(*create_accept_sk) (struct sock *sk, - struct sctp_association *asoc); + struct sctp_association *asoc, + bool kern); int (*addr_to_user)(struct sctp_sock *sk, union sctp_addr *addr); void (*to_sk_saddr)(union sctp_addr *, struct sock *sk); void (*to_sk_daddr)(union sctp_addr *, struct sock *sk); @@ -530,6 +498,7 @@ struct sctp_datamsg { /* Did the message fail to send? */ int send_error; u8 send_failed:1, + force_delay:1, can_delay; /* should this message be Nagle delayed */ }; @@ -722,10 +691,9 @@ struct sctp_packet { ipfragok:1; /* So let ip fragment this packet */ }; -struct sctp_packet *sctp_packet_init(struct sctp_packet *, - struct sctp_transport *, - __u16 sport, __u16 dport); -struct sctp_packet *sctp_packet_config(struct sctp_packet *, __u32 vtag, int); +void sctp_packet_init(struct sctp_packet *, struct sctp_transport *, + __u16 sport, __u16 dport); +void sctp_packet_config(struct sctp_packet *, __u32 vtag, int); sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *, struct sctp_chunk *, int, gfp_t); sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *, @@ -838,6 +806,8 @@ struct sctp_transport { __u32 burst_limited; /* Holds old cwnd when max.burst is applied */ + __u32 dst_pending_confirm; /* need to confirm neighbour */ + /* Destination */ struct dst_entry *dst; /* Source address. */ @@ -911,6 +881,9 @@ struct sctp_transport { /* Timer to handle ICMP proto unreachable events */ struct timer_list proto_unreach_timer; + /* Timer to handle reconf chunk rtx */ + struct timer_list reconf_timer; + /* Since we're using per-destination retransmission timers * (see above), we're also using per-destination "transmitted" * queues.
This probably ought to be a private struct @@ -969,6 +942,7 @@ void sctp_transport_pmtu(struct sctp_transport *, struct sock *sk); void sctp_transport_free(struct sctp_transport *); void sctp_transport_reset_t3_rtx(struct sctp_transport *); void sctp_transport_reset_hb_timer(struct sctp_transport *); +void sctp_transport_reset_reconf_timer(struct sctp_transport *transport); int sctp_transport_hold(struct sctp_transport *); void sctp_transport_put(struct sctp_transport *); void sctp_transport_update_rto(struct sctp_transport *, __u32); @@ -980,6 +954,8 @@ unsigned long sctp_transport_timeout(struct sctp_transport *); void sctp_transport_reset(struct sctp_transport *); void sctp_transport_update_pmtu(struct sock *, struct sctp_transport *, u32); void sctp_transport_immediate_rtx(struct sctp_transport *); +void sctp_transport_dst_release(struct sctp_transport *t); +void sctp_transport_dst_confirm(struct sctp_transport *t); /* This is the structure we use to queue packets as they come into @@ -1285,7 +1261,10 @@ struct sctp_endpoint { struct list_head endpoint_shared_keys; __u16 active_key_id; __u8 auth_enable:1, - prsctp_enable:1; + prsctp_enable:1, + reconf_enable:1; + + __u8 strreset_enable; }; /* Recover the outer endpoint structure. */ @@ -1332,6 +1311,25 @@ struct sctp_inithdr_host { __u32 initial_tsn; }; +struct sctp_stream_out { + __u16 ssn; + __u8 state; +}; + +struct sctp_stream_in { + __u16 ssn; +}; + +struct sctp_stream { + struct sctp_stream_out *out; + struct sctp_stream_in *in; + __u16 outcnt; + __u16 incnt; +}; + +#define SCTP_STREAM_CLOSED 0x00 +#define SCTP_STREAM_OPEN 0x01 + /* SCTP_GET_ASSOC_STATS counters */ struct sctp_priv_assoc_stats { /* Maximum observed rto in the association during subsequent @@ -1519,6 +1517,7 @@ struct sctp_association { hostname_address:1, /* Peer understands DNS addresses? */ asconf_capable:1, /* Does peer support ADDIP? */ prsctp_capable:1, /* Can peer do PR-SCTP? */ + reconf_capable:1, /* Can peer do RE-CONFIG? */ auth_capable:1; /* Is peer doing SCTP-AUTH? */ /* sack_needed : This flag indicates if the next received @@ -1747,8 +1746,8 @@ struct sctp_association { /* Default receive parameters */ __u32 default_rcv_context; - /* This tracks outbound ssn for a given stream. */ - struct sctp_ssnmap *ssnmap; + /* Stream arrays */ + struct sctp_stream *stream; /* All outbound chunks go through this structure. */ struct sctp_outq outqueue; @@ -1878,7 +1877,16 @@ struct sctp_association { __u8 need_ecne:1, /* Need to send an ECNE Chunk? */ temp:1, /* Is it a temporary association?
*/ - prsctp_enable:1; + prsctp_enable:1, + reconf_enable:1; + + __u8 strreset_enable; + __u8 strreset_outstanding; /* request param count on the fly */ + + __u32 strreset_outseq; /* Update after receiving response */ + __u32 strreset_inseq; /* Update after receiving request */ + + struct sctp_chunk *strreset_chunk; /* save request chunk */ struct sctp_priv_assoc_stats stats; diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h index 2c098cd7e7e2..324b5965fc4d 100644 --- a/include/net/sctp/ulpevent.h +++ b/include/net/sctp/ulpevent.h @@ -128,6 +128,10 @@ struct sctp_ulpevent *sctp_ulpevent_make_authkey( struct sctp_ulpevent *sctp_ulpevent_make_sender_dry_event( const struct sctp_association *asoc, gfp_t gfp); +struct sctp_ulpevent *sctp_ulpevent_make_stream_reset_event( + const struct sctp_association *asoc, __u16 flags, + __u16 stream_num, __u16 *stream_list, gfp_t gfp); + void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event, struct msghdr *); void sctp_ulpevent_read_rcvinfo(const struct sctp_ulpevent *event, diff --git a/include/net/smc.h b/include/net/smc.h new file mode 100644 index 000000000000..12d26358ad9f --- /dev/null +++ b/include/net/smc.h @@ -0,0 +1,20 @@ +/* + * Shared Memory Communications over RDMA (SMC-R) and RoCE + * + * Definitions for the SMC module (socket related) + * + * Copyright IBM Corp. 2016 + * + * Author(s): Ursula Braun <ubraun@linux.vnet.ibm.com> + */ +#ifndef _SMC_H +#define _SMC_H + +struct smc_hashinfo { + rwlock_t lock; + struct hlist_head ht; +}; + +int smc_hash_sk(struct sock *sk); +void smc_unhash_sk(struct sock *sk); +#endif /* _SMC_H */ diff --git a/include/net/sock.h b/include/net/sock.h index c4f5e6fca17c..03252d53975d 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -70,6 +70,7 @@ #include <net/checksum.h> #include <net/tcp_states.h> #include <linux/net_tstamp.h> +#include <net/smc.h> /* * This structure really needs to be cleaned up. @@ -235,10 +236,12 @@ struct sock_common { * @sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN * @sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings * @sk_lock: synchronizer + * @sk_kern_sock: True if sock is using kernel lock classes * @sk_rcvbuf: size of receive buffer in bytes * @sk_wq: sock wait queue and async head * @sk_rx_dst: receive input route used by early demux * @sk_dst_cache: destination cache + * @sk_dst_pending_confirm: need to confirm neighbour * @sk_policy: flow policy * @sk_receive_queue: incoming packets * @sk_wmem_alloc: transmit queue bytes committed @@ -392,6 +395,8 @@ struct sock { struct sk_buff_head sk_write_queue; __s32 sk_peek_off; int sk_write_pending; + __u32 sk_dst_pending_confirm; + /* Note: 32bit hole on 64bit arches */ long sk_sndtimeo; struct timer_list sk_timer; __u32 sk_priority; @@ -426,7 +431,8 @@ struct sock { #endif kmemcheck_bitfield_begin(flags); - unsigned int sk_padding : 2, + unsigned int sk_padding : 1, + sk_kern_sock : 1, sk_no_check_tx : 1, sk_no_check_rx : 1, sk_userlocks : 4, @@ -543,8 +549,7 @@ static inline struct sock *sk_nulls_head(const struct hlist_nulls_head *head) static inline struct sock *sk_next(const struct sock *sk) { - return sk->sk_node.next ? 
- hlist_entry(sk->sk_node.next, struct sock, sk_node) : NULL; + return hlist_entry_safe(sk->sk_node.next, struct sock, sk_node); } static inline struct sock *sk_nulls_next(const struct sock *sk) @@ -986,6 +991,7 @@ struct request_sock_ops; struct timewait_sock_ops; struct inet_hashinfo; struct raw_hashinfo; +struct smc_hashinfo; struct module; /* @@ -1011,7 +1017,8 @@ struct proto { int addr_len); int (*disconnect)(struct sock *sk, int flags); - struct sock * (*accept)(struct sock *sk, int flags, int *err); + struct sock * (*accept)(struct sock *sk, int flags, int *err, + bool kern); int (*ioctl)(struct sock *sk, int cmd, unsigned long arg); @@ -1024,6 +1031,7 @@ struct proto { int (*getsockopt)(struct sock *sk, int level, int optname, char __user *optval, int __user *option); + void (*keepalive)(struct sock *sk, int valbool); #ifdef CONFIG_COMPAT int (*compat_setsockopt)(struct sock *sk, int level, @@ -1093,6 +1101,7 @@ struct proto { struct inet_hashinfo *hashinfo; struct udp_table *udp_table; struct raw_hashinfo *raw_hash; + struct smc_hashinfo *smc_hash; } h; struct module *owner; @@ -1520,6 +1529,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority, void sk_free(struct sock *sk); void sk_destruct(struct sock *sk); struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority); +void sk_free_unlock_clone(struct sock *sk); struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, gfp_t priority); @@ -1531,7 +1541,7 @@ void sock_efree(struct sk_buff *skb); #ifdef CONFIG_INET void sock_edemux(struct sk_buff *skb); #else -#define sock_edemux(skb) sock_efree(skb) +#define sock_edemux sock_efree #endif int sock_setsockopt(struct socket *sock, int level, int op, @@ -1566,7 +1576,7 @@ int sock_cmsg_send(struct sock *sk, struct msghdr *msg, int sock_no_bind(struct socket *, struct sockaddr *, int); int sock_no_connect(struct socket *, struct sockaddr *, int, int); int sock_no_socketpair(struct socket *, struct socket *); -int sock_no_accept(struct socket *, struct socket *, int); +int sock_no_accept(struct socket *, struct socket *, int, bool); int sock_no_getname(struct socket *, struct sockaddr *, int *, int); unsigned int sock_no_poll(struct file *, struct socket *, struct poll_table_struct *); @@ -1761,6 +1771,7 @@ static inline void dst_negative_advice(struct sock *sk) if (ndst != dst) { rcu_assign_pointer(sk->sk_dst_cache, ndst); sk_tx_queue_clear(sk); + sk->sk_dst_pending_confirm = 0; } } } @@ -1771,6 +1782,7 @@ __sk_dst_set(struct sock *sk, struct dst_entry *dst) struct dst_entry *old_dst; sk_tx_queue_clear(sk); + sk->sk_dst_pending_confirm = 0; /* * This can be called while sk is owned by the caller only, * with no state that can be checked in a rcu_dereference_check() cond @@ -1786,6 +1798,7 @@ sk_dst_set(struct sock *sk, struct dst_entry *dst) struct dst_entry *old_dst; sk_tx_queue_clear(sk); + sk->sk_dst_pending_confirm = 0; old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst); dst_release(old_dst); } @@ -1806,6 +1819,26 @@ struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie); struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie); +static inline void sk_dst_confirm(struct sock *sk) +{ + if (!sk->sk_dst_pending_confirm) + sk->sk_dst_pending_confirm = 1; +} + +static inline void sock_confirm_neigh(struct sk_buff *skb, struct neighbour *n) +{ + if (skb_get_dst_pending_confirm(skb)) { + struct sock *sk = skb->sk; + unsigned long now = jiffies; + + /* avoid dirtying neighbour */ + if (n->confirmed != now) + 
n->confirmed = now; + if (sk && sk->sk_dst_pending_confirm) + sk->sk_dst_pending_confirm = 0; + } +} + bool sk_mc_loop(struct sock *sk); static inline bool sk_can_gso(const struct sock *sk) diff --git a/include/net/switchdev.h b/include/net/switchdev.h index eba80c4fc56f..929d6af321cd 100644 --- a/include/net/switchdev.h +++ b/include/net/switchdev.h @@ -46,8 +46,10 @@ enum switchdev_attr_id { SWITCHDEV_ATTR_ID_PORT_PARENT_ID, SWITCHDEV_ATTR_ID_PORT_STP_STATE, SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS, + SWITCHDEV_ATTR_ID_PORT_MROUTER, SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME, SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING, + SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED, }; struct switchdev_attr { @@ -60,8 +62,10 @@ struct switchdev_attr { struct netdev_phys_item_id ppid; /* PORT_PARENT_ID */ u8 stp_state; /* PORT_STP_STATE */ unsigned long brport_flags; /* PORT_BRIDGE_FLAGS */ + bool mrouter; /* PORT_MROUTER */ clock_t ageing_time; /* BRIDGE_AGEING_TIME */ bool vlan_filtering; /* BRIDGE_VLAN_FILTERING */ + bool mc_disabled; /* MC_DISABLED */ } u; }; diff --git a/include/net/tc_act/tc_ife.h b/include/net/tc_act/tc_ife.h index 9fd2bea0a6e0..30ba459ddd34 100644 --- a/include/net/tc_act/tc_ife.h +++ b/include/net/tc_act/tc_ife.h @@ -6,7 +6,6 @@ #include <linux/rtnetlink.h> #include <linux/module.h> -#define IFE_METAHDRLEN 2 struct tcf_ife_info { struct tc_action common; u8 eth_dst[ETH_ALEN]; @@ -45,8 +44,6 @@ struct tcf_meta_ops { int ife_get_meta_u32(struct sk_buff *skb, struct tcf_meta_info *mi); int ife_get_meta_u16(struct sk_buff *skb, struct tcf_meta_info *mi); -int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen, - const void *dval); int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval, gfp_t gfp); int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval, gfp_t gfp); int ife_check_meta_u32(u32 metaval, struct tcf_meta_info *mi); diff --git a/include/net/tc_act/tc_pedit.h b/include/net/tc_act/tc_pedit.h index 29e38d6823df..dfbd6ee0bc7c 100644 --- a/include/net/tc_act/tc_pedit.h +++ b/include/net/tc_act/tc_pedit.h @@ -3,11 +3,17 @@ #include <net/act_api.h> +struct tcf_pedit_key_ex { + enum pedit_header_type htype; + enum pedit_cmd cmd; +}; + struct tcf_pedit { struct tc_action common; unsigned char tcfp_nkeys; unsigned char tcfp_flags; struct tc_pedit_key *tcfp_keys; + struct tcf_pedit_key_ex *tcfp_keys_ex; }; #define to_pedit(a) ((struct tcf_pedit *)a) diff --git a/include/net/tc_act/tc_sample.h b/include/net/tc_act/tc_sample.h new file mode 100644 index 000000000000..89e9305be880 --- /dev/null +++ b/include/net/tc_act/tc_sample.h @@ -0,0 +1,50 @@ +#ifndef __NET_TC_SAMPLE_H +#define __NET_TC_SAMPLE_H + +#include <net/act_api.h> +#include <linux/tc_act/tc_sample.h> +#include <net/psample.h> + +struct tcf_sample { + struct tc_action common; + u32 rate; + bool truncate; + u32 trunc_size; + struct psample_group __rcu *psample_group; + u32 psample_group_num; + struct list_head tcfm_list; + struct rcu_head rcu; +}; +#define to_sample(a) ((struct tcf_sample *)a) + +static inline bool is_tcf_sample(const struct tc_action *a) +{ +#ifdef CONFIG_NET_CLS_ACT + return a->ops && a->ops->type == TCA_ACT_SAMPLE; +#else + return false; +#endif +} + +static inline __u32 tcf_sample_rate(const struct tc_action *a) +{ + return to_sample(a)->rate; +} + +static inline bool tcf_sample_truncate(const struct tc_action *a) +{ + return to_sample(a)->truncate; +} + +static inline int tcf_sample_trunc_size(const struct tc_action *a) +{ + return to_sample(a)->trunc_size; +} + +static inline struct psample_group * 
+tcf_sample_psample_group(const struct tc_action *a) +{ + return rcu_dereference(to_sample(a)->psample_group); +} + +#endif /* __NET_TC_SAMPLE_H */ diff --git a/include/net/tcp.h b/include/net/tcp.h index 6061963cca98..6ec4ea652f3f 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -143,6 +143,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo); #define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes * for local resources. */ +#define TCP_REO_TIMEOUT_MIN (2000) /* Min RACK reordering timeout in usec */ #define TCP_KEEPALIVE_TIME (120*60*HZ) /* two hours */ #define TCP_KEEPALIVE_PROBES 9 /* Max of 9 keepalive probes */ @@ -231,7 +232,6 @@ void tcp_time_wait(struct sock *sk, int state, int timeo); */ #define TFO_SERVER_WO_SOCKOPT1 0x400 -extern struct inet_timewait_death_row tcp_death_row; /* sysctl variables for tcp */ extern int sysctl_tcp_timestamps; @@ -262,6 +262,9 @@ extern int sysctl_tcp_slow_start_after_idle; extern int sysctl_tcp_thin_linear_timeouts; extern int sysctl_tcp_thin_dupack; extern int sysctl_tcp_early_retrans; +extern int sysctl_tcp_recovery; +#define TCP_RACK_LOSS_DETECTION 0x1 /* Use RACK to detect losses */ + extern int sysctl_tcp_limit_output_bytes; extern int sysctl_tcp_challenge_ack_limit; extern int sysctl_tcp_min_tso_segs; @@ -398,6 +401,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, int tcp_child_process(struct sock *parent, struct sock *child, struct sk_buff *skb); void tcp_enter_loss(struct sock *sk); +void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int flag); void tcp_clear_retrans(struct tcp_sock *tp); void tcp_update_metrics(struct sock *sk); void tcp_init_metrics(struct sock *sk); @@ -542,6 +546,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs); void tcp_retransmit_timer(struct sock *sk); void tcp_xmit_retransmit_queue(struct sock *); void tcp_simple_retransmit(struct sock *); +void tcp_enter_recovery(struct sock *sk, bool ece_ack); int tcp_trim_head(struct sock *, struct sk_buff *, u32); int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t); @@ -560,7 +565,6 @@ void tcp_skb_collapse_tstamp(struct sk_buff *skb, const struct sk_buff *next_skb); /* tcp_input.c */ -void tcp_resume_early_retransmit(struct sock *sk); void tcp_rearm_rto(struct sock *sk); void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req); void tcp_reset(struct sock *sk); @@ -1032,23 +1036,6 @@ static inline void tcp_enable_fack(struct tcp_sock *tp) tp->rx_opt.sack_ok |= TCP_FACK_ENABLED; } -/* TCP early-retransmit (ER) is similar to but more conservative than - * the thin-dupack feature. Enable ER only if thin-dupack is disabled. 
- */ -static inline void tcp_enable_early_retrans(struct tcp_sock *tp) -{ - struct net *net = sock_net((struct sock *)tp); - - tp->do_early_retrans = sysctl_tcp_early_retrans && - sysctl_tcp_early_retrans < 4 && !sysctl_tcp_thin_dupack && - net->ipv4.sysctl_tcp_reordering == 3; -} - -static inline void tcp_disable_early_retrans(struct tcp_sock *tp) -{ - tp->do_early_retrans = 0; -} - static inline unsigned int tcp_left_out(const struct tcp_sock *tp) { return tp->sacked_out + tp->lost_out; @@ -1506,6 +1493,9 @@ struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb, struct tcp_fastopen_cookie *foc, struct dst_entry *dst); void tcp_fastopen_init_key_once(bool publish); +bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss, + struct tcp_fastopen_cookie *cookie); +bool tcp_fastopen_defer_connect(struct sock *sk, int *err); #define TCP_FASTOPEN_KEY_LENGTH 16 /* Fastopen key context */ @@ -1857,17 +1847,11 @@ void tcp_v4_init(void); void tcp_init(void); /* tcp_recovery.c */ - -/* Flags to enable various loss recovery features. See below */ -extern int sysctl_tcp_recovery; - -/* Use TCP RACK to detect (some) tail and retransmit losses */ -#define TCP_RACK_LOST_RETRANS 0x1 - -extern int tcp_rack_mark_lost(struct sock *sk); - -extern void tcp_rack_advance(struct tcp_sock *tp, - const struct skb_mstamp *xmit_time, u8 sacked); +extern void tcp_rack_mark_lost(struct sock *sk, const struct skb_mstamp *now); +extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq, + const struct skb_mstamp *xmit_time, + const struct skb_mstamp *ack_time); +extern void tcp_rack_reo_timeout(struct sock *sk); /* * Save and compile IPv4 options, return a pointer to it diff --git a/include/net/udp.h b/include/net/udp.h index 1661791e8ca1..c9d8b8e848e0 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -204,7 +204,6 @@ static inline void udp_lib_close(struct sock *sk, long timeout) } int udp_lib_get_port(struct sock *sk, unsigned short snum, - int (*)(const struct sock *, const struct sock *, bool), unsigned int hash2_nulladdr); u32 udp_flow_hashrnd(void); diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 31947b9c21d6..14d82bf16692 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -213,6 +213,8 @@ struct xfrm_state { /* Last used time */ unsigned long lastused; + struct page_frag xfrag; + /* Reference to data common to all the instances of this * transformer. 
*/ const struct xfrm_type *type; @@ -278,9 +280,7 @@ struct net_device; struct xfrm_type; struct xfrm_dst; struct xfrm_policy_afinfo { - unsigned short family; struct dst_ops *dst_ops; - void (*garbage_collect)(struct net *net); struct dst_entry *(*dst_lookup)(struct net *net, int tos, int oif, const xfrm_address_t *saddr, @@ -301,8 +301,8 @@ struct xfrm_policy_afinfo { struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig); }; -int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo); -int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo); +int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family); +void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo); void km_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c); void km_state_notify(struct xfrm_state *x, const struct km_event *c); @@ -343,17 +343,16 @@ struct xfrm_state_afinfo { int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo); int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo); struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family); -void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo); +struct xfrm_state_afinfo *xfrm_state_afinfo_get_rcu(unsigned int family); struct xfrm_input_afinfo { unsigned int family; - struct module *owner; int (*callback)(struct sk_buff *skb, u8 protocol, int err); }; -int xfrm_input_register_afinfo(struct xfrm_input_afinfo *afinfo); -int xfrm_input_unregister_afinfo(struct xfrm_input_afinfo *afinfo); +int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo); +int xfrm_input_unregister_afinfo(const struct xfrm_input_afinfo *afinfo); void xfrm_state_delete_tunnel(struct xfrm_state *x); @@ -499,6 +498,7 @@ struct xfrm_tmpl { }; #define XFRM_MAX_DEPTH 6 +#define XFRM_MAX_OFFLOAD_DEPTH 1 struct xfrm_policy_walk_entry { struct list_head all; @@ -682,6 +682,7 @@ struct xfrm_spi_skb_cb { unsigned int daddroff; unsigned int family; + __be32 seq; }; #define XFRM_SPI_SKB_CB(__skb) ((struct xfrm_spi_skb_cb *)&((__skb)->cb[0])) @@ -974,10 +975,41 @@ static inline void xfrm_dst_destroy(struct xfrm_dst *xdst) void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev); +struct xfrm_offload { + /* Output sequence number for replay protection on offloading. 
*/ + struct { + __u32 low; + __u32 hi; + } seq; + + __u32 flags; +#define SA_DELETE_REQ 1 +#define CRYPTO_DONE 2 +#define CRYPTO_NEXT_DONE 4 +#define CRYPTO_FALLBACK 8 +#define XFRM_GSO_SEGMENT 16 +#define XFRM_GRO 32 + + __u32 status; +#define CRYPTO_SUCCESS 1 +#define CRYPTO_GENERIC_ERROR 2 +#define CRYPTO_TRANSPORT_AH_AUTH_FAILED 4 +#define CRYPTO_TRANSPORT_ESP_AUTH_FAILED 8 +#define CRYPTO_TUNNEL_AH_AUTH_FAILED 16 +#define CRYPTO_TUNNEL_ESP_AUTH_FAILED 32 +#define CRYPTO_INVALID_PACKET_SYNTAX 64 +#define CRYPTO_INVALID_PROTOCOL 128 + + __u8 proto; +}; + struct sec_path { atomic_t refcnt; int len; + int olen; + struct xfrm_state *xvec[XFRM_MAX_DEPTH]; + struct xfrm_offload ovec[XFRM_MAX_OFFLOAD_DEPTH]; }; static inline int secpath_exists(struct sk_buff *skb) @@ -1007,6 +1039,7 @@ secpath_put(struct sec_path *sp) } struct sec_path *secpath_dup(struct sec_path *src); +int secpath_set(struct sk_buff *skb); static inline void secpath_reset(struct sk_buff *skb) @@ -1168,6 +1201,7 @@ static inline void xfrm_sk_free_policy(struct sock *sk) } void xfrm_garbage_collect(struct net *net); +void xfrm_garbage_collect_deferred(struct net *net); #else @@ -1519,6 +1553,7 @@ int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type); int xfrm4_transport_finish(struct sk_buff *skb, int async); int xfrm4_rcv(struct sk_buff *skb); +int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq); static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi) { @@ -1774,6 +1809,15 @@ static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb) { return skb->sp->xvec[skb->sp->len - 1]; } +static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb) +{ + struct sec_path *sp = skb->sp; + + if (!sp || !sp->olen || sp->len != sp->olen) + return NULL; + + return &sp->ovec[sp->olen - 1]; +} #endif static inline int xfrm_mark_get(struct nlattr **attrs, struct xfrm_mark *m) diff --git a/include/rdma/ib.h b/include/rdma/ib.h index a6b93706b0fc..9b4c22a36931 100644 --- a/include/rdma/ib.h +++ b/include/rdma/ib.h @@ -35,6 +35,7 @@ #include <linux/types.h> #include <linux/sched.h> +#include <linux/cred.h> struct ib_addr { union { diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h index 1beab5532035..4b34c51f859e 100644 --- a/include/rdma/ib_addr.h +++ b/include/rdma/ib_addr.h @@ -160,8 +160,7 @@ static inline int rdma_addr_gid_offset(struct rdma_dev_addr *dev_addr) static inline u16 rdma_vlan_dev_vlan_id(const struct net_device *dev) { - return dev->priv_flags & IFF_802_1Q_VLAN ? - vlan_dev_vlan_id(dev) : 0xffff; + return is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 0xffff; } static inline int rdma_ip2gid(struct sockaddr *addr, union ib_gid *gid) @@ -326,8 +325,7 @@ static inline u16 rdma_get_vlan_id(union ib_gid *dgid) static inline struct net_device *rdma_vlan_dev_real_dev(const struct net_device *dev) { - return dev->priv_flags & IFF_802_1Q_VLAN ? - vlan_dev_real_dev(dev) : NULL; + return is_vlan_dev(dev) ? vlan_dev_real_dev(dev) : NULL; } #endif /* IB_ADDR_H */ diff --git a/include/rdma/ib_cache.h b/include/rdma/ib_cache.h index e30f19bd4a41..385ec88ee9e5 100644 --- a/include/rdma/ib_cache.h +++ b/include/rdma/ib_cache.h @@ -165,4 +165,17 @@ int ib_get_cached_lmc(struct ib_device *device, u8 port_num, u8 *lmc); +/** + * ib_get_cached_port_state - Returns a cached port state table entry + * @device: The device to query. + * @port_num: The port number of the device to query. + * @port_state: port_state for the specified port for that device. 
+ * + * ib_get_cached_port_state() fetches the specified port_state table entry stored in + * the local software cache. + */ +int ib_get_cached_port_state(struct ib_device *device, + u8 port_num, + enum ib_port_state *port_active); + #endif /* _IB_CACHE_H */ diff --git a/include/rdma/ib_hdrs.h b/include/rdma/ib_hdrs.h index 408439fe911e..c755325f0831 100644 --- a/include/rdma/ib_hdrs.h +++ b/include/rdma/ib_hdrs.h @@ -75,6 +75,12 @@ #define IB_GRH_FLOW_SHIFT 0 #define IB_GRH_NEXT_HDR 0x1B +#define IB_AETH_CREDIT_SHIFT 24 +#define IB_AETH_CREDIT_MASK 0x1F +#define IB_AETH_CREDIT_INVAL 0x1F +#define IB_AETH_NAK_SHIFT 29 +#define IB_MSN_MASK 0xFFFFFF + struct ib_reth { __be64 vaddr; /* potentially unaligned */ __be32 rkey; diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h index 5ee7aab95eb8..fd0e53219f93 100644 --- a/include/rdma/ib_sa.h +++ b/include/rdma/ib_sa.h @@ -153,12 +153,12 @@ struct ib_sa_path_rec { union ib_gid sgid; __be16 dlid; __be16 slid; - int raw_traffic; + u8 raw_traffic; /* reserved */ __be32 flow_label; u8 hop_limit; u8 traffic_class; - int reversible; + u8 reversible; u8 numb_path; __be16 pkey; __be16 qos_class; @@ -220,7 +220,7 @@ struct ib_sa_mcmember_rec { u8 hop_limit; u8 scope; u8 join_state; - int proxy_join; + u8 proxy_join; }; /* Service Record Component Mask Sec 15.2.5.14 Ver 1.1 */ diff --git a/include/rdma/ib_umem_odp.h b/include/rdma/ib_umem_odp.h index 3da0b167041b..542cd8b3414c 100644 --- a/include/rdma/ib_umem_odp.h +++ b/include/rdma/ib_umem_odp.h @@ -79,11 +79,15 @@ struct ib_umem_odp { struct completion notifier_completion; int dying; + struct work_struct work; }; #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem); +struct ib_umem *ib_alloc_odp_umem(struct ib_ucontext *context, + unsigned long addr, + size_t size); void ib_umem_odp_release(struct ib_umem *umem); @@ -117,10 +121,12 @@ typedef int (*umem_call_back)(struct ib_umem *item, u64 start, u64 end, int rbt_ib_umem_for_each_in_range(struct rb_root *root, u64 start, u64 end, umem_call_back cb, void *cookie); -struct umem_odp_node *rbt_ib_umem_iter_first(struct rb_root *root, - u64 start, u64 last); -struct umem_odp_node *rbt_ib_umem_iter_next(struct umem_odp_node *node, - u64 start, u64 last); +/* + * Find first region intersecting with address range. + * Return NULL if not found + */ +struct ib_umem_odp *rbt_ib_umem_lookup(struct rb_root *root, + u64 addr, u64 length); static inline int ib_umem_mmu_notifier_retry(struct ib_umem *item, unsigned long mmu_seq) @@ -153,6 +159,13 @@ static inline int ib_umem_odp_get(struct ib_ucontext *context, return -EINVAL; } +static inline struct ib_umem *ib_alloc_odp_umem(struct ib_ucontext *context, + unsigned long addr, + size_t size) +{ + return ERR_PTR(-EINVAL); +} + static inline void ib_umem_odp_release(struct ib_umem *umem) {} #endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */ diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index b567e4452a47..0f1813c13687 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -60,6 +60,7 @@ #include <linux/atomic.h> #include <linux/mmu_notifier.h> #include <linux/uaccess.h> +#include <linux/cgroup_rdma.h> extern struct workqueue_struct *ib_wq; extern struct workqueue_struct *ib_comp_wq; @@ -207,6 +208,7 @@ enum ib_device_cap_flags { IB_DEVICE_MEM_WINDOW_TYPE_2A = (1 << 23), IB_DEVICE_MEM_WINDOW_TYPE_2B = (1 << 24), IB_DEVICE_RC_IP_CSUM = (1 << 25), + /* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. 
*/ IB_DEVICE_RAW_IP_CSUM = (1 << 26), /* * Devices should set IB_DEVICE_CROSS_CHANNEL if they @@ -220,6 +222,7 @@ enum ib_device_cap_flags { IB_DEVICE_ON_DEMAND_PAGING = (1ULL << 31), IB_DEVICE_SG_GAPS_REG = (1ULL << 32), IB_DEVICE_VIRTUAL_FUNCTION = (1ULL << 33), + /* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */ IB_DEVICE_RAW_SCATTER_FCS = (1ULL << 34), }; @@ -241,7 +244,8 @@ enum ib_atomic_cap { }; enum ib_odp_general_cap_bits { - IB_ODP_SUPPORT = 1 << 0, + IB_ODP_SUPPORT = 1 << 0, + IB_ODP_SUPPORT_IMPLICIT = 1 << 1, }; enum ib_odp_transport_cap_bits { @@ -330,6 +334,7 @@ struct ib_device_attr { uint64_t hca_core_clock; /* in KHZ */ struct ib_rss_caps rss_caps; u32 max_wq_type_rq; + u32 raw_packet_caps; /* Use ib_raw_packet_caps enum */ }; enum ib_mtu { @@ -499,6 +504,8 @@ static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct( #define RDMA_CORE_CAP_PROT_ROCE 0x00200000 #define RDMA_CORE_CAP_PROT_IWARP 0x00400000 #define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000 +#define RDMA_CORE_CAP_PROT_RAW_PACKET 0x01000000 +#define RDMA_CORE_CAP_PROT_USNIC 0x02000000 #define RDMA_CORE_PORT_IBA_IB (RDMA_CORE_CAP_PROT_IB \ | RDMA_CORE_CAP_IB_MAD \ @@ -522,6 +529,10 @@ static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct( #define RDMA_CORE_PORT_INTEL_OPA (RDMA_CORE_PORT_IBA_IB \ | RDMA_CORE_CAP_OPA_MAD) +#define RDMA_CORE_PORT_RAW_PACKET (RDMA_CORE_CAP_PROT_RAW_PACKET) + +#define RDMA_CORE_PORT_USNIC (RDMA_CORE_CAP_PROT_USNIC) + struct ib_port_attr { u64 subnet_prefix; enum ib_port_state state; @@ -1019,6 +1030,7 @@ enum ib_qp_create_flags { IB_QP_CREATE_SIGNATURE_EN = 1 << 6, IB_QP_CREATE_USE_GFP_NOIO = 1 << 7, IB_QP_CREATE_SCATTER_FCS = 1 << 8, + IB_QP_CREATE_CVLAN_STRIPPING = 1 << 9, /* reserve bits 26-31 for low level drivers' internal use */ IB_QP_CREATE_RESERVED_START = 1 << 26, IB_QP_CREATE_RESERVED_END = 1 << 31, @@ -1345,6 +1357,12 @@ struct ib_fmr_attr { struct ib_umem; +struct ib_rdmacg_object { +#ifdef CONFIG_CGROUP_RDMA + struct rdma_cgroup *cg; /* owner rdma cgroup */ +#endif +}; + struct ib_ucontext { struct ib_device *device; struct list_head pd_list; @@ -1377,6 +1395,8 @@ struct ib_ucontext { struct list_head no_private_counters; int odp_mrs_count; #endif + + struct ib_rdmacg_object cg_obj; }; struct ib_uobject { @@ -1384,6 +1404,7 @@ struct ib_uobject { struct ib_ucontext *context; /* associated user context */ void *object; /* containing object */ struct list_head list; /* link to context's list */ + struct ib_rdmacg_object cg_obj; /* rdmacg object */ int id; /* index into kernel idr */ struct kref ref; struct rw_semaphore mutex; /* protects .live */ @@ -1470,6 +1491,18 @@ struct ib_srq { } ext; }; +enum ib_raw_packet_caps { + /* Stripping the cvlan from an incoming packet and reporting it in the + * matching work completion is supported. + */ + IB_RAW_PACKET_CAP_CVLAN_STRIPPING = (1 << 0), + /* Scattering the FCS field of an incoming packet to host memory is + * supported. + */ + IB_RAW_PACKET_CAP_SCATTER_FCS = (1 << 1), + /* Checksum offloads are supported (for both send and receive).
*/ + IB_RAW_PACKET_CAP_IP_CSUM = (1 << 2), +}; + enum ib_wq_type { IB_WQT_RQ }; @@ -1493,6 +1526,11 @@ struct ib_wq { atomic_t usecnt; }; +enum ib_wq_flags { + IB_WQ_FLAGS_CVLAN_STRIPPING = 1 << 0, + IB_WQ_FLAGS_SCATTER_FCS = 1 << 1, +}; + struct ib_wq_init_attr { void *wq_context; enum ib_wq_type wq_type; @@ -1500,16 +1538,20 @@ struct ib_wq_init_attr { u32 max_sge; struct ib_cq *cq; void (*event_handler)(struct ib_event *, void *); + u32 create_flags; /* Use enum ib_wq_flags */ }; enum ib_wq_attr_mask { - IB_WQ_STATE = 1 << 0, - IB_WQ_CUR_STATE = 1 << 1, + IB_WQ_STATE = 1 << 0, + IB_WQ_CUR_STATE = 1 << 1, + IB_WQ_FLAGS = 1 << 2, }; struct ib_wq_attr { enum ib_wq_state wq_state; enum ib_wq_state curr_wq_state; + u32 flags; /* Use enum ib_wq_flags */ + u32 flags_mask; /* Use enum ib_wq_flags */ }; struct ib_rwq_ind_table { @@ -1618,6 +1660,8 @@ enum ib_flow_spec_type { IB_FLOW_SPEC_UDP = 0x41, IB_FLOW_SPEC_VXLAN_TUNNEL = 0x50, IB_FLOW_SPEC_INNER = 0x100, + /* Actions */ + IB_FLOW_SPEC_ACTION_TAG = 0x1000, }; #define IB_FLOW_SPEC_LAYER_MASK 0xF0 #define IB_FLOW_SPEC_SUPPORT_LAYERS 8 @@ -1740,6 +1784,12 @@ struct ib_flow_spec_tunnel { struct ib_flow_tunnel_filter mask; }; +struct ib_flow_spec_action_tag { + enum ib_flow_spec_type type; + u16 size; + u32 tag_id; +}; + union ib_flow_spec { struct { u32 type; @@ -1751,6 +1801,7 @@ union ib_flow_spec { struct ib_flow_spec_tcp_udp tcp_udp; struct ib_flow_spec_ipv6 ipv6; struct ib_flow_spec_tunnel tunnel; + struct ib_flow_spec_action_tag flow_tag; }; struct ib_flow_attr { @@ -1789,59 +1840,17 @@ enum ib_mad_result { #define IB_DEVICE_NAME_MAX 64 +struct ib_port_cache { + struct ib_pkey_cache *pkey; + struct ib_gid_table *gid; + u8 lmc; + enum ib_port_state port_state; +}; + struct ib_cache { rwlock_t lock; struct ib_event_handler event_handler; - struct ib_pkey_cache **pkey_cache; - struct ib_gid_table **gid_cache; - u8 *lmc_cache; -}; - -struct ib_dma_mapping_ops { - int (*mapping_error)(struct ib_device *dev, - u64 dma_addr); - u64 (*map_single)(struct ib_device *dev, - void *ptr, size_t size, - enum dma_data_direction direction); - void (*unmap_single)(struct ib_device *dev, - u64 addr, size_t size, - enum dma_data_direction direction); - u64 (*map_page)(struct ib_device *dev, - struct page *page, unsigned long offset, - size_t size, - enum dma_data_direction direction); - void (*unmap_page)(struct ib_device *dev, - u64 addr, size_t size, - enum dma_data_direction direction); - int (*map_sg)(struct ib_device *dev, - struct scatterlist *sg, int nents, - enum dma_data_direction direction); - void (*unmap_sg)(struct ib_device *dev, - struct scatterlist *sg, int nents, - enum dma_data_direction direction); - int (*map_sg_attrs)(struct ib_device *dev, - struct scatterlist *sg, int nents, - enum dma_data_direction direction, - unsigned long attrs); - void (*unmap_sg_attrs)(struct ib_device *dev, - struct scatterlist *sg, int nents, - enum dma_data_direction direction, - unsigned long attrs); - void (*sync_single_for_cpu)(struct ib_device *dev, - u64 dma_handle, - size_t size, - enum dma_data_direction dir); - void (*sync_single_for_device)(struct ib_device *dev, - u64 dma_handle, - size_t size, - enum dma_data_direction dir); - void *(*alloc_coherent)(struct ib_device *dev, - size_t size, - u64 *dma_handle, - gfp_t flag); - void (*free_coherent)(struct ib_device *dev, - size_t size, void *cpu_addr, - u64 dma_handle); + struct ib_port_cache *ports; }; struct iw_cm_verbs; @@ -1854,8 +1863,6 @@ struct ib_port_immutable { }; struct ib_device { - struct 
device *dma_device; - char name[IB_DEVICE_NAME_MAX]; struct list_head event_handler_list; @@ -2105,7 +2112,6 @@ struct ib_device { struct ib_rwq_ind_table_init_attr *init_attr, struct ib_udata *udata); int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table); - struct ib_dma_mapping_ops *dma_ops; struct module *owner; struct device dev; @@ -2132,6 +2138,10 @@ struct ib_device { struct attribute_group *hw_stats_ag; struct rdma_hw_stats *hw_stats; +#ifdef CONFIG_CGROUP_RDMA + struct rdmacg_device cg_device; +#endif + /** * The following mandatory functions are used only at device * registration. Keep functions such as these at the end of this @@ -2289,6 +2299,13 @@ static inline u8 rdma_end_port(const struct ib_device *device) return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt; } +static inline int rdma_is_port_valid(const struct ib_device *device, + unsigned int port) +{ + return (port >= rdma_start_port(device) && + port <= rdma_end_port(device)); +} + static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num) { return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB; @@ -2321,6 +2338,16 @@ static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num) rdma_protocol_roce(device, port_num); } +static inline bool rdma_protocol_raw_packet(const struct ib_device *device, u8 port_num) +{ + return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_RAW_PACKET; +} + +static inline bool rdma_protocol_usnic(const struct ib_device *device, u8 port_num) +{ + return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_USNIC; +} + /** * rdma_cap_ib_mad - Check if the port of a device supports Infiniband * Management Datagrams. @@ -2980,9 +3007,7 @@ static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt) */ static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr) { - if (dev->dma_ops) - return dev->dma_ops->mapping_error(dev, dma_addr); - return dma_mapping_error(dev->dma_device, dma_addr); + return dma_mapping_error(&dev->dev, dma_addr); } /** @@ -2996,9 +3021,7 @@ static inline u64 ib_dma_map_single(struct ib_device *dev, void *cpu_addr, size_t size, enum dma_data_direction direction) { - if (dev->dma_ops) - return dev->dma_ops->map_single(dev, cpu_addr, size, direction); - return dma_map_single(dev->dma_device, cpu_addr, size, direction); + return dma_map_single(&dev->dev, cpu_addr, size, direction); } /** @@ -3012,28 +3035,7 @@ static inline void ib_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size, enum dma_data_direction direction) { - if (dev->dma_ops) - dev->dma_ops->unmap_single(dev, addr, size, direction); - else - dma_unmap_single(dev->dma_device, addr, size, direction); -} - -static inline u64 ib_dma_map_single_attrs(struct ib_device *dev, - void *cpu_addr, size_t size, - enum dma_data_direction direction, - unsigned long dma_attrs) -{ - return dma_map_single_attrs(dev->dma_device, cpu_addr, size, - direction, dma_attrs); -} - -static inline void ib_dma_unmap_single_attrs(struct ib_device *dev, - u64 addr, size_t size, - enum dma_data_direction direction, - unsigned long dma_attrs) -{ - return dma_unmap_single_attrs(dev->dma_device, addr, size, - direction, dma_attrs); + dma_unmap_single(&dev->dev, addr, size, direction); } /** @@ -3050,9 +3052,7 @@ static inline u64 ib_dma_map_page(struct ib_device *dev, size_t size, enum dma_data_direction direction) { - if (dev->dma_ops) - return dev->dma_ops->map_page(dev, page, offset, size, 
direction); - return dma_map_page(dev->dma_device, page, offset, size, direction); + return dma_map_page(&dev->dev, page, offset, size, direction); } /** @@ -3066,10 +3066,7 @@ static inline void ib_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size, enum dma_data_direction direction) { - if (dev->dma_ops) - dev->dma_ops->unmap_page(dev, addr, size, direction); - else - dma_unmap_page(dev->dma_device, addr, size, direction); + dma_unmap_page(&dev->dev, addr, size, direction); } /** @@ -3083,9 +3080,7 @@ static inline int ib_dma_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction) { - if (dev->dma_ops) - return dev->dma_ops->map_sg(dev, sg, nents, direction); - return dma_map_sg(dev->dma_device, sg, nents, direction); + return dma_map_sg(&dev->dev, sg, nents, direction); } /** @@ -3099,10 +3094,7 @@ static inline void ib_dma_unmap_sg(struct ib_device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction) { - if (dev->dma_ops) - dev->dma_ops->unmap_sg(dev, sg, nents, direction); - else - dma_unmap_sg(dev->dma_device, sg, nents, direction); + dma_unmap_sg(&dev->dev, sg, nents, direction); } static inline int ib_dma_map_sg_attrs(struct ib_device *dev, @@ -3110,12 +3102,7 @@ static inline int ib_dma_map_sg_attrs(struct ib_device *dev, enum dma_data_direction direction, unsigned long dma_attrs) { - if (dev->dma_ops) - return dev->dma_ops->map_sg_attrs(dev, sg, nents, direction, - dma_attrs); - else - return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, - dma_attrs); + return dma_map_sg_attrs(&dev->dev, sg, nents, direction, dma_attrs); } static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev, @@ -3123,12 +3110,7 @@ static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev, enum dma_data_direction direction, unsigned long dma_attrs) { - if (dev->dma_ops) - return dev->dma_ops->unmap_sg_attrs(dev, sg, nents, direction, - dma_attrs); - else - dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, - dma_attrs); + dma_unmap_sg_attrs(&dev->dev, sg, nents, direction, dma_attrs); } /** * ib_sg_dma_address - Return the DMA address from a scatter/gather entry @@ -3170,10 +3152,7 @@ static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev, size_t size, enum dma_data_direction dir) { - if (dev->dma_ops) - dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir); - else - dma_sync_single_for_cpu(dev->dma_device, addr, size, dir); + dma_sync_single_for_cpu(&dev->dev, addr, size, dir); } /** @@ -3188,10 +3167,7 @@ static inline void ib_dma_sync_single_for_device(struct ib_device *dev, size_t size, enum dma_data_direction dir) { - if (dev->dma_ops) - dev->dma_ops->sync_single_for_device(dev, addr, size, dir); - else - dma_sync_single_for_device(dev->dma_device, addr, size, dir); + dma_sync_single_for_device(&dev->dev, addr, size, dir); } /** @@ -3203,19 +3179,10 @@ static inline void ib_dma_sync_single_for_device(struct ib_device *dev, */ static inline void *ib_dma_alloc_coherent(struct ib_device *dev, size_t size, - u64 *dma_handle, + dma_addr_t *dma_handle, gfp_t flag) { - if (dev->dma_ops) - return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag); - else { - dma_addr_t handle; - void *ret; - - ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag); - *dma_handle = handle; - return ret; - } + return dma_alloc_coherent(&dev->dev, size, dma_handle, flag); } /** @@ -3227,12 +3194,9 @@ static inline void *ib_dma_alloc_coherent(struct ib_device *dev, */ static inline void 
ib_dma_free_coherent(struct ib_device *dev, size_t size, void *cpu_addr, - u64 dma_handle) + dma_addr_t dma_handle) { - if (dev->dma_ops) - dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle); - else - dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle); + dma_free_coherent(&dev->dev, size, cpu_addr, dma_handle); } /** diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h index 861e23eaebda..8fc1ca7b6f23 100644 --- a/include/rdma/rdma_vt.h +++ b/include/rdma/rdma_vt.h @@ -164,7 +164,7 @@ struct rvt_driver_params { /* Protection domain */ struct rvt_pd { struct ib_pd ibpd; - int user; /* non-zero if created from user space */ + bool user; }; /* Address handle */ @@ -335,6 +335,8 @@ struct rvt_driver_provided { /* Notify driver a mad agent has been removed */ void (*notify_free_mad_agent)(struct rvt_dev_info *rdi, int port_idx); + /* Notify driver to restart rc */ + void (*notify_restart_rc)(struct rvt_qp *qp, u32 psn, int wait); }; struct rvt_dev_info { @@ -483,6 +485,23 @@ static inline struct rvt_qp *rvt_lookup_qpn(struct rvt_dev_info *rdi, return qp; } +/** + * rvt_mod_retry_timer - mod a retry timer + * @qp - the QP + * Modify a potentially already running retry timer + */ +static inline void rvt_mod_retry_timer(struct rvt_qp *qp) +{ + struct ib_qp *ibqp = &qp->ibqp; + struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device); + + lockdep_assert_held(&qp->s_lock); + qp->s_flags |= RVT_S_TIMER; + /* 4.096 usec. * (1 << qp->timeout) */ + mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies + + rdi->busy_jiffies); +} + struct rvt_dev_info *rvt_alloc_device(size_t size, int nports); void rvt_dealloc_device(struct rvt_dev_info *rdi); int rvt_register_device(struct rvt_dev_info *rvd); diff --git a/include/rdma/rdmavt_mr.h b/include/rdma/rdmavt_mr.h index de59de28b6a2..f418bd5571a5 100644 --- a/include/rdma/rdmavt_mr.h +++ b/include/rdma/rdmavt_mr.h @@ -52,6 +52,7 @@ * For Memory Regions. This stuff should probably be moved into rdmavt/mr.h once * drivers no longer need access to the MR directly. */ +#include <linux/percpu-refcount.h> /* * A segment is a linear region of low physical memory. 
@@ -79,11 +80,11 @@ struct rvt_mregion { int access_flags; u32 max_segs; /* number of rvt_segs in all the arrays */ u32 mapsz; /* size of the map array */ + atomic_t lkey_invalid; /* true if current lkey is invalid */ u8 page_shift; /* 0 - non-uniform/non-power-of-2 sizes */ u8 lkey_published; /* in global table */ - atomic_t lkey_invalid; /* true if current lkey is invalid */ + struct percpu_ref refcount; struct completion comp; /* complete when refcount goes to zero */ - atomic_t refcount; struct rvt_segarray *map[0]; /* the segments */ }; @@ -123,13 +124,12 @@ struct rvt_sge_state { static inline void rvt_put_mr(struct rvt_mregion *mr) { - if (unlikely(atomic_dec_and_test(&mr->refcount))) - complete(&mr->comp); + percpu_ref_put(&mr->refcount); } static inline void rvt_get_mr(struct rvt_mregion *mr) { - atomic_inc(&mr->refcount); + percpu_ref_get(&mr->refcount); } static inline void rvt_put_ss(struct rvt_sge_state *ss) @@ -141,4 +141,54 @@ static inline void rvt_put_ss(struct rvt_sge_state *ss) } } +static inline u32 rvt_get_sge_length(struct rvt_sge *sge, u32 length) +{ + u32 len = sge->length; + + if (len > length) + len = length; + if (len > sge->sge_length) + len = sge->sge_length; + + return len; +} + +static inline void rvt_update_sge(struct rvt_sge_state *ss, u32 length, + bool release) +{ + struct rvt_sge *sge = &ss->sge; + + sge->vaddr += length; + sge->length -= length; + sge->sge_length -= length; + if (sge->sge_length == 0) { + if (release) + rvt_put_mr(sge->mr); + if (--ss->num_sge) + *sge = *ss->sg_list++; + } else if (sge->length == 0 && sge->mr->lkey) { + if (++sge->n >= RVT_SEGSZ) { + if (++sge->m >= sge->mr->mapsz) + return; + sge->n = 0; + } + sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr; + sge->length = sge->mr->map[sge->m]->segs[sge->n].length; + } +} + +static inline void rvt_skip_sge(struct rvt_sge_state *ss, u32 length, + bool release) +{ + struct rvt_sge *sge = &ss->sge; + + while (length) { + u32 len = rvt_get_sge_length(sge, length); + + WARN_ON_ONCE(len == 0); + rvt_update_sge(ss, len, release); + length -= len; + } +} + #endif /* DEF_RDMAVT_INCMRH */ diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h index f3dbd157ae5c..f3816396c76a 100644 --- a/include/rdma/rdmavt_qp.h +++ b/include/rdma/rdmavt_qp.h @@ -144,6 +144,8 @@ #define RVT_FLUSH_RECV 0x40 #define RVT_PROCESS_OR_FLUSH_SEND \ (RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND) +#define RVT_SEND_OR_FLUSH_OR_RECV_OK \ + (RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND | RVT_PROCESS_RECV_OK) /* * Internal send flags @@ -370,6 +372,7 @@ struct rvt_qp { struct rvt_sge_state s_ack_rdma_sge; struct timer_list s_timer; + struct hrtimer s_rnr_timer; atomic_t local_ops_pending; /* number of fast_reg/local_inv reqs */ @@ -467,6 +470,15 @@ static inline struct rvt_rwqe *rvt_get_rwqe_ptr(struct rvt_rq *rq, unsigned n) } /** + * rvt_is_user_qp - return whether this is a user mode QP + * @qp - the target QP + */ +static inline bool rvt_is_user_qp(struct rvt_qp *qp) +{ + return !!qp->pid; +} + +/** * rvt_get_qp - get a QP reference * @qp - the QP to hold */ @@ -582,6 +594,32 @@ static inline void rvt_qp_swqe_complete( } } +/* + * Compare the lower 24 bits of the msn values. + * Returns an integer less than, equal to, or greater than zero. + */ +static inline int rvt_cmp_msn(u32 a, u32 b) +{ + return (((int)a) - ((int)b)) << 8; +} + +/** + * rvt_compute_aeth - compute the AETH (syndrome + MSN) + * @qp: the queue pair to compute the AETH for + * + * Returns the AETH.
+ */ +__be32 rvt_compute_aeth(struct rvt_qp *qp); + +/** + * rvt_get_credit - flush the send work queue of a QP + * @qp: the qp whose send work queue to flush + * @aeth: the Acknowledge Extended Transport Header + * + * The QP s_lock should be held. + */ +void rvt_get_credit(struct rvt_qp *qp, u32 aeth); + /** * @qp - the queue pair * @len - the length @@ -607,6 +645,14 @@ static inline u32 rvt_div_mtu(struct rvt_qp *qp, u32 len) extern const int ib_rvt_state_ops[]; struct rvt_dev_info; +void rvt_comm_est(struct rvt_qp *qp); int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err); +void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err); +unsigned long rvt_rnr_tbl_to_usec(u32 index); +enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t); +void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth); +void rvt_del_timers_sync(struct rvt_qp *qp); +void rvt_stop_rc_timers(struct rvt_qp *qp); +void rvt_add_retry_timer(struct rvt_qp *qp); #endif /* DEF_RDMAVT_INCQP_H */ diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h index b0e275de6dec..583875ea136a 100644 --- a/include/scsi/libiscsi.h +++ b/include/scsi/libiscsi.h @@ -196,6 +196,7 @@ struct iscsi_conn { struct iscsi_task *task; /* xmit task in progress */ /* xmit */ + spinlock_t taskqueuelock; /* protects the next three lists */ struct list_head mgmtqueue; /* mgmt (control) xmit queue */ struct list_head cmdqueue; /* data-path cmd queue */ struct list_head requeue; /* tasks needing another run */ diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index 8990e580b278..080c7ce9bae8 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -315,6 +315,7 @@ extern void scsi_remove_device(struct scsi_device *); extern int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh); void scsi_attach_vpd(struct scsi_device *sdev); +extern struct scsi_device *scsi_device_from_queue(struct request_queue *q); extern int scsi_device_get(struct scsi_device *); extern void scsi_device_put(struct scsi_device *); extern struct scsi_device *scsi_device_lookup(struct Scsi_Host *, @@ -409,19 +410,16 @@ extern int scsi_is_target_device(const struct device *); extern void scsi_sanitize_inquiry_string(unsigned char *s, int len); extern int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, int data_direction, void *buffer, unsigned bufflen, - unsigned char *sense, int timeout, int retries, - u64 flags, int *resid); -extern int scsi_execute_req_flags(struct scsi_device *sdev, - const unsigned char *cmd, int data_direction, void *buffer, - unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout, - int retries, int *resid, u64 flags, req_flags_t rq_flags); + unsigned char *sense, struct scsi_sense_hdr *sshdr, + int timeout, int retries, u64 flags, + req_flags_t rq_flags, int *resid); static inline int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd, int data_direction, void *buffer, unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout, int retries, int *resid) { - return scsi_execute_req_flags(sdev, cmd, data_direction, buffer, - bufflen, sshdr, timeout, retries, resid, 0, 0); + return scsi_execute(sdev, cmd, data_direction, buffer, + bufflen, NULL, sshdr, timeout, retries, 0, 0, resid); } extern void sdev_disable_disk_events(struct scsi_device *sdev); extern void sdev_enable_disk_events(struct scsi_device *sdev); @@ -474,6 +472,10 @@ static inline int scsi_device_created(struct scsi_device *sdev) sdev->sdev_state == SDEV_CREATED_BLOCK; } +int
scsi_internal_device_block(struct scsi_device *sdev, bool wait); +int scsi_internal_device_unblock(struct scsi_device *sdev, + enum scsi_device_state new_state); + /* accessor functions for the SCSI parameters */ static inline int scsi_device_sync(struct scsi_device *sdev) { diff --git a/include/soc/arc/mcip.h b/include/soc/arc/mcip.h index 4b6b489a8d7c..c2d1b15da136 100644 --- a/include/soc/arc/mcip.h +++ b/include/soc/arc/mcip.h @@ -14,6 +14,7 @@ #include <soc/arc/aux.h> #define ARC_REG_MCIP_BCR 0x0d0 +#define ARC_REG_MCIP_IDU_BCR 0x0D5 #define ARC_REG_MCIP_CMD 0x600 #define ARC_REG_MCIP_WDATA 0x601 #define ARC_REG_MCIP_READBACK 0x602 @@ -69,6 +70,22 @@ struct mcip_bcr { #endif }; +struct mcip_idu_bcr { +#ifdef CONFIG_CPU_BIG_ENDIAN + unsigned int pad:21, cirqnum:3, ver:8; +#else + unsigned int ver:8, cirqnum:3, pad:21; +#endif +}; + + +/* + * The IDU build register does not hold the actual number of supported common + * interrupts, but an exponent of 2 which must be multiplied by 4 to + * get the number of supported common interrupts. + */ +#define mcip_idu_bcr_to_nr_irqs(bcr) (4 * (1 << (bcr).cirqnum)) + /* * MCIP programming model * diff --git a/include/sound/control.h b/include/sound/control.h index 21d047f229a1..bd7246de58e7 100644 --- a/include/sound/control.h +++ b/include/sound/control.h @@ -22,6 +22,7 @@ * */ +#include <linux/wait.h> #include <sound/asound.h> #define snd_kcontrol_chip(kcontrol) ((kcontrol)->private_data) diff --git a/include/sound/dmaengine_pcm.h b/include/sound/dmaengine_pcm.h index 1c8f9e1ef2a5..67be2445941a 100644 --- a/include/sound/dmaengine_pcm.h +++ b/include/sound/dmaengine_pcm.h @@ -71,6 +71,7 @@ struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream) * @slave_id: Slave requester id for the DMA channel. * @filter_data: Custom DMA channel filter data, this will usually be used when * requesting the DMA channel. + * @chan_name: Custom channel name to use when requesting DMA channel. * @fifo_size: FIFO size of the DAI controller in bytes * @flags: PCM_DAI flags, only SND_DMAENGINE_PCM_DAI_FLAG_PACK for now */ @@ -80,6 +81,7 @@ struct snd_dmaengine_dai_dma_data { u32 maxburst; unsigned int slave_id; void *filter_data; + const char *chan_name; unsigned int fifo_size; unsigned int flags; }; @@ -105,6 +107,10 @@ void snd_dmaengine_pcm_set_config_from_dai_data( * playback. */ #define SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX BIT(3) +/* + * The PCM streams have custom channel names specified.
 /** * struct snd_dmaengine_pcm_config - Configuration data for dmaengine based PCM diff --git a/include/sound/pcm.h b/include/sound/pcm.h index af1fb37c6b26..361749e60799 100644 --- a/include/sound/pcm.h +++ b/include/sound/pcm.h @@ -570,6 +570,15 @@ int snd_pcm_stop_xrun(struct snd_pcm_substream *substream); #ifdef CONFIG_PM int snd_pcm_suspend(struct snd_pcm_substream *substream); int snd_pcm_suspend_all(struct snd_pcm *pcm); +#else +static inline int snd_pcm_suspend(struct snd_pcm_substream *substream) +{ + return 0; +} +static inline int snd_pcm_suspend_all(struct snd_pcm *pcm) +{ + return 0; +} #endif int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream, unsigned int cmd, void *arg); int snd_pcm_open_substream(struct snd_pcm *pcm, int stream, struct file *file, diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h index f730b91e472f..492a3ca7f17b 100644 --- a/include/sound/rawmidi.h +++ b/include/sound/rawmidi.h @@ -103,7 +103,7 @@ struct snd_rawmidi_substream { struct snd_rawmidi_runtime *runtime; struct pid *pid; /* hardware layer */ - struct snd_rawmidi_ops *ops; + const struct snd_rawmidi_ops *ops; }; struct snd_rawmidi_file { @@ -155,7 +155,7 @@ int snd_rawmidi_new(struct snd_card *card, char *id, int device, int output_count, int input_count, struct snd_rawmidi **rmidi); void snd_rawmidi_set_ops(struct snd_rawmidi *rmidi, int stream, - struct snd_rawmidi_ops *ops); + const struct snd_rawmidi_ops *ops); /* callbacks */ diff --git a/include/sound/rt5665.h b/include/sound/rt5665.h index 963229e71dc7..963229e71dc7 100755..100644 --- a/include/sound/rt5665.h +++ b/include/sound/rt5665.h diff --git a/include/sound/simple_card_utils.h b/include/sound/simple_card_utils.h index 64e90ca9ad32..af58d2362975 100644 --- a/include/sound/simple_card_utils.h +++ b/include/sound/simple_card_utils.h @@ -34,11 +34,12 @@ int asoc_simple_card_set_dailink_name(struct device *dev, int asoc_simple_card_parse_card_name(struct snd_soc_card *card, char *prefix); -#define asoc_simple_card_parse_clk_cpu(node, dai_link, simple_dai) \ - asoc_simple_card_parse_clk(node, dai_link->cpu_of_node, simple_dai) -#define asoc_simple_card_parse_clk_codec(node, dai_link, simple_dai) \ - asoc_simple_card_parse_clk(node, dai_link->codec_of_node, simple_dai) -int asoc_simple_card_parse_clk(struct device_node *node, +#define asoc_simple_card_parse_clk_cpu(dev, node, dai_link, simple_dai) \ + asoc_simple_card_parse_clk(dev, node, dai_link->cpu_of_node, simple_dai) +#define asoc_simple_card_parse_clk_codec(dev, node, dai_link, simple_dai) \ + asoc_simple_card_parse_clk(dev, node, dai_link->codec_of_node, simple_dai) +int asoc_simple_card_parse_clk(struct device *dev, + struct device_node *node, struct device_node *dai_of_node, struct asoc_simple_dai *simple_dai); diff --git a/include/sound/snd_wavefront.h b/include/sound/snd_wavefront.h index 35e94b3d1ec7..cd0bab1ef6f1 100644 --- a/include/sound/snd_wavefront.h +++ b/include/sound/snd_wavefront.h @@ -37,8 +37,8 @@ struct _snd_wavefront_midi { #define MPU_ACK 0xFE #define UART_MODE_ON 0x3F -extern struct snd_rawmidi_ops snd_wavefront_midi_output; -extern struct snd_rawmidi_ops snd_wavefront_midi_input; +extern const struct snd_rawmidi_ops snd_wavefront_midi_output; +extern const struct snd_rawmidi_ops snd_wavefront_midi_input; extern void snd_wavefront_midi_enable_virtual (snd_wavefront_card_t *); extern void snd_wavefront_midi_disable_virtual (snd_wavefront_card_t *);
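Since snd_rawmidi_set_ops() now takes a const table (see the rawmidi.h and snd_wavefront.h hunks above), driver ops can be declared const and live in rodata. A minimal sketch under that assumption; the stub callbacks and names are illustrative only:

static int example_midi_open(struct snd_rawmidi_substream *substream)
{
	return 0;
}

static int example_midi_close(struct snd_rawmidi_substream *substream)
{
	return 0;
}

static void example_midi_trigger(struct snd_rawmidi_substream *substream, int up)
{
	/* start or stop hardware transmission here */
}

static const struct snd_rawmidi_ops example_midi_output_ops = {
	.open    = example_midi_open,
	.close   = example_midi_close,
	.trigger = example_midi_trigger,
};

/* registered with:
 *	snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT,
 *			    &example_midi_output_ops);
 */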
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h index 200e1f04c166..58acd00cae19 100644 --- a/include/sound/soc-dai.h +++ b/include/sound/soc-dai.h @@ -256,6 +256,9 @@ struct snd_soc_dai_driver { int (*resume)(struct snd_soc_dai *dai); /* compress dai */ int (*compress_new)(struct snd_soc_pcm_runtime *rtd, int num); + /* Optional callback used at PCM creation */ + int (*pcm_new)(struct snd_soc_pcm_runtime *rtd, + struct snd_soc_dai *dai); /* DAI is also used for the control bus */ bool bus_control; diff --git a/include/sound/soc.h b/include/sound/soc.h index b86168a21d56..cdfb55f7aede 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -497,6 +497,8 @@ void snd_soc_runtime_deactivate(struct snd_soc_pcm_runtime *rtd, int stream); int snd_soc_runtime_set_dai_fmt(struct snd_soc_pcm_runtime *rtd, unsigned int dai_fmt); +int snd_soc_set_dmi_name(struct snd_soc_card *card, const char *flavour); + /* Utility functions to get clock rates from various things */ int snd_soc_calc_frame_size(int sample_size, int channels, int tdm_slots); int snd_soc_params_to_frame_size(struct snd_pcm_hw_params *params); @@ -507,9 +509,6 @@ int snd_soc_params_to_bclk(struct snd_pcm_hw_params *parms); int snd_soc_set_runtime_hwparams(struct snd_pcm_substream *substream, const struct snd_pcm_hardware *hw); -int snd_soc_platform_trigger(struct snd_pcm_substream *substream, - int cmd, struct snd_soc_platform *platform); - int soc_dai_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai); @@ -785,6 +784,10 @@ struct snd_soc_component_driver { int (*suspend)(struct snd_soc_component *); int (*resume)(struct snd_soc_component *); + /* pcm creation and destruction */ + int (*pcm_new)(struct snd_soc_pcm_runtime *); + void (*pcm_free)(struct snd_pcm *); + /* DT */ int (*of_xlate_dai_name)(struct snd_soc_component *component, struct of_phandle_args *args, @@ -859,6 +862,8 @@ struct snd_soc_component { void (*remove)(struct snd_soc_component *); int (*suspend)(struct snd_soc_component *); int (*resume)(struct snd_soc_component *); + int (*pcm_new)(struct snd_soc_pcm_runtime *); + void (*pcm_free)(struct snd_pcm *); /* machine specific init */ int (*init)(struct snd_soc_component *component); @@ -941,20 +946,11 @@ struct snd_soc_platform_driver { int (*pcm_new)(struct snd_soc_pcm_runtime *); void (*pcm_free)(struct snd_pcm *); - /* - * For platform caused delay reporting. - * Optional.
- */ - snd_pcm_sframes_t (*delay)(struct snd_pcm_substream *, - struct snd_soc_dai *); - /* platform stream pcm ops */ const struct snd_pcm_ops *ops; /* platform stream compress ops */ const struct snd_compr_ops *compr_ops; - - int (*bespoke_trigger)(struct snd_pcm_substream *, int); }; struct snd_soc_dai_link_component { @@ -1099,6 +1095,8 @@ struct snd_soc_card { const char *name; const char *long_name; const char *driver_name; + char dmi_longname[80]; + struct device *dev; struct snd_card *snd_card; struct module *owner; @@ -1647,37 +1645,21 @@ static inline struct snd_soc_platform *snd_soc_kcontrol_platform( int snd_soc_util_init(void); void snd_soc_util_exit(void); -#define snd_soc_of_parse_card_name(card, propname) \ - snd_soc_of_parse_card_name_from_node(card, NULL, propname) -int snd_soc_of_parse_card_name_from_node(struct snd_soc_card *card, - struct device_node *np, - const char *propname); -#define snd_soc_of_parse_audio_simple_widgets(card, propname)\ - snd_soc_of_parse_audio_simple_widgets_from_node(card, NULL, propname) -int snd_soc_of_parse_audio_simple_widgets_from_node(struct snd_soc_card *card, - struct device_node *np, - const char *propname); - +int snd_soc_of_parse_card_name(struct snd_soc_card *card, + const char *propname); +int snd_soc_of_parse_audio_simple_widgets(struct snd_soc_card *card, + const char *propname); int snd_soc_of_parse_tdm_slot(struct device_node *np, unsigned int *tx_mask, unsigned int *rx_mask, unsigned int *slots, unsigned int *slot_width); -#define snd_soc_of_parse_audio_prefix(card, codec_conf, of_node, propname) \ - snd_soc_of_parse_audio_prefix_from_node(card, NULL, codec_conf, \ - of_node, propname) -void snd_soc_of_parse_audio_prefix_from_node(struct snd_soc_card *card, - struct device_node *np, +void snd_soc_of_parse_audio_prefix(struct snd_soc_card *card, struct snd_soc_codec_conf *codec_conf, struct device_node *of_node, const char *propname); - -#define snd_soc_of_parse_audio_routing(card, propname) \ - snd_soc_of_parse_audio_routing_from_node(card, NULL, propname) -int snd_soc_of_parse_audio_routing_from_node(struct snd_soc_card *card, - struct device_node *np, - const char *propname); - +int snd_soc_of_parse_audio_routing(struct snd_soc_card *card, + const char *propname); unsigned int snd_soc_of_parse_daifmt(struct device_node *np, const char *prefix, struct device_node **bitclkmaster, diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h index 1277e9ba0318..ff1a4f4cd66d 100644 --- a/include/target/iscsi/iscsi_transport.h +++ b/include/target/iscsi/iscsi_transport.h @@ -55,8 +55,12 @@ extern int iscsit_setup_scsi_cmd(struct iscsi_conn *, struct iscsi_cmd *, extern void iscsit_set_unsoliticed_dataout(struct iscsi_cmd *); extern int iscsit_process_scsi_cmd(struct iscsi_conn *, struct iscsi_cmd *, struct iscsi_scsi_req *); -extern int iscsit_check_dataout_hdr(struct iscsi_conn *, unsigned char *, - struct iscsi_cmd **); +extern int +__iscsit_check_dataout_hdr(struct iscsi_conn *, void *, + struct iscsi_cmd *, u32, bool *); +extern int +iscsit_check_dataout_hdr(struct iscsi_conn *conn, void *buf, + struct iscsi_cmd **out_cmd); extern int iscsit_check_dataout_payload(struct iscsi_cmd *, struct iscsi_data *, bool); extern int iscsit_setup_nop_out(struct iscsi_conn *, struct iscsi_cmd *, @@ -125,6 +129,9 @@ extern void iscsit_release_cmd(struct iscsi_cmd *); extern void iscsit_free_cmd(struct iscsi_cmd *, bool); extern void iscsit_add_cmd_to_immediate_queue(struct iscsi_cmd *, struct iscsi_conn *, 
u8); +extern struct iscsi_cmd * +iscsit_find_cmd_from_itt_or_dump(struct iscsi_conn *conn, + itt_t init_task_tag, u32 length); /* * From iscsi_target_nego.c diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h index b54b98dc2d4a..1b0f447ce850 100644 --- a/include/target/target_core_backend.h +++ b/include/target/target_core_backend.h @@ -4,7 +4,12 @@ #include <linux/types.h> #include <target/target_core_base.h> -#define TRANSPORT_FLAG_PASSTHROUGH 1 +#define TRANSPORT_FLAG_PASSTHROUGH 0x1 +/* + * ALUA commands, state checks and setup operations are handled by the + * backend module. + */ +#define TRANSPORT_FLAG_PASSTHROUGH_ALUA 0x2 struct request_queue; struct scatterlist; diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index da854fb4530f..4b784b6e21c0 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -4,7 +4,9 @@ #include <linux/configfs.h> /* struct config_group */ #include <linux/dma-direction.h> /* enum dma_data_direction */ #include <linux/percpu_ida.h> /* struct percpu_ida */ +#include <linux/percpu-refcount.h> #include <linux/semaphore.h> /* struct semaphore */ +#include <linux/completion.h> #define TARGET_CORE_VERSION "v5.0" @@ -197,6 +199,7 @@ enum tcm_tmreq_table { TMR_LUN_RESET = 5, TMR_TARGET_WARM_RESET = 6, TMR_TARGET_COLD_RESET = 7, + TMR_UNKNOWN = 0xff, }; /* fabric independent task management response values */ @@ -296,7 +299,7 @@ struct t10_alua_tg_pt_gp { struct list_head tg_pt_gp_lun_list; struct se_lun *tg_pt_gp_alua_lun; struct se_node_acl *tg_pt_gp_alua_nacl; - struct delayed_work tg_pt_gp_transition_work; + struct work_struct tg_pt_gp_transition_work; struct completion *tg_pt_gp_transition_complete; }; @@ -397,7 +400,6 @@ struct se_tmr_req { void *fabric_tmr_ptr; struct se_cmd *task_cmd; struct se_device *tmr_dev; - struct se_lun *tmr_lun; struct list_head tmr_list; }; @@ -488,8 +490,6 @@ struct se_cmd { #define CMD_T_COMPLETE (1 << 2) #define CMD_T_SENT (1 << 4) #define CMD_T_STOP (1 << 5) -#define CMD_T_DEV_ACTIVE (1 << 7) -#define CMD_T_BUSY (1 << 9) #define CMD_T_TAS (1 << 10) #define CMD_T_FABRIC_STOP (1 << 11) spinlock_t t_state_lock; @@ -732,6 +732,7 @@ struct se_lun { struct config_group lun_group; struct se_port_stat_grps port_stat_grps; struct completion lun_ref_comp; + struct completion lun_shutdown_comp; struct percpu_ref lun_ref; struct list_head lun_dev_link; struct hlist_node link; @@ -767,6 +768,8 @@ struct se_device { u32 dev_index; u64 creation_time; atomic_long_t num_resets; + atomic_long_t aborts_complete; + atomic_long_t aborts_no_task; atomic_long_t num_cmds; atomic_long_t read_bytes; atomic_long_t write_bytes; @@ -914,6 +917,7 @@ static inline struct se_portal_group *param_to_tpg(struct config_item *item) struct se_wwn { struct target_fabric_configfs *wwn_tf; + void *priv; struct config_group wwn_group; struct config_group fabric_stat_group; }; diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h index 358041bad1da..d7dd1427fe0d 100644 --- a/include/target/target_core_fabric.h +++ b/include/target/target_core_fabric.h @@ -47,7 +47,7 @@ struct target_core_fabric_ops { u32 (*tpg_get_inst_index)(struct se_portal_group *); /* * Optional to release struct se_cmd and fabric dependent allocated - * I/O descriptor in transport_cmd_check_stop(). + * I/O descriptor after command execution has finished. * * Returning 1 will signal a descriptor has been released. 
* Returning 0 will signal a descriptor has not been released. diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h new file mode 100644 index 000000000000..8b95c16b7045 --- /dev/null +++ b/include/trace/events/afs.h @@ -0,0 +1,184 @@ +/* AFS tracepoints + * + * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM afs + +#if !defined(_TRACE_AFS_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_AFS_H + +#include <linux/tracepoint.h> + +/* + * Define enums for tracing information. + */ +#ifndef __AFS_DECLARE_TRACE_ENUMS_ONCE_ONLY +#define __AFS_DECLARE_TRACE_ENUMS_ONCE_ONLY + +enum afs_call_trace { + afs_call_trace_alloc, + afs_call_trace_free, + afs_call_trace_put, + afs_call_trace_wake, + afs_call_trace_work, +}; + +#endif /* end __AFS_DECLARE_TRACE_ENUMS_ONCE_ONLY */ + +/* + * Declare tracing information enums and their string mappings for display. + */ +#define afs_call_traces \ + EM(afs_call_trace_alloc, "ALLOC") \ + EM(afs_call_trace_free, "FREE ") \ + EM(afs_call_trace_put, "PUT ") \ + EM(afs_call_trace_wake, "WAKE ") \ + E_(afs_call_trace_work, "WORK ") + +/* + * Export enum symbols to userspace. + */ +#undef EM +#undef E_ +#define EM(a, b) TRACE_DEFINE_ENUM(a); +#define E_(a, b) TRACE_DEFINE_ENUM(a); + +afs_call_traces; + +/* + * Now redefine the EM() and E_() macros to map the enums to the strings that + * will be printed in the output. + */ +#undef EM +#undef E_ +#define EM(a, b) { a, b }, +#define E_(a, b) { a, b } + +TRACE_EVENT(afs_recv_data, + TP_PROTO(struct afs_call *call, unsigned count, unsigned offset, + bool want_more, int ret), + + TP_ARGS(call, count, offset, want_more, ret), + + TP_STRUCT__entry( + __field(struct rxrpc_call *, rxcall ) + __field(struct afs_call *, call ) + __field(enum afs_call_state, state ) + __field(unsigned int, count ) + __field(unsigned int, offset ) + __field(unsigned short, unmarshall ) + __field(bool, want_more ) + __field(int, ret ) + ), + + TP_fast_assign( + __entry->rxcall = call->rxcall; + __entry->call = call; + __entry->state = call->state; + __entry->unmarshall = call->unmarshall; + __entry->count = count; + __entry->offset = offset; + __entry->want_more = want_more; + __entry->ret = ret; + ), + + TP_printk("c=%p ac=%p s=%u u=%u %u/%u wm=%u ret=%d", + __entry->rxcall, + __entry->call, + __entry->state, __entry->unmarshall, + __entry->offset, __entry->count, + __entry->want_more, __entry->ret) + ); + +TRACE_EVENT(afs_notify_call, + TP_PROTO(struct rxrpc_call *rxcall, struct afs_call *call), + + TP_ARGS(rxcall, call), + + TP_STRUCT__entry( + __field(struct rxrpc_call *, rxcall ) + __field(struct afs_call *, call ) + __field(enum afs_call_state, state ) + __field(unsigned short, unmarshall ) + ), + + TP_fast_assign( + __entry->rxcall = rxcall; + __entry->call = call; + __entry->state = call->state; + __entry->unmarshall = call->unmarshall; + ), + + TP_printk("c=%p ac=%p s=%u u=%u", + __entry->rxcall, + __entry->call, + __entry->state, __entry->unmarshall) + ); + +TRACE_EVENT(afs_cb_call, + TP_PROTO(struct afs_call *call), + + TP_ARGS(call), + + TP_STRUCT__entry( + __field(struct rxrpc_call *, rxcall ) + __field(struct afs_call *, call ) + __field(const char *, name 
) + __field(u32, op ) + ), + + TP_fast_assign( + __entry->rxcall = call->rxcall; + __entry->call = call; + __entry->name = call->type->name; + __entry->op = call->operation_ID; + ), + + TP_printk("c=%p ac=%p %s o=%u", + __entry->rxcall, + __entry->call, + __entry->name, + __entry->op) + ); + +TRACE_EVENT(afs_call, + TP_PROTO(struct afs_call *call, enum afs_call_trace op, + int usage, int outstanding, const void *where), + + TP_ARGS(call, op, usage, outstanding, where), + + TP_STRUCT__entry( + __field(struct afs_call *, call ) + __field(int, op ) + __field(int, usage ) + __field(int, outstanding ) + __field(const void *, where ) + ), + + TP_fast_assign( + __entry->call = call; + __entry->op = op; + __entry->usage = usage; + __entry->outstanding = outstanding; + __entry->where = where; + ), + + TP_printk("c=%p %s u=%d o=%d sp=%pSR", + __entry->call, + __print_symbolic(__entry->op, afs_call_traces), + __entry->usage, + __entry->outstanding, + __entry->where) + ); + +#endif /* _TRACE_AFS_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/bpf.h b/include/trace/events/bpf.h new file mode 100644 index 000000000000..c3a53fd47ff1 --- /dev/null +++ b/include/trace/events/bpf.h @@ -0,0 +1,347 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM bpf + +#if !defined(_TRACE_BPF_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_BPF_H + +#include <linux/filter.h> +#include <linux/bpf.h> +#include <linux/fs.h> +#include <linux/tracepoint.h> + +#define __PROG_TYPE_MAP(FN) \ + FN(SOCKET_FILTER) \ + FN(KPROBE) \ + FN(SCHED_CLS) \ + FN(SCHED_ACT) \ + FN(TRACEPOINT) \ + FN(XDP) \ + FN(PERF_EVENT) \ + FN(CGROUP_SKB) \ + FN(CGROUP_SOCK) \ + FN(LWT_IN) \ + FN(LWT_OUT) \ + FN(LWT_XMIT) + +#define __MAP_TYPE_MAP(FN) \ + FN(HASH) \ + FN(ARRAY) \ + FN(PROG_ARRAY) \ + FN(PERF_EVENT_ARRAY) \ + FN(PERCPU_HASH) \ + FN(PERCPU_ARRAY) \ + FN(STACK_TRACE) \ + FN(CGROUP_ARRAY) \ + FN(LRU_HASH) \ + FN(LRU_PERCPU_HASH) \ + FN(LPM_TRIE) + +#define __PROG_TYPE_TP_FN(x) \ + TRACE_DEFINE_ENUM(BPF_PROG_TYPE_##x); +#define __PROG_TYPE_SYM_FN(x) \ + { BPF_PROG_TYPE_##x, #x }, +#define __PROG_TYPE_SYM_TAB \ + __PROG_TYPE_MAP(__PROG_TYPE_SYM_FN) { -1, 0 } +__PROG_TYPE_MAP(__PROG_TYPE_TP_FN) + +#define __MAP_TYPE_TP_FN(x) \ + TRACE_DEFINE_ENUM(BPF_MAP_TYPE_##x); +#define __MAP_TYPE_SYM_FN(x) \ + { BPF_MAP_TYPE_##x, #x }, +#define __MAP_TYPE_SYM_TAB \ + __MAP_TYPE_MAP(__MAP_TYPE_SYM_FN) { -1, 0 } +__MAP_TYPE_MAP(__MAP_TYPE_TP_FN) + +DECLARE_EVENT_CLASS(bpf_prog_event, + + TP_PROTO(const struct bpf_prog *prg), + + TP_ARGS(prg), + + TP_STRUCT__entry( + __array(u8, prog_tag, 8) + __field(u32, type) + ), + + TP_fast_assign( + BUILD_BUG_ON(sizeof(__entry->prog_tag) != sizeof(prg->tag)); + memcpy(__entry->prog_tag, prg->tag, sizeof(prg->tag)); + __entry->type = prg->type; + ), + + TP_printk("prog=%s type=%s", + __print_hex_str(__entry->prog_tag, 8), + __print_symbolic(__entry->type, __PROG_TYPE_SYM_TAB)) +); + +DEFINE_EVENT(bpf_prog_event, bpf_prog_get_type, + + TP_PROTO(const struct bpf_prog *prg), + + TP_ARGS(prg) +); + +DEFINE_EVENT(bpf_prog_event, bpf_prog_put_rcu, + + TP_PROTO(const struct bpf_prog *prg), + + TP_ARGS(prg) +); + +TRACE_EVENT(bpf_prog_load, + + TP_PROTO(const struct bpf_prog *prg, int ufd), + + TP_ARGS(prg, ufd), + + TP_STRUCT__entry( + __array(u8, prog_tag, 8) + __field(u32, type) + __field(int, ufd) + ), + + TP_fast_assign( + BUILD_BUG_ON(sizeof(__entry->prog_tag) != sizeof(prg->tag)); + memcpy(__entry->prog_tag, prg->tag, sizeof(prg->tag)); + __entry->type = 
prg->type; + __entry->ufd = ufd; + ), + + TP_printk("prog=%s type=%s ufd=%d", + __print_hex_str(__entry->prog_tag, 8), + __print_symbolic(__entry->type, __PROG_TYPE_SYM_TAB), + __entry->ufd) +); + +TRACE_EVENT(bpf_map_create, + + TP_PROTO(const struct bpf_map *map, int ufd), + + TP_ARGS(map, ufd), + + TP_STRUCT__entry( + __field(u32, type) + __field(u32, size_key) + __field(u32, size_value) + __field(u32, max_entries) + __field(u32, flags) + __field(int, ufd) + ), + + TP_fast_assign( + __entry->type = map->map_type; + __entry->size_key = map->key_size; + __entry->size_value = map->value_size; + __entry->max_entries = map->max_entries; + __entry->flags = map->map_flags; + __entry->ufd = ufd; + ), + + TP_printk("map type=%s ufd=%d key=%u val=%u max=%u flags=%x", + __print_symbolic(__entry->type, __MAP_TYPE_SYM_TAB), + __entry->ufd, __entry->size_key, __entry->size_value, + __entry->max_entries, __entry->flags) +); + +DECLARE_EVENT_CLASS(bpf_obj_prog, + + TP_PROTO(const struct bpf_prog *prg, int ufd, + const struct filename *pname), + + TP_ARGS(prg, ufd, pname), + + TP_STRUCT__entry( + __array(u8, prog_tag, 8) + __field(int, ufd) + __string(path, pname->name) + ), + + TP_fast_assign( + BUILD_BUG_ON(sizeof(__entry->prog_tag) != sizeof(prg->tag)); + memcpy(__entry->prog_tag, prg->tag, sizeof(prg->tag)); + __assign_str(path, pname->name); + __entry->ufd = ufd; + ), + + TP_printk("prog=%s path=%s ufd=%d", + __print_hex_str(__entry->prog_tag, 8), + __get_str(path), __entry->ufd) +); + +DEFINE_EVENT(bpf_obj_prog, bpf_obj_pin_prog, + + TP_PROTO(const struct bpf_prog *prg, int ufd, + const struct filename *pname), + + TP_ARGS(prg, ufd, pname) +); + +DEFINE_EVENT(bpf_obj_prog, bpf_obj_get_prog, + + TP_PROTO(const struct bpf_prog *prg, int ufd, + const struct filename *pname), + + TP_ARGS(prg, ufd, pname) +); + +DECLARE_EVENT_CLASS(bpf_obj_map, + + TP_PROTO(const struct bpf_map *map, int ufd, + const struct filename *pname), + + TP_ARGS(map, ufd, pname), + + TP_STRUCT__entry( + __field(u32, type) + __field(int, ufd) + __string(path, pname->name) + ), + + TP_fast_assign( + __assign_str(path, pname->name); + __entry->type = map->map_type; + __entry->ufd = ufd; + ), + + TP_printk("map type=%s ufd=%d path=%s", + __print_symbolic(__entry->type, __MAP_TYPE_SYM_TAB), + __entry->ufd, __get_str(path)) +); + +DEFINE_EVENT(bpf_obj_map, bpf_obj_pin_map, + + TP_PROTO(const struct bpf_map *map, int ufd, + const struct filename *pname), + + TP_ARGS(map, ufd, pname) +); + +DEFINE_EVENT(bpf_obj_map, bpf_obj_get_map, + + TP_PROTO(const struct bpf_map *map, int ufd, + const struct filename *pname), + + TP_ARGS(map, ufd, pname) +); + +DECLARE_EVENT_CLASS(bpf_map_keyval, + + TP_PROTO(const struct bpf_map *map, int ufd, + const void *key, const void *val), + + TP_ARGS(map, ufd, key, val), + + TP_STRUCT__entry( + __field(u32, type) + __field(u32, key_len) + __dynamic_array(u8, key, map->key_size) + __field(bool, key_trunc) + __field(u32, val_len) + __dynamic_array(u8, val, map->value_size) + __field(bool, val_trunc) + __field(int, ufd) + ), + + TP_fast_assign( + memcpy(__get_dynamic_array(key), key, map->key_size); + memcpy(__get_dynamic_array(val), val, map->value_size); + __entry->type = map->map_type; + __entry->key_len = min(map->key_size, 16U); + __entry->key_trunc = map->key_size != __entry->key_len; + __entry->val_len = min(map->value_size, 16U); + __entry->val_trunc = map->value_size != __entry->val_len; + __entry->ufd = ufd; + ), + + TP_printk("map type=%s ufd=%d key=[%s%s] val=[%s%s]", + 
__print_symbolic(__entry->type, __MAP_TYPE_SYM_TAB), + __entry->ufd, + __print_hex(__get_dynamic_array(key), __entry->key_len), + __entry->key_trunc ? " ..." : "", + __print_hex(__get_dynamic_array(val), __entry->val_len), + __entry->val_trunc ? " ..." : "") +); + +DEFINE_EVENT(bpf_map_keyval, bpf_map_lookup_elem, + + TP_PROTO(const struct bpf_map *map, int ufd, + const void *key, const void *val), + + TP_ARGS(map, ufd, key, val) +); + +DEFINE_EVENT(bpf_map_keyval, bpf_map_update_elem, + + TP_PROTO(const struct bpf_map *map, int ufd, + const void *key, const void *val), + + TP_ARGS(map, ufd, key, val) +); + +TRACE_EVENT(bpf_map_delete_elem, + + TP_PROTO(const struct bpf_map *map, int ufd, + const void *key), + + TP_ARGS(map, ufd, key), + + TP_STRUCT__entry( + __field(u32, type) + __field(u32, key_len) + __dynamic_array(u8, key, map->key_size) + __field(bool, key_trunc) + __field(int, ufd) + ), + + TP_fast_assign( + memcpy(__get_dynamic_array(key), key, map->key_size); + __entry->type = map->map_type; + __entry->key_len = min(map->key_size, 16U); + __entry->key_trunc = map->key_size != __entry->key_len; + __entry->ufd = ufd; + ), + + TP_printk("map type=%s ufd=%d key=[%s%s]", + __print_symbolic(__entry->type, __MAP_TYPE_SYM_TAB), + __entry->ufd, + __print_hex(__get_dynamic_array(key), __entry->key_len), + __entry->key_trunc ? " ..." : "") +); + +TRACE_EVENT(bpf_map_next_key, + + TP_PROTO(const struct bpf_map *map, int ufd, + const void *key, const void *key_next), + + TP_ARGS(map, ufd, key, key_next), + + TP_STRUCT__entry( + __field(u32, type) + __field(u32, key_len) + __dynamic_array(u8, key, map->key_size) + __dynamic_array(u8, nxt, map->key_size) + __field(bool, key_trunc) + __field(int, ufd) + ), + + TP_fast_assign( + memcpy(__get_dynamic_array(key), key, map->key_size); + memcpy(__get_dynamic_array(nxt), key_next, map->key_size); + __entry->type = map->map_type; + __entry->key_len = min(map->key_size, 16U); + __entry->key_trunc = map->key_size != __entry->key_len; + __entry->ufd = ufd; + ), + + TP_printk("map type=%s ufd=%d key=[%s%s] next=[%s%s]", + __print_symbolic(__entry->type, __MAP_TYPE_SYM_TAB), + __entry->ufd, + __print_hex(__get_dynamic_array(key), __entry->key_len), + __entry->key_trunc ? " ..." : "", + __print_hex(__get_dynamic_array(nxt), __entry->key_len), + __entry->key_trunc ? " ..." : "") +); + +#endif /* _TRACE_BPF_H */ + +#include <trace/define_trace.h> diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index 88d18a8ceb59..a3c3cab643a9 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -184,7 +184,7 @@ DEFINE_EVENT(btrfs__inode, btrfs_inode_evict, TRACE_EVENT_CONDITION(btrfs_get_extent, - TP_PROTO(struct btrfs_root *root, struct inode *inode, + TP_PROTO(struct btrfs_root *root, struct btrfs_inode *inode, struct extent_map *map), TP_ARGS(root, inode, map), diff --git a/include/trace/events/cgroup.h b/include/trace/events/cgroup.h index ab68640a18d0..c226f50e88fa 100644 --- a/include/trace/events/cgroup.h +++ b/include/trace/events/cgroup.h @@ -61,19 +61,15 @@ DECLARE_EVENT_CLASS(cgroup, __field( int, id ) __field( int, level ) __dynamic_array(char, path, - cgrp->kn ? 
cgroup_path(cgrp, NULL, 0) + 1 - : strlen("(null)")) + cgroup_path(cgrp, NULL, 0) + 1) ), TP_fast_assign( __entry->root = cgrp->root->hierarchy_id; __entry->id = cgrp->id; __entry->level = cgrp->level; - if (cgrp->kn) - cgroup_path(cgrp, __get_dynamic_array(path), - __get_dynamic_array_len(path)); - else - __assign_str(path, "(null)"); + cgroup_path(cgrp, __get_dynamic_array(path), + __get_dynamic_array_len(path)); ), TP_printk("root=%d id=%d level=%d path=%s", @@ -119,8 +115,7 @@ DECLARE_EVENT_CLASS(cgroup_migrate, __field( int, dst_id ) __field( int, dst_level ) __dynamic_array(char, dst_path, - dst_cgrp->kn ? cgroup_path(dst_cgrp, NULL, 0) + 1 - : strlen("(null)")) + cgroup_path(dst_cgrp, NULL, 0) + 1) __field( int, pid ) __string( comm, task->comm ) ), @@ -129,11 +124,8 @@ DECLARE_EVENT_CLASS(cgroup_migrate, __entry->dst_root = dst_cgrp->root->hierarchy_id; __entry->dst_id = dst_cgrp->id; __entry->dst_level = dst_cgrp->level; - if (dst_cgrp->kn) - cgroup_path(dst_cgrp, __get_dynamic_array(dst_path), - __get_dynamic_array_len(dst_path)); - else - __assign_str(dst_path, "(null)"); + cgroup_path(dst_cgrp, __get_dynamic_array(dst_path), + __get_dynamic_array_len(dst_path)); __entry->pid = task->pid; __assign_str(comm, task->comm); ), diff --git a/include/trace/events/compaction.h b/include/trace/events/compaction.h index cbdb90b6b308..0a18ab6483ff 100644 --- a/include/trace/events/compaction.h +++ b/include/trace/events/compaction.h @@ -9,62 +9,6 @@ #include <linux/tracepoint.h> #include <trace/events/mmflags.h> -#define COMPACTION_STATUS \ - EM( COMPACT_SKIPPED, "skipped") \ - EM( COMPACT_DEFERRED, "deferred") \ - EM( COMPACT_CONTINUE, "continue") \ - EM( COMPACT_SUCCESS, "success") \ - EM( COMPACT_PARTIAL_SKIPPED, "partial_skipped") \ - EM( COMPACT_COMPLETE, "complete") \ - EM( COMPACT_NO_SUITABLE_PAGE, "no_suitable_page") \ - EM( COMPACT_NOT_SUITABLE_ZONE, "not_suitable_zone") \ - EMe(COMPACT_CONTENDED, "contended") - -#ifdef CONFIG_ZONE_DMA -#define IFDEF_ZONE_DMA(X) X -#else -#define IFDEF_ZONE_DMA(X) -#endif - -#ifdef CONFIG_ZONE_DMA32 -#define IFDEF_ZONE_DMA32(X) X -#else -#define IFDEF_ZONE_DMA32(X) -#endif - -#ifdef CONFIG_HIGHMEM -#define IFDEF_ZONE_HIGHMEM(X) X -#else -#define IFDEF_ZONE_HIGHMEM(X) -#endif - -#define ZONE_TYPE \ - IFDEF_ZONE_DMA( EM (ZONE_DMA, "DMA")) \ - IFDEF_ZONE_DMA32( EM (ZONE_DMA32, "DMA32")) \ - EM (ZONE_NORMAL, "Normal") \ - IFDEF_ZONE_HIGHMEM( EM (ZONE_HIGHMEM,"HighMem")) \ - EMe(ZONE_MOVABLE,"Movable") - -/* - * First define the enums in the above macros to be exported to userspace - * via TRACE_DEFINE_ENUM(). - */ -#undef EM -#undef EMe -#define EM(a, b) TRACE_DEFINE_ENUM(a); -#define EMe(a, b) TRACE_DEFINE_ENUM(a); - -COMPACTION_STATUS -ZONE_TYPE - -/* - * Now redefine the EM() and EMe() macros to map the enums to the strings - * that will be printed in the output. - */ -#undef EM -#undef EMe -#define EM(a, b) {a, b}, -#define EMe(a, b) {a, b} DECLARE_EVENT_CLASS(mm_compaction_isolate_template, @@ -187,6 +131,7 @@ TRACE_EVENT(mm_compaction_begin, __entry->sync ? "sync" : "async") ); +#ifdef CONFIG_COMPACTION TRACE_EVENT(mm_compaction_end, TP_PROTO(unsigned long zone_start, unsigned long migrate_pfn, unsigned long free_pfn, unsigned long zone_end, bool sync, @@ -220,6 +165,7 @@ TRACE_EVENT(mm_compaction_end, __entry->sync ? 
"sync" : "async", __print_symbolic(__entry->status, COMPACTION_STATUS)) ); +#endif TRACE_EVENT(mm_compaction_try_to_compact_pages, @@ -248,6 +194,7 @@ TRACE_EVENT(mm_compaction_try_to_compact_pages, __entry->prio) ); +#ifdef CONFIG_COMPACTION DECLARE_EVENT_CLASS(mm_compaction_suitable_template, TP_PROTO(struct zone *zone, @@ -295,7 +242,6 @@ DEFINE_EVENT(mm_compaction_suitable_template, mm_compaction_suitable, TP_ARGS(zone, order, ret) ); -#ifdef CONFIG_COMPACTION DECLARE_EVENT_CLASS(mm_compaction_defer_template, TP_PROTO(struct zone *zone, int order), diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h index 01b3c9869a0d..c80fcad0a6c9 100644 --- a/include/trace/events/f2fs.h +++ b/include/trace/events/f2fs.h @@ -6,8 +6,8 @@ #include <linux/tracepoint.h> -#define show_dev(entry) MAJOR(entry->dev), MINOR(entry->dev) -#define show_dev_ino(entry) show_dev(entry), (unsigned long)entry->ino +#define show_dev(dev) MAJOR(dev), MINOR(dev) +#define show_dev_ino(entry) show_dev(entry->dev), (unsigned long)entry->ino TRACE_DEFINE_ENUM(NODE); TRACE_DEFINE_ENUM(DATA); @@ -55,25 +55,35 @@ TRACE_DEFINE_ENUM(CP_DISCARD); { IPU, "IN-PLACE" }, \ { OPU, "OUT-OF-PLACE" }) -#define F2FS_BIO_FLAG_MASK(t) (t & (REQ_RAHEAD | REQ_PREFLUSH | REQ_FUA)) -#define F2FS_BIO_EXTRA_MASK(t) (t & (REQ_META | REQ_PRIO)) - -#define show_bio_type(op_flags) show_bio_op_flags(op_flags), \ - show_bio_extra(op_flags) +#define F2FS_OP_FLAGS (REQ_RAHEAD | REQ_SYNC | REQ_PREFLUSH | REQ_META |\ + REQ_PRIO) +#define F2FS_BIO_FLAG_MASK(t) (t & F2FS_OP_FLAGS) + +#define show_bio_type(op,op_flags) show_bio_op(op), \ + show_bio_op_flags(op_flags) + +#define show_bio_op(op) \ + __print_symbolic(op, \ + { REQ_OP_READ, "READ" }, \ + { REQ_OP_WRITE, "WRITE" }, \ + { REQ_OP_FLUSH, "FLUSH" }, \ + { REQ_OP_DISCARD, "DISCARD" }, \ + { REQ_OP_ZONE_REPORT, "ZONE_REPORT" }, \ + { REQ_OP_SECURE_ERASE, "SECURE_ERASE" }, \ + { REQ_OP_ZONE_RESET, "ZONE_RESET" }, \ + { REQ_OP_WRITE_SAME, "WRITE_SAME" }, \ + { REQ_OP_WRITE_ZEROES, "WRITE_ZEROES" }) #define show_bio_op_flags(flags) \ __print_symbolic(F2FS_BIO_FLAG_MASK(flags), \ - { 0, "WRITE" }, \ - { REQ_RAHEAD, "READAHEAD" }, \ - { REQ_SYNC, "REQ_SYNC" }, \ - { REQ_PREFLUSH, "REQ_PREFLUSH" }, \ - { REQ_FUA, "REQ_FUA" }) - -#define show_bio_extra(type) \ - __print_symbolic(F2FS_BIO_EXTRA_MASK(type), \ + { REQ_RAHEAD, "(RA)" }, \ + { REQ_SYNC, "(S)" }, \ + { REQ_SYNC | REQ_PRIO, "(SP)" }, \ { REQ_META, "(M)" }, \ - { REQ_PRIO, "(P)" }, \ { REQ_META | REQ_PRIO, "(MP)" }, \ + { REQ_SYNC | REQ_PREFLUSH , "(SF)" }, \ + { REQ_SYNC | REQ_META | REQ_PRIO, "(SMP)" }, \ + { REQ_PREFLUSH | REQ_META | REQ_PRIO, "(FMP)" }, \ { 0, " \b" }) #define show_data_type(type) \ @@ -235,7 +245,7 @@ TRACE_EVENT(f2fs_sync_fs, ), TP_printk("dev = (%d,%d), superblock is %s, wait = %d", - show_dev(__entry), + show_dev(__entry->dev), __entry->dirty ? 
"dirty" : "not dirty", __entry->wait) ); @@ -305,6 +315,13 @@ DEFINE_EVENT(f2fs__inode_exit, f2fs_unlink_exit, TP_ARGS(inode, ret) ); +DEFINE_EVENT(f2fs__inode_exit, f2fs_drop_inode, + + TP_PROTO(struct inode *inode, int ret), + + TP_ARGS(inode, ret) +); + DEFINE_EVENT(f2fs__inode, f2fs_truncate, TP_PROTO(struct inode *inode), @@ -534,7 +551,7 @@ TRACE_EVENT(f2fs_background_gc, ), TP_printk("dev = (%d,%d), wait_ms = %ld, prefree = %u, free = %u", - show_dev(__entry), + show_dev(__entry->dev), __entry->wait_ms, __entry->prefree, __entry->free) @@ -555,6 +572,7 @@ TRACE_EVENT(f2fs_get_victim, __field(int, alloc_mode) __field(int, gc_mode) __field(unsigned int, victim) + __field(unsigned int, cost) __field(unsigned int, ofs_unit) __field(unsigned int, pre_victim) __field(unsigned int, prefree) @@ -568,20 +586,23 @@ TRACE_EVENT(f2fs_get_victim, __entry->alloc_mode = p->alloc_mode; __entry->gc_mode = p->gc_mode; __entry->victim = p->min_segno; + __entry->cost = p->min_cost; __entry->ofs_unit = p->ofs_unit; __entry->pre_victim = pre_victim; __entry->prefree = prefree; __entry->free = free; ), - TP_printk("dev = (%d,%d), type = %s, policy = (%s, %s, %s), victim = %u " - "ofs_unit = %u, pre_victim_secno = %d, prefree = %u, free = %u", - show_dev(__entry), + TP_printk("dev = (%d,%d), type = %s, policy = (%s, %s, %s), " + "victim = %u, cost = %u, ofs_unit = %u, " + "pre_victim_secno = %d, prefree = %u, free = %u", + show_dev(__entry->dev), show_data_type(__entry->type), show_gc_type(__entry->gc_type), show_alloc_mode(__entry->alloc_mode), show_victim_policy(__entry->gc_mode), __entry->victim, + __entry->cost, __entry->ofs_unit, (int)__entry->pre_victim, __entry->prefree, @@ -713,7 +734,7 @@ TRACE_EVENT(f2fs_reserve_new_blocks, ), TP_printk("dev = (%d,%d), nid = %u, ofs_in_node = %u, count = %llu", - show_dev(__entry), + show_dev(__entry->dev), (unsigned int)__entry->nid, __entry->ofs_in_node, (unsigned long long)__entry->count) @@ -753,7 +774,7 @@ DECLARE_EVENT_CLASS(f2fs__submit_page_bio, (unsigned long)__entry->index, (unsigned long long)__entry->old_blkaddr, (unsigned long long)__entry->new_blkaddr, - show_bio_type(__entry->op_flags), + show_bio_type(__entry->op, __entry->op_flags), show_block_type(__entry->type)) ); @@ -775,15 +796,15 @@ DEFINE_EVENT_CONDITION(f2fs__submit_page_bio, f2fs_submit_page_mbio, TP_CONDITION(page->mapping) ); -DECLARE_EVENT_CLASS(f2fs__submit_bio, +DECLARE_EVENT_CLASS(f2fs__bio, - TP_PROTO(struct super_block *sb, struct f2fs_io_info *fio, - struct bio *bio), + TP_PROTO(struct super_block *sb, int type, struct bio *bio), - TP_ARGS(sb, fio, bio), + TP_ARGS(sb, type, bio), TP_STRUCT__entry( __field(dev_t, dev) + __field(dev_t, target) __field(int, op) __field(int, op_flags) __field(int, type) @@ -793,37 +814,55 @@ DECLARE_EVENT_CLASS(f2fs__submit_bio, TP_fast_assign( __entry->dev = sb->s_dev; - __entry->op = fio->op; - __entry->op_flags = fio->op_flags; - __entry->type = fio->type; + __entry->target = bio->bi_bdev->bd_dev; + __entry->op = bio_op(bio); + __entry->op_flags = bio->bi_opf; + __entry->type = type; __entry->sector = bio->bi_iter.bi_sector; __entry->size = bio->bi_iter.bi_size; ), - TP_printk("dev = (%d,%d), rw = %s%s, %s, sector = %lld, size = %u", - show_dev(__entry), - show_bio_type(__entry->op_flags), + TP_printk("dev = (%d,%d)/(%d,%d), rw = %s%s, %s, sector = %lld, size = %u", + show_dev(__entry->target), + show_dev(__entry->dev), + show_bio_type(__entry->op, __entry->op_flags), show_block_type(__entry->type), (unsigned long long)__entry->sector, 
__entry->size) ); -DEFINE_EVENT_CONDITION(f2fs__submit_bio, f2fs_submit_write_bio, +DEFINE_EVENT_CONDITION(f2fs__bio, f2fs_prepare_write_bio, + + TP_PROTO(struct super_block *sb, int type, struct bio *bio), + + TP_ARGS(sb, type, bio), + + TP_CONDITION(bio) +); + +DEFINE_EVENT_CONDITION(f2fs__bio, f2fs_prepare_read_bio, - TP_PROTO(struct super_block *sb, struct f2fs_io_info *fio, - struct bio *bio), + TP_PROTO(struct super_block *sb, int type, struct bio *bio), - TP_ARGS(sb, fio, bio), + TP_ARGS(sb, type, bio), TP_CONDITION(bio) ); -DEFINE_EVENT_CONDITION(f2fs__submit_bio, f2fs_submit_read_bio, +DEFINE_EVENT_CONDITION(f2fs__bio, f2fs_submit_read_bio, - TP_PROTO(struct super_block *sb, struct f2fs_io_info *fio, - struct bio *bio), + TP_PROTO(struct super_block *sb, int type, struct bio *bio), - TP_ARGS(sb, fio, bio), + TP_ARGS(sb, type, bio), + + TP_CONDITION(bio) +); + +DEFINE_EVENT_CONDITION(f2fs__bio, f2fs_submit_write_bio, + + TP_PROTO(struct super_block *sb, int type, struct bio *bio), + + TP_ARGS(sb, type, bio), TP_CONDITION(bio) ); @@ -1082,16 +1121,16 @@ TRACE_EVENT(f2fs_write_checkpoint, ), TP_printk("dev = (%d,%d), checkpoint for %s, state = %s", - show_dev(__entry), + show_dev(__entry->dev), show_cpreason(__entry->reason), __entry->msg) ); TRACE_EVENT(f2fs_issue_discard, - TP_PROTO(struct super_block *sb, block_t blkstart, block_t blklen), + TP_PROTO(struct block_device *dev, block_t blkstart, block_t blklen), - TP_ARGS(sb, blkstart, blklen), + TP_ARGS(dev, blkstart, blklen), TP_STRUCT__entry( __field(dev_t, dev) @@ -1100,22 +1139,22 @@ TRACE_EVENT(f2fs_issue_discard, ), TP_fast_assign( - __entry->dev = sb->s_dev; + __entry->dev = dev->bd_dev; __entry->blkstart = blkstart; __entry->blklen = blklen; ), TP_printk("dev = (%d,%d), blkstart = 0x%llx, blklen = 0x%llx", - show_dev(__entry), + show_dev(__entry->dev), (unsigned long long)__entry->blkstart, (unsigned long long)__entry->blklen) ); TRACE_EVENT(f2fs_issue_reset_zone, - TP_PROTO(struct super_block *sb, block_t blkstart), + TP_PROTO(struct block_device *dev, block_t blkstart), - TP_ARGS(sb, blkstart), + TP_ARGS(dev, blkstart), TP_STRUCT__entry( __field(dev_t, dev) @@ -1123,21 +1162,21 @@ TRACE_EVENT(f2fs_issue_reset_zone, ), TP_fast_assign( - __entry->dev = sb->s_dev; + __entry->dev = dev->bd_dev; __entry->blkstart = blkstart; ), TP_printk("dev = (%d,%d), reset zone at block = 0x%llx", - show_dev(__entry), + show_dev(__entry->dev), (unsigned long long)__entry->blkstart) ); TRACE_EVENT(f2fs_issue_flush, - TP_PROTO(struct super_block *sb, unsigned int nobarrier, + TP_PROTO(struct block_device *dev, unsigned int nobarrier, unsigned int flush_merge), - TP_ARGS(sb, nobarrier, flush_merge), + TP_ARGS(dev, nobarrier, flush_merge), TP_STRUCT__entry( __field(dev_t, dev) @@ -1146,13 +1185,13 @@ TRACE_EVENT(f2fs_issue_flush, ), TP_fast_assign( - __entry->dev = sb->s_dev; + __entry->dev = dev->bd_dev; __entry->nobarrier = nobarrier; __entry->flush_merge = flush_merge; ), TP_printk("dev = (%d,%d), %s %s", - show_dev(__entry), + show_dev(__entry->dev), __entry->nobarrier ? "skip (nobarrier)" : "issue", __entry->flush_merge ? 
" with flush_merge" : "") ); @@ -1267,7 +1306,7 @@ TRACE_EVENT(f2fs_shrink_extent_tree, ), TP_printk("dev = (%d,%d), shrunk: node_cnt = %u, tree_cnt = %u", - show_dev(__entry), + show_dev(__entry->dev), __entry->node_cnt, __entry->tree_cnt) ); @@ -1314,7 +1353,7 @@ DECLARE_EVENT_CLASS(f2fs_sync_dirty_inodes, ), TP_printk("dev = (%d,%d), %s, dirty count = %lld", - show_dev(__entry), + show_dev(__entry->dev), show_file_type(__entry->type), __entry->count) ); diff --git a/include/trace/events/fs_dax.h b/include/trace/events/fs_dax.h new file mode 100644 index 000000000000..c566ddc87f73 --- /dev/null +++ b/include/trace/events/fs_dax.h @@ -0,0 +1,156 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM fs_dax + +#if !defined(_TRACE_FS_DAX_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_FS_DAX_H + +#include <linux/tracepoint.h> + +DECLARE_EVENT_CLASS(dax_pmd_fault_class, + TP_PROTO(struct inode *inode, struct vm_fault *vmf, + pgoff_t max_pgoff, int result), + TP_ARGS(inode, vmf, max_pgoff, result), + TP_STRUCT__entry( + __field(unsigned long, ino) + __field(unsigned long, vm_start) + __field(unsigned long, vm_end) + __field(unsigned long, vm_flags) + __field(unsigned long, address) + __field(pgoff_t, pgoff) + __field(pgoff_t, max_pgoff) + __field(dev_t, dev) + __field(unsigned int, flags) + __field(int, result) + ), + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->vm_start = vmf->vma->vm_start; + __entry->vm_end = vmf->vma->vm_end; + __entry->vm_flags = vmf->vma->vm_flags; + __entry->address = vmf->address; + __entry->flags = vmf->flags; + __entry->pgoff = vmf->pgoff; + __entry->max_pgoff = max_pgoff; + __entry->result = result; + ), + TP_printk("dev %d:%d ino %#lx %s %s address %#lx vm_start " + "%#lx vm_end %#lx pgoff %#lx max_pgoff %#lx %s", + MAJOR(__entry->dev), + MINOR(__entry->dev), + __entry->ino, + __entry->vm_flags & VM_SHARED ? "shared" : "private", + __print_flags(__entry->flags, "|", FAULT_FLAG_TRACE), + __entry->address, + __entry->vm_start, + __entry->vm_end, + __entry->pgoff, + __entry->max_pgoff, + __print_flags(__entry->result, "|", VM_FAULT_RESULT_TRACE) + ) +) + +#define DEFINE_PMD_FAULT_EVENT(name) \ +DEFINE_EVENT(dax_pmd_fault_class, name, \ + TP_PROTO(struct inode *inode, struct vm_fault *vmf, \ + pgoff_t max_pgoff, int result), \ + TP_ARGS(inode, vmf, max_pgoff, result)) + +DEFINE_PMD_FAULT_EVENT(dax_pmd_fault); +DEFINE_PMD_FAULT_EVENT(dax_pmd_fault_done); + +DECLARE_EVENT_CLASS(dax_pmd_load_hole_class, + TP_PROTO(struct inode *inode, struct vm_fault *vmf, + struct page *zero_page, + void *radix_entry), + TP_ARGS(inode, vmf, zero_page, radix_entry), + TP_STRUCT__entry( + __field(unsigned long, ino) + __field(unsigned long, vm_flags) + __field(unsigned long, address) + __field(struct page *, zero_page) + __field(void *, radix_entry) + __field(dev_t, dev) + ), + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->vm_flags = vmf->vma->vm_flags; + __entry->address = vmf->address; + __entry->zero_page = zero_page; + __entry->radix_entry = radix_entry; + ), + TP_printk("dev %d:%d ino %#lx %s address %#lx zero_page %p " + "radix_entry %#lx", + MAJOR(__entry->dev), + MINOR(__entry->dev), + __entry->ino, + __entry->vm_flags & VM_SHARED ? 
"shared" : "private", + __entry->address, + __entry->zero_page, + (unsigned long)__entry->radix_entry + ) +) + +#define DEFINE_PMD_LOAD_HOLE_EVENT(name) \ +DEFINE_EVENT(dax_pmd_load_hole_class, name, \ + TP_PROTO(struct inode *inode, struct vm_fault *vmf, \ + struct page *zero_page, void *radix_entry), \ + TP_ARGS(inode, vmf, zero_page, radix_entry)) + +DEFINE_PMD_LOAD_HOLE_EVENT(dax_pmd_load_hole); +DEFINE_PMD_LOAD_HOLE_EVENT(dax_pmd_load_hole_fallback); + +DECLARE_EVENT_CLASS(dax_pmd_insert_mapping_class, + TP_PROTO(struct inode *inode, struct vm_fault *vmf, + long length, pfn_t pfn, void *radix_entry), + TP_ARGS(inode, vmf, length, pfn, radix_entry), + TP_STRUCT__entry( + __field(unsigned long, ino) + __field(unsigned long, vm_flags) + __field(unsigned long, address) + __field(long, length) + __field(u64, pfn_val) + __field(void *, radix_entry) + __field(dev_t, dev) + __field(int, write) + ), + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->vm_flags = vmf->vma->vm_flags; + __entry->address = vmf->address; + __entry->write = vmf->flags & FAULT_FLAG_WRITE; + __entry->length = length; + __entry->pfn_val = pfn.val; + __entry->radix_entry = radix_entry; + ), + TP_printk("dev %d:%d ino %#lx %s %s address %#lx length %#lx " + "pfn %#llx %s radix_entry %#lx", + MAJOR(__entry->dev), + MINOR(__entry->dev), + __entry->ino, + __entry->vm_flags & VM_SHARED ? "shared" : "private", + __entry->write ? "write" : "read", + __entry->address, + __entry->length, + __entry->pfn_val & ~PFN_FLAGS_MASK, + __print_flags_u64(__entry->pfn_val & PFN_FLAGS_MASK, "|", + PFN_FLAGS_TRACE), + (unsigned long)__entry->radix_entry + ) +) + +#define DEFINE_PMD_INSERT_MAPPING_EVENT(name) \ +DEFINE_EVENT(dax_pmd_insert_mapping_class, name, \ + TP_PROTO(struct inode *inode, struct vm_fault *vmf, \ + long length, pfn_t pfn, void *radix_entry), \ + TP_ARGS(inode, vmf, length, pfn, radix_entry)) + +DEFINE_PMD_INSERT_MAPPING_EVENT(dax_pmd_insert_mapping); +DEFINE_PMD_INSERT_MAPPING_EVENT(dax_pmd_insert_mapping_fallback); + +#endif /* _TRACE_FS_DAX_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h index 15bf875d0e4a..304ff94363b2 100644 --- a/include/trace/events/mmflags.h +++ b/include/trace/events/mmflags.h @@ -1,3 +1,6 @@ +#include <linux/node.h> +#include <linux/mmzone.h> +#include <linux/compaction.h> /* * The order of these masks is important. Matching masks will be seen * first and the left over flags will end up showing by themselves. @@ -171,3 +174,98 @@ IF_HAVE_VM_SOFTDIRTY(VM_SOFTDIRTY, "softdirty" ) \ (flags) ? __print_flags(flags, "|", \ __def_vmaflag_names \ ) : "none" + +#ifdef CONFIG_COMPACTION +#define COMPACTION_STATUS \ + EM( COMPACT_SKIPPED, "skipped") \ + EM( COMPACT_DEFERRED, "deferred") \ + EM( COMPACT_CONTINUE, "continue") \ + EM( COMPACT_SUCCESS, "success") \ + EM( COMPACT_PARTIAL_SKIPPED, "partial_skipped") \ + EM( COMPACT_COMPLETE, "complete") \ + EM( COMPACT_NO_SUITABLE_PAGE, "no_suitable_page") \ + EM( COMPACT_NOT_SUITABLE_ZONE, "not_suitable_zone") \ + EMe(COMPACT_CONTENDED, "contended") + +/* High-level compaction status feedback */ +#define COMPACTION_FAILED 1 +#define COMPACTION_WITHDRAWN 2 +#define COMPACTION_PROGRESS 3 + +#define compact_result_to_feedback(result) \ +({ \ + enum compact_result __result = result; \ + (compaction_failed(__result)) ? COMPACTION_FAILED : \ + (compaction_withdrawn(__result)) ? 
COMPACTION_WITHDRAWN : COMPACTION_PROGRESS; \ +}) + +#define COMPACTION_FEEDBACK \ + EM(COMPACTION_FAILED, "failed") \ + EM(COMPACTION_WITHDRAWN, "withdrawn") \ + EMe(COMPACTION_PROGRESS, "progress") + +#define COMPACTION_PRIORITY \ + EM(COMPACT_PRIO_SYNC_FULL, "COMPACT_PRIO_SYNC_FULL") \ + EM(COMPACT_PRIO_SYNC_LIGHT, "COMPACT_PRIO_SYNC_LIGHT") \ + EMe(COMPACT_PRIO_ASYNC, "COMPACT_PRIO_ASYNC") +#else +#define COMPACTION_STATUS +#define COMPACTION_PRIORITY +#define COMPACTION_FEEDBACK +#endif + +#ifdef CONFIG_ZONE_DMA +#define IFDEF_ZONE_DMA(X) X +#else +#define IFDEF_ZONE_DMA(X) +#endif + +#ifdef CONFIG_ZONE_DMA32 +#define IFDEF_ZONE_DMA32(X) X +#else +#define IFDEF_ZONE_DMA32(X) +#endif + +#ifdef CONFIG_HIGHMEM +#define IFDEF_ZONE_HIGHMEM(X) X +#else +#define IFDEF_ZONE_HIGHMEM(X) +#endif + +#define ZONE_TYPE \ + IFDEF_ZONE_DMA( EM (ZONE_DMA, "DMA")) \ + IFDEF_ZONE_DMA32( EM (ZONE_DMA32, "DMA32")) \ + EM (ZONE_NORMAL, "Normal") \ + IFDEF_ZONE_HIGHMEM( EM (ZONE_HIGHMEM,"HighMem")) \ + EMe(ZONE_MOVABLE,"Movable") + +#define LRU_NAMES \ + EM (LRU_INACTIVE_ANON, "inactive_anon") \ + EM (LRU_ACTIVE_ANON, "active_anon") \ + EM (LRU_INACTIVE_FILE, "inactive_file") \ + EM (LRU_ACTIVE_FILE, "active_file") \ + EMe(LRU_UNEVICTABLE, "unevictable") + +/* + * First define the enums in the above macros to be exported to userspace + * via TRACE_DEFINE_ENUM(). + */ +#undef EM +#undef EMe +#define EM(a, b) TRACE_DEFINE_ENUM(a); +#define EMe(a, b) TRACE_DEFINE_ENUM(a); + +COMPACTION_STATUS +COMPACTION_PRIORITY +COMPACTION_FEEDBACK +ZONE_TYPE +LRU_NAMES + +/* + * Now redefine the EM() and EMe() macros to map the enums to the strings + * that will be printed in the output. + */ +#undef EM +#undef EMe +#define EM(a, b) {a, b}, +#define EMe(a, b) {a, b} diff --git a/include/trace/events/oom.h b/include/trace/events/oom.h index 1e974983757e..38baeb27221a 100644 --- a/include/trace/events/oom.h +++ b/include/trace/events/oom.h @@ -4,6 +4,7 @@ #if !defined(_TRACE_OOM_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_OOM_H #include <linux/tracepoint.h> +#include <trace/events/mmflags.h> TRACE_EVENT(oom_score_adj_update, @@ -27,6 +28,86 @@ TRACE_EVENT(oom_score_adj_update, __entry->pid, __entry->comm, __entry->oom_score_adj) ); +TRACE_EVENT(reclaim_retry_zone, + + TP_PROTO(struct zoneref *zoneref, + int order, + unsigned long reclaimable, + unsigned long available, + unsigned long min_wmark, + int no_progress_loops, + bool wmark_check), + + TP_ARGS(zoneref, order, reclaimable, available, min_wmark, no_progress_loops, wmark_check), + + TP_STRUCT__entry( + __field( int, node) + __field( int, zone_idx) + __field( int, order) + __field( unsigned long, reclaimable) + __field( unsigned long, available) + __field( unsigned long, min_wmark) + __field( int, no_progress_loops) + __field( bool, wmark_check) + ), + + TP_fast_assign( + __entry->node = zone_to_nid(zoneref->zone); + __entry->zone_idx = zoneref->zone_idx; + __entry->order = order; + __entry->reclaimable = reclaimable; + __entry->available = available; + __entry->min_wmark = min_wmark; + __entry->no_progress_loops = no_progress_loops; + __entry->wmark_check = wmark_check; + ), + + TP_printk("node=%d zone=%-8s order=%d reclaimable=%lu available=%lu min_wmark=%lu no_progress_loops=%d wmark_check=%d", + __entry->node, __print_symbolic(__entry->zone_idx, ZONE_TYPE), + __entry->order, + __entry->reclaimable, __entry->available, __entry->min_wmark, + __entry->no_progress_loops, + __entry->wmark_check) +); + +#ifdef CONFIG_COMPACTION +TRACE_EVENT(compact_retry, + + 
TP_PROTO(int order, + enum compact_priority priority, + enum compact_result result, + int retries, + int max_retries, + bool ret), + + TP_ARGS(order, priority, result, retries, max_retries, ret), + + TP_STRUCT__entry( + __field( int, order) + __field( int, priority) + __field( int, result) + __field( int, retries) + __field( int, max_retries) + __field( bool, ret) + ), + + TP_fast_assign( + __entry->order = order; + __entry->priority = priority; + __entry->result = compact_result_to_feedback(result); + __entry->retries = retries; + __entry->max_retries = max_retries; + __entry->ret = ret; + ), + + TP_printk("order=%d priority=%s compaction_result=%s retries=%d max_retries=%d should_retry=%d", + __entry->order, + __print_symbolic(__entry->priority, COMPACTION_PRIORITY), + __print_symbolic(__entry->result, COMPACTION_FEEDBACK), + __entry->retries, __entry->max_retries, + __entry->ret) +); +#endif /* CONFIG_COMPACTION */ #endif /* This part must be outside protection */ diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h index 0383e5e9a0f3..39123c06a566 100644 --- a/include/trace/events/rxrpc.h +++ b/include/trace/events/rxrpc.h @@ -16,6 +16,388 @@ #include <linux/tracepoint.h> +/* + * Define enums for tracing information. + * + * These should all be kept sorted, making it easier to match the string + * mapping tables further on. + */ +#ifndef __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY +#define __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY + +enum rxrpc_skb_trace { + rxrpc_skb_rx_cleaned, + rxrpc_skb_rx_freed, + rxrpc_skb_rx_got, + rxrpc_skb_rx_lost, + rxrpc_skb_rx_purged, + rxrpc_skb_rx_received, + rxrpc_skb_rx_rotated, + rxrpc_skb_rx_seen, + rxrpc_skb_tx_cleaned, + rxrpc_skb_tx_freed, + rxrpc_skb_tx_got, + rxrpc_skb_tx_new, + rxrpc_skb_tx_rotated, + rxrpc_skb_tx_seen, +}; + +enum rxrpc_conn_trace { + rxrpc_conn_got, + rxrpc_conn_new_client, + rxrpc_conn_new_service, + rxrpc_conn_put_client, + rxrpc_conn_put_service, + rxrpc_conn_queued, + rxrpc_conn_seen, +}; + +enum rxrpc_client_trace { + rxrpc_client_activate_chans, + rxrpc_client_alloc, + rxrpc_client_chan_activate, + rxrpc_client_chan_disconnect, + rxrpc_client_chan_pass, + rxrpc_client_chan_unstarted, + rxrpc_client_cleanup, + rxrpc_client_count, + rxrpc_client_discard, + rxrpc_client_duplicate, + rxrpc_client_exposed, + rxrpc_client_replace, + rxrpc_client_to_active, + rxrpc_client_to_culled, + rxrpc_client_to_idle, + rxrpc_client_to_inactive, + rxrpc_client_to_upgrade, + rxrpc_client_to_waiting, + rxrpc_client_uncount, +}; + +enum rxrpc_call_trace { + rxrpc_call_connected, + rxrpc_call_error, + rxrpc_call_got, + rxrpc_call_got_kernel, + rxrpc_call_got_userid, + rxrpc_call_new_client, + rxrpc_call_new_service, + rxrpc_call_put, + rxrpc_call_put_kernel, + rxrpc_call_put_noqueue, + rxrpc_call_put_userid, + rxrpc_call_queued, + rxrpc_call_queued_ref, + rxrpc_call_release, + rxrpc_call_seen, +}; + +enum rxrpc_transmit_trace { + rxrpc_transmit_await_reply, + rxrpc_transmit_end, + rxrpc_transmit_queue, + rxrpc_transmit_queue_last, + rxrpc_transmit_rotate, + rxrpc_transmit_rotate_last, + rxrpc_transmit_wait, +}; + +enum rxrpc_receive_trace { + rxrpc_receive_end, + rxrpc_receive_front, + rxrpc_receive_incoming, + rxrpc_receive_queue, + rxrpc_receive_queue_last, + rxrpc_receive_rotate, +}; + +enum rxrpc_recvmsg_trace { + rxrpc_recvmsg_cont, + rxrpc_recvmsg_data_return, + rxrpc_recvmsg_dequeue, + rxrpc_recvmsg_enter, + rxrpc_recvmsg_full, + rxrpc_recvmsg_hole, + rxrpc_recvmsg_next, + rxrpc_recvmsg_requeue, + rxrpc_recvmsg_return, + 
rxrpc_recvmsg_terminal, + rxrpc_recvmsg_to_be_accepted, + rxrpc_recvmsg_wait, +}; + +enum rxrpc_rtt_tx_trace { + rxrpc_rtt_tx_data, + rxrpc_rtt_tx_ping, +}; + +enum rxrpc_rtt_rx_trace { + rxrpc_rtt_rx_ping_response, + rxrpc_rtt_rx_requested_ack, +}; + +enum rxrpc_timer_trace { + rxrpc_timer_begin, + rxrpc_timer_expired, + rxrpc_timer_init_for_reply, + rxrpc_timer_init_for_send_reply, + rxrpc_timer_set_for_ack, + rxrpc_timer_set_for_ping, + rxrpc_timer_set_for_resend, + rxrpc_timer_set_for_send, +}; + +enum rxrpc_propose_ack_trace { + rxrpc_propose_ack_client_tx_end, + rxrpc_propose_ack_input_data, + rxrpc_propose_ack_ping_for_lost_ack, + rxrpc_propose_ack_ping_for_lost_reply, + rxrpc_propose_ack_ping_for_params, + rxrpc_propose_ack_processing_op, + rxrpc_propose_ack_respond_to_ack, + rxrpc_propose_ack_respond_to_ping, + rxrpc_propose_ack_retry_tx, + rxrpc_propose_ack_rotate_rx, + rxrpc_propose_ack_terminal_ack, +}; + +enum rxrpc_propose_ack_outcome { + rxrpc_propose_ack_subsume, + rxrpc_propose_ack_update, + rxrpc_propose_ack_use, +}; + +enum rxrpc_congest_change { + rxrpc_cong_begin_retransmission, + rxrpc_cong_cleared_nacks, + rxrpc_cong_new_low_nack, + rxrpc_cong_no_change, + rxrpc_cong_progress, + rxrpc_cong_retransmit_again, + rxrpc_cong_rtt_window_end, + rxrpc_cong_saw_nack, +}; + +#endif /* end __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY */ + +/* + * Declare tracing information enums and their string mappings for display. + */ +#define rxrpc_skb_traces \ + EM(rxrpc_skb_rx_cleaned, "Rx CLN") \ + EM(rxrpc_skb_rx_freed, "Rx FRE") \ + EM(rxrpc_skb_rx_got, "Rx GOT") \ + EM(rxrpc_skb_rx_lost, "Rx *L*") \ + EM(rxrpc_skb_rx_purged, "Rx PUR") \ + EM(rxrpc_skb_rx_received, "Rx RCV") \ + EM(rxrpc_skb_rx_rotated, "Rx ROT") \ + EM(rxrpc_skb_rx_seen, "Rx SEE") \ + EM(rxrpc_skb_tx_cleaned, "Tx CLN") \ + EM(rxrpc_skb_tx_freed, "Tx FRE") \ + EM(rxrpc_skb_tx_got, "Tx GOT") \ + EM(rxrpc_skb_tx_new, "Tx NEW") \ + EM(rxrpc_skb_tx_rotated, "Tx ROT") \ + E_(rxrpc_skb_tx_seen, "Tx SEE") + +#define rxrpc_conn_traces \ + EM(rxrpc_conn_got, "GOT") \ + EM(rxrpc_conn_new_client, "NWc") \ + EM(rxrpc_conn_new_service, "NWs") \ + EM(rxrpc_conn_put_client, "PTc") \ + EM(rxrpc_conn_put_service, "PTs") \ + EM(rxrpc_conn_queued, "QUE") \ + E_(rxrpc_conn_seen, "SEE") + +#define rxrpc_client_traces \ + EM(rxrpc_client_activate_chans, "Activa") \ + EM(rxrpc_client_alloc, "Alloc ") \ + EM(rxrpc_client_chan_activate, "ChActv") \ + EM(rxrpc_client_chan_disconnect, "ChDisc") \ + EM(rxrpc_client_chan_pass, "ChPass") \ + EM(rxrpc_client_chan_unstarted, "ChUnst") \ + EM(rxrpc_client_cleanup, "Clean ") \ + EM(rxrpc_client_count, "Count ") \ + EM(rxrpc_client_discard, "Discar") \ + EM(rxrpc_client_duplicate, "Duplic") \ + EM(rxrpc_client_exposed, "Expose") \ + EM(rxrpc_client_replace, "Replac") \ + EM(rxrpc_client_to_active, "->Actv") \ + EM(rxrpc_client_to_culled, "->Cull") \ + EM(rxrpc_client_to_idle, "->Idle") \ + EM(rxrpc_client_to_inactive, "->Inac") \ + EM(rxrpc_client_to_upgrade, "->Upgd") \ + EM(rxrpc_client_to_waiting, "->Wait") \ + E_(rxrpc_client_uncount, "Uncoun") + +#define rxrpc_conn_cache_states \ + EM(RXRPC_CONN_CLIENT_INACTIVE, "Inac") \ + EM(RXRPC_CONN_CLIENT_WAITING, "Wait") \ + EM(RXRPC_CONN_CLIENT_ACTIVE, "Actv") \ + EM(RXRPC_CONN_CLIENT_CULLED, "Cull") \ + E_(RXRPC_CONN_CLIENT_IDLE, "Idle") \ + +#define rxrpc_call_traces \ + EM(rxrpc_call_connected, "CON") \ + EM(rxrpc_call_error, "*E*") \ + EM(rxrpc_call_got, "GOT") \ + EM(rxrpc_call_got_kernel, "Gke") \ + EM(rxrpc_call_got_userid, "Gus") \ + 
EM(rxrpc_call_new_client, "NWc") \ + EM(rxrpc_call_new_service, "NWs") \ + EM(rxrpc_call_put, "PUT") \ + EM(rxrpc_call_put_kernel, "Pke") \ + EM(rxrpc_call_put_noqueue, "PNQ") \ + EM(rxrpc_call_put_userid, "Pus") \ + EM(rxrpc_call_queued, "QUE") \ + EM(rxrpc_call_queued_ref, "QUR") \ + EM(rxrpc_call_release, "RLS") \ + E_(rxrpc_call_seen, "SEE") + +#define rxrpc_transmit_traces \ + EM(rxrpc_transmit_await_reply, "AWR") \ + EM(rxrpc_transmit_end, "END") \ + EM(rxrpc_transmit_queue, "QUE") \ + EM(rxrpc_transmit_queue_last, "QLS") \ + EM(rxrpc_transmit_rotate, "ROT") \ + EM(rxrpc_transmit_rotate_last, "RLS") \ + E_(rxrpc_transmit_wait, "WAI") + +#define rxrpc_receive_traces \ + EM(rxrpc_receive_end, "END") \ + EM(rxrpc_receive_front, "FRN") \ + EM(rxrpc_receive_incoming, "INC") \ + EM(rxrpc_receive_queue, "QUE") \ + EM(rxrpc_receive_queue_last, "QLS") \ + E_(rxrpc_receive_rotate, "ROT") + +#define rxrpc_recvmsg_traces \ + EM(rxrpc_recvmsg_cont, "CONT") \ + EM(rxrpc_recvmsg_data_return, "DATA") \ + EM(rxrpc_recvmsg_dequeue, "DEQU") \ + EM(rxrpc_recvmsg_enter, "ENTR") \ + EM(rxrpc_recvmsg_full, "FULL") \ + EM(rxrpc_recvmsg_hole, "HOLE") \ + EM(rxrpc_recvmsg_next, "NEXT") \ + EM(rxrpc_recvmsg_requeue, "REQU") \ + EM(rxrpc_recvmsg_return, "RETN") \ + EM(rxrpc_recvmsg_terminal, "TERM") \ + EM(rxrpc_recvmsg_to_be_accepted, "TBAC") \ + E_(rxrpc_recvmsg_wait, "WAIT") + +#define rxrpc_rtt_tx_traces \ + EM(rxrpc_rtt_tx_data, "DATA") \ + E_(rxrpc_rtt_tx_ping, "PING") + +#define rxrpc_rtt_rx_traces \ + EM(rxrpc_rtt_rx_ping_response, "PONG") \ + E_(rxrpc_rtt_rx_requested_ack, "RACK") + +#define rxrpc_timer_traces \ + EM(rxrpc_timer_begin, "Begin ") \ + EM(rxrpc_timer_expired, "*EXPR*") \ + EM(rxrpc_timer_init_for_reply, "IniRpl") \ + EM(rxrpc_timer_init_for_send_reply, "SndRpl") \ + EM(rxrpc_timer_set_for_ack, "SetAck") \ + EM(rxrpc_timer_set_for_ping, "SetPng") \ + EM(rxrpc_timer_set_for_resend, "SetRTx") \ + E_(rxrpc_timer_set_for_send, "SetTx ") + +#define rxrpc_propose_ack_traces \ + EM(rxrpc_propose_ack_client_tx_end, "ClTxEnd") \ + EM(rxrpc_propose_ack_input_data, "DataIn ") \ + EM(rxrpc_propose_ack_ping_for_lost_ack, "LostAck") \ + EM(rxrpc_propose_ack_ping_for_lost_reply, "LostRpl") \ + EM(rxrpc_propose_ack_ping_for_params, "Params ") \ + EM(rxrpc_propose_ack_processing_op, "ProcOp ") \ + EM(rxrpc_propose_ack_respond_to_ack, "Rsp2Ack") \ + EM(rxrpc_propose_ack_respond_to_ping, "Rsp2Png") \ + EM(rxrpc_propose_ack_retry_tx, "RetryTx") \ + EM(rxrpc_propose_ack_rotate_rx, "RxAck ") \ + E_(rxrpc_propose_ack_terminal_ack, "ClTerm ") + +#define rxrpc_propose_ack_outcomes \ + EM(rxrpc_propose_ack_subsume, " Subsume") \ + EM(rxrpc_propose_ack_update, " Update") \ + E_(rxrpc_propose_ack_use, "") + +#define rxrpc_congest_modes \ + EM(RXRPC_CALL_CONGEST_AVOIDANCE, "CongAvoid") \ + EM(RXRPC_CALL_FAST_RETRANSMIT, "FastReTx ") \ + EM(RXRPC_CALL_PACKET_LOSS, "PktLoss ") \ + E_(RXRPC_CALL_SLOW_START, "SlowStart") + +#define rxrpc_congest_changes \ + EM(rxrpc_cong_begin_retransmission, " Retrans") \ + EM(rxrpc_cong_cleared_nacks, " Cleared") \ + EM(rxrpc_cong_new_low_nack, " NewLowN") \ + EM(rxrpc_cong_no_change, "") \ + EM(rxrpc_cong_progress, " Progres") \ + EM(rxrpc_cong_retransmit_again, " ReTxAgn") \ + EM(rxrpc_cong_rtt_window_end, " RttWinE") \ + E_(rxrpc_cong_saw_nack, " SawNack") + +#define rxrpc_pkts \ + EM(0, "?00") \ + EM(RXRPC_PACKET_TYPE_DATA, "DATA") \ + EM(RXRPC_PACKET_TYPE_ACK, "ACK") \ + EM(RXRPC_PACKET_TYPE_BUSY, "BUSY") \ + EM(RXRPC_PACKET_TYPE_ABORT, "ABORT") \ + EM(RXRPC_PACKET_TYPE_ACKALL, 
"ACKALL") \ + EM(RXRPC_PACKET_TYPE_CHALLENGE, "CHALL") \ + EM(RXRPC_PACKET_TYPE_RESPONSE, "RESP") \ + EM(RXRPC_PACKET_TYPE_DEBUG, "DEBUG") \ + EM(9, "?09") \ + EM(10, "?10") \ + EM(11, "?11") \ + EM(12, "?12") \ + EM(RXRPC_PACKET_TYPE_VERSION, "VERSION") \ + EM(14, "?14") \ + E_(15, "?15") + +#define rxrpc_ack_names \ + EM(0, "-0-") \ + EM(RXRPC_ACK_REQUESTED, "REQ") \ + EM(RXRPC_ACK_DUPLICATE, "DUP") \ + EM(RXRPC_ACK_OUT_OF_SEQUENCE, "OOS") \ + EM(RXRPC_ACK_EXCEEDS_WINDOW, "WIN") \ + EM(RXRPC_ACK_NOSPACE, "MEM") \ + EM(RXRPC_ACK_PING, "PNG") \ + EM(RXRPC_ACK_PING_RESPONSE, "PNR") \ + EM(RXRPC_ACK_DELAY, "DLY") \ + EM(RXRPC_ACK_IDLE, "IDL") \ + E_(RXRPC_ACK__INVALID, "-?-") + +/* + * Export enum symbols via userspace. + */ +#undef EM +#undef E_ +#define EM(a, b) TRACE_DEFINE_ENUM(a); +#define E_(a, b) TRACE_DEFINE_ENUM(a); + +rxrpc_skb_traces; +rxrpc_conn_traces; +rxrpc_client_traces; +rxrpc_call_traces; +rxrpc_transmit_traces; +rxrpc_receive_traces; +rxrpc_recvmsg_traces; +rxrpc_rtt_tx_traces; +rxrpc_rtt_rx_traces; +rxrpc_timer_traces; +rxrpc_propose_ack_traces; +rxrpc_propose_ack_outcomes; +rxrpc_congest_changes; + +/* + * Now redefine the EM() and E_() macros to map the enums to the strings that + * will be printed in the output. + */ +#undef EM +#undef E_ +#define EM(a, b) { a, b }, +#define E_(a, b) { a, b } + TRACE_EVENT(rxrpc_conn, TP_PROTO(struct rxrpc_connection *conn, enum rxrpc_conn_trace op, int usage, const void *where), @@ -38,7 +420,7 @@ TRACE_EVENT(rxrpc_conn, TP_printk("C=%p %s u=%d sp=%pSR", __entry->conn, - rxrpc_conn_traces[__entry->op], + __print_symbolic(__entry->op, rxrpc_conn_traces), __entry->usage, __entry->where) ); @@ -70,8 +452,8 @@ TRACE_EVENT(rxrpc_client, TP_printk("C=%p h=%2d %s %s i=%08x u=%d", __entry->conn, __entry->channel, - rxrpc_client_traces[__entry->op], - rxrpc_conn_cache_states[__entry->cs], + __print_symbolic(__entry->op, rxrpc_client_traces), + __print_symbolic(__entry->cs, rxrpc_conn_cache_states), __entry->cid, __entry->usage) ); @@ -100,7 +482,7 @@ TRACE_EVENT(rxrpc_call, TP_printk("c=%p %s u=%d sp=%pSR a=%p", __entry->call, - rxrpc_call_traces[__entry->op], + __print_symbolic(__entry->op, rxrpc_call_traces), __entry->usage, __entry->where, __entry->aux) @@ -130,7 +512,7 @@ TRACE_EVENT(rxrpc_skb, TP_printk("s=%p %s u=%d m=%d p=%pSR", __entry->skb, - rxrpc_skb_traces[__entry->op], + __print_symbolic(__entry->op, rxrpc_skb_traces), __entry->usage, __entry->mod_count, __entry->where) @@ -154,7 +536,8 @@ TRACE_EVENT(rxrpc_rx_packet, __entry->hdr.callNumber, __entry->hdr.serviceId, __entry->hdr.serial, __entry->hdr.seq, __entry->hdr.type, __entry->hdr.flags, - __entry->hdr.type <= 15 ? rxrpc_pkts[__entry->hdr.type] : "?UNK") + __entry->hdr.type <= 15 ? 
+ __print_symbolic(__entry->hdr.type, rxrpc_pkts) : "?UNK") ); TRACE_EVENT(rxrpc_rx_done, @@ -214,6 +597,7 @@ TRACE_EVENT(rxrpc_transmit, __field(enum rxrpc_transmit_trace, why ) __field(rxrpc_seq_t, tx_hard_ack ) __field(rxrpc_seq_t, tx_top ) + __field(int, tx_winsize ) ), TP_fast_assign( @@ -221,38 +605,81 @@ TRACE_EVENT(rxrpc_transmit, __entry->why = why; __entry->tx_hard_ack = call->tx_hard_ack; __entry->tx_top = call->tx_top; + __entry->tx_winsize = call->tx_winsize; ), - TP_printk("c=%p %s f=%08x n=%u", + TP_printk("c=%p %s f=%08x n=%u/%u", __entry->call, - rxrpc_transmit_traces[__entry->why], + __print_symbolic(__entry->why, rxrpc_transmit_traces), __entry->tx_hard_ack + 1, - __entry->tx_top - __entry->tx_hard_ack) + __entry->tx_top - __entry->tx_hard_ack, + __entry->tx_winsize) + ); + +TRACE_EVENT(rxrpc_rx_data, + TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq, + rxrpc_serial_t serial, u8 flags, u8 anno), + + TP_ARGS(call, seq, serial, flags, anno), + + TP_STRUCT__entry( + __field(struct rxrpc_call *, call ) + __field(rxrpc_seq_t, seq ) + __field(rxrpc_serial_t, serial ) + __field(u8, flags ) + __field(u8, anno ) + ), + + TP_fast_assign( + __entry->call = call; + __entry->seq = seq; + __entry->serial = serial; + __entry->flags = flags; + __entry->anno = anno; + ), + + TP_printk("c=%p DATA %08x q=%08x fl=%02x a=%02x", + __entry->call, + __entry->serial, + __entry->seq, + __entry->flags, + __entry->anno) ); TRACE_EVENT(rxrpc_rx_ack, - TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t first, u8 reason, u8 n_acks), + TP_PROTO(struct rxrpc_call *call, + rxrpc_serial_t serial, rxrpc_serial_t ack_serial, + rxrpc_seq_t first, rxrpc_seq_t prev, u8 reason, u8 n_acks), - TP_ARGS(call, first, reason, n_acks), + TP_ARGS(call, serial, ack_serial, first, prev, reason, n_acks), TP_STRUCT__entry( __field(struct rxrpc_call *, call ) + __field(rxrpc_serial_t, serial ) + __field(rxrpc_serial_t, ack_serial ) __field(rxrpc_seq_t, first ) + __field(rxrpc_seq_t, prev ) __field(u8, reason ) __field(u8, n_acks ) ), TP_fast_assign( __entry->call = call; + __entry->serial = serial; + __entry->ack_serial = ack_serial; __entry->first = first; + __entry->prev = prev; __entry->reason = reason; __entry->n_acks = n_acks; ), - TP_printk("c=%p %s f=%08x n=%u", + TP_printk("c=%p %08x %s r=%08x f=%08x p=%08x n=%u", __entry->call, - rxrpc_ack_names[__entry->reason], + __entry->serial, + __print_symbolic(__entry->reason, rxrpc_ack_names), + __entry->ack_serial, __entry->first, + __entry->prev, __entry->n_acks) ); @@ -317,7 +744,7 @@ TRACE_EVENT(rxrpc_tx_ack, TP_printk(" c=%p ACK %08x %s f=%08x r=%08x n=%u", __entry->call, __entry->serial, - rxrpc_ack_names[__entry->reason], + __print_symbolic(__entry->reason, rxrpc_ack_names), __entry->ack_first, __entry->ack_serial, __entry->n_acks) @@ -349,7 +776,7 @@ TRACE_EVENT(rxrpc_receive, TP_printk("c=%p %s r=%08x q=%08x w=%08x-%08x", __entry->call, - rxrpc_receive_traces[__entry->why], + __print_symbolic(__entry->why, rxrpc_receive_traces), __entry->serial, __entry->seq, __entry->hard_ack, @@ -383,7 +810,7 @@ TRACE_EVENT(rxrpc_recvmsg, TP_printk("c=%p %s q=%08x o=%u l=%u ret=%d", __entry->call, - rxrpc_recvmsg_traces[__entry->why], + __print_symbolic(__entry->why, rxrpc_recvmsg_traces), __entry->seq, __entry->offset, __entry->len, @@ -410,7 +837,7 @@ TRACE_EVENT(rxrpc_rtt_tx, TP_printk("c=%p %s sr=%08x", __entry->call, - rxrpc_rtt_tx_traces[__entry->why], + __print_symbolic(__entry->why, rxrpc_rtt_tx_traces), __entry->send_serial) ); @@ -443,7 +870,7 @@ TRACE_EVENT(rxrpc_rtt_rx, 
TP_printk("c=%p %s sr=%08x rr=%08x rtt=%lld nr=%u avg=%lld", __entry->call, - rxrpc_rtt_rx_traces[__entry->why], + __print_symbolic(__entry->why, rxrpc_rtt_rx_traces), __entry->send_serial, __entry->resp_serial, __entry->rtt, @@ -481,7 +908,7 @@ TRACE_EVENT(rxrpc_timer, TP_printk("c=%p %s x=%lld a=%lld r=%lld t=%ld", __entry->call, - rxrpc_timer_traces[__entry->why], + __print_symbolic(__entry->why, rxrpc_timer_traces), ktime_to_ns(ktime_sub(__entry->expire_at, __entry->now)), ktime_to_ns(ktime_sub(__entry->ack_at, __entry->now)), ktime_to_ns(ktime_sub(__entry->resend_at, __entry->now)), @@ -506,7 +933,8 @@ TRACE_EVENT(rxrpc_rx_lose, __entry->hdr.callNumber, __entry->hdr.serviceId, __entry->hdr.serial, __entry->hdr.seq, __entry->hdr.type, __entry->hdr.flags, - __entry->hdr.type <= 15 ? rxrpc_pkts[__entry->hdr.type] : "?UNK") + __entry->hdr.type <= 15 ? + __print_symbolic(__entry->hdr.type, rxrpc_pkts) : "?UNK") ); TRACE_EVENT(rxrpc_propose_ack, @@ -539,12 +967,12 @@ TRACE_EVENT(rxrpc_propose_ack, TP_printk("c=%p %s %s r=%08x i=%u b=%u%s", __entry->call, - rxrpc_propose_ack_traces[__entry->why], - rxrpc_ack_names[__entry->ack_reason], + __print_symbolic(__entry->why, rxrpc_propose_ack_traces), + __print_symbolic(__entry->ack_reason, rxrpc_ack_names), __entry->serial, __entry->immediate, __entry->background, - rxrpc_propose_ack_outcomes[__entry->outcome]) + __print_symbolic(__entry->outcome, rxrpc_propose_ack_outcomes)) ); TRACE_EVENT(rxrpc_retransmit, @@ -603,9 +1031,9 @@ TRACE_EVENT(rxrpc_congest, TP_printk("c=%p %08x %s %08x %s cw=%u ss=%u nr=%u,%u nw=%u,%u r=%u b=%u u=%u d=%u l=%x%s%s%s", __entry->call, __entry->ack_serial, - rxrpc_ack_names[__entry->sum.ack_reason], + __print_symbolic(__entry->sum.ack_reason, rxrpc_ack_names), __entry->hard_ack, - rxrpc_congest_modes[__entry->sum.mode], + __print_symbolic(__entry->sum.mode, rxrpc_congest_modes), __entry->sum.cwnd, __entry->sum.ssthresh, __entry->sum.nr_acks, __entry->sum.nr_nacks, @@ -615,10 +1043,50 @@ TRACE_EVENT(rxrpc_congest, __entry->sum.cumulative_acks, __entry->sum.dup_acks, __entry->lowest_nak, __entry->sum.new_low_nack ? "!" : "", - rxrpc_congest_changes[__entry->change], + __print_symbolic(__entry->change, rxrpc_congest_changes), __entry->sum.retrans_timeo ? 
" rTxTo" : "") ); +TRACE_EVENT(rxrpc_disconnect_call, + TP_PROTO(struct rxrpc_call *call), + + TP_ARGS(call), + + TP_STRUCT__entry( + __field(struct rxrpc_call *, call ) + __field(u32, abort_code ) + ), + + TP_fast_assign( + __entry->call = call; + __entry->abort_code = call->abort_code; + ), + + TP_printk("c=%p ab=%08x", + __entry->call, + __entry->abort_code) + ); + +TRACE_EVENT(rxrpc_improper_term, + TP_PROTO(struct rxrpc_call *call), + + TP_ARGS(call), + + TP_STRUCT__entry( + __field(struct rxrpc_call *, call ) + __field(u32, abort_code ) + ), + + TP_fast_assign( + __entry->call = call; + __entry->abort_code = call->abort_code; + ), + + TP_printk("c=%p ab=%08x", + __entry->call, + __entry->abort_code) + ); + #endif /* _TRACE_RXRPC_H */ /* This part must be outside protection */ diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index 9b90c57517a9..9e3ef6c99e4b 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -4,7 +4,7 @@ #if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_SCHED_H -#include <linux/sched.h> +#include <linux/sched/numa_balancing.h> #include <linux/tracepoint.h> #include <linux/binfmts.h> diff --git a/include/trace/events/syscalls.h b/include/trace/events/syscalls.h index 14e49c798135..b35533b94277 100644 --- a/include/trace/events/syscalls.h +++ b/include/trace/events/syscalls.h @@ -1,5 +1,6 @@ #undef TRACE_SYSTEM #define TRACE_SYSTEM raw_syscalls +#undef TRACE_INCLUDE_FILE #define TRACE_INCLUDE_FILE syscalls #if !defined(_TRACE_EVENTS_SYSCALLS_H) || defined(TRACE_HEADER_MULTI_READ) diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h index 1bca99dbb98f..80787eafba99 100644 --- a/include/trace/events/timer.h +++ b/include/trace/events/timer.h @@ -36,6 +36,13 @@ DEFINE_EVENT(timer_class, timer_init, TP_ARGS(timer) ); +#define decode_timer_flags(flags) \ + __print_flags(flags, "|", \ + { TIMER_MIGRATING, "M" }, \ + { TIMER_DEFERRABLE, "D" }, \ + { TIMER_PINNED, "P" }, \ + { TIMER_IRQSAFE, "I" }) + /** * timer_start - called when the timer is started * @timer: pointer to struct timer_list @@ -65,9 +72,12 @@ TRACE_EVENT(timer_start, __entry->flags = flags; ), - TP_printk("timer=%p function=%pf expires=%lu [timeout=%ld] flags=0x%08x", + TP_printk("timer=%p function=%pf expires=%lu [timeout=%ld] cpu=%u idx=%u flags=%s", __entry->timer, __entry->function, __entry->expires, - (long)__entry->expires - __entry->now, __entry->flags) + (long)__entry->expires - __entry->now, + __entry->flags & TIMER_CPUMASK, + __entry->flags >> TIMER_ARRAYSHIFT, + decode_timer_flags(__entry->flags & TIMER_TRACE_FLAGMASK)) ); /** diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h index c88fd0934e7e..27e8a5c77579 100644 --- a/include/trace/events/vmscan.h +++ b/include/trace/events/vmscan.h @@ -15,6 +15,7 @@ #define RECLAIM_WB_MIXED 0x0010u #define RECLAIM_WB_SYNC 0x0004u /* Unused, all reclaim async */ #define RECLAIM_WB_ASYNC 0x0008u +#define RECLAIM_WB_LRU (RECLAIM_WB_ANON|RECLAIM_WB_FILE) #define show_reclaim_flags(flags) \ (flags) ? 
__print_flags(flags, "|", \ @@ -269,26 +270,27 @@ TRACE_EVENT(mm_shrink_slab_end, __entry->retval) ); -DECLARE_EVENT_CLASS(mm_vmscan_lru_isolate_template, - +TRACE_EVENT(mm_vmscan_lru_isolate, TP_PROTO(int classzone_idx, int order, unsigned long nr_requested, unsigned long nr_scanned, + unsigned long nr_skipped, unsigned long nr_taken, isolate_mode_t isolate_mode, - int file), + int lru), - TP_ARGS(classzone_idx, order, nr_requested, nr_scanned, nr_taken, isolate_mode, file), + TP_ARGS(classzone_idx, order, nr_requested, nr_scanned, nr_skipped, nr_taken, isolate_mode, lru), TP_STRUCT__entry( __field(int, classzone_idx) __field(int, order) __field(unsigned long, nr_requested) __field(unsigned long, nr_scanned) + __field(unsigned long, nr_skipped) __field(unsigned long, nr_taken) __field(isolate_mode_t, isolate_mode) - __field(int, file) + __field(int, lru) ), TP_fast_assign( @@ -296,47 +298,21 @@ DECLARE_EVENT_CLASS(mm_vmscan_lru_isolate_template, __entry->order = order; __entry->nr_requested = nr_requested; __entry->nr_scanned = nr_scanned; + __entry->nr_skipped = nr_skipped; __entry->nr_taken = nr_taken; __entry->isolate_mode = isolate_mode; - __entry->file = file; + __entry->lru = lru; ), - TP_printk("isolate_mode=%d classzone=%d order=%d nr_requested=%lu nr_scanned=%lu nr_taken=%lu file=%d", + TP_printk("isolate_mode=%d classzone=%d order=%d nr_requested=%lu nr_scanned=%lu nr_skipped=%lu nr_taken=%lu lru=%s", __entry->isolate_mode, __entry->classzone_idx, __entry->order, __entry->nr_requested, __entry->nr_scanned, + __entry->nr_skipped, __entry->nr_taken, - __entry->file) -); - -DEFINE_EVENT(mm_vmscan_lru_isolate_template, mm_vmscan_lru_isolate, - - TP_PROTO(int classzone_idx, - int order, - unsigned long nr_requested, - unsigned long nr_scanned, - unsigned long nr_taken, - isolate_mode_t isolate_mode, - int file), - - TP_ARGS(classzone_idx, order, nr_requested, nr_scanned, nr_taken, isolate_mode, file) - -); - -DEFINE_EVENT(mm_vmscan_lru_isolate_template, mm_vmscan_memcg_isolate, - - TP_PROTO(int classzone_idx, - int order, - unsigned long nr_requested, - unsigned long nr_scanned, - unsigned long nr_taken, - isolate_mode_t isolate_mode, - int file), - - TP_ARGS(classzone_idx, order, nr_requested, nr_scanned, nr_taken, isolate_mode, file) - + __print_symbolic(__entry->lru, LRU_NAMES)) ); TRACE_EVENT(mm_vmscan_writepage, @@ -365,14 +341,27 @@ TRACE_EVENT(mm_vmscan_lru_shrink_inactive, TP_PROTO(int nid, unsigned long nr_scanned, unsigned long nr_reclaimed, + unsigned long nr_dirty, unsigned long nr_writeback, + unsigned long nr_congested, unsigned long nr_immediate, + unsigned long nr_activate, unsigned long nr_ref_keep, + unsigned long nr_unmap_fail, int priority, int file), - TP_ARGS(nid, nr_scanned, nr_reclaimed, priority, file), + TP_ARGS(nid, nr_scanned, nr_reclaimed, nr_dirty, nr_writeback, + nr_congested, nr_immediate, nr_activate, nr_ref_keep, + nr_unmap_fail, priority, file), TP_STRUCT__entry( __field(int, nid) __field(unsigned long, nr_scanned) __field(unsigned long, nr_reclaimed) + __field(unsigned long, nr_dirty) + __field(unsigned long, nr_writeback) + __field(unsigned long, nr_congested) + __field(unsigned long, nr_immediate) + __field(unsigned long, nr_activate) + __field(unsigned long, nr_ref_keep) + __field(unsigned long, nr_unmap_fail) __field(int, priority) __field(int, reclaim_flags) ), @@ -381,17 +370,102 @@ TRACE_EVENT(mm_vmscan_lru_shrink_inactive, __entry->nid = nid; __entry->nr_scanned = nr_scanned; __entry->nr_reclaimed = nr_reclaimed; + __entry->nr_dirty = 
nr_dirty; + __entry->nr_writeback = nr_writeback; + __entry->nr_congested = nr_congested; + __entry->nr_immediate = nr_immediate; + __entry->nr_activate = nr_activate; + __entry->nr_ref_keep = nr_ref_keep; + __entry->nr_unmap_fail = nr_unmap_fail; __entry->priority = priority; __entry->reclaim_flags = trace_shrink_flags(file); ), - TP_printk("nid=%d nr_scanned=%ld nr_reclaimed=%ld priority=%d flags=%s", + TP_printk("nid=%d nr_scanned=%ld nr_reclaimed=%ld nr_dirty=%ld nr_writeback=%ld nr_congested=%ld nr_immediate=%ld nr_activate=%ld nr_ref_keep=%ld nr_unmap_fail=%ld priority=%d flags=%s", __entry->nid, __entry->nr_scanned, __entry->nr_reclaimed, + __entry->nr_dirty, __entry->nr_writeback, + __entry->nr_congested, __entry->nr_immediate, + __entry->nr_activate, __entry->nr_ref_keep, + __entry->nr_unmap_fail, __entry->priority, + show_reclaim_flags(__entry->reclaim_flags)) +); + +TRACE_EVENT(mm_vmscan_lru_shrink_active, + + TP_PROTO(int nid, unsigned long nr_taken, + unsigned long nr_active, unsigned long nr_deactivated, + unsigned long nr_referenced, int priority, int file), + + TP_ARGS(nid, nr_taken, nr_active, nr_deactivated, nr_referenced, priority, file), + + TP_STRUCT__entry( + __field(int, nid) + __field(unsigned long, nr_taken) + __field(unsigned long, nr_active) + __field(unsigned long, nr_deactivated) + __field(unsigned long, nr_referenced) + __field(int, priority) + __field(int, reclaim_flags) + ), + + TP_fast_assign( + __entry->nid = nid; + __entry->nr_taken = nr_taken; + __entry->nr_active = nr_active; + __entry->nr_deactivated = nr_deactivated; + __entry->nr_referenced = nr_referenced; + __entry->priority = priority; + __entry->reclaim_flags = trace_shrink_flags(file); + ), + + TP_printk("nid=%d nr_taken=%ld nr_active=%ld nr_deactivated=%ld nr_referenced=%ld priority=%d flags=%s", + __entry->nid, + __entry->nr_taken, + __entry->nr_active, __entry->nr_deactivated, __entry->nr_referenced, __entry->priority, show_reclaim_flags(__entry->reclaim_flags)) ); +TRACE_EVENT(mm_vmscan_inactive_list_is_low, + + TP_PROTO(int nid, int reclaim_idx, + unsigned long total_inactive, unsigned long inactive, + unsigned long total_active, unsigned long active, + unsigned long ratio, int file), + + TP_ARGS(nid, reclaim_idx, total_inactive, inactive, total_active, active, ratio, file), + + TP_STRUCT__entry( + __field(int, nid) + __field(int, reclaim_idx) + __field(unsigned long, total_inactive) + __field(unsigned long, inactive) + __field(unsigned long, total_active) + __field(unsigned long, active) + __field(unsigned long, ratio) + __field(int, reclaim_flags) + ), + + TP_fast_assign( + __entry->nid = nid; + __entry->reclaim_idx = reclaim_idx; + __entry->total_inactive = total_inactive; + __entry->inactive = inactive; + __entry->total_active = total_active; + __entry->active = active; + __entry->ratio = ratio; + __entry->reclaim_flags = trace_shrink_flags(file) & RECLAIM_WB_LRU; + ), + + TP_printk("nid=%d reclaim_idx=%d total_inactive=%ld inactive=%ld total_active=%ld active=%ld ratio=%ld flags=%s", + __entry->nid, + __entry->reclaim_idx, + __entry->total_inactive, __entry->inactive, + __entry->total_active, __entry->active, + __entry->ratio, + show_reclaim_flags(__entry->reclaim_flags)) +); #endif /* _TRACE_VMSCAN_H */ /* This part must be outside protection */ diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index 2ccd9ccbf9ef..7bd8783a590f 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h @@ -31,7 +31,7 @@ #define WB_WORK_REASON \ 
EM( WB_REASON_BACKGROUND, "background") \ - EM( WB_REASON_TRY_TO_FREE_PAGES, "try_to_free_pages") \ + EM( WB_REASON_VMSCAN, "vmscan") \ EM( WB_REASON_SYNC, "sync") \ EM( WB_REASON_PERIODIC, "periodic") \ EM( WB_REASON_LAPTOP_TIMER, "laptop_timer") \ diff --git a/include/trace/events/xdp.h b/include/trace/events/xdp.h new file mode 100644 index 000000000000..1b61357d3f57 --- /dev/null +++ b/include/trace/events/xdp.h @@ -0,0 +1,53 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM xdp + +#if !defined(_TRACE_XDP_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_XDP_H + +#include <linux/netdevice.h> +#include <linux/filter.h> +#include <linux/tracepoint.h> + +#define __XDP_ACT_MAP(FN) \ + FN(ABORTED) \ + FN(DROP) \ + FN(PASS) \ + FN(TX) + +#define __XDP_ACT_TP_FN(x) \ + TRACE_DEFINE_ENUM(XDP_##x); +#define __XDP_ACT_SYM_FN(x) \ + { XDP_##x, #x }, +#define __XDP_ACT_SYM_TAB \ + __XDP_ACT_MAP(__XDP_ACT_SYM_FN) { -1, 0 } +__XDP_ACT_MAP(__XDP_ACT_TP_FN) + +TRACE_EVENT(xdp_exception, + + TP_PROTO(const struct net_device *dev, + const struct bpf_prog *xdp, u32 act), + + TP_ARGS(dev, xdp, act), + + TP_STRUCT__entry( + __string(name, dev->name) + __array(u8, prog_tag, 8) + __field(u32, act) + ), + + TP_fast_assign( + BUILD_BUG_ON(sizeof(__entry->prog_tag) != sizeof(xdp->tag)); + memcpy(__entry->prog_tag, xdp->tag, sizeof(xdp->tag)); + __assign_str(name, dev->name); + __entry->act = act; + ), + + TP_printk("prog=%s device=%s action=%s", + __print_hex_str(__entry->prog_tag, 8), + __get_str(name), + __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB)) +); + +#endif /* _TRACE_XDP_H */ + +#include <trace/define_trace.h> diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h index 467e12f780d8..00f643164ca2 100644 --- a/include/trace/trace_events.h +++ b/include/trace/trace_events.h @@ -283,8 +283,16 @@ TRACE_MAKE_SYSTEM_STR(); trace_print_symbols_seq(p, value, symbols); \ }) +#undef __print_flags_u64 #undef __print_symbolic_u64 #if BITS_PER_LONG == 32 +#define __print_flags_u64(flag, delim, flag_array...) \ + ({ \ + static const struct trace_print_flags_u64 __flags[] = \ + { flag_array, { -1, NULL } }; \ + trace_print_flags_seq_u64(p, delim, flag, __flags); \ + }) + #define __print_symbolic_u64(value, symbol_array...) \ ({ \ static const struct trace_print_flags_u64 symbols[] = \ @@ -292,12 +300,20 @@ TRACE_MAKE_SYSTEM_STR(); trace_print_symbols_seq_u64(p, value, symbols); \ }) #else +#define __print_flags_u64(flag, delim, flag_array...) \ + __print_flags(flag, delim, flag_array) + #define __print_symbolic_u64(value, symbol_array...) \ __print_symbolic(value, symbol_array) #endif #undef __print_hex -#define __print_hex(buf, buf_len) trace_print_hex_seq(p, buf, buf_len) +#define __print_hex(buf, buf_len) \ + trace_print_hex_seq(p, buf, buf_len, false) + +#undef __print_hex_str +#define __print_hex_str(buf, buf_len) \ + trace_print_hex_seq(p, buf, buf_len, true) #undef __print_array #define __print_array(array, count, el_size) \ @@ -711,6 +727,7 @@ static inline void ftrace_test_probe_##call(void) \ #undef __print_flags #undef __print_symbolic #undef __print_hex +#undef __print_hex_str #undef __get_dynamic_array #undef __get_dynamic_array_len #undef __get_str diff --git a/include/uapi/asm-generic/ioctl.h b/include/uapi/asm-generic/ioctl.h index 7e7c11b52143..749b32fe5623 100644 --- a/include/uapi/asm-generic/ioctl.h +++ b/include/uapi/asm-generic/ioctl.h @@ -48,6 +48,9 @@ /* * Direction bits, which any architecture can choose to override * before including this file. 
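The EM()/E_() tables used by rxrpc and writeback above, and xdp.h's __XDP_ACT_MAP(FN), are all the same "X-macro" idiom: the value list is written once and expanded twice, first to register the enum values (TRACE_DEFINE_ENUM) and then to build the { value, string } pairs consumed by __print_symbolic(). A minimal standalone sketch of the two-pass expansion (invented names, plain userspace C rather than kernel API):

#include <stdio.h>

/* The list is written once; EM() covers every entry but the last,
 * E_() the final one, so each expansion can terminate cleanly. */
#define COLOR_LIST \
	EM(COLOR_RED,   "red")   \
	EM(COLOR_GREEN, "green") \
	E_(COLOR_BLUE,  "blue")

/* Pass 1: declare the enum constants. */
#define EM(a, b) a,
#define E_(a, b) a
enum color { COLOR_LIST };
#undef EM
#undef E_

/* Pass 2: build the value -> string mapping table. */
#define EM(a, b) { a, b },
#define E_(a, b) { a, b }
static const struct { int val; const char *name; } color_names[] = {
	COLOR_LIST
};
#undef EM
#undef E_

int main(void)
{
	for (unsigned i = 0; i < sizeof(color_names) / sizeof(color_names[0]); i++)
		printf("%d -> %s\n", color_names[i].val, color_names[i].name);
	return 0;
}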
+ * + * NOTE: _IOC_WRITE means userland is writing and kernel is + * reading. _IOC_READ means userland is reading and kernel is writing. */ #ifndef _IOC_NONE @@ -72,7 +75,12 @@ #define _IOC_TYPECHECK(t) (sizeof(t)) #endif -/* used to create numbers */ +/* + * Used to create numbers. + * + * NOTE: _IOW means userland is writing and kernel is reading. _IOR + * means userland is reading and kernel is writing. + */ #define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0) #define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),(_IOC_TYPECHECK(size))) #define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size))) diff --git a/include/uapi/drm/Kbuild b/include/uapi/drm/Kbuild index 9355dd8eff3b..c97addd08f8c 100644 --- a/include/uapi/drm/Kbuild +++ b/include/uapi/drm/Kbuild @@ -9,6 +9,7 @@ header-y += i810_drm.h header-y += i915_drm.h header-y += mga_drm.h header-y += nouveau_drm.h +header-y += omap_drm.h header-y += qxl_drm.h header-y += r128_drm.h header-y += radeon_drm.h diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index 396183628f3c..5797283c2d79 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -528,6 +528,8 @@ struct drm_amdgpu_cs_chunk_data { #define AMDGPU_INFO_VBIOS_SIZE 0x1 /* Subquery id: Query vbios image */ #define AMDGPU_INFO_VBIOS_IMAGE 0x2 +/* Query UVD handles */ +#define AMDGPU_INFO_NUM_HANDLES 0x1C #define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0 #define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff @@ -719,6 +721,13 @@ struct drm_amdgpu_info_hw_ip { __u32 _pad; }; +struct drm_amdgpu_info_num_handles { + /** Max handles as supported by firmware for UVD */ + __u32 uvd_max_handles; + /** Handles currently in use for UVD */ + __u32 uvd_used_handles; +}; + #define AMDGPU_VCE_CLOCK_TABLE_ENTRIES 6 struct drm_amdgpu_info_vce_clock_table_entry { diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h index a5890bf44c0a..ef20abb8119b 100644 --- a/include/uapi/drm/drm_fourcc.h +++ b/include/uapi/drm/drm_fourcc.h @@ -41,10 +41,17 @@ extern "C" { /* 8 bpp Red */ #define DRM_FORMAT_R8 fourcc_code('R', '8', ' ', ' ') /* [7:0] R */ +/* 16 bpp Red */ +#define DRM_FORMAT_R16 fourcc_code('R', '1', '6', ' ') /* [15:0] R little endian */ + /* 16 bpp RG */ #define DRM_FORMAT_RG88 fourcc_code('R', 'G', '8', '8') /* [15:0] R:G 8:8 little endian */ #define DRM_FORMAT_GR88 fourcc_code('G', 'R', '8', '8') /* [15:0] G:R 8:8 little endian */ +/* 32 bpp RG */ +#define DRM_FORMAT_RG1616 fourcc_code('R', 'G', '3', '2') /* [31:0] R:G 16:16 little endian */ +#define DRM_FORMAT_GR1616 fourcc_code('G', 'R', '3', '2') /* [31:0] G:R 16:16 little endian */ + /* 8 bpp RGB */ #define DRM_FORMAT_RGB332 fourcc_code('R', 'G', 'B', '8') /* [7:0] R:G:B 3:3:2 */ #define DRM_FORMAT_BGR233 fourcc_code('B', 'G', 'R', '8') /* [7:0] B:G:R 2:3:3 */ @@ -154,11 +161,13 @@ extern "C" { /* Vendor Ids: */ #define DRM_FORMAT_MOD_NONE 0 +#define DRM_FORMAT_MOD_VENDOR_NONE 0 #define DRM_FORMAT_MOD_VENDOR_INTEL 0x01 #define DRM_FORMAT_MOD_VENDOR_AMD 0x02 #define DRM_FORMAT_MOD_VENDOR_NV 0x03 #define DRM_FORMAT_MOD_VENDOR_SAMSUNG 0x04 #define DRM_FORMAT_MOD_VENDOR_QCOM 0x05 +#define DRM_FORMAT_MOD_VENDOR_VIVANTE 0x06 /* add more to the end as needed */ #define fourcc_mod_code(vendor, val) \ @@ -172,6 +181,16 @@ extern "C" { * authoritative source for all of these. */ +/* + * Linear Layout + * + * Just plain linear layout. Note that this is different from not specifying any + * modifier (e.g.
not setting DRM_MODE_FB_MODIFIERS in the DRM_ADDFB2 ioctl), + * which tells the driver to also take driver-internal information into account + * and so might actually result in a tiled framebuffer. + */ +#define DRM_FORMAT_MOD_LINEAR fourcc_mod_code(NONE, 0) + /* Intel framebuffer modifiers */ /* @@ -233,6 +252,46 @@ extern "C" { */ #define DRM_FORMAT_MOD_SAMSUNG_64_32_TILE fourcc_mod_code(SAMSUNG, 1) +/* Vivante framebuffer modifiers */ + +/* + * Vivante 4x4 tiling layout + * + * This is a simple tiled layout using tiles of 4x4 pixels in a row-major + * layout. + */ +#define DRM_FORMAT_MOD_VIVANTE_TILED fourcc_mod_code(VIVANTE, 1) + +/* + * Vivante 64x64 super-tiling layout + * + * This is a tiled layout using 64x64 pixel super-tiles, where each super-tile + * contains 8x4 groups of 2x4 tiles of 4x4 pixels (like above) each, all in row- + * major layout. + * + * For more information: see + * https://github.com/etnaviv/etna_viv/blob/master/doc/hardware.md#texture-tiling + */ +#define DRM_FORMAT_MOD_VIVANTE_SUPER_TILED fourcc_mod_code(VIVANTE, 2) + +/* + * Vivante 4x4 tiling layout for dual-pipe + * + * Same as the 4x4 tiling layout, except every second 4x4 pixel tile starts at a + * different base address. Offsets from the base addresses are therefore halved + * compared to the non-split tiled layout. + */ +#define DRM_FORMAT_MOD_VIVANTE_SPLIT_TILED fourcc_mod_code(VIVANTE, 3) + +/* + * Vivante 64x64 super-tiling layout for dual-pipe + * + * Same as the 64x64 super-tiling layout, except every second 4x4 pixel tile + * starts at a different base address. Offsets from the base addresses are + * therefore halved compared to the non-split super-tiled layout. + */ +#define DRM_FORMAT_MOD_VIVANTE_SPLIT_SUPER_TILED fourcc_mod_code(VIVANTE, 4) + #if defined(__cplusplus) } #endif diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index 1c12a350eca3..57093b455db6 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -258,6 +258,7 @@ typedef struct _drm_i915_sarea { #define DRM_I915_GEM_USERPTR 0x33 #define DRM_I915_GEM_CONTEXT_GETPARAM 0x34 #define DRM_I915_GEM_CONTEXT_SETPARAM 0x35 +#define DRM_I915_PERF_OPEN 0x36 #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) @@ -311,6 +312,7 @@ typedef struct _drm_i915_sarea { #define DRM_IOCTL_I915_GEM_USERPTR DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr) #define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param) #define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param) +#define DRM_IOCTL_I915_PERF_OPEN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param) /* Allow drivers to submit batchbuffers directly to hardware, relying * on the security mechanisms provided by hardware. @@ -393,6 +395,7 @@ typedef struct drm_i915_irq_wait { * priorities and the driver will attempt to execute batches in priority order. 
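As a quick cross-check of the modifier values defined above: fourcc_mod_code() packs the vendor id into the top byte and the vendor-specific code into the low 56 bits. A small userspace sketch that restates the encoding locally (the authoritative definition is the fourcc_mod_code macro in drm_fourcc.h; verify against the header before relying on it):

#include <inttypes.h>
#include <stdio.h>

#define MOD_VENDOR_VIVANTE 0x06	/* mirrors DRM_FORMAT_MOD_VENDOR_VIVANTE */

/* Local restatement of fourcc_mod_code(vendor, val). */
static uint64_t mod_code(uint8_t vendor, uint64_t val)
{
	return ((uint64_t)vendor << 56) | (val & 0x00ffffffffffffffULL);
}

int main(void)
{
	/* Expected to match DRM_FORMAT_MOD_VIVANTE_SUPER_TILED,
	 * i.e. fourcc_mod_code(VIVANTE, 2) == 0x0600000000000002. */
	printf("0x%016" PRIx64 "\n", mod_code(MOD_VENDOR_VIVANTE, 2));
	return 0;
}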
*/ #define I915_PARAM_HAS_SCHEDULER 41 +#define I915_PARAM_HUC_STATUS 42 typedef struct drm_i915_getparam { __s32 param; @@ -1224,9 +1227,142 @@ struct drm_i915_gem_context_param { #define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2 #define I915_CONTEXT_PARAM_GTT_SIZE 0x3 #define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE 0x4 +#define I915_CONTEXT_PARAM_BANNABLE 0x5 __u64 value; }; +enum drm_i915_oa_format { + I915_OA_FORMAT_A13 = 1, + I915_OA_FORMAT_A29, + I915_OA_FORMAT_A13_B8_C8, + I915_OA_FORMAT_B4_C8, + I915_OA_FORMAT_A45_B8_C8, + I915_OA_FORMAT_B4_C8_A16, + I915_OA_FORMAT_C4_B8, + + I915_OA_FORMAT_MAX /* non-ABI */ +}; + +enum drm_i915_perf_property_id { + /** + * Open the stream for a specific context handle (as used with + * execbuffer2). A stream opened for a specific context this way + * won't typically require root privileges. + */ + DRM_I915_PERF_PROP_CTX_HANDLE = 1, + + /** + * A value of 1 requests the inclusion of raw OA unit reports as + * part of stream samples. + */ + DRM_I915_PERF_PROP_SAMPLE_OA, + + /** + * The value specifies which set of OA unit metrics should be + * configured, defining the contents of any OA unit reports. + */ + DRM_I915_PERF_PROP_OA_METRICS_SET, + + /** + * The value specifies the size and layout of OA unit reports. + */ + DRM_I915_PERF_PROP_OA_FORMAT, + + /** + * Specifying this property implicitly requests periodic OA unit + * sampling and (at least on Haswell) the sampling frequency is derived + * from this exponent as follows: + * + * 80ns * 2^(period_exponent + 1) + */ + DRM_I915_PERF_PROP_OA_EXPONENT, + + DRM_I915_PERF_PROP_MAX /* non-ABI */ +}; + +struct drm_i915_perf_open_param { + __u32 flags; +#define I915_PERF_FLAG_FD_CLOEXEC (1<<0) +#define I915_PERF_FLAG_FD_NONBLOCK (1<<1) +#define I915_PERF_FLAG_DISABLED (1<<2) + + /** The number of u64 (id, value) pairs */ + __u32 num_properties; + + /** + * Pointer to array of u64 (id, value) pairs configuring the stream + * to open. + */ + __u64 properties_ptr; +}; + +/** + * Enable data capture for a stream that was either opened in a disabled state + * via I915_PERF_FLAG_DISABLED or was later disabled via + * I915_PERF_IOCTL_DISABLE. + * + * It is intended to be cheaper to disable and enable a stream than it may be + * to close and re-open a stream with the same configuration. + * + * It's undefined whether any pending data for the stream will be lost. + */ +#define I915_PERF_IOCTL_ENABLE _IO('i', 0x0) + +/** + * Disable data capture for a stream. + * + * It is an error to try to read a stream that is disabled. + */ +#define I915_PERF_IOCTL_DISABLE _IO('i', 0x1) + +/** + * Common to all i915 perf records + */ +struct drm_i915_perf_record_header { + __u32 type; + __u16 pad; + __u16 size; +}; + +enum drm_i915_perf_record_type { + + /** + * Samples are the workhorse record type whose contents are extensible + * and defined when opening an i915 perf stream based on the given + * properties. + * + * Boolean properties following the naming convention + * DRM_I915_PERF_PROP_SAMPLE_xyz request the inclusion of 'xyz' data in + * every sample. + * + * The order of these sample properties given by userspace has no + * effect on the ordering of data within a sample. The order is + * documented here. + * + * struct { + * struct drm_i915_perf_record_header header; + * + * { u32 oa_report[]; } && DRM_I915_PERF_PROP_SAMPLE_OA + * }; + */ + DRM_I915_PERF_RECORD_SAMPLE = 1, + + /* + * Indicates that one or more OA reports were not written by the + * hardware.
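Putting drm_i915_perf_open_param together from userspace looks roughly like the following sketch. It assumes the patched i915_drm.h is installed; the metrics-set id and exponent below are placeholders whose valid values are platform-specific, and without DRM_I915_PERF_PROP_CTX_HANDLE the open typically requires elevated privileges:

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

int main(void)
{
	int drm_fd = open("/dev/dri/card0", O_RDWR);	/* device path may vary */
	if (drm_fd < 0)
		return 1;

	uint64_t props[] = {	/* (id, value) pairs, as documented above */
		DRM_I915_PERF_PROP_SAMPLE_OA,      1,
		DRM_I915_PERF_PROP_OA_METRICS_SET, 1,	/* placeholder id */
		DRM_I915_PERF_PROP_OA_FORMAT,      I915_OA_FORMAT_A45_B8_C8,
		DRM_I915_PERF_PROP_OA_EXPONENT,    16,	/* placeholder period */
	};
	struct drm_i915_perf_open_param param = {
		.flags = I915_PERF_FLAG_FD_CLOEXEC,
		.num_properties = sizeof(props) / (2 * sizeof(props[0])),
		.properties_ptr = (uintptr_t)props,
	};

	/* On success the ioctl returns a new stream fd. */
	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
	if (stream_fd < 0)
		perror("DRM_IOCTL_I915_PERF_OPEN");
	else
		close(stream_fd);	/* samples would be read(2) from this fd */
	close(drm_fd);
	return stream_fd < 0;
}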
This can happen for example if an MI_REPORT_PERF_COUNT + * command collides with periodic sampling - which would be more likely + * at higher sampling frequencies. + */ + DRM_I915_PERF_RECORD_OA_REPORT_LOST = 2, + + /** + * An error occurred that resulted in all pending OA reports being lost. + */ + DRM_I915_PERF_RECORD_OA_BUFFER_LOST = 3, + + DRM_I915_PERF_RECORD_MAX /* non-ABI */ +}; + #if defined(__cplusplus) } #endif diff --git a/include/uapi/drm/omap_drm.h b/include/uapi/drm/omap_drm.h index 407cb55df6ac..7fb97863c945 100644 --- a/include/uapi/drm/omap_drm.h +++ b/include/uapi/drm/omap_drm.h @@ -33,8 +33,8 @@ extern "C" { #define OMAP_PARAM_CHIPSET_ID 1 /* ie. 0x3430, 0x4430, etc */ struct drm_omap_param { - uint64_t param; /* in */ - uint64_t value; /* in (set_param), out (get_param) */ + __u64 param; /* in */ + __u64 value; /* in (set_param), out (get_param) */ }; #define OMAP_BO_SCANOUT 0x00000001 /* scanout capable (phys contiguous) */ @@ -53,18 +53,18 @@ struct drm_omap_param { #define OMAP_BO_TILED (OMAP_BO_TILED_8 | OMAP_BO_TILED_16 | OMAP_BO_TILED_32) union omap_gem_size { - uint32_t bytes; /* (for non-tiled formats) */ + __u32 bytes; /* (for non-tiled formats) */ struct { - uint16_t width; - uint16_t height; + __u16 width; + __u16 height; } tiled; /* (for tiled formats) */ }; struct drm_omap_gem_new { union omap_gem_size size; /* in */ - uint32_t flags; /* in */ - uint32_t handle; /* out */ - uint32_t __pad; + __u32 flags; /* in */ + __u32 handle; /* out */ + __u32 __pad; }; /* mask of operations: */ @@ -74,33 +74,33 @@ enum omap_gem_op { }; struct drm_omap_gem_cpu_prep { - uint32_t handle; /* buffer handle (in) */ - uint32_t op; /* mask of omap_gem_op (in) */ + __u32 handle; /* buffer handle (in) */ + __u32 op; /* mask of omap_gem_op (in) */ }; struct drm_omap_gem_cpu_fini { - uint32_t handle; /* buffer handle (in) */ - uint32_t op; /* mask of omap_gem_op (in) */ + __u32 handle; /* buffer handle (in) */ + __u32 op; /* mask of omap_gem_op (in) */ /* TODO maybe here we pass down info about what regions are touched * by sw so we can be clever about cache ops? For now a placeholder, * set to zero and we just do full buffer flush.. */ - uint32_t nregions; - uint32_t __pad; + __u32 nregions; + __u32 __pad; }; struct drm_omap_gem_info { - uint32_t handle; /* buffer handle (in) */ - uint32_t pad; - uint64_t offset; /* mmap offset (out) */ + __u32 handle; /* buffer handle (in) */ + __u32 pad; + __u64 offset; /* mmap offset (out) */ /* note: in case of tiled buffers, the user virtual size can be * different from the physical size (ie. how many pages are needed * to back the object) which is returned in DRM_IOCTL_GEM_OPEN.. 
* This size here is the one that should be used if you want to * mmap() the buffer: */ - uint32_t size; /* virtual size for mmap'ing (out) */ - uint32_t __pad; + __u32 size; /* virtual size for mmap'ing (out) */ + __u32 __pad; }; #define DRM_OMAP_GET_PARAM 0x00 diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild index f330ba4547cf..dd9820b1c779 100644 --- a/include/uapi/linux/Kbuild +++ b/include/uapi/linux/Kbuild @@ -64,6 +64,7 @@ header-y += auto_fs.h header-y += auxvec.h header-y += ax25.h header-y += b1lli.h +header-y += batman_adv.h header-y += baycom.h header-y += bcm933xx_hcs.h header-y += bfs_fs.h @@ -109,6 +110,7 @@ header-y += dlm_netlink.h header-y += dlm_plock.h header-y += dm-ioctl.h header-y += dm-log-userspace.h +header-y += dma-buf.h header-y += dn.h header-y += dqblk_xfs.h header-y += edd.h @@ -194,6 +196,7 @@ header-y += if_tun.h header-y += if_tunnel.h header-y += if_vlan.h header-y += if_x25.h +header-y += ife.h header-y += igmp.h header-y += ila.h header-y += in6.h @@ -305,6 +308,7 @@ header-y += netrom.h header-y += net_namespace.h header-y += net_tstamp.h header-y += nfc.h +header-y += psample.h header-y += nfs2.h header-y += nfs3.h header-y += nfs4.h @@ -379,6 +383,10 @@ header-y += sctp.h header-y += sdla.h header-y += seccomp.h header-y += securebits.h +header-y += seg6_genl.h +header-y += seg6.h +header-y += seg6_hmac.h +header-y += seg6_iptunnel.h header-y += selinux_netlink.h header-y += sem.h header-y += serial_core.h @@ -458,6 +466,7 @@ header-y += virtio_console.h header-y += virtio_gpu.h header-y += virtio_ids.h header-y += virtio_input.h +header-y += virtio_mmio.h header-y += virtio_net.h header-y += virtio_pci.h header-y += virtio_ring.h diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h index 41420e341e75..51f891fb1b18 100644 --- a/include/uapi/linux/android/binder.h +++ b/include/uapi/linux/android/binder.h @@ -33,6 +33,8 @@ enum { BINDER_TYPE_HANDLE = B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE), BINDER_TYPE_WEAK_HANDLE = B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE), BINDER_TYPE_FD = B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE), + BINDER_TYPE_FDA = B_PACK_CHARS('f', 'd', 'a', B_TYPE_LARGE), + BINDER_TYPE_PTR = B_PACK_CHARS('p', 't', '*', B_TYPE_LARGE), }; enum { @@ -48,6 +50,14 @@ typedef __u64 binder_size_t; typedef __u64 binder_uintptr_t; #endif +/** + * struct binder_object_header - header shared by all binder metadata objects. + * @type: type of the object + */ +struct binder_object_header { + __u32 type; +}; + /* * This is the flattened representation of a Binder object for transfer * between processes. The 'offsets' supplied as part of a binder transaction @@ -56,9 +66,8 @@ typedef __u64 binder_uintptr_t; * between processes. */ struct flat_binder_object { - /* 8 bytes for large_flat_header. */ - __u32 type; - __u32 flags; + struct binder_object_header hdr; + __u32 flags; /* 8 bytes of data. */ union { @@ -70,6 +79,84 @@ struct flat_binder_object { binder_uintptr_t cookie; }; +/** + * struct binder_fd_object - describes a file descriptor to be fixed up.
+ @hdr: common header structure + @pad_flags: padding to remain compatible with old userspace code + @pad_binder: padding to remain compatible with old userspace code + @fd: file descriptor + @cookie: opaque data, used by user-space + */ +struct binder_fd_object { + struct binder_object_header hdr; + __u32 pad_flags; + union { + binder_uintptr_t pad_binder; + __u32 fd; + }; + + binder_uintptr_t cookie; +}; + +/* struct binder_buffer_object - object describing a userspace buffer + * @hdr: common header structure + * @flags: one or more BINDER_BUFFER_* flags + * @buffer: address of the buffer + * @length: length of the buffer + * @parent: index in offset array pointing to parent buffer + * @parent_offset: offset in @parent pointing to this buffer + * + * A binder_buffer object represents an object that the + * binder kernel driver can copy verbatim to the target + * address space. A buffer itself may be pointed to from + * within another buffer, meaning that the pointer inside + * that other buffer needs to be fixed up as well. This + * can be done by setting the BINDER_BUFFER_FLAG_HAS_PARENT + * flag in @flags, by setting @parent buffer to the index + * in the offset array pointing to the parent binder_buffer_object, + * and by setting @parent_offset to the offset in the parent buffer + * at which the pointer to this buffer is located. + */ +struct binder_buffer_object { + struct binder_object_header hdr; + __u32 flags; + binder_uintptr_t buffer; + binder_size_t length; + binder_size_t parent; + binder_size_t parent_offset; +}; + +enum { + BINDER_BUFFER_FLAG_HAS_PARENT = 0x01, +}; + +/* struct binder_fd_array_object - object describing an array of fds in a buffer + * @hdr: common header structure + * @num_fds: number of file descriptors in the buffer + * @parent: index in offset array to buffer holding the fd array + * @parent_offset: start offset of fd array in the buffer + * + * A binder_fd_array object represents an array of file + * descriptors embedded in a binder_buffer_object. It is + * different from a regular binder_buffer_object because it + * describes a list of file descriptors to fix up, not an opaque + * blob of memory, and hence the kernel needs to treat it differently. + * + * An example of how this would be used is with Android's + * native_handle_t object, which is a struct with a list of integers + * and a list of file descriptors. The native_handle_t struct itself + * will be represented by a struct binder_buffer_object, whereas the + * embedded list of file descriptors is represented by a + * struct binder_fd_array_object with that binder_buffer_object as + * a parent. + */ +struct binder_fd_array_object { + struct binder_object_header hdr; + binder_size_t num_fds; + binder_size_t parent; + binder_size_t parent_offset; +}; + /* * On 64-bit platforms where user code may run in 32-bits the driver must * translate the buffer (and local binder) addresses appropriately. @@ -162,6 +249,11 @@ struct binder_transaction_data { } data; }; +struct binder_transaction_data_sg { + struct binder_transaction_data transaction_data; + binder_size_t buffers_size; +}; + struct binder_ptr_cookie { binder_uintptr_t ptr; binder_uintptr_t cookie; @@ -346,6 +438,12 @@ enum binder_driver_command_protocol { /* * void *: cookie */ + + BC_TRANSACTION_SG = _IOW('c', 17, struct binder_transaction_data_sg), + BC_REPLY_SG = _IOW('c', 18, struct binder_transaction_data_sg), + /* + * binder_transaction_data_sg: the sent command.
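The parent/parent_offset linkage reads more easily with a concrete fragment. A hypothetical payload carrying two embedded fds would be described by a BINDER_TYPE_PTR buffer object plus a BINDER_TYPE_FDA object pointing back into it. A sketch only, omitting the surrounding transaction and offsets-array plumbing, and assuming the patched uapi header is installed:

#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <linux/android/binder.h>

struct payload {	/* hypothetical userspace structure */
	int fds[2];
};

static void describe_payload(struct payload *p,
			     struct binder_buffer_object *bbo,
			     struct binder_fd_array_object *fda)
{
	memset(bbo, 0, sizeof(*bbo));
	bbo->hdr.type = BINDER_TYPE_PTR;
	bbo->buffer = (binder_uintptr_t)(uintptr_t)p;
	bbo->length = sizeof(*p);

	memset(fda, 0, sizeof(*fda));
	fda->hdr.type = BINDER_TYPE_FDA;
	fda->num_fds = 2;
	fda->parent = 0;	/* index of bbo in the offsets array */
	fda->parent_offset = offsetof(struct payload, fds);
}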
+ */ }; #endif /* _UAPI_LINUX_BINDER_H */ diff --git a/include/uapi/linux/auto_dev-ioctl.h b/include/uapi/linux/auto_dev-ioctl.h index 021ed331dd71..744b3d060968 100644 --- a/include/uapi/linux/auto_dev-ioctl.h +++ b/include/uapi/linux/auto_dev-ioctl.h @@ -113,17 +113,13 @@ struct autofs_dev_ioctl { static inline void init_autofs_dev_ioctl(struct autofs_dev_ioctl *in) { - memset(in, 0, sizeof(struct autofs_dev_ioctl)); + memset(in, 0, AUTOFS_DEV_IOCTL_SIZE); in->ver_major = AUTOFS_DEV_IOCTL_VERSION_MAJOR; in->ver_minor = AUTOFS_DEV_IOCTL_VERSION_MINOR; - in->size = sizeof(struct autofs_dev_ioctl); + in->size = AUTOFS_DEV_IOCTL_SIZE; in->ioctlfd = -1; } -/* - * If you change this make sure you make the corresponding change - * to autofs-dev-ioctl.c:lookup_ioctl() - */ enum { /* Get various version info */ AUTOFS_DEV_IOCTL_VERSION_CMD = 0x71, @@ -160,8 +156,6 @@ enum { AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD, }; -#define AUTOFS_IOCTL 0x93 - #define AUTOFS_DEV_IOCTL_VERSION \ _IOWR(AUTOFS_IOCTL, \ AUTOFS_DEV_IOCTL_VERSION_CMD, struct autofs_dev_ioctl) diff --git a/include/uapi/linux/auto_fs.h b/include/uapi/linux/auto_fs.h index 1bfc3ed8b284..aa63451ef20a 100644 --- a/include/uapi/linux/auto_fs.h +++ b/include/uapi/linux/auto_fs.h @@ -61,12 +61,23 @@ struct autofs_packet_expire { char name[NAME_MAX+1]; }; -#define AUTOFS_IOC_READY _IO(0x93, 0x60) -#define AUTOFS_IOC_FAIL _IO(0x93, 0x61) -#define AUTOFS_IOC_CATATONIC _IO(0x93, 0x62) -#define AUTOFS_IOC_PROTOVER _IOR(0x93, 0x63, int) -#define AUTOFS_IOC_SETTIMEOUT32 _IOWR(0x93, 0x64, compat_ulong_t) -#define AUTOFS_IOC_SETTIMEOUT _IOWR(0x93, 0x64, unsigned long) -#define AUTOFS_IOC_EXPIRE _IOR(0x93, 0x65, struct autofs_packet_expire) +#define AUTOFS_IOCTL 0x93 + +enum { + AUTOFS_IOC_READY_CMD = 0x60, + AUTOFS_IOC_FAIL_CMD, + AUTOFS_IOC_CATATONIC_CMD, + AUTOFS_IOC_PROTOVER_CMD, + AUTOFS_IOC_SETTIMEOUT_CMD, + AUTOFS_IOC_EXPIRE_CMD, +}; + +#define AUTOFS_IOC_READY _IO(AUTOFS_IOCTL, AUTOFS_IOC_READY_CMD) +#define AUTOFS_IOC_FAIL _IO(AUTOFS_IOCTL, AUTOFS_IOC_FAIL_CMD) +#define AUTOFS_IOC_CATATONIC _IO(AUTOFS_IOCTL, AUTOFS_IOC_CATATONIC_CMD) +#define AUTOFS_IOC_PROTOVER _IOR(AUTOFS_IOCTL, AUTOFS_IOC_PROTOVER_CMD, int) +#define AUTOFS_IOC_SETTIMEOUT32 _IOWR(AUTOFS_IOCTL, AUTOFS_IOC_SETTIMEOUT_CMD, compat_ulong_t) +#define AUTOFS_IOC_SETTIMEOUT _IOWR(AUTOFS_IOCTL, AUTOFS_IOC_SETTIMEOUT_CMD, unsigned long) +#define AUTOFS_IOC_EXPIRE _IOR(AUTOFS_IOCTL, AUTOFS_IOC_EXPIRE_CMD, struct autofs_packet_expire) #endif /* _UAPI_LINUX_AUTO_FS_H */ diff --git a/include/uapi/linux/auto_fs4.h b/include/uapi/linux/auto_fs4.h index 8f8f1bdcca8c..7c6da423d54e 100644 --- a/include/uapi/linux/auto_fs4.h +++ b/include/uapi/linux/auto_fs4.h @@ -148,10 +148,16 @@ union autofs_v5_packet_union { autofs_packet_expire_direct_t expire_direct; }; -#define AUTOFS_IOC_EXPIRE_MULTI _IOW(0x93, 0x66, int) -#define AUTOFS_IOC_EXPIRE_INDIRECT AUTOFS_IOC_EXPIRE_MULTI -#define AUTOFS_IOC_EXPIRE_DIRECT AUTOFS_IOC_EXPIRE_MULTI -#define AUTOFS_IOC_PROTOSUBVER _IOR(0x93, 0x67, int) -#define AUTOFS_IOC_ASKUMOUNT _IOR(0x93, 0x70, int) +enum { + AUTOFS_IOC_EXPIRE_MULTI_CMD = 0x66, /* AUTOFS_IOC_EXPIRE_CMD + 1 */ + AUTOFS_IOC_PROTOSUBVER_CMD, + AUTOFS_IOC_ASKUMOUNT_CMD = 0x70, /* AUTOFS_DEV_IOCTL_VERSION_CMD - 1 */ +}; + +#define AUTOFS_IOC_EXPIRE_MULTI _IOW(AUTOFS_IOCTL, AUTOFS_IOC_EXPIRE_MULTI_CMD, int) +#define AUTOFS_IOC_EXPIRE_INDIRECT AUTOFS_IOC_EXPIRE_MULTI +#define AUTOFS_IOC_EXPIRE_DIRECT AUTOFS_IOC_EXPIRE_MULTI +#define AUTOFS_IOC_PROTOSUBVER _IOR(AUTOFS_IOCTL, AUTOFS_IOC_PROTOSUBVER_CMD, int) 
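The renumbering above keeps every command value identical (the enum entries pick up exactly where the old literal numbers left off), so existing binaries are unaffected; only the construction via AUTOFS_IOCTL and the _CMD enums is new. For illustration, a minimal userspace caller of one of the _IOR-encoded commands (the mount point path is hypothetical):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/auto_fs.h>

int main(void)
{
	int fd = open("/mnt/autofs", O_RDONLY);	/* hypothetical autofs mount */
	if (fd < 0)
		return 1;

	int ver;
	/* _IOR direction: the kernel writes the version back to userland. */
	if (ioctl(fd, AUTOFS_IOC_PROTOVER, &ver) == 0)
		printf("autofs protocol version %d\n", ver);
	close(fd);
	return 0;
}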
+#define AUTOFS_IOC_ASKUMOUNT _IOR(AUTOFS_IOCTL, AUTOFS_IOC_ASKUMOUNT_CMD, int) #endif /* _LINUX_AUTO_FS4_H */ diff --git a/include/uapi/linux/batman_adv.h b/include/uapi/linux/batman_adv.h index 734fe83ab645..a83ddb7b63db 100644 --- a/include/uapi/linux/batman_adv.h +++ b/include/uapi/linux/batman_adv.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2016 B.A.T.M.A.N. contributors: +/* Copyright (C) 2016-2017 B.A.T.M.A.N. contributors: * * Matthias Schiffer * diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index d2b0ac799d03..0539a0ceef38 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -63,6 +63,12 @@ struct bpf_insn { __s32 imm; /* signed immediate constant */ }; +/* Key of a BPF_MAP_TYPE_LPM_TRIE entry */ +struct bpf_lpm_trie_key { + __u32 prefixlen; /* up to 32 for AF_INET, 128 for AF_INET6 */ + __u8 data[0]; /* Arbitrary size */ +}; + /* BPF syscall commands, see bpf(2) man-page for details. */ enum bpf_cmd { BPF_MAP_CREATE, @@ -89,6 +95,7 @@ enum bpf_map_type { BPF_MAP_TYPE_CGROUP_ARRAY, BPF_MAP_TYPE_LRU_HASH, BPF_MAP_TYPE_LRU_PERCPU_HASH, + BPF_MAP_TYPE_LPM_TRIE, }; enum bpf_prog_type { @@ -437,6 +444,18 @@ union bpf_attr { * @xdp_md: pointer to xdp_md * @delta: A positive/negative integer to be added to xdp_md.data * Return: 0 on success or negative on error + * + * int bpf_probe_read_str(void *dst, int size, const void *unsafe_ptr) + * Copy a NUL terminated string from unsafe address. In case the string + * length is smaller than size, the target is not padded with further NUL + * bytes. In case the string length is larger than size, just size-1 + * bytes are copied and the last byte is set to NUL. + * @dst: destination address + * @size: maximum number of bytes to copy, including the trailing NUL + * @unsafe_ptr: unsafe address + * Return: + * > 0 length of the string including the trailing NUL on success + * < 0 error */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -483,7 +502,8 @@ union bpf_attr { FN(set_hash_invalid), \ FN(get_numa_node_id), \ FN(skb_change_head), \ - FN(xdp_adjust_head), + FN(xdp_adjust_head), \ + FN(probe_read_str), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call @@ -509,6 +529,7 @@ enum bpf_func_id { /* BPF_FUNC_l4_csum_replace flags. */ #define BPF_F_PSEUDO_HDR (1ULL << 4) #define BPF_F_MARK_MANGLED_0 (1ULL << 5) +#define BPF_F_MARK_ENFORCE (1ULL << 6) /* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags.
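Because bpf_lpm_trie_key ends in a zero-length data[] member (a GNU C idiom), userspace typically wraps it in a sized struct before filling it in. A sketch for an IPv4 /24 key follows; map creation and the bpf(2) update call are omitted, and the patched linux/bpf.h is assumed:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>
#include <linux/bpf.h>

struct lpm_key_v4 {
	struct bpf_lpm_trie_key trie_key;
	uint8_t addr[4];	/* storage backing trie_key.data */
};

int main(void)
{
	struct lpm_key_v4 key;

	memset(&key, 0, sizeof(key));
	key.trie_key.prefixlen = 24;	/* match 192.168.0.0/24 */
	inet_pton(AF_INET, "192.168.0.0", key.addr);

	/* key would now be passed to BPF_MAP_UPDATE_ELEM / BPF_MAP_LOOKUP_ELEM
	 * against a BPF_MAP_TYPE_LPM_TRIE map. */
	printf("prefixlen=%u\n", key.trie_key.prefixlen);
	return 0;
}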
*/ #define BPF_F_INGRESS (1ULL << 0) diff --git a/include/uapi/linux/can/netlink.h b/include/uapi/linux/can/netlink.h index 94ffe0c83ce7..fdf75f74fdaf 100644 --- a/include/uapi/linux/can/netlink.h +++ b/include/uapi/linux/can/netlink.h @@ -127,9 +127,16 @@ enum { IFLA_CAN_BERR_COUNTER, IFLA_CAN_DATA_BITTIMING, IFLA_CAN_DATA_BITTIMING_CONST, + IFLA_CAN_TERMINATION, + IFLA_CAN_TERMINATION_CONST, + IFLA_CAN_BITRATE_CONST, + IFLA_CAN_DATA_BITRATE_CONST, __IFLA_CAN_MAX }; #define IFLA_CAN_MAX (__IFLA_CAN_MAX - 1) +/* u16 termination range: 1..65535 Ohms */ +#define CAN_TERMINATION_DISABLED 0 + #endif /* !_UAPI_CAN_NETLINK_H */ diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index 9014c33d4e77..0f1f3a12e23c 100644 --- a/include/uapi/linux/devlink.h +++ b/include/uapi/linux/devlink.h @@ -57,8 +57,14 @@ enum devlink_command { DEVLINK_CMD_SB_OCC_SNAPSHOT, DEVLINK_CMD_SB_OCC_MAX_CLEAR, - DEVLINK_CMD_ESWITCH_MODE_GET, - DEVLINK_CMD_ESWITCH_MODE_SET, + DEVLINK_CMD_ESWITCH_GET, +#define DEVLINK_CMD_ESWITCH_MODE_GET /* obsolete, never use this! */ \ + DEVLINK_CMD_ESWITCH_GET + + DEVLINK_CMD_ESWITCH_SET, +#define DEVLINK_CMD_ESWITCH_MODE_SET /* obsolete, never use this! */ \ + DEVLINK_CMD_ESWITCH_SET + /* add new commands above here */ __DEVLINK_CMD_MAX, diff --git a/include/uapi/linux/fcntl.h b/include/uapi/linux/fcntl.h index beed138bd359..813afd6eee71 100644 --- a/include/uapi/linux/fcntl.h +++ b/include/uapi/linux/fcntl.h @@ -63,5 +63,10 @@ #define AT_NO_AUTOMOUNT 0x800 /* Suppress terminal automount traversal */ #define AT_EMPTY_PATH 0x1000 /* Allow empty relative pathname */ +#define AT_STATX_SYNC_TYPE 0x6000 /* Type of synchronisation required from statx() */ +#define AT_STATX_SYNC_AS_STAT 0x0000 /* - Do whatever stat() does */ +#define AT_STATX_FORCE_SYNC 0x2000 /* - Force the attributes to be sync'd with the server */ +#define AT_STATX_DONT_SYNC 0x4000 /* - Don't sync attributes with the server */ + #endif /* _UAPI_LINUX_FCNTL_H */ diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h index 36da93fbf188..048a85e9f017 100644 --- a/include/uapi/linux/fs.h +++ b/include/uapi/linux/fs.h @@ -132,6 +132,7 @@ struct inodes_stat_t { #define MS_LAZYTIME (1<<25) /* Update the on-disk [acm]times lazily */ /* These sb flags are internal to the kernel */ +#define MS_SUBMOUNT (1<<26) #define MS_NOREMOTELOCK (1<<27) #define MS_NOSEC (1<<28) #define MS_BORN (1<<29) diff --git a/include/uapi/linux/if.h b/include/uapi/linux/if.h index 1158a043342a..259617a551f2 100644 --- a/include/uapi/linux/if.h +++ b/include/uapi/linux/if.h @@ -24,6 +24,10 @@ #include <linux/socket.h> /* for "struct sockaddr" et al */ #include <linux/compiler.h> /* for "__user" et al */ +#ifndef __KERNEL__ +#include <sys/socket.h> /* for struct sockaddr. 
*/ +#endif + #if __UAPI_DEF_IF_IFNAMSIZ #define IFNAMSIZ 16 #endif /* __UAPI_DEF_IF_IFNAMSIZ */ diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h index ab92bca6d448..a9e6244ce438 100644 --- a/include/uapi/linux/if_bridge.h +++ b/include/uapi/linux/if_bridge.h @@ -118,6 +118,7 @@ enum { IFLA_BRIDGE_FLAGS, IFLA_BRIDGE_MODE, IFLA_BRIDGE_VLAN_INFO, + IFLA_BRIDGE_VLAN_TUNNEL_INFO, __IFLA_BRIDGE_MAX, }; #define IFLA_BRIDGE_MAX (__IFLA_BRIDGE_MAX - 1) @@ -134,6 +135,16 @@ struct bridge_vlan_info { __u16 vid; }; +enum { + IFLA_BRIDGE_VLAN_TUNNEL_UNSPEC, + IFLA_BRIDGE_VLAN_TUNNEL_ID, + IFLA_BRIDGE_VLAN_TUNNEL_VID, + IFLA_BRIDGE_VLAN_TUNNEL_FLAGS, + __IFLA_BRIDGE_VLAN_TUNNEL_MAX, +}; + +#define IFLA_BRIDGE_VLAN_TUNNEL_MAX (__IFLA_BRIDGE_VLAN_TUNNEL_MAX - 1) + struct bridge_vlan_xstats { __u64 rx_bytes; __u64 rx_packets; diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h index 3e5185e9ef03..5bc9bfd816b7 100644 --- a/include/uapi/linux/if_ether.h +++ b/include/uapi/linux/if_ether.h @@ -93,6 +93,7 @@ #define ETH_P_NCSI 0x88F8 /* NCSI protocol */ #define ETH_P_PRP 0x88FB /* IEC 62439-3 PRP/HSRv0 */ #define ETH_P_FCOE 0x8906 /* Fibre Channel over Ethernet */ +#define ETH_P_IBOE 0x8915 /* Infiniband over Ethernet */ #define ETH_P_TDLS 0x890D /* TDLS */ #define ETH_P_FIP 0x8914 /* FCoE Initialization Protocol */ #define ETH_P_80221 0x8917 /* IEEE 802.21 Media Independent Handover Protocol */ diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 6b13e591abc9..320fc1e747ee 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -321,6 +321,8 @@ enum { IFLA_BRPORT_MULTICAST_ROUTER, IFLA_BRPORT_PAD, IFLA_BRPORT_MCAST_FLOOD, + IFLA_BRPORT_MCAST_TO_UCAST, + IFLA_BRPORT_VLAN_TUNNEL, __IFLA_BRPORT_MAX }; #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1) @@ -847,6 +849,7 @@ enum { IFLA_STATS_LINK_XSTATS, IFLA_STATS_LINK_XSTATS_SLAVE, IFLA_STATS_LINK_OFFLOAD_XSTATS, + IFLA_STATS_AF_SPEC, __IFLA_STATS_MAX, }; diff --git a/include/uapi/linux/ife.h b/include/uapi/linux/ife.h new file mode 100644 index 000000000000..2954da32e012 --- /dev/null +++ b/include/uapi/linux/ife.h @@ -0,0 +1,18 @@ +#ifndef __UAPI_IFE_H +#define __UAPI_IFE_H + +#define IFE_METAHDRLEN 2 + +enum { + IFE_META_SKBMARK = 1, + IFE_META_HASHID, + IFE_META_PRIO, + IFE_META_QMAP, + IFE_META_TCINDEX, + __IFE_META_MAX +}; + +/*Can be overridden at runtime by module option*/ +#define IFE_META_MAX (__IFE_META_MAX - 1) + +#endif diff --git a/include/uapi/linux/igmp.h b/include/uapi/linux/igmp.h index ccbb32aa6704..a97f9a7568cf 100644 --- a/include/uapi/linux/igmp.h +++ b/include/uapi/linux/igmp.h @@ -53,7 +53,7 @@ struct igmpv3_grec { struct igmpv3_report { __u8 type; __u8 resv1; - __be16 csum; + __sum16 csum; __be16 resv2; __be16 ngrec; struct igmpv3_grec grec[0]; @@ -62,7 +62,7 @@ struct igmpv3_report { struct igmpv3_query { __u8 type; __u8 code; - __be16 csum; + __sum16 csum; __be32 group; #if defined(__LITTLE_ENDIAN_BITFIELD) __u8 qrv:3, diff --git a/include/uapi/linux/iio/types.h b/include/uapi/linux/iio/types.h index e54d14a7f876..ffafd6c25a48 100644 --- a/include/uapi/linux/iio/types.h +++ b/include/uapi/linux/iio/types.h @@ -42,6 +42,7 @@ enum iio_chan_type { IIO_ELECTRICALCONDUCTIVITY, IIO_COUNT, IIO_INDEX, + IIO_GRAVITY, }; enum iio_modifier { diff --git a/include/uapi/linux/ip6_tunnel.h b/include/uapi/linux/ip6_tunnel.h index 48af63c9a48d..425926c467d7 100644 --- a/include/uapi/linux/ip6_tunnel.h +++ b/include/uapi/linux/ip6_tunnel.h @@ -2,6 +2,8 @@ 
#define _IP6_TUNNEL_H #include <linux/types.h> +#include <linux/if.h> /* For IFNAMSIZ. */ +#include <linux/in6.h> /* For struct in6_addr. */ #define IPV6_TLV_TNL_ENCAP_LIMIT 4 #define IPV6_DEFAULT_TNL_ENCAP_LIMIT 4 diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h index eaf65dc82e22..8ef9e75e004e 100644 --- a/include/uapi/linux/ipv6.h +++ b/include/uapi/linux/ipv6.h @@ -182,6 +182,7 @@ enum { DEVCONF_SEG6_ENABLED, DEVCONF_SEG6_REQUIRE_HMAC, DEVCONF_ENHANCED_DAD, + DEVCONF_ADDR_GEN_MODE, DEVCONF_MAX }; diff --git a/include/uapi/linux/ipv6_route.h b/include/uapi/linux/ipv6_route.h index f6598d1c886e..85bbb1799df3 100644 --- a/include/uapi/linux/ipv6_route.h +++ b/include/uapi/linux/ipv6_route.h @@ -14,6 +14,7 @@ #define _UAPI_LINUX_IPV6_ROUTE_H #include <linux/types.h> +#include <linux/in6.h> /* For struct in6_addr. */ #define RTF_DEFAULT 0x00010000 /* default - learned via ND */ #define RTF_ALLONLINK 0x00020000 /* (deprecated and will be removed) diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index cac48eda1075..f51d5082a377 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -218,7 +218,8 @@ struct kvm_hyperv_exit { struct kvm_run { /* in */ __u8 request_interrupt_window; - __u8 padding1[7]; + __u8 immediate_exit; + __u8 padding1[6]; /* out */ __u32 exit_reason; @@ -685,6 +686,13 @@ struct kvm_ppc_smmu_info { struct kvm_ppc_one_seg_page_size sps[KVM_PPC_PAGE_SIZES_MAX_SZ]; }; +/* for KVM_PPC_RESIZE_HPT_{PREPARE,COMMIT} */ +struct kvm_ppc_resize_hpt { + __u64 flags; + __u32 shift; + __u32 pad; +}; + #define KVMIO 0xAE /* machine type bits, to be used as argument to KVM_CREATE_VM */ @@ -871,6 +879,10 @@ struct kvm_ppc_smmu_info { #define KVM_CAP_S390_USER_INSTR0 130 #define KVM_CAP_MSI_DEVID 131 #define KVM_CAP_PPC_HTM 132 +#define KVM_CAP_SPAPR_RESIZE_HPT 133 +#define KVM_CAP_PPC_MMU_RADIX 134 +#define KVM_CAP_PPC_MMU_HASH_V3 135 +#define KVM_CAP_IMMEDIATE_EXIT 136 #ifdef KVM_CAP_IRQ_ROUTING @@ -1187,6 +1199,13 @@ struct kvm_s390_ucas_mapping { #define KVM_ARM_SET_DEVICE_ADDR _IOW(KVMIO, 0xab, struct kvm_arm_device_addr) /* Available with KVM_CAP_PPC_RTAS */ #define KVM_PPC_RTAS_DEFINE_TOKEN _IOW(KVMIO, 0xac, struct kvm_rtas_token_args) +/* Available with KVM_CAP_SPAPR_RESIZE_HPT */ +#define KVM_PPC_RESIZE_HPT_PREPARE _IOR(KVMIO, 0xad, struct kvm_ppc_resize_hpt) +#define KVM_PPC_RESIZE_HPT_COMMIT _IOR(KVMIO, 0xae, struct kvm_ppc_resize_hpt) +/* Available with KVM_CAP_PPC_RADIX_MMU or KVM_CAP_PPC_HASH_MMU_V3 */ +#define KVM_PPC_CONFIGURE_V3_MMU _IOW(KVMIO, 0xaf, struct kvm_ppc_mmuv3_cfg) +/* Available with KVM_CAP_PPC_RADIX_MMU */ +#define KVM_PPC_GET_RMMU_INFO _IOW(KVMIO, 0xb0, struct kvm_ppc_rmmu_info) /* ioctl for vm fd */ #define KVM_CREATE_DEVICE _IOWR(KVMIO, 0xe0, struct kvm_create_device) diff --git a/include/uapi/linux/kvm_para.h b/include/uapi/linux/kvm_para.h index bf6cd7d5cac2..fed506aeff62 100644 --- a/include/uapi/linux/kvm_para.h +++ b/include/uapi/linux/kvm_para.h @@ -14,6 +14,7 @@ #define KVM_EFAULT EFAULT #define KVM_E2BIG E2BIG #define KVM_EPERM EPERM +#define KVM_EOPNOTSUPP 95 #define KVM_HC_VAPIC_POLL_IRQ 1 #define KVM_HC_MMU_OP 2 @@ -23,6 +24,7 @@ #define KVM_HC_MIPS_GET_CLOCK_FREQ 6 #define KVM_HC_MIPS_EXIT_VM 7 #define KVM_HC_MIPS_CONSOLE_OUTPUT 8 +#define KVM_HC_CLOCK_PAIRING 9 /* * hypercalls use architecture specific diff --git a/include/uapi/linux/llc.h b/include/uapi/linux/llc.h index 9c987a402473..a6c17f66ee94 100644 --- a/include/uapi/linux/llc.h +++ b/include/uapi/linux/llc.h @@ -14,6 +14,7 @@ #define 
_UAPI__LINUX_LLC_H #include <linux/socket.h> +#include <linux/if.h> /* For IFHWADDRLEN. */ #define __LLC_SOCK_SIZE__ 16 /* sizeof(sockaddr_llc), word align. */ struct sockaddr_llc { diff --git a/include/uapi/linux/mpls.h b/include/uapi/linux/mpls.h index 24a6cb1aec86..77a19dfe3990 100644 --- a/include/uapi/linux/mpls.h +++ b/include/uapi/linux/mpls.h @@ -43,4 +43,34 @@ struct mpls_label { #define MPLS_LABEL_FIRST_UNRESERVED 16 /* RFC3032 */ +/* These are embedded into IFLA_STATS_AF_SPEC: + * [IFLA_STATS_AF_SPEC] + * -> [AF_MPLS] + * -> [MPLS_STATS_xxx] + * + * Attributes: + * [MPLS_STATS_LINK] = { + * struct mpls_link_stats + * } + */ +enum { + MPLS_STATS_UNSPEC, /* also used as 64bit pad attribute */ + MPLS_STATS_LINK, + __MPLS_STATS_MAX, +}; + +#define MPLS_STATS_MAX (__MPLS_STATS_MAX - 1) + +struct mpls_link_stats { + __u64 rx_packets; /* total packets received */ + __u64 tx_packets; /* total packets transmitted */ + __u64 rx_bytes; /* total bytes received */ + __u64 tx_bytes; /* total bytes transmitted */ + __u64 rx_errors; /* bad packets received */ + __u64 tx_errors; /* packet transmit problems */ + __u64 rx_dropped; /* packet dropped on receive */ + __u64 tx_dropped; /* packet dropped on transmit */ + __u64 rx_noroute; /* no route for packet dest */ +}; + #endif /* _UAPI_MPLS_H */ diff --git a/include/uapi/linux/mqueue.h b/include/uapi/linux/mqueue.h index d0a2b8e89813..bbd5116ea739 100644 --- a/include/uapi/linux/mqueue.h +++ b/include/uapi/linux/mqueue.h @@ -18,6 +18,8 @@ #ifndef _LINUX_MQUEUE_H #define _LINUX_MQUEUE_H +#include <linux/types.h> + #define MQ_PRIO_MAX 32768 /* per-uid limit of kernel memory used by mqueue, in bytes */ #define MQ_BYTES_MAX 819200 diff --git a/include/uapi/linux/mroute.h b/include/uapi/linux/mroute.h index cf943016930f..1fe4c1e7d66e 100644 --- a/include/uapi/linux/mroute.h +++ b/include/uapi/linux/mroute.h @@ -3,6 +3,7 @@ #include <linux/sockios.h> #include <linux/types.h> +#include <linux/in.h> /* For struct in_addr. */ /* Based on the MROUTING 3.5 defines primarily to keep * source compatibility with BSD. diff --git a/include/uapi/linux/mroute6.h b/include/uapi/linux/mroute6.h index 5062fb5751e1..ed5721148768 100644 --- a/include/uapi/linux/mroute6.h +++ b/include/uapi/linux/mroute6.h @@ -4,6 +4,7 @@ #include <linux/kernel.h> #include <linux/types.h> #include <linux/sockios.h> +#include <linux/in6.h> /* For struct sockaddr_in6. 
*/ /* * Based on the MROUTING 3.5 defines primarily to keep diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h index bd99a8d80f36..f3d16dbe09d6 100644 --- a/include/uapi/linux/neighbour.h +++ b/include/uapi/linux/neighbour.h @@ -26,6 +26,7 @@ enum { NDA_IFINDEX, NDA_MASTER, NDA_LINK_NETNSID, + NDA_SRC_VNI, __NDA_MAX }; diff --git a/include/uapi/linux/netconf.h b/include/uapi/linux/netconf.h index 45dfad509c4d..7e5f0f3e31bf 100644 --- a/include/uapi/linux/netconf.h +++ b/include/uapi/linux/netconf.h @@ -16,6 +16,7 @@ enum { NETCONFA_MC_FORWARDING, NETCONFA_PROXY_NEIGH, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN, + NETCONFA_INPUT, __NETCONFA_MAX }; #define NETCONFA_MAX (__NETCONFA_MAX - 1) diff --git a/include/uapi/linux/netfilter.h b/include/uapi/linux/netfilter.h index 7550e9176a54..c111a91adcc0 100644 --- a/include/uapi/linux/netfilter.h +++ b/include/uapi/linux/netfilter.h @@ -3,7 +3,6 @@ #include <linux/types.h> #include <linux/compiler.h> -#include <linux/sysctl.h> #include <linux/in.h> #include <linux/in6.h> diff --git a/include/uapi/linux/netfilter/nf_conntrack_common.h b/include/uapi/linux/netfilter/nf_conntrack_common.h index 6d074d14ee27..6a8e33dd4ecb 100644 --- a/include/uapi/linux/netfilter/nf_conntrack_common.h +++ b/include/uapi/linux/netfilter/nf_conntrack_common.h @@ -82,6 +82,10 @@ enum ip_conntrack_status { IPS_DYING_BIT = 9, IPS_DYING = (1 << IPS_DYING_BIT), + /* Bits that cannot be altered from userland. */ + IPS_UNCHANGEABLE_MASK = (IPS_NAT_DONE_MASK | IPS_NAT_MASK | + IPS_EXPECTED | IPS_CONFIRMED | IPS_DYING), + /* Connection has fixed timeout. */ IPS_FIXED_TIMEOUT_BIT = 10, IPS_FIXED_TIMEOUT = (1 << IPS_FIXED_TIMEOUT_BIT), diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index e3f27e09eb2b..05215d30fe5c 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -207,6 +207,7 @@ enum nft_chain_attributes { * @NFTA_RULE_COMPAT: compatibility specifications of the rule (NLA_NESTED: nft_rule_compat_attributes) * @NFTA_RULE_POSITION: numeric handle of the previous rule (NLA_U64) * @NFTA_RULE_USERDATA: user data (NLA_BINARY, NFT_USERDATA_MAXLEN) + * @NFTA_RULE_ID: uniquely identifies a rule in a transaction (NLA_U32) */ enum nft_rule_attributes { NFTA_RULE_UNSPEC, @@ -218,6 +219,7 @@ enum nft_rule_attributes { NFTA_RULE_POSITION, NFTA_RULE_USERDATA, NFTA_RULE_PAD, + NFTA_RULE_ID, __NFTA_RULE_MAX }; #define NFTA_RULE_MAX (__NFTA_RULE_MAX - 1) @@ -704,13 +706,32 @@ enum nft_payload_attributes { }; #define NFTA_PAYLOAD_MAX (__NFTA_PAYLOAD_MAX - 1) +enum nft_exthdr_flags { + NFT_EXTHDR_F_PRESENT = (1 << 0), +}; + +/** + * enum nft_exthdr_op - nf_tables match options + * + * @NFT_EXTHDR_OP_IPV6: match against ipv6 extension headers + * @NFT_EXTHDR_OP_TCPOPT: match against tcp options + */ +enum nft_exthdr_op { + NFT_EXTHDR_OP_IPV6, + NFT_EXTHDR_OP_TCPOPT, + __NFT_EXTHDR_OP_MAX +}; +#define NFT_EXTHDR_OP_MAX (__NFT_EXTHDR_OP_MAX - 1) + /** - * enum nft_exthdr_attributes - nf_tables IPv6 extension header expression netlink attributes + * enum nft_exthdr_attributes - nf_tables extension header expression netlink attributes * * @NFTA_EXTHDR_DREG: destination register (NLA_U32: nft_registers) * @NFTA_EXTHDR_TYPE: extension header type (NLA_U8) * @NFTA_EXTHDR_OFFSET: extension header offset (NLA_U32) * @NFTA_EXTHDR_LEN: extension header length (NLA_U32) + * @NFTA_EXTHDR_FLAGS: extension header flags (NLA_U32) + * @NFTA_EXTHDR_OP: option match type (NLA_U8) */ enum
nft_exthdr_attributes { NFTA_EXTHDR_UNSPEC, @@ -718,6 +739,8 @@ enum nft_exthdr_attributes { NFTA_EXTHDR_TYPE, NFTA_EXTHDR_OFFSET, NFTA_EXTHDR_LEN, + NFTA_EXTHDR_FLAGS, + NFTA_EXTHDR_OP, __NFTA_EXTHDR_MAX }; #define NFTA_EXTHDR_MAX (__NFTA_EXTHDR_MAX - 1) @@ -860,6 +883,11 @@ enum nft_rt_attributes { * @NFT_CT_PROTOCOL: conntrack layer 4 protocol * @NFT_CT_PROTO_SRC: conntrack layer 4 protocol source * @NFT_CT_PROTO_DST: conntrack layer 4 protocol destination + * @NFT_CT_LABELS: conntrack labels + * @NFT_CT_PKTS: conntrack packets + * @NFT_CT_BYTES: conntrack bytes + * @NFT_CT_AVGPKT: conntrack average bytes per packet + * @NFT_CT_ZONE: conntrack zone */ enum nft_ct_keys { NFT_CT_STATE, @@ -878,6 +906,8 @@ enum nft_ct_keys { NFT_CT_LABELS, NFT_CT_PKTS, NFT_CT_BYTES, + NFT_CT_AVGPKT, + NFT_CT_ZONE, }; /** diff --git a/include/uapi/linux/netfilter/nfnetlink.h b/include/uapi/linux/netfilter/nfnetlink.h index 4bb8cb7730e7..a09906a30d77 100644 --- a/include/uapi/linux/netfilter/nfnetlink.h +++ b/include/uapi/linux/netfilter/nfnetlink.h @@ -65,4 +65,16 @@ struct nfgenmsg { #define NFNL_MSG_BATCH_BEGIN NLMSG_MIN_TYPE #define NFNL_MSG_BATCH_END NLMSG_MIN_TYPE+1 +/** + * enum nfnl_batch_attributes - nfnetlink batch netlink attributes + * + * @NFNL_BATCH_GENID: generation ID for this changeset (NLA_U32) + */ +enum nfnl_batch_attributes { + NFNL_BATCH_UNSPEC, + NFNL_BATCH_GENID, + __NFNL_BATCH_MAX +}; +#define NFNL_BATCH_MAX (__NFNL_BATCH_MAX - 1) + #endif /* _UAPI_NFNETLINK_H */ diff --git a/include/uapi/linux/netfilter/nfnetlink_queue.h b/include/uapi/linux/netfilter/nfnetlink_queue.h index ae30841ff94e..d42f0396fe30 100644 --- a/include/uapi/linux/netfilter/nfnetlink_queue.h +++ b/include/uapi/linux/netfilter/nfnetlink_queue.h @@ -36,7 +36,7 @@ enum nfqnl_vlan_attr { NFQA_VLAN_TCI, /* __be16 skb htons(vlan_tci) */ __NFQA_VLAN_MAX, }; -#define NFQA_VLAN_MAX (__NFQA_VLAN_MAX + 1) +#define NFQA_VLAN_MAX (__NFQA_VLAN_MAX - 1) enum nfqnl_attr_type { NFQA_UNSPEC, diff --git a/include/uapi/linux/netfilter/xt_hashlimit.h b/include/uapi/linux/netfilter/xt_hashlimit.h index 3efc0ca18345..79da349f1060 100644 --- a/include/uapi/linux/netfilter/xt_hashlimit.h +++ b/include/uapi/linux/netfilter/xt_hashlimit.h @@ -2,6 +2,7 @@ #define _UAPI_XT_HASHLIMIT_H #include <linux/types.h> +#include <linux/limits.h> #include <linux/if.h> /* timings are in milliseconds. */ diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h index 0dba4e4ed2be..f3946a27bd07 100644 --- a/include/uapi/linux/netlink.h +++ b/include/uapi/linux/netlink.h @@ -27,6 +27,7 @@ #define NETLINK_ECRYPTFS 19 #define NETLINK_RDMA 20 #define NETLINK_CRYPTO 21 /* Crypto layer */ +#define NETLINK_SMC 22 /* SMC monitoring */ #define NETLINK_INET_DIAG NETLINK_SOCK_DIAG diff --git a/include/uapi/linux/nfsd/export.h b/include/uapi/linux/nfsd/export.h index 0df7bd5d2fb1..c3be256107c6 100644 --- a/include/uapi/linux/nfsd/export.h +++ b/include/uapi/linux/nfsd/export.h @@ -32,7 +32,8 @@ #define NFSEXP_ASYNC 0x0010 #define NFSEXP_GATHERED_WRITES 0x0020 #define NFSEXP_NOREADDIRPLUS 0x0040 -/* 80 100 currently unused */ +#define NFSEXP_SECURITY_LABEL 0x0080 +/* 0x100 currently unused */ #define NFSEXP_NOHIDE 0x0200 #define NFSEXP_NOSUBTREECHECK 0x0400 #define NFSEXP_NOAUTHNLM 0x0800 /* Don't authenticate NLM requests - just trust */ @@ -53,7 +54,7 @@ #define NFSEXP_PNFS 0x20000 /* All flags that we claim to support. (Note we don't support NOACL.) 
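 * (0x3FEFF = 0x3FE7F | NFSEXP_SECURITY_LABEL (0x0080).)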
*/ -#define NFSEXP_ALLFLAGS 0x3FE7F +#define NFSEXP_ALLFLAGS 0x3FEFF /* The flags that may vary depending on security flavor: */ #define NFSEXP_SECINFO_FLAGS (NFSEXP_READONLY | NFSEXP_ROOTSQUASH \ diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index bea982af9cfb..5ed257c4cd4e 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -10,7 +10,7 @@ * Copyright 2008, 2009 Luis R. Rodriguez <lrodriguez@atheros.com> * Copyright 2008 Jouni Malinen <jouni.malinen@atheros.com> * Copyright 2008 Colin McCabe <colin@cozybit.com> - * Copyright 2015 Intel Deutschland GmbH + * Copyright 2015-2017 Intel Deutschland GmbH * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -854,12 +854,15 @@ * cfg80211_scan_done(). * * @NL80211_CMD_START_NAN: Start NAN operation, identified by its - * %NL80211_ATTR_WDEV interface. This interface must have been previously - * created with %NL80211_CMD_NEW_INTERFACE. After it has been started, the - * NAN interface will create or join a cluster. This command must have a - * valid %NL80211_ATTR_NAN_MASTER_PREF attribute and optional - * %NL80211_ATTR_NAN_DUAL attributes. - * After this command NAN functions can be added. + * %NL80211_ATTR_WDEV interface. This interface must have been + * previously created with %NL80211_CMD_NEW_INTERFACE. After it + * has been started, the NAN interface will create or join a + * cluster. This command must have a valid + * %NL80211_ATTR_NAN_MASTER_PREF attribute and optional + * %NL80211_ATTR_BANDS attributes. If %NL80211_ATTR_BANDS is + * omitted or set to 0, it means don't-care and the device will + * decide what to use. After this command NAN functions can be + * added. * @NL80211_CMD_STOP_NAN: Stop the NAN operation, identified by * its %NL80211_ATTR_WDEV interface. * @NL80211_CMD_ADD_NAN_FUNCTION: Add a NAN function. The function is defined @@ -880,10 +883,14 @@ * This command is also used as a notification sent when a NAN function is * terminated. This will contain a %NL80211_ATTR_NAN_FUNC_INST_ID * and %NL80211_ATTR_COOKIE attributes. - * @NL80211_CMD_CHANGE_NAN_CONFIG: Change current NAN configuration. NAN - * must be operational (%NL80211_CMD_START_NAN was executed). - * It must contain at least one of the following attributes: - * %NL80211_ATTR_NAN_MASTER_PREF, %NL80211_ATTR_NAN_DUAL. + * @NL80211_CMD_CHANGE_NAN_CONFIG: Change current NAN + * configuration. NAN must be operational (%NL80211_CMD_START_NAN + * was executed). It must contain at least one of the following + * attributes: %NL80211_ATTR_NAN_MASTER_PREF, + * %NL80211_ATTR_BANDS. If %NL80211_ATTR_BANDS is omitted, the + * current configuration is not changed. If it is present but + * set to zero, the configuration is changed to don't-care + * (i.e. the device can decide what to do). * @NL80211_CMD_NAN_FUNC_MATCH: Notification sent when a match is reported. * This will contain a %NL80211_ATTR_NAN_MATCH nested attribute and * %NL80211_ATTR_COOKIE. @@ -1822,6 +1829,8 @@ enum nl80211_commands { * and remove functions. NAN notifications will be sent in unicast to that * socket. Without this attribute, any socket can add functions and the * notifications will be sent to the %NL80211_MCGRP_NAN multicast group. + * If set during %NL80211_CMD_ASSOCIATE or %NL80211_CMD_CONNECT the + * station will deauthenticate when the socket is closed. 
 * * @NL80211_ATTR_TDLS_INITIATOR: flag attribute indicating the current end is * the TDLS link initiator. @@ -1961,10 +1970,13 @@ enum nl80211_commands { * %NL80211_CMD_CHANGE_NAN_CONFIG. Its type is u8 and it can't be 0. * Also, values 1 and 255 are reserved for certification purposes and * should not be used during a normal device operation. - * @NL80211_ATTR_NAN_DUAL: NAN dual band operation config (see - * &enum nl80211_nan_dual_band_conf). This attribute is used with - * %NL80211_CMD_START_NAN and optionally with - * %NL80211_CMD_CHANGE_NAN_CONFIG. + * @NL80211_ATTR_BANDS: operating bands configuration. This is a u32 + * bitmask of BIT(NL80211_BAND_*) as described in %enum + * nl80211_band. For instance, for NL80211_BAND_2GHZ, bit 0 + * would be set. This attribute is used with + * %NL80211_CMD_START_NAN and %NL80211_CMD_CHANGE_NAN_CONFIG, and + * it is optional. If no bands are set, it means don't-care and + * the device will decide what to use. * @NL80211_ATTR_NAN_FUNC: a function that can be added to NAN. See * &enum nl80211_nan_func_attributes for description of this nested * attribute. @@ -1982,6 +1994,24 @@ enum nl80211_commands { * @NL80211_ATTR_BSSID: The BSSID of the AP. Note that %NL80211_ATTR_MAC is also * used in various commands/events for specifying the BSSID. * + * @NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI: Relative RSSI threshold by which + * other BSSs have to be better or slightly worse than the current + * connected BSS so that they get reported to user space. + * This will give an opportunity to userspace to consider connecting to + * other matching BSSs which have better or slightly worse RSSI than + * the current connected BSS by using an offloaded operation to avoid + * unnecessary wakeups. + * + * @NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST: When present the RSSI level for BSSs in + * the specified band is to be adjusted before doing + * %NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI based comparison to figure out + * better BSSs. The attribute value is a packed structure + * value as specified by &struct nl80211_bss_select_rssi_adjust. + * + * @NL80211_ATTR_TIMEOUT_REASON: The reason for which an operation timed out. + * u32 attribute with an &enum nl80211_timeout_reason value. This is used, + * e.g., with %NL80211_CMD_CONNECT event. + * * @NUM_NL80211_ATTR: total number of nl80211_attrs available * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use @@ -2377,7 +2407,7 @@ enum nl80211_attrs { NL80211_ATTR_MESH_PEER_AID, NL80211_ATTR_NAN_MASTER_PREF, - NL80211_ATTR_NAN_DUAL, + NL80211_ATTR_BANDS, NL80211_ATTR_NAN_FUNC, NL80211_ATTR_NAN_MATCH, @@ -2388,6 +2418,11 @@ enum nl80211_attrs { NL80211_ATTR_BSSID, + NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI, + NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST, + + NL80211_ATTR_TIMEOUT_REASON, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, @@ -3080,6 +3115,13 @@ enum nl80211_reg_rule_attr { * how this API was implemented in the past. Also, due to the same problem, * the only way to create a matchset with only an RSSI filter (with this * attribute) is if there's only a single matchset with the RSSI attribute. + * @NL80211_SCHED_SCAN_MATCH_ATTR_RELATIVE_RSSI: Flag indicating whether + * %NL80211_SCHED_SCAN_MATCH_ATTR_RSSI is to be used as absolute RSSI or + * relative to the current BSS's RSSI. + * @NL80211_SCHED_SCAN_MATCH_ATTR_RSSI_ADJUST: When present the RSSI level for + * BSSes in the specified band is to be adjusted before doing + * RSSI-based BSS selection.
The attribute value is a packed structure + * value as specified by &struct nl80211_bss_select_rssi_adjust. * @NL80211_SCHED_SCAN_MATCH_ATTR_MAX: highest scheduled scan filter * attribute number currently defined * @__NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST: internal use @@ -3089,6 +3131,8 @@ enum nl80211_sched_scan_match_attr { NL80211_SCHED_SCAN_MATCH_ATTR_SSID, NL80211_SCHED_SCAN_MATCH_ATTR_RSSI, + NL80211_SCHED_SCAN_MATCH_ATTR_RELATIVE_RSSI, + NL80211_SCHED_SCAN_MATCH_ATTR_RSSI_ADJUST, /* keep last */ __NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST, @@ -3918,6 +3962,8 @@ enum nl80211_ps_state { * %NL80211_CMD_NOTIFY_CQM. Set to 0 to turn off TX error reporting. * @NL80211_ATTR_CQM_BEACON_LOSS_EVENT: flag attribute that's set in a beacon * loss event + * @NL80211_ATTR_CQM_RSSI_LEVEL: the RSSI value in dBm that triggered the + * RSSI threshold event. * @__NL80211_ATTR_CQM_AFTER_LAST: internal * @NL80211_ATTR_CQM_MAX: highest key attribute */ @@ -3931,6 +3977,7 @@ enum nl80211_attr_cqm { NL80211_ATTR_CQM_TXE_PKTS, NL80211_ATTR_CQM_TXE_INTVL, NL80211_ATTR_CQM_BEACON_LOSS_EVENT, + NL80211_ATTR_CQM_RSSI_LEVEL, /* keep last */ __NL80211_ATTR_CQM_AFTER_LAST, @@ -4699,6 +4746,13 @@ enum nl80211_feature_flags { * configuration (AP/mesh) with VHT rates. * @NL80211_EXT_FEATURE_FILS_STA: This driver supports Fast Initial Link Setup * with user space SME (NL80211_CMD_AUTHENTICATE) in station mode. + * @NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA: This driver supports randomized TA + * in @NL80211_CMD_FRAME while not associated. + * @NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA_CONNECTED: This driver supports + * randomized TA in @NL80211_CMD_FRAME while associated. + * @NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI: The driver supports sched_scan + * for reporting BSSs with better RSSI than the current connected BSS + * (%NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI). * * @NUM_NL80211_EXT_FEATURES: number of extended features. * @MAX_NL80211_EXT_FEATURES: highest extended feature index. @@ -4714,6 +4768,9 @@ enum nl80211_ext_feature_index { NL80211_EXT_FEATURE_BEACON_RATE_HT, NL80211_EXT_FEATURE_BEACON_RATE_VHT, NL80211_EXT_FEATURE_FILS_STA, + NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA, + NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA_CONNECTED, + NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI, /* add new features before the definition below */ NUM_NL80211_EXT_FEATURES, @@ -4753,6 +4810,21 @@ enum nl80211_connect_failed_reason { }; /** + * enum nl80211_timeout_reason - timeout reasons + * + * @NL80211_TIMEOUT_UNSPECIFIED: Timeout reason unspecified. + * @NL80211_TIMEOUT_SCAN: Scan (AP discovery) timed out. + * @NL80211_TIMEOUT_AUTH: Authentication timed out. + * @NL80211_TIMEOUT_ASSOC: Association timed out. + */ +enum nl80211_timeout_reason { + NL80211_TIMEOUT_UNSPECIFIED, + NL80211_TIMEOUT_SCAN, + NL80211_TIMEOUT_AUTH, + NL80211_TIMEOUT_ASSOC, +}; + +/** * enum nl80211_scan_flags - scan request control flags * * Scan request control flags are used to control the handling @@ -4966,8 +5038,9 @@ enum nl80211_sched_scan_plan { /** * struct nl80211_bss_select_rssi_adjust - RSSI adjustment parameters. * - * @band: band of BSS that must match for RSSI value adjustment. - * @delta: value used to adjust the RSSI value of matching BSS. + * @band: band of BSS that must match for RSSI value adjustment. The value + * of this field is according to &enum nl80211_band. + * @delta: value used to adjust the RSSI value of matching BSS in dB. 
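+ *
+ * A minimal sketch (illustrative, not part of this header): to have BSSes
+ * on the 5 GHz band boosted by 10 dB before the relative-RSSI comparison,
+ * userspace could pass an attribute payload built as
+ *
+ *	struct nl80211_bss_select_rssi_adjust adj = {
+ *		.band = NL80211_BAND_5GHZ,
+ *		.delta = 10,
+ *	};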
*/ struct nl80211_bss_select_rssi_adjust { __u8 band; @@ -5008,21 +5081,6 @@ enum nl80211_bss_select_attr { }; /** - * enum nl80211_nan_dual_band_conf - NAN dual band configuration - * - * Defines the NAN dual band mode of operation - * - * @NL80211_NAN_BAND_DEFAULT: device default mode - * @NL80211_NAN_BAND_2GHZ: 2.4GHz mode - * @NL80211_NAN_BAND_5GHZ: 5GHz mode - */ -enum nl80211_nan_dual_band_conf { - NL80211_NAN_BAND_DEFAULT = 1 << 0, - NL80211_NAN_BAND_2GHZ = 1 << 1, - NL80211_NAN_BAND_5GHZ = 1 << 2, -}; - -/** * enum nl80211_nan_function_type - NAN function type * * Defines the function type of a NAN function diff --git a/include/uapi/linux/nsfs.h b/include/uapi/linux/nsfs.h index 3af617230d1b..1a3ca79f466b 100644 --- a/include/uapi/linux/nsfs.h +++ b/include/uapi/linux/nsfs.h @@ -6,8 +6,13 @@ #define NSIO 0xb7 /* Returns a file descriptor that refers to an owning user namespace */ -#define NS_GET_USERNS _IO(NSIO, 0x1) +#define NS_GET_USERNS _IO(NSIO, 0x1) /* Returns a file descriptor that refers to a parent namespace */ -#define NS_GET_PARENT _IO(NSIO, 0x2) +#define NS_GET_PARENT _IO(NSIO, 0x2) +/* Returns the type of namespace (CLONE_NEW* value) referred to by + file descriptor */ +#define NS_GET_NSTYPE _IO(NSIO, 0x3) +/* Get owner UID (in the caller's user namespace) for a user namespace */ +#define NS_GET_OWNER_UID _IO(NSIO, 0x4) #endif /* __LINUX_NSFS_H */ diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index 375d812fea36..7f41f7d0000f 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h @@ -1,6 +1,6 @@ /* - * Copyright (c) 2007-2013 Nicira, Inc. + * Copyright (c) 2007-2017 Nicira, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public @@ -331,6 +331,8 @@ enum ovs_key_attr { OVS_KEY_ATTR_CT_ZONE, /* u16 connection tracking zone. */ OVS_KEY_ATTR_CT_MARK, /* u32 connection tracking mark */ OVS_KEY_ATTR_CT_LABELS, /* 16-octet connection tracking label */ + OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4, /* struct ovs_key_ct_tuple_ipv4 */ + OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6, /* struct ovs_key_ct_tuple_ipv6 */ #ifdef __KERNEL__ OVS_KEY_ATTR_TUNNEL_INFO, /* struct ip_tunnel_info */ @@ -446,9 +448,13 @@ struct ovs_key_nd { __u8 nd_tll[ETH_ALEN]; }; -#define OVS_CT_LABELS_LEN 16 +#define OVS_CT_LABELS_LEN_32 4 +#define OVS_CT_LABELS_LEN (OVS_CT_LABELS_LEN_32 * sizeof(__u32)) struct ovs_key_ct_labels { - __u8 ct_labels[OVS_CT_LABELS_LEN]; + union { + __u8 ct_labels[OVS_CT_LABELS_LEN]; + __u32 ct_labels_32[OVS_CT_LABELS_LEN_32]; + }; }; /* OVS_KEY_ATTR_CT_STATE flags */ @@ -468,6 +474,22 @@ struct ovs_key_ct_labels { #define OVS_CS_F_NAT_MASK (OVS_CS_F_SRC_NAT | OVS_CS_F_DST_NAT) +struct ovs_key_ct_tuple_ipv4 { + __be32 ipv4_src; + __be32 ipv4_dst; + __be16 src_port; + __be16 dst_port; + __u8 ipv4_proto; +}; + +struct ovs_key_ct_tuple_ipv6 { + __be32 ipv6_src[4]; + __be32 ipv6_dst[4]; + __be16 src_port; + __be16 dst_port; + __u8 ipv6_proto; +}; + /** * enum ovs_flow_attr - attributes for %OVS_FLOW_* commands. * @OVS_FLOW_ATTR_KEY: Nested %OVS_KEY_ATTR_* attributes specifying the flow @@ -652,6 +674,10 @@ struct ovs_action_hash { * @OVS_CT_ATTR_HELPER: variable length string defining conntrack ALG. * @OVS_CT_ATTR_NAT: Nested OVS_NAT_ATTR_* for performing L3 network address * translation (NAT) on the packet. 
+ * @OVS_CT_ATTR_FORCE_COMMIT: Like %OVS_CT_ATTR_COMMIT, but instead of doing + * nothing if the connection is already committed will check that the current + * packet is in conntrack entry's original direction. If directionality does + * not match, will delete the existing conntrack entry and commit a new one. */ enum ovs_ct_attr { OVS_CT_ATTR_UNSPEC, @@ -662,6 +688,7 @@ enum ovs_ct_attr { OVS_CT_ATTR_HELPER, /* netlink helper to assist detection of related connections. */ OVS_CT_ATTR_NAT, /* Nested OVS_NAT_ATTR_* */ + OVS_CT_ATTR_FORCE_COMMIT, /* No argument */ __OVS_CT_ATTR_MAX }; diff --git a/include/uapi/linux/packet_diag.h b/include/uapi/linux/packet_diag.h index d08c63f3dd6f..0c5d5dd61b6a 100644 --- a/include/uapi/linux/packet_diag.h +++ b/include/uapi/linux/packet_diag.h @@ -64,7 +64,7 @@ struct packet_diag_mclist { __u32 pdmc_count; __u16 pdmc_type; __u16 pdmc_alen; - __u8 pdmc_addr[MAX_ADDR_LEN]; + __u8 pdmc_addr[32]; /* MAX_ADDR_LEN */ }; struct packet_diag_ring { diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h index 174d1147081b..634c9c44ed6c 100644 --- a/include/uapi/linux/pci_regs.h +++ b/include/uapi/linux/pci_regs.h @@ -682,6 +682,7 @@ #define PCI_EXT_CAP_ID_PMUX 0x1A /* Protocol Multiplexing */ #define PCI_EXT_CAP_ID_PASID 0x1B /* Process Address Space ID */ #define PCI_EXT_CAP_ID_DPC 0x1D /* Downstream Port Containment */ +#define PCI_EXT_CAP_ID_L1SS 0x1E /* L1 PM Substates */ #define PCI_EXT_CAP_ID_PTM 0x1F /* Precision Time Measurement */ #define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_PTM @@ -973,6 +974,7 @@ #define PCI_EXP_DPC_STATUS 8 /* DPC Status */ #define PCI_EXP_DPC_STATUS_TRIGGER 0x01 /* Trigger Status */ #define PCI_EXP_DPC_STATUS_INTERRUPT 0x08 /* Interrupt Status */ +#define PCI_EXP_DPC_RP_BUSY 0x10 /* Root Port Busy */ #define PCI_EXP_DPC_SOURCE_ID 10 /* DPC Source Identifier */ @@ -985,4 +987,19 @@ #define PCI_PTM_CTRL_ENABLE 0x00000001 /* PTM enable */ #define PCI_PTM_CTRL_ROOT 0x00000002 /* Root select */ +/* L1 PM Substates */ +#define PCI_L1SS_CAP 4 /* capability register */ +#define PCI_L1SS_CAP_PCIPM_L1_2 1 /* PCI PM L1.2 Support */ +#define PCI_L1SS_CAP_PCIPM_L1_1 2 /* PCI PM L1.1 Support */ +#define PCI_L1SS_CAP_ASPM_L1_2 4 /* ASPM L1.2 Support */ +#define PCI_L1SS_CAP_ASPM_L1_1 8 /* ASPM L1.1 Support */ +#define PCI_L1SS_CAP_L1_PM_SS 16 /* L1 PM Substates Support */ +#define PCI_L1SS_CTL1 8 /* Control Register 1 */ +#define PCI_L1SS_CTL1_PCIPM_L1_2 1 /* PCI PM L1.2 Enable */ +#define PCI_L1SS_CTL1_PCIPM_L1_1 2 /* PCI PM L1.1 Support */ +#define PCI_L1SS_CTL1_ASPM_L1_2 4 /* ASPM L1.2 Support */ +#define PCI_L1SS_CTL1_ASPM_L1_1 8 /* ASPM L1.1 Support */ +#define PCI_L1SS_CTL1_L1SS_MASK 0x0000000F +#define PCI_L1SS_CTL2 0xC /* Control Register 2 */ + #endif /* LINUX_PCI_REGS_H */ diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index a4dcd88ec271..7a69f2a4ca0c 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -4,60 +4,7 @@ #include <linux/types.h> #include <linux/pkt_sched.h> -#ifdef __KERNEL__ -/* I think i could have done better macros ; for now this is stolen from - * some arch/mips code - jhs -*/ -#define _TC_MAKE32(x) ((x)) - -#define _TC_MAKEMASK1(n) (_TC_MAKE32(1) << _TC_MAKE32(n)) -#define _TC_MAKEMASK(v,n) (_TC_MAKE32((_TC_MAKE32(1)<<(v))-1) << _TC_MAKE32(n)) -#define _TC_MAKEVALUE(v,n) (_TC_MAKE32(v) << _TC_MAKE32(n)) -#define _TC_GETVALUE(v,n,m) ((_TC_MAKE32(v) & _TC_MAKE32(m)) >> _TC_MAKE32(n)) - -/* verdict bit breakdown - * -bit 0: when set -> this packet has 
been munged already - -bit 1: when set -> It is ok to munge this packet - -bit 2,3,4,5: Reclassify counter - sort of reverse TTL - if exceeded -assume loop - -bit 6,7: Where this packet was last seen -0: Above the transmit example at the socket level -1: on the Ingress -2: on the Egress - -bit 8: when set --> Request not to classify on ingress. - -bits 9,10,11: redirect counter - redirect TTL. Loop avoidance - - * - * */ - -#define S_TC_FROM _TC_MAKE32(6) -#define M_TC_FROM _TC_MAKEMASK(2,S_TC_FROM) -#define G_TC_FROM(x) _TC_GETVALUE(x,S_TC_FROM,M_TC_FROM) -#define V_TC_FROM(x) _TC_MAKEVALUE(x,S_TC_FROM) -#define SET_TC_FROM(v,n) ((V_TC_FROM(n)) | (v & ~M_TC_FROM)) -#define AT_STACK 0x0 -#define AT_INGRESS 0x1 -#define AT_EGRESS 0x2 - -#define TC_NCLS _TC_MAKEMASK1(8) -#define SET_TC_NCLS(v) ( TC_NCLS | (v & ~TC_NCLS)) -#define CLR_TC_NCLS(v) ( v & ~TC_NCLS) - -#define S_TC_AT _TC_MAKE32(12) -#define M_TC_AT _TC_MAKEMASK(2,S_TC_AT) -#define G_TC_AT(x) _TC_GETVALUE(x,S_TC_AT,M_TC_AT) -#define V_TC_AT(x) _TC_MAKEVALUE(x,S_TC_AT) -#define SET_TC_AT(v,n) ((V_TC_AT(n)) | (v & ~M_TC_AT)) - -#define MAX_REC_LOOP 4 -#define MAX_RED_LOOP 4 -#endif +#define TC_COOKIE_MAX_SIZE 16 /* Action attributes */ enum { @@ -67,6 +14,7 @@ enum { TCA_ACT_INDEX, TCA_ACT_STATS, TCA_ACT_PAD, + TCA_ACT_COOKIE, __TCA_ACT_MAX }; @@ -155,8 +103,10 @@ enum { #define TCA_POLICE_MAX (__TCA_POLICE_MAX - 1) /* tca flags definitions */ -#define TCA_CLS_FLAGS_SKIP_HW (1 << 0) -#define TCA_CLS_FLAGS_SKIP_SW (1 << 1) +#define TCA_CLS_FLAGS_SKIP_HW (1 << 0) /* don't offload filter to HW */ +#define TCA_CLS_FLAGS_SKIP_SW (1 << 1) /* don't use filter in SW */ +#define TCA_CLS_FLAGS_IN_HW (1 << 2) /* filter is offloaded to HW */ +#define TCA_CLS_FLAGS_NOT_IN_HW (1 << 3) /* filter isn't offloaded to HW */ /* U32 filters */ @@ -471,6 +421,17 @@ enum { TCA_FLOWER_KEY_ICMPV6_TYPE, /* u8 */ TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,/* u8 */ + TCA_FLOWER_KEY_ARP_SIP, /* be32 */ + TCA_FLOWER_KEY_ARP_SIP_MASK, /* be32 */ + TCA_FLOWER_KEY_ARP_TIP, /* be32 */ + TCA_FLOWER_KEY_ARP_TIP_MASK, /* be32 */ + TCA_FLOWER_KEY_ARP_OP, /* u8 */ + TCA_FLOWER_KEY_ARP_OP_MASK, /* u8 */ + TCA_FLOWER_KEY_ARP_SHA, /* ETH_ALEN */ + TCA_FLOWER_KEY_ARP_SHA_MASK, /* ETH_ALEN */ + TCA_FLOWER_KEY_ARP_THA, /* ETH_ALEN */ + TCA_FLOWER_KEY_ARP_THA_MASK, /* ETH_ALEN */ + __TCA_FLOWER_MAX, }; diff --git a/include/uapi/linux/psample.h b/include/uapi/linux/psample.h new file mode 100644 index 000000000000..ed48996ec0e8 --- /dev/null +++ b/include/uapi/linux/psample.h @@ -0,0 +1,35 @@ +#ifndef __UAPI_PSAMPLE_H +#define __UAPI_PSAMPLE_H + +enum { + /* sampled packet metadata */ + PSAMPLE_ATTR_IIFINDEX, + PSAMPLE_ATTR_OIFINDEX, + PSAMPLE_ATTR_ORIGSIZE, + PSAMPLE_ATTR_SAMPLE_GROUP, + PSAMPLE_ATTR_GROUP_SEQ, + PSAMPLE_ATTR_SAMPLE_RATE, + PSAMPLE_ATTR_DATA, + + /* commands attributes */ + PSAMPLE_ATTR_GROUP_REFCOUNT, + + __PSAMPLE_ATTR_MAX +}; + +enum psample_command { + PSAMPLE_CMD_SAMPLE, + PSAMPLE_CMD_GET_GROUP, + PSAMPLE_CMD_NEW_GROUP, + PSAMPLE_CMD_DEL_GROUP, +}; + +/* Can be overridden at runtime by module option */ +#define PSAMPLE_ATTR_MAX (__PSAMPLE_ATTR_MAX - 1) + +#define PSAMPLE_NL_MCGRP_CONFIG_NAME "config" +#define PSAMPLE_NL_MCGRP_SAMPLE_NAME "packets" +#define PSAMPLE_GENL_NAME "psample" +#define PSAMPLE_GENL_VERSION 1 + +#endif diff --git a/include/uapi/linux/rds.h b/include/uapi/linux/rds.h index 0f9265cb2a96..198892b95f09 100644 --- a/include/uapi/linux/rds.h +++ b/include/uapi/linux/rds.h @@ -35,6 +35,7 @@ #define _LINUX_RDS_H #include <linux/types.h> +#include 
<linux/socket.h> /* For __kernel_sockaddr_storage. */ #define RDS_IB_ABI_VERSION 0x301 @@ -52,6 +53,13 @@ #define RDS_GET_MR_FOR_DEST 7 #define SO_RDS_TRANSPORT 8 +/* Socket option to tap receive path latency + * SO_RDS: SO_RDS_MSG_RXPATH_LATENCY + * Format used: struct rds_rx_trace_so + */ +#define SO_RDS_MSG_RXPATH_LATENCY 10 + + /* supported values for SO_RDS_TRANSPORT */ #define RDS_TRANS_IB 0 #define RDS_TRANS_IWARP 1 @@ -77,6 +85,12 @@ * the same as for the GET_MR setsockopt. * RDS_CMSG_RDMA_STATUS (recvmsg) * Returns the status of a completed RDMA operation. + * RDS_CMSG_RXPATH_LATENCY(recvmsg) + * Returns RDS message latencies in various stages of the receive + * path in ns. It is set per socket using the SO_RDS_MSG_RXPATH_LATENCY + * socket option. Legitimate points are defined in + * enum rds_message_rxpath_latency. More points can be added in + * the future. CMSG format is struct rds_cmsg_rx_trace. */ #define RDS_CMSG_RDMA_ARGS 1 #define RDS_CMSG_RDMA_DEST 2 @@ -87,6 +101,7 @@ #define RDS_CMSG_ATOMIC_CSWP 7 #define RDS_CMSG_MASKED_ATOMIC_FADD 8 #define RDS_CMSG_MASKED_ATOMIC_CSWP 9 +#define RDS_CMSG_RXPATH_LATENCY 11 #define RDS_INFO_FIRST 10000 #define RDS_INFO_COUNTERS 10000 @@ -103,8 +118,8 @@ #define RDS_INFO_LAST 10010 struct rds_info_counter { - uint8_t name[32]; - uint64_t value; + __u8 name[32]; + __u64 value; } __attribute__((packed)); #define RDS_INFO_CONNECTION_FLAG_SENDING 0x01 @@ -114,35 +129,35 @@ struct rds_info_counter { #define TRANSNAMSIZ 16 struct rds_info_connection { - uint64_t next_tx_seq; - uint64_t next_rx_seq; + __u64 next_tx_seq; + __u64 next_rx_seq; __be32 laddr; __be32 faddr; - uint8_t transport[TRANSNAMSIZ]; /* null term ascii */ - uint8_t flags; + __u8 transport[TRANSNAMSIZ]; /* null term ascii */ + __u8 flags; } __attribute__((packed)); #define RDS_INFO_MESSAGE_FLAG_ACK 0x01 #define RDS_INFO_MESSAGE_FLAG_FAST_ACK 0x02 struct rds_info_message { - uint64_t seq; - uint32_t len; + __u64 seq; + __u32 len; __be32 laddr; __be32 faddr; __be16 lport; __be16 fport; - uint8_t flags; + __u8 flags; } __attribute__((packed)); struct rds_info_socket { - uint32_t sndbuf; + __u32 sndbuf; __be32 bound_addr; __be32 connected_addr; __be16 bound_port; __be16 connected_port; - uint32_t rcvbuf; - uint64_t inum; + __u32 rcvbuf; + __u64 inum; } __attribute__((packed)); struct rds_info_tcp_socket { @@ -150,25 +165,44 @@ struct rds_info_tcp_socket { __be16 local_port; __be32 peer_addr; __be16 peer_port; - uint64_t hdr_rem; - uint64_t data_rem; - uint32_t last_sent_nxt; - uint32_t last_expected_una; - uint32_t last_seen_una; + __u64 hdr_rem; + __u64 data_rem; + __u32 last_sent_nxt; + __u32 last_expected_una; + __u32 last_seen_una; } __attribute__((packed)); #define RDS_IB_GID_LEN 16 struct rds_info_rdma_connection { __be32 src_addr; __be32 dst_addr; - uint8_t src_gid[RDS_IB_GID_LEN]; - uint8_t dst_gid[RDS_IB_GID_LEN]; - - uint32_t max_send_wr; - uint32_t max_recv_wr; - uint32_t max_send_sge; - uint32_t rdma_mr_max; - uint32_t rdma_mr_size; + __u8 src_gid[RDS_IB_GID_LEN]; + __u8 dst_gid[RDS_IB_GID_LEN]; + + __u32 max_send_wr; + __u32 max_recv_wr; + __u32 max_send_sge; + __u32 rdma_mr_max; + __u32 rdma_mr_size; +}; + +/* RDS message Receive Path Latency points */ +enum rds_message_rxpath_latency { + RDS_MSG_RX_HDR_TO_DGRAM_START = 0, + RDS_MSG_RX_DGRAM_REASSEMBLE, + RDS_MSG_RX_DGRAM_DELIVERED, + RDS_MSG_RX_DGRAM_TRACE_MAX +}; + +struct rds_rx_trace_so { + __u8 rx_traces; + __u8 rx_trace_pos[RDS_MSG_RX_DGRAM_TRACE_MAX]; +}; + +struct rds_cmsg_rx_trace { + __u8 rx_traces; + __u8
rx_trace_pos[RDS_MSG_RX_DGRAM_TRACE_MAX]; + __u64 rx_trace[RDS_MSG_RX_DGRAM_TRACE_MAX]; }; /* @@ -209,70 +243,70 @@ struct rds_info_rdma_connection { * (so that the application does not have to worry about * alignment). */ -typedef uint64_t rds_rdma_cookie_t; +typedef __u64 rds_rdma_cookie_t; struct rds_iovec { - uint64_t addr; - uint64_t bytes; + __u64 addr; + __u64 bytes; }; struct rds_get_mr_args { struct rds_iovec vec; - uint64_t cookie_addr; - uint64_t flags; + __u64 cookie_addr; + __u64 flags; }; struct rds_get_mr_for_dest_args { - struct sockaddr_storage dest_addr; + struct __kernel_sockaddr_storage dest_addr; struct rds_iovec vec; - uint64_t cookie_addr; - uint64_t flags; + __u64 cookie_addr; + __u64 flags; }; struct rds_free_mr_args { rds_rdma_cookie_t cookie; - uint64_t flags; + __u64 flags; }; struct rds_rdma_args { rds_rdma_cookie_t cookie; struct rds_iovec remote_vec; - uint64_t local_vec_addr; - uint64_t nr_local; - uint64_t flags; - uint64_t user_token; + __u64 local_vec_addr; + __u64 nr_local; + __u64 flags; + __u64 user_token; }; struct rds_atomic_args { rds_rdma_cookie_t cookie; - uint64_t local_addr; - uint64_t remote_addr; + __u64 local_addr; + __u64 remote_addr; union { struct { - uint64_t compare; - uint64_t swap; + __u64 compare; + __u64 swap; } cswp; struct { - uint64_t add; + __u64 add; } fadd; struct { - uint64_t compare; - uint64_t swap; - uint64_t compare_mask; - uint64_t swap_mask; + __u64 compare; + __u64 swap; + __u64 compare_mask; + __u64 swap_mask; } m_cswp; struct { - uint64_t add; - uint64_t nocarry_mask; + __u64 add; + __u64 nocarry_mask; } m_fadd; }; - uint64_t flags; - uint64_t user_token; + __u64 flags; + __u64 user_token; }; struct rds_rdma_notify { - uint64_t user_token; - int32_t status; + __u64 user_token; + __s32 status; }; #define RDS_RDMA_SUCCESS 0 diff --git a/include/uapi/linux/rpmsg.h b/include/uapi/linux/rpmsg.h new file mode 100644 index 000000000000..dedc226e0d3f --- /dev/null +++ b/include/uapi/linux/rpmsg.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2016, Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _UAPI_RPMSG_H_ +#define _UAPI_RPMSG_H_ + +#include <linux/ioctl.h> +#include <linux/types.h> + +/** + * struct rpmsg_endpoint_info - endpoint info representation + * @name: name of service + * @src: local address + * @dst: destination address + */ +struct rpmsg_endpoint_info { + char name[32]; + __u32 src; + __u32 dst; +}; + +#define RPMSG_CREATE_EPT_IOCTL _IOW(0xb5, 0x1, struct rpmsg_endpoint_info) +#define RPMSG_DESTROY_EPT_IOCTL _IO(0xb5, 0x2) + +#endif diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h index e14377f2ec27..6546917d605a 100644 --- a/include/uapi/linux/rtnetlink.h +++ b/include/uapi/linux/rtnetlink.h @@ -350,6 +350,7 @@ struct rtnexthop { #define RTNH_F_ONLINK 4 /* Gateway is forced on link */ #define RTNH_F_OFFLOAD 8 /* offloaded route */ #define RTNH_F_LINKDOWN 16 /* carrier-down on nexthop */ +#define RTNH_F_UNRESOLVED 32 /* The entry is unresolved (ipmr) */ #define RTNH_COMPARE_MASK (RTNH_F_DEAD | RTNH_F_LINKDOWN | RTNH_F_OFFLOAD) @@ -657,6 +658,8 @@ enum rtnetlink_groups { #define RTNLGRP_MPLS_ROUTE RTNLGRP_MPLS_ROUTE RTNLGRP_NSID, #define RTNLGRP_NSID RTNLGRP_NSID + RTNLGRP_MPLS_NETCONF, +#define RTNLGRP_MPLS_NETCONF RTNLGRP_MPLS_NETCONF __RTNLGRP_MAX }; #define RTNLGRP_MAX (__RTNLGRP_MAX - 1) diff --git a/include/uapi/linux/sched/types.h b/include/uapi/linux/sched/types.h new file mode 100644 index 000000000000..307acbc82d80 --- /dev/null +++ b/include/uapi/linux/sched/types.h @@ -0,0 +1,74 @@ +#ifndef _UAPI_LINUX_SCHED_TYPES_H +#define _UAPI_LINUX_SCHED_TYPES_H + +#include <linux/types.h> + +struct sched_param { + int sched_priority; +}; + +#define SCHED_ATTR_SIZE_VER0 48 /* sizeof first published struct */ + +/* + * Extended scheduling parameters data structure. + * + * This is needed because the original struct sched_param can not be + * altered without introducing ABI issues with legacy applications + * (e.g., in sched_getparam()). + * + * However, the possibility of specifying more than just a priority for + * the tasks may be useful for a wide variety of application fields, e.g., + * multimedia, streaming, automation and control, and many others. + * + * This variant (sched_attr) is meant at describing a so-called + * sporadic time-constrained task. In such model a task is specified by: + * - the activation period or minimum instance inter-arrival time; + * - the maximum (or average, depending on the actual scheduling + * discipline) computation time of all instances, a.k.a. runtime; + * - the deadline (relative to the actual activation time) of each + * instance. + * Very briefly, a periodic (sporadic) task asks for the execution of + * some specific computation --which is typically called an instance-- + * (at most) every period. Moreover, each instance typically lasts no more + * than the runtime and must be completed by time instant t equal to + * the instance activation time + the deadline. + * + * This is reflected by the actual fields of the sched_attr structure: + * + * @size size of the structure, for fwd/bwd compat. 
+ * + * @sched_policy task's scheduling policy + * @sched_flags for customizing the scheduler behaviour + * @sched_nice task's nice value (SCHED_NORMAL/BATCH) + * @sched_priority task's static priority (SCHED_FIFO/RR) + * @sched_deadline representative of the task's deadline + * @sched_runtime representative of the task's runtime + * @sched_period representative of the task's period + * + * Given this task model, there are a multiplicity of scheduling algorithms + * and policies, that can be used to ensure all the tasks will make their + * timing constraints. + * + * As of now, the SCHED_DEADLINE policy (sched_dl scheduling class) is the + * only user of this new interface. More information about the algorithm + * available in the scheduling class file or in Documentation/. + */ +struct sched_attr { + u32 size; + + u32 sched_policy; + u64 sched_flags; + + /* SCHED_NORMAL, SCHED_BATCH */ + s32 sched_nice; + + /* SCHED_FIFO, SCHED_RR */ + u32 sched_priority; + + /* SCHED_DEADLINE */ + u64 sched_runtime; + u64 sched_deadline; + u64 sched_period; +}; + +#endif /* _UAPI_LINUX_SCHED_TYPES_H */ diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h index a406adcc0793..d3ae381fcf33 100644 --- a/include/uapi/linux/sctp.h +++ b/include/uapi/linux/sctp.h @@ -115,6 +115,10 @@ typedef __s32 sctp_assoc_t; #define SCTP_PR_SUPPORTED 113 #define SCTP_DEFAULT_PRINFO 114 #define SCTP_PR_ASSOC_STATUS 115 +#define SCTP_ENABLE_STREAM_RESET 118 +#define SCTP_RESET_STREAMS 119 +#define SCTP_RESET_ASSOC 120 +#define SCTP_ADD_STREAMS 121 /* PR-SCTP policies */ #define SCTP_PR_SCTP_NONE 0x0000 @@ -138,6 +142,15 @@ typedef __s32 sctp_assoc_t; #define SCTP_PR_RTX_ENABLED(x) (SCTP_PR_POLICY(x) == SCTP_PR_SCTP_RTX) #define SCTP_PR_PRIO_ENABLED(x) (SCTP_PR_POLICY(x) == SCTP_PR_SCTP_PRIO) +/* For enable stream reset */ +#define SCTP_ENABLE_RESET_STREAM_REQ 0x01 +#define SCTP_ENABLE_RESET_ASSOC_REQ 0x02 +#define SCTP_ENABLE_CHANGE_ASSOC_REQ 0x04 +#define SCTP_ENABLE_STRRESET_MASK 0x07 + +#define SCTP_STREAM_RESET_INCOMING 0x01 +#define SCTP_STREAM_RESET_OUTGOING 0x02 + /* These are bit fields for msghdr->msg_flags. See section 5.1. */ /* On user space Linux, these live in <bits/socket.h> as an enum. 
*/ enum sctp_msg_flags { @@ -477,6 +490,18 @@ struct sctp_sender_dry_event { sctp_assoc_t sender_dry_assoc_id; }; +#define SCTP_STREAM_RESET_INCOMING_SSN 0x0001 +#define SCTP_STREAM_RESET_OUTGOING_SSN 0x0002 +#define SCTP_STREAM_RESET_DENIED 0x0004 +#define SCTP_STREAM_RESET_FAILED 0x0008 +struct sctp_stream_reset_event { + __u16 strreset_type; + __u16 strreset_flags; + __u32 strreset_length; + sctp_assoc_t strreset_assoc_id; + __u16 strreset_stream_list[]; +}; + /* * Described in Section 7.3 * Ancillary Data and Notification Interest Options @@ -492,6 +517,7 @@ struct sctp_event_subscribe { __u8 sctp_adaptation_layer_event; __u8 sctp_authentication_event; __u8 sctp_sender_dry_event; + __u8 sctp_stream_reset_event; }; /* @@ -516,6 +542,7 @@ union sctp_notification { struct sctp_pdapi_event sn_pdapi_event; struct sctp_authkey_event sn_authkey_event; struct sctp_sender_dry_event sn_sender_dry_event; + struct sctp_stream_reset_event sn_strreset_event; }; /* Section 5.3.1 @@ -543,6 +570,8 @@ enum sctp_sn_type { #define SCTP_AUTHENTICATION_INDICATION SCTP_AUTHENTICATION_EVENT SCTP_SENDER_DRY_EVENT, #define SCTP_SENDER_DRY_EVENT SCTP_SENDER_DRY_EVENT + SCTP_STREAM_RESET_EVENT, +#define SCTP_STREAM_RESET_EVENT SCTP_STREAM_RESET_EVENT }; /* Notification error codes used to fill up the error fields in some @@ -1008,4 +1037,17 @@ struct sctp_info { __u32 __reserved3; }; +struct sctp_reset_streams { + sctp_assoc_t srs_assoc_id; + uint16_t srs_flags; + uint16_t srs_number_streams; /* 0 == ALL */ + uint16_t srs_stream_list[]; /* list if srs_number_streams is not 0 */ +}; + +struct sctp_add_streams { + sctp_assoc_t sas_assoc_id; + uint16_t sas_instrms; + uint16_t sas_outstrms; +}; + #endif /* _UAPI_SCTP_H */ diff --git a/include/uapi/linux/seg6.h b/include/uapi/linux/seg6.h index 052799e4d751..7278511d339e 100644 --- a/include/uapi/linux/seg6.h +++ b/include/uapi/linux/seg6.h @@ -14,6 +14,9 @@ #ifndef _UAPI_LINUX_SEG6_H #define _UAPI_LINUX_SEG6_H +#include <linux/types.h> +#include <linux/in6.h> /* For struct in6_addr. */ + /* * SRH */ diff --git a/include/uapi/linux/seg6_hmac.h b/include/uapi/linux/seg6_hmac.h index b652dfd51bc5..e691c753fc3f 100644 --- a/include/uapi/linux/seg6_hmac.h +++ b/include/uapi/linux/seg6_hmac.h @@ -1,6 +1,7 @@ #ifndef _UAPI_LINUX_SEG6_HMAC_H #define _UAPI_LINUX_SEG6_HMAC_H +#include <linux/types.h> #include <linux/seg6.h> #define SEG6_HMAC_SECRET_LEN 64 diff --git a/include/uapi/linux/seg6_iptunnel.h b/include/uapi/linux/seg6_iptunnel.h index 0f7dbd280a9c..b6e5a0a1afd7 100644 --- a/include/uapi/linux/seg6_iptunnel.h +++ b/include/uapi/linux/seg6_iptunnel.h @@ -14,6 +14,8 @@ #ifndef _UAPI_LINUX_SEG6_IPTUNNEL_H #define _UAPI_LINUX_SEG6_IPTUNNEL_H +#include <linux/seg6.h> /* For struct ipv6_sr_hdr.
*/ + enum { SEG6_IPTUNNEL_UNSPEC, SEG6_IPTUNNEL_SRH, @@ -33,6 +35,8 @@ enum { SEG6_IPTUN_MODE_ENCAP, }; +#ifdef __KERNEL__ + static inline size_t seg6_lwt_headroom(struct seg6_iptunnel_encap *tuninfo) { int encap = (tuninfo->mode == SEG6_IPTUN_MODE_ENCAP); @@ -42,3 +46,5 @@ static inline size_t seg6_lwt_headroom(struct seg6_iptunnel_encap *tuninfo) } #endif + +#endif diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h index 99dbed8a8874..9ec741b133fe 100644 --- a/include/uapi/linux/serial_core.h +++ b/include/uapi/linux/serial_core.h @@ -56,7 +56,8 @@ #define PORT_ALTR_16550_F128 28 /* Altera 16550 UART with 128 FIFOs */ #define PORT_RT2880 29 /* Ralink RT2880 internal UART */ #define PORT_16550A_FSL64 30 /* Freescale 16550 UART with 64 FIFOs */ -#define PORT_MAX_8250 30 /* max port ID */ +#define PORT_DA830 31 /* TI DA8xx/66AK2x */ +#define PORT_MAX_8250 31 /* max port ID */ /* * ARM specific type numbers. These are not currently guaranteed diff --git a/include/uapi/linux/serial_reg.h b/include/uapi/linux/serial_reg.h index b4c04842a8c0..5db76880b4ad 100644 --- a/include/uapi/linux/serial_reg.h +++ b/include/uapi/linux/serial_reg.h @@ -327,6 +327,14 @@ #define SERIAL_RSA_BAUD_BASE (921600) #define SERIAL_RSA_BAUD_BASE_LO (SERIAL_RSA_BAUD_BASE / 8) +/* Extra registers for TI DA8xx/66AK2x */ +#define UART_DA830_PWREMU_MGMT 12 + +/* PWREMU_MGMT register bits */ +#define UART_DA830_PWREMU_MGMT_FREE (1 << 0) /* Free-running mode */ +#define UART_DA830_PWREMU_MGMT_URRST (1 << 13) /* Receiver reset/enable */ +#define UART_DA830_PWREMU_MGMT_UTRST (1 << 14) /* Transmitter reset/enable */ + /* * Extra serial register definitions for the internal UARTs * in TI OMAP processors. @@ -359,24 +367,6 @@ #define UART_OMAP_MDR1_DISABLE 0x07 /* Disable (default state) */ /* - * These are definitions for the Exar XR17V35X and XR17(C|D)15X - */ -#define UART_EXAR_8XMODE 0x88 /* 8X sampling rate select */ -#define UART_EXAR_SLEEP 0x8b /* Sleep mode */ -#define UART_EXAR_DVID 0x8d /* Device identification */ - -#define UART_EXAR_FCTR 0x08 /* Feature Control Register */ -#define UART_FCTR_EXAR_IRDA 0x08 /* IrDa data encode select */ -#define UART_FCTR_EXAR_485 0x10 /* Auto 485 half duplex dir ctl */ -#define UART_FCTR_EXAR_TRGA 0x00 /* FIFO trigger table A */ -#define UART_FCTR_EXAR_TRGB 0x60 /* FIFO trigger table B */ -#define UART_FCTR_EXAR_TRGC 0x80 /* FIFO trigger table C */ -#define UART_FCTR_EXAR_TRGD 0xc0 /* FIFO trigger table D programmable */ - -#define UART_EXAR_TXTRG 0x0a /* Tx FIFO trigger level write-only */ -#define UART_EXAR_RXTRG 0x0b /* Rx FIFO trigger level write-only */ - -/* * These are definitions for the Altera ALTR_16550_F32/F64/F128 * Normalized from 0x100 to 0x40 because of shift by 2 (32 bit regs). 
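 * (e.g. the byte offset 0x100 becomes register index 0x40, since 0x100 >> 2 == 0x40)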
*/ diff --git a/include/uapi/linux/serio.h b/include/uapi/linux/serio.h index f2447a83ac8d..ccd0ccd00f47 100644 --- a/include/uapi/linux/serio.h +++ b/include/uapi/linux/serio.h @@ -17,9 +17,10 @@ /* * bit masks for use in "interrupt" flags (3rd argument) */ -#define SERIO_TIMEOUT 1 -#define SERIO_PARITY 2 -#define SERIO_FRAME 4 +#define SERIO_TIMEOUT BIT(0) +#define SERIO_PARITY BIT(1) +#define SERIO_FRAME BIT(2) +#define SERIO_OOB_DATA BIT(3) /* * Serio types diff --git a/include/uapi/linux/smc.h b/include/uapi/linux/smc.h new file mode 100644 index 000000000000..ab1dea8e53ee --- /dev/null +++ b/include/uapi/linux/smc.h @@ -0,0 +1,35 @@ +/* + * Shared Memory Communications over RDMA (SMC-R) and RoCE + * + * Definitions for generic netlink based configuration of an SMC-R PNET table + * + * Copyright IBM Corp. 2016 + * + * Author(s): Thomas Richter <tmricht@linux.vnet.ibm.com> + */ + +#ifndef _UAPI_LINUX_SMC_H_ +#define _UAPI_LINUX_SMC_H_ + +/* Netlink SMC_PNETID attributes */ +enum { + SMC_PNETID_UNSPEC, + SMC_PNETID_NAME, + SMC_PNETID_ETHNAME, + SMC_PNETID_IBNAME, + SMC_PNETID_IBPORT, + __SMC_PNETID_MAX, + SMC_PNETID_MAX = __SMC_PNETID_MAX - 1 +}; + +enum { /* SMC PNET Table commands */ + SMC_PNETID_GET = 1, + SMC_PNETID_ADD, + SMC_PNETID_DEL, + SMC_PNETID_FLUSH +}; + +#define SMCR_GENL_FAMILY_NAME "SMC_PNETID" +#define SMCR_GENL_FAMILY_VERSION 1 + +#endif /* _UAPI_LINUX_SMC_H */ diff --git a/include/uapi/linux/smc_diag.h b/include/uapi/linux/smc_diag.h new file mode 100644 index 000000000000..0063919fea34 --- /dev/null +++ b/include/uapi/linux/smc_diag.h @@ -0,0 +1,85 @@ +#ifndef _UAPI_SMC_DIAG_H_ +#define _UAPI_SMC_DIAG_H_ + +#include <linux/types.h> +#include <linux/inet_diag.h> +#include <rdma/ib_verbs.h> + +/* Request structure */ +struct smc_diag_req { + __u8 diag_family; + __u8 pad[2]; + __u8 diag_ext; /* Query extended information */ + struct inet_diag_sockid id; +}; + +/* Base info structure. 
It contains socket identity (addrs/ports/cookie) based + * on the internal clcsock, and more SMC-related socket data + */ +struct smc_diag_msg { + __u8 diag_family; + __u8 diag_state; + __u8 diag_fallback; + __u8 diag_shutdown; + struct inet_diag_sockid id; + + __u32 diag_uid; + __u64 diag_inode; +}; + +/* Extensions */ + +enum { + SMC_DIAG_NONE, + SMC_DIAG_CONNINFO, + SMC_DIAG_LGRINFO, + SMC_DIAG_SHUTDOWN, + __SMC_DIAG_MAX, +}; + +#define SMC_DIAG_MAX (__SMC_DIAG_MAX - 1) + +/* SMC_DIAG_CONNINFO */ + +struct smc_diag_cursor { + __u16 reserved; + __u16 wrap; + __u32 count; +}; + +struct smc_diag_conninfo { + __u32 token; /* unique connection id */ + __u32 sndbuf_size; /* size of send buffer */ + __u32 rmbe_size; /* size of RMB element */ + __u32 peer_rmbe_size; /* size of peer RMB element */ + /* local RMB element cursors */ + struct smc_diag_cursor rx_prod; /* received producer cursor */ + struct smc_diag_cursor rx_cons; /* received consumer cursor */ + /* peer RMB element cursors */ + struct smc_diag_cursor tx_prod; /* sent producer cursor */ + struct smc_diag_cursor tx_cons; /* sent consumer cursor */ + __u8 rx_prod_flags; /* received producer flags */ + __u8 rx_conn_state_flags; /* recvd connection flags*/ + __u8 tx_prod_flags; /* sent producer flags */ + __u8 tx_conn_state_flags; /* sent connection flags*/ + /* send buffer cursors */ + struct smc_diag_cursor tx_prep; /* prepared to be sent cursor */ + struct smc_diag_cursor tx_sent; /* sent cursor */ + struct smc_diag_cursor tx_fin; /* confirmed sent cursor */ +}; + +/* SMC_DIAG_LINKINFO */ + +struct smc_diag_linkinfo { + __u8 link_id; /* link identifier */ + __u8 ibname[IB_DEVICE_NAME_MAX]; /* name of the RDMA device */ + __u8 ibport; /* RDMA device port number */ + __u8 gid[40]; /* local GID */ + __u8 peer_gid[40]; /* peer GID */ +}; + +struct smc_diag_lgrinfo { + struct smc_diag_linkinfo lnk[1]; + __u8 role; +}; +#endif /* _UAPI_SMC_DIAG_H_ */ diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h index e7a31f830690..3b2bed7ca9a4 100644 --- a/include/uapi/linux/snmp.h +++ b/include/uapi/linux/snmp.h @@ -240,6 +240,7 @@ enum LINUX_MIB_SACKMERGED, LINUX_MIB_SACKSHIFTFALLBACK, LINUX_MIB_TCPBACKLOGDROP, + LINUX_MIB_PFMEMALLOCDROP, LINUX_MIB_TCPMINTTLDROP, /* RFC 5082 */ LINUX_MIB_TCPDEFERACCEPTDROP, LINUX_MIB_IPRPFILTER, /* IP Reverse Path Filter (rp_filter) */ diff --git a/include/uapi/linux/stat.h b/include/uapi/linux/stat.h index 7fec7e36d921..51a6b86e3700 100644 --- a/include/uapi/linux/stat.h +++ b/include/uapi/linux/stat.h @@ -1,6 +1,7 @@ #ifndef _UAPI_LINUX_STAT_H #define _UAPI_LINUX_STAT_H +#include <linux/types.h> #if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) @@ -41,5 +42,135 @@ #endif +/* + * Timestamp structure for the timestamps in struct statx. + * + * tv_sec holds the number of seconds before (negative) or after (positive) + * 00:00:00 1st January 1970 UTC. + * + * tv_nsec holds a number of nanoseconds before (0..-999,999,999 if tv_sec is + * negative) or after (0..999,999,999 if tv_sec is positive) the tv_sec time. + * + * Note that if both tv_sec and tv_nsec are non-zero, then the two values must + * either be both positive or both negative. + * + * __reserved is held in case we need a yet finer resolution. + */ +struct statx_timestamp { + __s64 tv_sec; + __s32 tv_nsec; + __s32 __reserved; +}; + +/* + * Structures for the extended file attribute retrieval system call + * (statx()). + * + * The caller passes a mask of what they're specifically interested in as a + * parameter to statx(). 
What statx() actually got will be indicated in + * stx_mask upon return. + * + * For each bit in the mask argument: + * + * - if the datum is not supported: + * + * - the bit will be cleared, and + * + * - the datum will be set to an appropriate fabricated value if one is + * available (e.g. CIFS can take a default uid and gid), otherwise + * + * - the field will be cleared; + * + * - otherwise, if explicitly requested: + * + * - the datum will be synchronised to the server if AT_STATX_FORCE_SYNC is + * set or if the datum is considered out of date, and + * + * - the field will be filled in and the bit will be set; + * + * - otherwise, if not requested, but available in approximate form without any + * effort, it will be filled in anyway, and the bit will be set upon return + * (it might not be up to date, however, and no attempt will be made to + * synchronise the internal state first); + * + * - otherwise the field and the bit will be cleared before returning. + * + * Items in STATX_BASIC_STATS may be marked unavailable on return, but they + * will have values installed for compatibility purposes so that stat() and + * co. can be emulated in userspace. + */ +struct statx { + /* 0x00 */ + __u32 stx_mask; /* What results were written [uncond] */ + __u32 stx_blksize; /* Preferred general I/O size [uncond] */ + __u64 stx_attributes; /* Flags conveying information about the file [uncond] */ + /* 0x10 */ + __u32 stx_nlink; /* Number of hard links */ + __u32 stx_uid; /* User ID of owner */ + __u32 stx_gid; /* Group ID of owner */ + __u16 stx_mode; /* File mode */ + __u16 __spare0[1]; + /* 0x20 */ + __u64 stx_ino; /* Inode number */ + __u64 stx_size; /* File size */ + __u64 stx_blocks; /* Number of 512-byte blocks allocated */ + __u64 __spare1[1]; + /* 0x40 */ + struct statx_timestamp stx_atime; /* Last access time */ + struct statx_timestamp stx_btime; /* File creation time */ + struct statx_timestamp stx_ctime; /* Last attribute change time */ + struct statx_timestamp stx_mtime; /* Last data modification time */ + /* 0x80 */ + __u32 stx_rdev_major; /* Device ID of special file [if bdev/cdev] */ + __u32 stx_rdev_minor; + __u32 stx_dev_major; /* ID of device containing file [uncond] */ + __u32 stx_dev_minor; + /* 0x90 */ + __u64 __spare2[14]; /* Spare space for future expansion */ + /* 0x100 */ +}; + +/* + * Flags to be found in stx_mask + * + * Query request/result mask for statx() and struct statx::stx_mask. + * + * These bits should be set in the mask argument of statx() to request + * particular items when calling statx().
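+ *
+ * A minimal usage sketch (illustrative only; assumes the C library exposes
+ * __NR_statx but does not yet provide a statx() wrapper):
+ *
+ *	struct statx stx;
+ *	long ret = syscall(__NR_statx, AT_FDCWD, "somefile",
+ *			   AT_STATX_SYNC_AS_STAT, STATX_BTIME, &stx);
+ *	if (ret == 0 && (stx.stx_mask & STATX_BTIME))
+ *		... stx.stx_btime holds the file creation time ...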
+ */ +#define STATX_TYPE 0x00000001U /* Want/got stx_mode & S_IFMT */ +#define STATX_MODE 0x00000002U /* Want/got stx_mode & ~S_IFMT */ +#define STATX_NLINK 0x00000004U /* Want/got stx_nlink */ +#define STATX_UID 0x00000008U /* Want/got stx_uid */ +#define STATX_GID 0x00000010U /* Want/got stx_gid */ +#define STATX_ATIME 0x00000020U /* Want/got stx_atime */ +#define STATX_MTIME 0x00000040U /* Want/got stx_mtime */ +#define STATX_CTIME 0x00000080U /* Want/got stx_ctime */ +#define STATX_INO 0x00000100U /* Want/got stx_ino */ +#define STATX_SIZE 0x00000200U /* Want/got stx_size */ +#define STATX_BLOCKS 0x00000400U /* Want/got stx_blocks */ +#define STATX_BASIC_STATS 0x000007ffU /* The stuff in the normal stat struct */ +#define STATX_BTIME 0x00000800U /* Want/got stx_btime */ +#define STATX_ALL 0x00000fffU /* All currently supported flags */ + +/* + * Attributes to be found in stx_attributes + * + * These give information about the features or the state of a file that might + * be of use to ordinary userspace programs such as GUIs or ls rather than + * specialised tools. + * + * Note that the flags marked [I] correspond to generic FS_IOC_FLAGS + * semantically. Where possible, the numerical value is picked to correspond + * also. + */ +#define STATX_ATTR_COMPRESSED 0x00000004 /* [I] File is compressed by the fs */ +#define STATX_ATTR_IMMUTABLE 0x00000010 /* [I] File is marked immutable */ +#define STATX_ATTR_APPEND 0x00000020 /* [I] File is append-only */ +#define STATX_ATTR_NODUMP 0x00000040 /* [I] File is not to be dumped */ +#define STATX_ATTR_ENCRYPTED 0x00000800 /* [I] File requires key to decrypt in fs */ + +#define STATX_ATTR_AUTOMOUNT 0x00001000 /* Dir: Automount trigger */ + #endif /* _UAPI_LINUX_STAT_H */ diff --git a/include/uapi/linux/target_core_user.h b/include/uapi/linux/target_core_user.h index c506cddb8165..af17b4154ef6 100644 --- a/include/uapi/linux/target_core_user.h +++ b/include/uapi/linux/target_core_user.h @@ -105,26 +105,26 @@ struct tcmu_cmd_entry { union { struct { - uint32_t iov_cnt; - uint32_t iov_bidi_cnt; - uint32_t iov_dif_cnt; - uint64_t cdb_off; - uint64_t __pad1; - uint64_t __pad2; + __u32 iov_cnt; + __u32 iov_bidi_cnt; + __u32 iov_dif_cnt; + __u64 cdb_off; + __u64 __pad1; + __u64 __pad2; struct iovec iov[0]; } req; struct { - uint8_t scsi_status; - uint8_t __pad1; - uint16_t __pad2; - uint32_t __pad3; + __u8 scsi_status; + __u8 __pad1; + __u16 __pad2; + __u32 __pad3; char sense_buffer[TCMU_SENSE_BUFFERSIZE]; } rsp; }; } __packed; -#define TCMU_OP_ALIGN_SIZE sizeof(uint64_t) +#define TCMU_OP_ALIGN_SIZE sizeof(__u64) enum tcmu_genl_cmd { TCMU_CMD_UNSPEC, diff --git a/include/uapi/linux/tc_act/Kbuild b/include/uapi/linux/tc_act/Kbuild index e3db7403296f..ba62ddf0e58a 100644 --- a/include/uapi/linux/tc_act/Kbuild +++ b/include/uapi/linux/tc_act/Kbuild @@ -4,6 +4,7 @@ header-y += tc_defact.h header-y += tc_gact.h header-y += tc_ipt.h header-y += tc_mirred.h +header-y += tc_sample.h header-y += tc_nat.h header-y += tc_pedit.h header-y += tc_skbedit.h diff --git a/include/uapi/linux/tc_act/tc_csum.h b/include/uapi/linux/tc_act/tc_csum.h index 8ac8041ab5f1..a11bb355dbfb 100644 --- a/include/uapi/linux/tc_act/tc_csum.h +++ b/include/uapi/linux/tc_act/tc_csum.h @@ -21,7 +21,8 @@ enum { TCA_CSUM_UPDATE_FLAG_IGMP = 4, TCA_CSUM_UPDATE_FLAG_TCP = 8, TCA_CSUM_UPDATE_FLAG_UDP = 16, - TCA_CSUM_UPDATE_FLAG_UDPLITE = 32 + TCA_CSUM_UPDATE_FLAG_UDPLITE = 32, + TCA_CSUM_UPDATE_FLAG_SCTP = 64, }; struct tc_csum { diff --git a/include/uapi/linux/tc_act/tc_ife.h 
b/include/uapi/linux/tc_act/tc_ife.h index cd18360eca24..7c2817866c97 100644 --- a/include/uapi/linux/tc_act/tc_ife.h +++ b/include/uapi/linux/tc_act/tc_ife.h @@ -3,6 +3,7 @@ #include <linux/types.h> #include <linux/pkt_cls.h> +#include <linux/ife.h> #define TCA_ACT_IFE 25 /* Flag bits for now just encoding/decoding; mutually exclusive */ @@ -28,13 +29,4 @@ enum { }; #define TCA_IFE_MAX (__TCA_IFE_MAX - 1) -#define IFE_META_SKBMARK 1 -#define IFE_META_HASHID 2 -#define IFE_META_PRIO 3 -#define IFE_META_QMAP 4 -#define IFE_META_TCINDEX 5 -/*Can be overridden at runtime by module option*/ -#define __IFE_META_MAX 6 -#define IFE_META_MAX (__IFE_META_MAX - 1) - #endif
diff --git a/include/uapi/linux/tc_act/tc_pedit.h b/include/uapi/linux/tc_act/tc_pedit.h index 6389959a5157..143d2b31a316 100644 --- a/include/uapi/linux/tc_act/tc_pedit.h +++ b/include/uapi/linux/tc_act/tc_pedit.h @@ -11,10 +11,41 @@ enum { TCA_PEDIT_TM, TCA_PEDIT_PARMS, TCA_PEDIT_PAD, + TCA_PEDIT_PARMS_EX, + TCA_PEDIT_KEYS_EX, + TCA_PEDIT_KEY_EX, __TCA_PEDIT_MAX }; #define TCA_PEDIT_MAX (__TCA_PEDIT_MAX - 1) +enum { + TCA_PEDIT_KEY_EX_HTYPE = 1, + TCA_PEDIT_KEY_EX_CMD = 2, + __TCA_PEDIT_KEY_EX_MAX +}; +#define TCA_PEDIT_KEY_EX_MAX (__TCA_PEDIT_KEY_EX_MAX - 1) + + /* TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK is a special case for legacy users. It + * means no specific header type - offset is relative to the network layer + */ +enum pedit_header_type { + TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK = 0, + TCA_PEDIT_KEY_EX_HDR_TYPE_ETH = 1, + TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 = 2, + TCA_PEDIT_KEY_EX_HDR_TYPE_IP6 = 3, + TCA_PEDIT_KEY_EX_HDR_TYPE_TCP = 4, + TCA_PEDIT_KEY_EX_HDR_TYPE_UDP = 5, + __PEDIT_HDR_TYPE_MAX, +}; +#define TCA_PEDIT_HDR_TYPE_MAX (__PEDIT_HDR_TYPE_MAX - 1) + +enum pedit_cmd { + TCA_PEDIT_KEY_EX_CMD_SET = 0, + TCA_PEDIT_KEY_EX_CMD_ADD = 1, + __PEDIT_CMD_MAX, +}; +#define TCA_PEDIT_CMD_MAX (__PEDIT_CMD_MAX - 1) + struct tc_pedit_key { __u32 mask; /* AND */ __u32 val; /* XOR */
diff --git a/include/uapi/linux/tc_act/tc_sample.h b/include/uapi/linux/tc_act/tc_sample.h new file mode 100644 index 000000000000..edc9058bb30d --- /dev/null +++ b/include/uapi/linux/tc_act/tc_sample.h @@ -0,0 +1,26 @@ +#ifndef __LINUX_TC_SAMPLE_H +#define __LINUX_TC_SAMPLE_H + +#include <linux/types.h> +#include <linux/pkt_cls.h> +#include <linux/if_ether.h> + +#define TCA_ACT_SAMPLE 26 + +struct tc_sample { + tc_gen; +}; + +enum { + TCA_SAMPLE_UNSPEC, + TCA_SAMPLE_TM, + TCA_SAMPLE_PARMS, + TCA_SAMPLE_RATE, + TCA_SAMPLE_TRUNC_SIZE, + TCA_SAMPLE_PSAMPLE_GROUP, + TCA_SAMPLE_PAD, + __TCA_SAMPLE_MAX +}; +#define TCA_SAMPLE_MAX (__TCA_SAMPLE_MAX - 1) + +#endif
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h index c53de2691cec..38a2b07afdff 100644 --- a/include/uapi/linux/tcp.h +++ b/include/uapi/linux/tcp.h @@ -116,6 +116,7 @@ enum { #define TCP_SAVE_SYN 27 /* Record SYN headers for new connections */ #define TCP_SAVED_SYN 28 /* Get SYN headers recorded for connection */ #define TCP_REPAIR_WINDOW 29 /* Get/set window parameters */ +#define TCP_FASTOPEN_CONNECT 30 /* Attempt FastOpen with connect */ struct tcp_repair_opt { __u32 opt_code; @@ -226,6 +227,8 @@ enum { TCP_NLA_BUSY, /* Time (usec) busy sending data */ TCP_NLA_RWND_LIMITED, /* Time (usec) limited by receive window */ TCP_NLA_SNDBUF_LIMITED, /* Time (usec) limited by send buffer */ + TCP_NLA_DATA_SEGS_OUT, /* Data pkts sent including retransmission */ + TCP_NLA_TOTAL_RETRANS, /* Data pkts retransmitted */ }; /* for TCP_MD5SIG socket option */ diff --git a/include/uapi/linux/tipc.h
b/include/uapi/linux/tipc.h index bf049e8fe31b..5351b08c897a 100644 --- a/include/uapi/linux/tipc.h +++ b/include/uapi/linux/tipc.h @@ -1,7 +1,7 @@ /* * include/uapi/linux/tipc.h: Header for TIPC socket interface * - * Copyright (c) 2003-2006, Ericsson AB + * Copyright (c) 2003-2006, 2015-2016 Ericsson AB * Copyright (c) 2005, 2010-2011, Wind River Systems * All rights reserved. * @@ -220,7 +220,7 @@ struct sockaddr_tipc { #define TIPC_DESTNAME 3 /* destination name */ /* - * TIPC-specific socket option values + * TIPC-specific socket option names */ #define TIPC_IMPORTANCE 127 /* Default: TIPC_LOW_IMPORTANCE */ @@ -229,6 +229,8 @@ struct sockaddr_tipc { #define TIPC_CONN_TIMEOUT 130 /* Default: 8000 (ms) */ #define TIPC_NODE_RECVQ_DEPTH 131 /* Default: none (read only) */ #define TIPC_SOCK_RECVQ_DEPTH 132 /* Default: none (read only) */ +#define TIPC_MCAST_BROADCAST 133 /* Default: TIPC selects. No arg */ +#define TIPC_MCAST_REPLICAST 134 /* Default: TIPC selects. No arg */ /* * Maximum sizes of TIPC bearer-related names (including terminating NULL) diff --git a/include/uapi/linux/un.h b/include/uapi/linux/un.h index 3ed3e46c1b1f..4f0ab3a548ad 100644 --- a/include/uapi/linux/un.h +++ b/include/uapi/linux/un.h @@ -10,4 +10,6 @@ struct sockaddr_un { char sun_path[UNIX_PATH_MAX]; /* pathname */ }; +#define SIOCUNIXFILE (SIOCPROTOPRIVATE + 0) /* open a socket file with O_PATH */ + #endif /* _LINUX_UN_H */ diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h index 9057d7af3ae1..3b059530dac9 100644 --- a/include/uapi/linux/userfaultfd.h +++ b/include/uapi/linux/userfaultfd.h @@ -11,13 +11,19 @@ #include <linux/types.h> -#define UFFD_API ((__u64)0xAA) /* - * After implementing the respective features it will become: - * #define UFFD_API_FEATURES (UFFD_FEATURE_PAGEFAULT_FLAG_WP | \ - * UFFD_FEATURE_EVENT_FORK) + * If the UFFDIO_API is upgraded someday, the UFFDIO_UNREGISTER and + * UFFDIO_WAKE ioctls should be defined as _IOW and not as _IOR. In + * userfaultfd.h we assumed the kernel was reading (instead _IOC_READ + * means the userland is reading). */ -#define UFFD_API_FEATURES (0) +#define UFFD_API ((__u64)0xAA) +#define UFFD_API_FEATURES (UFFD_FEATURE_EVENT_FORK | \ + UFFD_FEATURE_EVENT_REMAP | \ + UFFD_FEATURE_EVENT_REMOVE | \ + UFFD_FEATURE_EVENT_UNMAP | \ + UFFD_FEATURE_MISSING_HUGETLBFS | \ + UFFD_FEATURE_MISSING_SHMEM) #define UFFD_API_IOCTLS \ ((__u64)1 << _UFFDIO_REGISTER | \ (__u64)1 << _UFFDIO_UNREGISTER | \ @@ -26,6 +32,9 @@ ((__u64)1 << _UFFDIO_WAKE | \ (__u64)1 << _UFFDIO_COPY | \ (__u64)1 << _UFFDIO_ZEROPAGE) +#define UFFD_API_RANGE_IOCTLS_BASIC \ + ((__u64)1 << _UFFDIO_WAKE | \ + (__u64)1 << _UFFDIO_COPY) /* * Valid ioctl command number range with this API is from 0x00 to @@ -72,6 +81,21 @@ struct uffd_msg { } pagefault; struct { + __u32 ufd; + } fork; + + struct { + __u64 from; + __u64 to; + __u64 len; + } remap; + + struct { + __u64 start; + __u64 end; + } remove; + + struct { /* unused reserved fields */ __u64 reserved1; __u64 reserved2; @@ -84,9 +108,10 @@ struct uffd_msg { * Start at 0x12 and not at 0 to be more strict against bugs. 
*/ #define UFFD_EVENT_PAGEFAULT 0x12 -#if 0 /* not available yet */ #define UFFD_EVENT_FORK 0x13 -#endif +#define UFFD_EVENT_REMAP 0x14 +#define UFFD_EVENT_REMOVE 0x15 +#define UFFD_EVENT_UNMAP 0x16 /* flags for UFFD_EVENT_PAGEFAULT */ #define UFFD_PAGEFAULT_FLAG_WRITE (1<<0) /* If this was a write fault */ @@ -104,11 +129,38 @@ struct uffdio_api { * Note: UFFD_EVENT_PAGEFAULT and UFFD_PAGEFAULT_FLAG_WRITE * are to be considered implicitly always enabled in all kernels as * long as the uffdio_api.api requested matches UFFD_API. + * + * UFFD_FEATURE_MISSING_HUGETLBFS means an UFFDIO_REGISTER + * with UFFDIO_REGISTER_MODE_MISSING mode will succeed on + * hugetlbfs virtual memory ranges. Adding or not adding + * UFFD_FEATURE_MISSING_HUGETLBFS to uffdio_api.features has + * no real functional effect after UFFDIO_API returns, but + * it's only useful for an initial feature set probe at + * UFFDIO_API time. There are two ways to use it: + * + * 1) by adding UFFD_FEATURE_MISSING_HUGETLBFS to the + * uffdio_api.features before calling UFFDIO_API, an error + * will be returned by UFFDIO_API on a kernel without + * hugetlbfs missing support + * + * 2) the UFFD_FEATURE_MISSING_HUGETLBFS can not be added in + * uffdio_api.features and instead it will be set by the + * kernel in the uffdio_api.features if the kernel supports + * it, so userland can later check if the feature flag is + * present in uffdio_api.features after UFFDIO_API + * succeeded. + * + * UFFD_FEATURE_MISSING_SHMEM works the same as + * UFFD_FEATURE_MISSING_HUGETLBFS, but it applies to shmem + * (i.e. tmpfs and other shmem based APIs). */ -#if 0 /* not available yet */ #define UFFD_FEATURE_PAGEFAULT_FLAG_WP (1<<0) #define UFFD_FEATURE_EVENT_FORK (1<<1) -#endif +#define UFFD_FEATURE_EVENT_REMAP (1<<2) +#define UFFD_FEATURE_EVENT_REMOVE (1<<3) +#define UFFD_FEATURE_MISSING_HUGETLBFS (1<<4) +#define UFFD_FEATURE_MISSING_SHMEM (1<<5) +#define UFFD_FEATURE_EVENT_UNMAP (1<<6) __u64 features; __u64 ioctls; diff --git a/include/linux/virtio_mmio.h b/include/uapi/linux/virtio_mmio.h index c4b09689ab64..c4b09689ab64 100644 --- a/include/linux/virtio_mmio.h +++ b/include/uapi/linux/virtio_mmio.h diff --git a/include/uapi/linux/virtio_pci.h b/include/uapi/linux/virtio_pci.h index 90007a1abcab..15b4385a2be1 100644 --- a/include/uapi/linux/virtio_pci.h +++ b/include/uapi/linux/virtio_pci.h @@ -79,7 +79,7 @@ * configuration space */ #define VIRTIO_PCI_CONFIG_OFF(msix_enabled) ((msix_enabled) ? 24 : 20) /* Deprecated: please use VIRTIO_PCI_CONFIG_OFF instead */ -#define VIRTIO_PCI_CONFIG(dev) VIRTIO_PCI_CONFIG_OFF((dev)->msix_enabled) +#define VIRTIO_PCI_CONFIG(dev) VIRTIO_PCI_CONFIG_OFF((dev)->pci_dev->msix_enabled) /* Virtio ABI version, this must match exactly */ #define VIRTIO_PCI_ABI_VERSION 0 diff --git a/include/uapi/rdma/Kbuild b/include/uapi/rdma/Kbuild index bb68cb1b04ed..1e0af1ff75c3 100644 --- a/include/uapi/rdma/Kbuild +++ b/include/uapi/rdma/Kbuild @@ -1,5 +1,6 @@ # UAPI Header export list header-y += ib_user_cm.h +header-y += rdma_user_ioctl.h header-y += ib_user_mad.h header-y += ib_user_sa.h header-y += ib_user_verbs.h diff --git a/include/uapi/rdma/bnxt_re-abi.h b/include/uapi/rdma/bnxt_re-abi.h new file mode 100644 index 000000000000..e2c8a3f0ccec --- /dev/null +++ b/include/uapi/rdma/bnxt_re-abi.h @@ -0,0 +1,89 @@ +/* + * Broadcom NetXtreme-E RoCE driver. + * + * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term + * Broadcom refers to Broadcom Limited and/or its subsidiaries. 
+ * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Description: Uverbs ABI header file + */ + +#ifndef __BNXT_RE_UVERBS_ABI_H__ +#define __BNXT_RE_UVERBS_ABI_H__ + +#define BNXT_RE_ABI_VERSION 1 + +struct bnxt_re_uctx_resp { + __u32 dev_id; + __u32 max_qp; + __u32 pg_size; + __u32 cqe_sz; + __u32 max_cqd; + __u32 rsvd; +}; + +struct bnxt_re_pd_resp { + __u32 pdid; + __u32 dpi; + __u64 dbr; +}; + +struct bnxt_re_cq_req { + __u64 cq_va; + __u64 cq_handle; +}; + +struct bnxt_re_cq_resp { + __u32 cqid; + __u32 tail; + __u32 phase; + __u32 rsvd; +}; + +struct bnxt_re_qp_req { + __u64 qpsva; + __u64 qprva; + __u64 qp_handle; +}; + +struct bnxt_re_qp_resp { + __u32 qpid; + __u32 rsvd; +}; + +enum bnxt_re_shpg_offt { + BNXT_RE_BEG_RESV_OFFT = 0x00, + BNXT_RE_AVID_OFFT = 0x10, + BNXT_RE_AVID_SIZE = 0x04, + BNXT_RE_END_RESV_OFFT = 0xFF0 +}; + +#endif /* __BNXT_RE_UVERBS_ABI_H__*/ diff --git a/include/uapi/rdma/hfi/Kbuild b/include/uapi/rdma/hfi/Kbuild index ef23c294fc71..b65b0b3a5f63 100644 --- a/include/uapi/rdma/hfi/Kbuild +++ b/include/uapi/rdma/hfi/Kbuild @@ -1,2 +1,3 @@ # UAPI Header export list header-y += hfi1_user.h +header-y += hfi1_ioctl.h diff --git a/include/uapi/rdma/hfi/hfi1_ioctl.h b/include/uapi/rdma/hfi/hfi1_ioctl.h new file mode 100644 index 000000000000..4791cc8cb09b --- /dev/null +++ b/include/uapi/rdma/hfi/hfi1_ioctl.h @@ -0,0 +1,173 @@ +/* + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Copyright(c) 2015 Intel Corporation. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * - Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * - Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef _LINUX__HFI1_IOCTL_H +#define _LINUX__HFI1_IOCTL_H +#include <linux/types.h> + +/* + * This structure is passed to the driver to tell it where + * user code buffers are, sizes, etc. The offsets and sizes of the + * fields must remain unchanged, for binary compatibility. It can + * be extended, if userversion is changed so user code can tell, if needed + */ +struct hfi1_user_info { + /* + * version of user software, to detect compatibility issues. + * Should be set to HFI1_USER_SWVERSION. + */ + __u32 userversion; + __u32 pad; + /* + * If two or more processes wish to share a context, each process + * must set the subcontext_cnt and subcontext_id to the same + * values. The only restriction on the subcontext_id is that + * it be unique for a given node. + */ + __u16 subctxt_cnt; + __u16 subctxt_id; + /* 128bit UUID passed in by PSM. 
*/ + __u8 uuid[16]; +}; + +struct hfi1_ctxt_info { + __u64 runtime_flags; /* chip/drv runtime flags (HFI1_CAP_*) */ + __u32 rcvegr_size; /* size of each eager buffer */ + __u16 num_active; /* number of active units */ + __u16 unit; /* unit (chip) assigned to caller */ + __u16 ctxt; /* ctxt on unit assigned to caller */ + __u16 subctxt; /* subctxt on unit assigned to caller */ + __u16 rcvtids; /* number of Rcv TIDs for this context */ + __u16 credits; /* number of PIO credits for this context */ + __u16 numa_node; /* NUMA node of the assigned device */ + __u16 rec_cpu; /* cpu # for affinity (0xffff if none) */ + __u16 send_ctxt; /* send context in use by this user context */ + __u16 egrtids; /* number of RcvArray entries for Eager Rcvs */ + __u16 rcvhdrq_cnt; /* number of RcvHdrQ entries */ + __u16 rcvhdrq_entsize; /* size (in bytes) for each RcvHdrQ entry */ + __u16 sdma_ring_size; /* number of entries in SDMA request ring */ +}; + +struct hfi1_tid_info { + /* virtual address of first page in transfer */ + __u64 vaddr; + /* pointer to tid array. this array is big enough */ + __u64 tidlist; + /* number of tids programmed by this request */ + __u32 tidcnt; + /* length of transfer buffer programmed by this request */ + __u32 length; +}; + +/* + * This structure is returned by the driver immediately after + * open to get implementation-specific info, and info specific to this + * instance. + * + * This struct must have explicit pad fields where type sizes + * may result in different alignments between 32 and 64 bit + * programs, since the 64 bit kernel requires the user code + * to have matching offsets + */ +struct hfi1_base_info { + /* version of hardware, for feature checking. */ + __u32 hw_version; + /* version of software, for feature checking. */ + __u32 sw_version; + /* Job key */ + __u16 jkey; + __u16 padding1; + /* + * The special QP (queue pair) value that identifies PSM + * protocol packet from standard IB packets. + */ + __u32 bthqp; + /* PIO credit return address, */ + __u64 sc_credits_addr; + /* + * Base address of write-only pio buffers for this process. + * Each buffer has sendpio_credits*64 bytes. + */ + __u64 pio_bufbase_sop; + /* + * Base address of write-only pio buffers for this process. + * Each buffer has sendpio_credits*64 bytes. + */ + __u64 pio_bufbase; + /* address where receive buffer queue is mapped into */ + __u64 rcvhdr_bufbase; + /* base address of Eager receive buffers. */ + __u64 rcvegr_bufbase; + /* base address of SDMA completion ring */ + __u64 sdma_comp_bufbase; + /* + * User register base for init code, not to be used directly by + * protocol or applications. Always maps real chip register space. + * the register addresses are: + * ur_rcvhdrhead, ur_rcvhdrtail, ur_rcvegrhead, ur_rcvegrtail, + * ur_rcvtidflow + */ + __u64 user_regbase; + /* notification events */ + __u64 events_bufbase; + /* status page */ + __u64 status_bufbase; + /* rcvhdrtail update */ + __u64 rcvhdrtail_base; + /* + * shared memory pages for subctxts if ctxt is shared; these cover + * all the processes in the group sharing a single context. + * all have enough space for the num_subcontexts value on this job.
+ */ + __u64 subctxt_uregbase; + __u64 subctxt_rcvegrbuf; + __u64 subctxt_rcvhdrbuf; +}; +#endif /* _LINIUX__HFI1_IOCTL_H */ diff --git a/include/uapi/rdma/hfi/hfi1_user.h b/include/uapi/rdma/hfi/hfi1_user.h index 587b7360e820..3f4ee93ae5eb 100644 --- a/include/uapi/rdma/hfi/hfi1_user.h +++ b/include/uapi/rdma/hfi/hfi1_user.h @@ -57,6 +57,7 @@ #define _LINUX__HFI1_USER_H #include <linux/types.h> +#include <rdma/rdma_user_ioctl.h> /* * This version number is given to the driver by the user code during @@ -112,61 +113,6 @@ #define HFI1_RCVHDR_ENTSIZE_16 (1UL << 1) #define HFI1_RCVDHR_ENTSIZE_32 (1UL << 2) -/* User commands. */ -#define HFI1_CMD_ASSIGN_CTXT 1 /* allocate HFI and context */ -#define HFI1_CMD_CTXT_INFO 2 /* find out what resources we got */ -#define HFI1_CMD_USER_INFO 3 /* set up userspace */ -#define HFI1_CMD_TID_UPDATE 4 /* update expected TID entries */ -#define HFI1_CMD_TID_FREE 5 /* free expected TID entries */ -#define HFI1_CMD_CREDIT_UPD 6 /* force an update of PIO credit */ - -#define HFI1_CMD_RECV_CTRL 8 /* control receipt of packets */ -#define HFI1_CMD_POLL_TYPE 9 /* set the kind of polling we want */ -#define HFI1_CMD_ACK_EVENT 10 /* ack & clear user status bits */ -#define HFI1_CMD_SET_PKEY 11 /* set context's pkey */ -#define HFI1_CMD_CTXT_RESET 12 /* reset context's HW send context */ -#define HFI1_CMD_TID_INVAL_READ 13 /* read TID cache invalidations */ -#define HFI1_CMD_GET_VERS 14 /* get the version of the user cdev */ - -/* - * User IOCTLs can not go above 128 if they do then see common.h and change the - * base for the snoop ioctl - */ -#define IB_IOCTL_MAGIC 0x1b /* See Documentation/ioctl/ioctl-number.txt */ - -/* - * Make the ioctls occupy the last 0xf0-0xff portion of the IB range - */ -#define __NUM(cmd) (HFI1_CMD_##cmd + 0xe0) - -struct hfi1_cmd; -#define HFI1_IOCTL_ASSIGN_CTXT \ - _IOWR(IB_IOCTL_MAGIC, __NUM(ASSIGN_CTXT), struct hfi1_user_info) -#define HFI1_IOCTL_CTXT_INFO \ - _IOW(IB_IOCTL_MAGIC, __NUM(CTXT_INFO), struct hfi1_ctxt_info) -#define HFI1_IOCTL_USER_INFO \ - _IOW(IB_IOCTL_MAGIC, __NUM(USER_INFO), struct hfi1_base_info) -#define HFI1_IOCTL_TID_UPDATE \ - _IOWR(IB_IOCTL_MAGIC, __NUM(TID_UPDATE), struct hfi1_tid_info) -#define HFI1_IOCTL_TID_FREE \ - _IOWR(IB_IOCTL_MAGIC, __NUM(TID_FREE), struct hfi1_tid_info) -#define HFI1_IOCTL_CREDIT_UPD \ - _IO(IB_IOCTL_MAGIC, __NUM(CREDIT_UPD)) -#define HFI1_IOCTL_RECV_CTRL \ - _IOW(IB_IOCTL_MAGIC, __NUM(RECV_CTRL), int) -#define HFI1_IOCTL_POLL_TYPE \ - _IOW(IB_IOCTL_MAGIC, __NUM(POLL_TYPE), int) -#define HFI1_IOCTL_ACK_EVENT \ - _IOW(IB_IOCTL_MAGIC, __NUM(ACK_EVENT), unsigned long) -#define HFI1_IOCTL_SET_PKEY \ - _IOW(IB_IOCTL_MAGIC, __NUM(SET_PKEY), __u16) -#define HFI1_IOCTL_CTXT_RESET \ - _IO(IB_IOCTL_MAGIC, __NUM(CTXT_RESET)) -#define HFI1_IOCTL_TID_INVAL_READ \ - _IOWR(IB_IOCTL_MAGIC, __NUM(TID_INVAL_READ), struct hfi1_tid_info) -#define HFI1_IOCTL_GET_VERS \ - _IOR(IB_IOCTL_MAGIC, __NUM(GET_VERS), int) - #define _HFI1_EVENT_FROZEN_BIT 0 #define _HFI1_EVENT_LINKDOWN_BIT 1 #define _HFI1_EVENT_LID_CHANGE_BIT 2 @@ -211,60 +157,6 @@ struct hfi1_cmd; #define HFI1_POLL_TYPE_ANYRCV 0x0 #define HFI1_POLL_TYPE_URGENT 0x1 -/* - * This structure is passed to the driver to tell it where - * user code buffers are, sizes, etc. The offsets and sizes of the - * fields must remain unchanged, for binary compatibility. It can - * be extended, if userversion is changed so user code can tell, if needed - */ -struct hfi1_user_info { - /* - * version of user software, to detect compatibility issues. 
- * Should be set to HFI1_USER_SWVERSION. - */ - __u32 userversion; - __u32 pad; - /* - * If two or more processes wish to share a context, each process - * must set the subcontext_cnt and subcontext_id to the same - * values. The only restriction on the subcontext_id is that - * it be unique for a given node. - */ - __u16 subctxt_cnt; - __u16 subctxt_id; - /* 128bit UUID passed in by PSM. */ - __u8 uuid[16]; -}; - -struct hfi1_ctxt_info { - __u64 runtime_flags; /* chip/drv runtime flags (HFI1_CAP_*) */ - __u32 rcvegr_size; /* size of each eager buffer */ - __u16 num_active; /* number of active units */ - __u16 unit; /* unit (chip) assigned to caller */ - __u16 ctxt; /* ctxt on unit assigned to caller */ - __u16 subctxt; /* subctxt on unit assigned to caller */ - __u16 rcvtids; /* number of Rcv TIDs for this context */ - __u16 credits; /* number of PIO credits for this context */ - __u16 numa_node; /* NUMA node of the assigned device */ - __u16 rec_cpu; /* cpu # for affinity (0xffff if none) */ - __u16 send_ctxt; /* send context in use by this user context */ - __u16 egrtids; /* number of RcvArray entries for Eager Rcvs */ - __u16 rcvhdrq_cnt; /* number of RcvHdrQ entries */ - __u16 rcvhdrq_entsize; /* size (in bytes) for each RcvHdrQ entry */ - __u16 sdma_ring_size; /* number of entries in SDMA request ring */ -}; - -struct hfi1_tid_info { - /* virtual address of first page in transfer */ - __u64 vaddr; - /* pointer to tid array. this array is big enough */ - __u64 tidlist; - /* number of tids programmed by this request */ - __u32 tidcnt; - /* length of transfer buffer programmed by this request */ - __u32 length; -}; - enum hfi1_sdma_comp_state { FREE = 0, QUEUED, @@ -289,71 +181,6 @@ struct hfi1_status { char freezemsg[0]; }; -/* - * This structure is returned by the driver immediately after - * open to get implementation-specific info, and info specific to this - * instance. - * - * This struct must have explicit pad fields where type sizes - * may result in different alignments between 32 and 64 bit - * programs, since the 64 bit * bit kernel requires the user code - * to have matching offsets - */ -struct hfi1_base_info { - /* version of hardware, for feature checking. */ - __u32 hw_version; - /* version of software, for feature checking. */ - __u32 sw_version; - /* Job key */ - __u16 jkey; - __u16 padding1; - /* - * The special QP (queue pair) value that identifies PSM - * protocol packet from standard IB packets. - */ - __u32 bthqp; - /* PIO credit return address, */ - __u64 sc_credits_addr; - /* - * Base address of write-only pio buffers for this process. - * Each buffer has sendpio_credits*64 bytes. - */ - __u64 pio_bufbase_sop; - /* - * Base address of write-only pio buffers for this process. - * Each buffer has sendpio_credits*64 bytes. - */ - __u64 pio_bufbase; - /* address where receive buffer queue is mapped into */ - __u64 rcvhdr_bufbase; - /* base address of Eager receive buffers. */ - __u64 rcvegr_bufbase; - /* base address of SDMA completion ring */ - __u64 sdma_comp_bufbase; - /* - * User register base for init code, not to be used directly by - * protocol or applications. Always maps real chip register space. 
- * the register addresses are: - * ur_rcvhdrhead, ur_rcvhdrtail, ur_rcvegrhead, ur_rcvegrtail, - * ur_rcvtidflow - */ - __u64 user_regbase; - /* notification events */ - __u64 events_bufbase; - /* status page */ - __u64 status_bufbase; - /* rcvhdrtail update */ - __u64 rcvhdrtail_base; - /* - * shared memory pages for subctxts if ctxt is shared; these cover - * all the processes in the group sharing a single context. - * all have enough space for the num_subcontexts value on this job. - */ - __u64 subctxt_uregbase; - __u64 subctxt_rcvegrbuf; - __u64 subctxt_rcvhdrbuf; -}; - enum sdma_req_opcode { EXPECTED = 0, EAGER diff --git a/include/uapi/rdma/ib_user_mad.h b/include/uapi/rdma/ib_user_mad.h index 09f809f323ea..5c7abd859e0f 100644 --- a/include/uapi/rdma/ib_user_mad.h +++ b/include/uapi/rdma/ib_user_mad.h @@ -35,7 +35,7 @@ #define IB_USER_MAD_H #include <linux/types.h> -#include <linux/ioctl.h> +#include <rdma/rdma_user_ioctl.h> /* * Increment this value if any changes that break userspace ABI @@ -230,16 +230,4 @@ struct ib_user_mad_reg_req2 { __u8 reserved[3]; }; -#define IB_IOCTL_MAGIC 0x1b - -#define IB_USER_MAD_REGISTER_AGENT _IOWR(IB_IOCTL_MAGIC, 1, \ - struct ib_user_mad_reg_req) - -#define IB_USER_MAD_UNREGISTER_AGENT _IOW(IB_IOCTL_MAGIC, 2, __u32) - -#define IB_USER_MAD_ENABLE_PKEY _IO(IB_IOCTL_MAGIC, 3) - -#define IB_USER_MAD_REGISTER_AGENT2 _IOWR(IB_IOCTL_MAGIC, 4, \ - struct ib_user_mad_reg_req2) - #endif /* IB_USER_MAD_H */ diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h index f4f87cff6dc6..997f904c7692 100644 --- a/include/uapi/rdma/ib_user_verbs.h +++ b/include/uapi/rdma/ib_user_verbs.h @@ -246,7 +246,7 @@ struct ib_uverbs_ex_query_device_resp { __u64 device_cap_flags_ex; struct ib_uverbs_rss_caps rss_caps; __u32 max_wq_type_rq; - __u32 reserved; + __u32 raw_packet_caps; }; struct ib_uverbs_query_port { @@ -934,6 +934,19 @@ struct ib_uverbs_flow_spec_ipv6 { struct ib_uverbs_flow_ipv6_filter mask; }; +struct ib_uverbs_flow_spec_action_tag { + union { + struct ib_uverbs_flow_spec_hdr hdr; + struct { + __u32 type; + __u16 size; + __u16 reserved; + }; + }; + __u32 tag_id; + __u32 reserved1; +}; + struct ib_uverbs_flow_tunnel_filter { __be32 tunnel_id; }; @@ -1053,6 +1066,8 @@ struct ib_uverbs_ex_create_wq { __u32 cq_handle; __u32 max_wr; __u32 max_sge; + __u32 create_flags; /* Use enum ib_wq_flags */ + __u32 reserved; }; struct ib_uverbs_ex_create_wq_resp { @@ -1081,6 +1096,8 @@ struct ib_uverbs_ex_modify_wq { __u32 wq_handle; __u32 wq_state; __u32 curr_wq_state; + __u32 flags; /* Use enum ib_wq_flags */ + __u32 flags_mask; /* Use enum ib_wq_flags */ }; /* Prevent memory allocation rather than max expected size */ diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h index fae6cdaeb56d..da7cd62bace7 100644 --- a/include/uapi/rdma/mlx5-abi.h +++ b/include/uapi/rdma/mlx5-abi.h @@ -61,19 +61,24 @@ enum { */ struct mlx5_ib_alloc_ucontext_req { - __u32 total_num_uuars; - __u32 num_low_latency_uuars; + __u32 total_num_bfregs; + __u32 num_low_latency_bfregs; +}; + +enum mlx5_lib_caps { + MLX5_LIB_CAP_4K_UAR = (u64)1 << 0, }; struct mlx5_ib_alloc_ucontext_req_v2 { - __u32 total_num_uuars; - __u32 num_low_latency_uuars; + __u32 total_num_bfregs; + __u32 num_low_latency_bfregs; __u32 flags; __u32 comp_mask; __u8 max_cqe_version; __u8 reserved0; __u16 reserved1; __u32 reserved2; + __u64 lib_caps; }; enum mlx5_ib_alloc_ucontext_resp_mask { @@ -85,10 +90,21 @@ enum mlx5_user_cmds_supp_uhw { MLX5_USER_CMDS_SUPP_UHW_CREATE_AH = 1 << 1, }; 
+/* The eth_min_inline response value is set to off-by-one vs the FW + * returned value to allow user-space to deal with older kernels. + */ +enum mlx5_user_inline_mode { + MLX5_USER_INLINE_MODE_NA, + MLX5_USER_INLINE_MODE_NONE, + MLX5_USER_INLINE_MODE_L2, + MLX5_USER_INLINE_MODE_IP, + MLX5_USER_INLINE_MODE_TCP_UDP, +}; + struct mlx5_ib_alloc_ucontext_resp { __u32 qp_tab_size; __u32 bf_reg_size; - __u32 tot_uuars; + __u32 tot_bfregs; __u32 cache_line_size; __u16 max_sq_desc_sz; __u16 max_rq_desc_sz; @@ -101,8 +117,11 @@ struct mlx5_ib_alloc_ucontext_resp { __u32 response_length; __u8 cqe_version; __u8 cmds_supp_uhw; - __u16 reserved2; + __u8 eth_min_inline; + __u8 reserved2; __u64 hca_core_clock_offset; + __u32 log_uar_size; + __u32 num_uars_per_page; }; struct mlx5_ib_alloc_pd_resp { @@ -241,7 +260,7 @@ struct mlx5_ib_create_qp_rss { }; struct mlx5_ib_create_qp_resp { - __u32 uuar_index; + __u32 bfreg_index; }; struct mlx5_ib_alloc_mw { diff --git a/include/uapi/rdma/rdma_user_ioctl.h b/include/uapi/rdma/rdma_user_ioctl.h new file mode 100644 index 000000000000..9388125ad51b --- /dev/null +++ b/include/uapi/rdma/rdma_user_ioctl.h @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2016 Mellanox Technologies, LTD. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef RDMA_USER_IOCTL_H +#define RDMA_USER_IOCTL_H + +#include <linux/types.h> +#include <linux/ioctl.h> +#include <rdma/ib_user_mad.h> +#include <rdma/hfi/hfi1_ioctl.h> + +/* Documentation/ioctl/ioctl-number.txt */ +#define RDMA_IOCTL_MAGIC 0x1b +/* Legacy name, for user space applications which already use it */ +#define IB_IOCTL_MAGIC RDMA_IOCTL_MAGIC + +/* + * General blocks assignments + * It is closed on purpose; do not expose it to user space + * #define MAD_CMD_BASE 0x00 + * #define HFI1_CMD_BASE 0xE0 + */ + +/* MAD specific section */ +#define IB_USER_MAD_REGISTER_AGENT _IOWR(RDMA_IOCTL_MAGIC, 0x01, struct ib_user_mad_reg_req) +#define IB_USER_MAD_UNREGISTER_AGENT _IOW(RDMA_IOCTL_MAGIC, 0x02, __u32) +#define IB_USER_MAD_ENABLE_PKEY _IO(RDMA_IOCTL_MAGIC, 0x03) +#define IB_USER_MAD_REGISTER_AGENT2 _IOWR(RDMA_IOCTL_MAGIC, 0x04, struct ib_user_mad_reg_req2) + +/* HFI specific section */ +/* allocate HFI and context */ +#define HFI1_IOCTL_ASSIGN_CTXT _IOWR(RDMA_IOCTL_MAGIC, 0xE1, struct hfi1_user_info) +/* find out what resources we got */ +#define HFI1_IOCTL_CTXT_INFO _IOW(RDMA_IOCTL_MAGIC, 0xE2, struct hfi1_ctxt_info) +/* set up userspace */ +#define HFI1_IOCTL_USER_INFO _IOW(RDMA_IOCTL_MAGIC, 0xE3, struct hfi1_base_info) +/* update expected TID entries */ +#define HFI1_IOCTL_TID_UPDATE _IOWR(RDMA_IOCTL_MAGIC, 0xE4, struct hfi1_tid_info) +/* free expected TID entries */ +#define HFI1_IOCTL_TID_FREE _IOWR(RDMA_IOCTL_MAGIC, 0xE5, struct hfi1_tid_info) +/* force an update of PIO credit */ +#define HFI1_IOCTL_CREDIT_UPD _IO(RDMA_IOCTL_MAGIC, 0xE6) +/* control receipt of packets */ +#define HFI1_IOCTL_RECV_CTRL _IOW(RDMA_IOCTL_MAGIC, 0xE8, int) +/* set the kind of polling we want */ +#define HFI1_IOCTL_POLL_TYPE _IOW(RDMA_IOCTL_MAGIC, 0xE9, int) +/* ack & clear user status bits */ +#define HFI1_IOCTL_ACK_EVENT _IOW(RDMA_IOCTL_MAGIC, 0xEA, unsigned long) +/* set context's pkey */ +#define HFI1_IOCTL_SET_PKEY _IOW(RDMA_IOCTL_MAGIC, 0xEB, __u16) +/* reset context's HW send context */ +#define HFI1_IOCTL_CTXT_RESET _IO(RDMA_IOCTL_MAGIC, 0xEC) +/* read TID cache invalidations */ +#define HFI1_IOCTL_TID_INVAL_READ _IOWR(RDMA_IOCTL_MAGIC, 0xED, struct hfi1_tid_info) +/* get the version of the user cdev */ +#define HFI1_IOCTL_GET_VERS _IOR(RDMA_IOCTL_MAGIC, 0xEE, int) + +#endif /* RDMA_USER_IOCTL_H */
diff --git a/include/video/exynos5433_decon.h b/include/video/exynos5433_decon.h index 0098a522d9f4..ef8e2a8ad0af 100644 --- a/include/video/exynos5433_decon.h +++ b/include/video/exynos5433_decon.h @@ -89,6 +89,8 @@ #define VIDCON0_ENVID_F (1 << 0) /* VIDOUTCON0 */ +#define VIDOUT_INTERLACE_FIELD_F (1 << 29) +#define VIDOUT_INTERLACE_EN_F (1 << 28) #define VIDOUT_LCD_ON (1 << 24) #define VIDOUT_IF_F_MASK (0x3 << 20) #define VIDOUT_RGB_IF (0x0 << 20)
diff --git a/include/xen/arm/hypervisor.h b/include/xen/arm/hypervisor.h index 95251512e2c4..44b587b49904 100644 --- a/include/xen/arm/hypervisor.h +++ b/include/xen/arm/hypervisor.h @@ -18,7 +18,7 @@ static inline enum paravirt_lazy_mode paravirt_get_lazy_mode(void) return PARAVIRT_LAZY_NONE; } -extern struct dma_map_ops *xen_dma_ops; +extern const struct dma_map_ops *xen_dma_ops; #ifdef CONFIG_XEN void __init xen_early_init(void);
diff --git a/include/xen/interface/grant_table.h b/include/xen/interface/grant_table.h index 56806bc90c2f..7fb7112d667c 100644 --- a/include/xen/interface/grant_table.h +++ b/include/xen/interface/grant_table.h @@ -181,7 +181,7 @@ struct grant_entry_header { }; /* - * Version 2 of the grant entry structure, here is an union because three + * Version 2 of the grant entry structure, here is a union because three * different types are supported: full_page, sub_page and transitive. */ union grant_entry_v2 {
diff --git a/include/xen/swiotlb-xen.h b/include/xen/swiotlb-xen.h index a0083be5d529..1f6d78f044b6 100644 --- a/include/xen/swiotlb-xen.h +++ b/include/xen/swiotlb-xen.h @@ -2,6 +2,7 @@ #define __LINUX_SWIOTLB_XEN_H #include <linux/dma-direction.h> +#include <linux/scatterlist.h> #include <linux/swiotlb.h> extern int xen_swiotlb_init(int verbose, bool early); @@ -55,4 +56,14 @@ xen_swiotlb_dma_supported(struct device *hwdev, u64 mask); extern int xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask); + +extern int +xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, size_t size, + unsigned long attrs); + +extern int +xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt, + void *cpu_addr, dma_addr_t handle, size_t size, + unsigned long attrs); #endif /* __LINUX_SWIOTLB_XEN_H */
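
Several of the new userspace interfaces above lend themselves to short usage sketches. For the statx() call defined by the stat.h changes, a minimal probe of a file's birth time might look as follows -- assuming a libc that exposes __NR_statx (glibc had no statx() wrapper at this point, so the raw syscall is used); the path is only illustrative:

#define _GNU_SOURCE
#include <fcntl.h>		/* AT_FDCWD */
#include <linux/stat.h>		/* struct statx, STATX_* */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct statx stx;

	/* Ask for the basic stats plus the new birth time. */
	if (syscall(__NR_statx, AT_FDCWD, "/etc/passwd", 0,
		    STATX_BASIC_STATS | STATX_BTIME, &stx) == -1) {
		perror("statx");
		return 1;
	}
	/* stx_mask reports what the filesystem actually filled in. */
	if (stx.stx_mask & STATX_BTIME)
		printf("btime: %lld\n", (long long)stx.stx_btime.tv_sec);
	else
		printf("birth time not supported here\n");
	return 0;
}

Checking stx_mask on return matters: as the header comment explains, a requested bit may come back cleared with the field merely holding a fabricated value for stat() compatibility.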
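
TCP_FASTOPEN_CONNECT lets a client use TCP Fast Open through the ordinary connect()-then-write() sequence rather than sendto() with MSG_FASTOPEN. A sketch, assuming the constant is visible via <netinet/tcp.h> (or defined by hand from the value above); address and port are placeholders:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <netinet/tcp.h>	/* TCP_FASTOPEN_CONNECT */
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int tfo_send(const char *msg)
{
	struct sockaddr_in sa = { .sin_family = AF_INET,
				  .sin_port = htons(80) };
	int one = 1;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;
	inet_pton(AF_INET, "192.0.2.1", &sa.sin_addr);

	/* Arm Fast Open before connect(); connect() then returns
	 * immediately and the first write() carries data in the SYN
	 * when a cookie is cached, falling back to a normal
	 * handshake transparently otherwise. */
	setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN_CONNECT, &one, sizeof(one));
	if (connect(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0 ||
	    write(fd, msg, strlen(msg)) < 0) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}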
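
The two new TIPC socket options take no argument, so selecting a multicast transmission method is a bare setsockopt() call; a sketch, with SOL_TIPC assumed to come from <sys/socket.h>:

#include <linux/tipc.h>
#include <sys/socket.h>

/* Pin this socket's multicast sends to replicated unicast
 * instead of letting TIPC choose the method itself. */
static int tipc_force_replicast(int sd)
{
	return setsockopt(sd, SOL_TIPC, TIPC_MCAST_REPLICAST, NULL, 0);
}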
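
The new SIOCUNIXFILE ioctl returns a fresh O_PATH descriptor for the filesystem object a bound AF_UNIX socket sits on. A sketch under the assumption that the ioctl's return value is the new descriptor; <linux/sockios.h> is included for SIOCPROTOPRIVATE:

#include <linux/sockios.h>	/* SIOCPROTOPRIVATE */
#include <linux/un.h>		/* SIOCUNIXFILE */
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <unistd.h>

/* Stat the file behind a bound unix-domain socket. */
static int stat_socket_file(int sockfd, struct stat *st)
{
	int ret;
	int pathfd = ioctl(sockfd, SIOCUNIXFILE, 0);

	if (pathfd < 0)
		return -1;
	ret = fstat(pathfd, st);
	close(pathfd);
	return ret;
}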
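
Finally, the UFFDIO_API comment above describes two ways to probe optional userfaultfd features. A sketch of method (1), demanding hugetlbfs missing-fault support up front so the handshake fails cleanly on kernels without it (names are taken from the header above; __NR_userfaultfd is assumed present):

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct uffdio_api api;
	int ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);

	if (ufd < 0) {
		perror("userfaultfd");
		return 1;
	}
	memset(&api, 0, sizeof(api));
	api.api = UFFD_API;
	/* Method 1: request the feature; failure means it is absent. */
	api.features = UFFD_FEATURE_MISSING_HUGETLBFS;
	if (ioctl(ufd, UFFDIO_API, &api) == -1) {
		perror("UFFDIO_API (no hugetlbfs missing-fault support?)");
		return 1;
	}
	/* On success the kernel reports every feature it supports. */
	printf("features 0x%llx, ioctls 0x%llx\n",
	       (unsigned long long)api.features,
	       (unsigned long long)api.ioctls);
	close(ufd);
	return 0;
}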