author | David S. Miller <davem@davemloft.net> | 2016-07-06 10:35:22 -0700
committer | David S. Miller <davem@davemloft.net> | 2016-07-06 10:35:22 -0700
commit | 30d0844bdcea9fb8b0b3c8abfa5547bc3bcf8baa (patch)
tree | 87302af9e03ee50cf135cc9ce6589f41fe3b3db1
parent | ae3e4562e2ce0149a4424c994a282955700711e7 (diff)
parent | bc86765181aa26cc9afcb0a6f9f253cbb1186f26 (diff)
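For reference, a minimal sketch (not part of the commit record itself) of how this merge commit can be inspected locally with stock git, assuming a clone that already contains the hash listed above:

```
# Show the merge commit: message, both parents, and the combined (--cc) diff
git show 30d0844bdcea9fb8b0b3c8abfa5547bc3bcf8baa

# Print the two parent hashes recorded above
git log -1 --pretty=%P 30d0844bdcea9fb8b0b3c8abfa5547bc3bcf8baa
```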
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/usb/r8152.c
All three conflicts were overlapping changes.
Signed-off-by: David S. Miller <davem@davemloft.net>
154 files changed, 1455 insertions, 618 deletions
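A minimal sketch of the kind of workflow that produces a merge like this one, assuming a checkout of the maintainer's tree and the master branch of the URL in the subject line (the exact commands and resolution steps are not recorded in the commit):

```
# Pull the 'net' tree; git stops and reports the three conflicting paths
git pull git://git.kernel.org/pub/scm/linux/kernel/git/davem/net master

# Resolve the overlapping changes by hand, then mark the files resolved
$EDITOR drivers/net/ethernet/mellanox/mlx5/core/en.h \
        drivers/net/ethernet/mellanox/mlx5/core/en_main.c \
        drivers/net/usb/r8152.c
git add drivers/net/ethernet/mellanox/mlx5/core/en.h \
        drivers/net/ethernet/mellanox/mlx5/core/en_main.c \
        drivers/net/usb/r8152.c

# Conclude the merge; -s appends the Signed-off-by trailer
git commit -s
```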
@@ -1,7 +1,7 @@ VERSION = 4 PATCHLEVEL = 7 SUBLEVEL = 0 -EXTRAVERSION = -rc5 +EXTRAVERSION = -rc6 NAME = Psychotic Stoned Sheep # *DOCUMENTATION* diff --git a/arch/arc/Makefile b/arch/arc/Makefile index d4df6be66d58..85814e74677d 100644 --- a/arch/arc/Makefile +++ b/arch/arc/Makefile @@ -66,8 +66,6 @@ endif endif -cflags-$(CONFIG_ARC_DW2_UNWIND) += -fasynchronous-unwind-tables - # By default gcc 4.8 generates dwarf4 which kernel unwinder can't grok ifeq ($(atleast_gcc48),y) cflags-$(CONFIG_ARC_DW2_UNWIND) += -gdwarf-2 diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c index e0efff15a5ae..b9192a653b7e 100644 --- a/arch/arc/kernel/stacktrace.c +++ b/arch/arc/kernel/stacktrace.c @@ -142,7 +142,7 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs, * prelogue is setup (callee regs saved and then fp set and not other * way around */ - pr_warn("CONFIG_ARC_DW2_UNWIND needs to be enabled\n"); + pr_warn_once("CONFIG_ARC_DW2_UNWIND needs to be enabled\n"); return 0; #endif diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 893941ec98dc..f1bde7c4e736 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -263,6 +263,7 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) kvm_timer_vcpu_terminate(vcpu); kvm_vgic_vcpu_destroy(vcpu); kvm_pmu_vcpu_destroy(vcpu); + kvm_vcpu_uninit(vcpu); kmem_cache_free(kvm_vcpu_cache, vcpu); } diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h index a6b611f1da43..f53816744d60 100644 --- a/arch/mips/include/asm/pgtable.h +++ b/arch/mips/include/asm/pgtable.h @@ -24,7 +24,7 @@ struct mm_struct; struct vm_area_struct; #define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_NO_READ | \ - _CACHE_CACHABLE_NONCOHERENT) + _page_cachable_default) #define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_WRITE | \ _page_cachable_default) #define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_NO_EXEC | \ @@ -476,7 +476,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) pte.pte_low &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK); pte.pte_high &= (_PFN_MASK | _CACHE_MASK); pte.pte_low |= pgprot_val(newprot) & ~_PFNX_MASK; - pte.pte_high |= pgprot_val(newprot) & ~_PFN_MASK; + pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK); return pte; } #elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) @@ -491,7 +491,8 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) #else static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { - return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)); + return __pte((pte_val(pte) & _PAGE_CHG_MASK) | + (pgprot_val(newprot) & ~_PAGE_CHG_MASK)); } #endif @@ -632,7 +633,8 @@ static inline struct page *pmd_page(pmd_t pmd) static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) { - pmd_val(pmd) = (pmd_val(pmd) & _PAGE_CHG_MASK) | pgprot_val(newprot); + pmd_val(pmd) = (pmd_val(pmd) & _PAGE_CHG_MASK) | + (pgprot_val(newprot) & ~_PAGE_CHG_MASK); return pmd; } diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index 88a5ecaa157b..ab84c89c9e98 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -230,6 +230,7 @@ extern unsigned long __kernel_virt_size; #define KERN_VIRT_SIZE __kernel_virt_size extern struct page *vmemmap; extern unsigned long ioremap_bot; +extern unsigned long pci_io_base; #endif /* __ASSEMBLY__ */ #include <asm/book3s/64/hash.h> diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c 
index b5f73cb5eeb6..d70101e1e25c 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c @@ -647,7 +647,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus, pci_unlock_rescan_remove(); } } else if (frozen_bus) { - eeh_pe_dev_traverse(pe, eeh_rmv_device, &rmv_data); + eeh_pe_dev_traverse(pe, eeh_rmv_device, rmv_data); } /* diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c index 3759df52bd67..a5ae49a2dcc4 100644 --- a/arch/powerpc/kernel/pci_64.c +++ b/arch/powerpc/kernel/pci_64.c @@ -47,7 +47,6 @@ static int __init pcibios_init(void) printk(KERN_INFO "PCI: Probing PCI hardware\n"); - pci_io_base = ISA_IO_BASE; /* For now, override phys_mem_access_prot. If we need it,g * later, we may move that initialization to each ppc_md */ diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index e2f12cbcade9..0b93893424f5 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -1505,6 +1505,16 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp) current->thread.regs = regs - 1; } +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + /* + * Clear any transactional state, we're exec()ing. The cause is + * not important as there will never be a recheckpoint so it's not + * user visible. + */ + if (MSR_TM_SUSPENDED(mfmsr())) + tm_reclaim_current(0); +#endif + memset(regs->gpr, 0, sizeof(regs->gpr)); regs->ctr = 0; regs->link = 0; diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S index bf8f34a58670..b7019b559ddb 100644 --- a/arch/powerpc/kernel/tm.S +++ b/arch/powerpc/kernel/tm.S @@ -110,17 +110,11 @@ _GLOBAL(tm_reclaim) std r3, STK_PARAM(R3)(r1) SAVE_NVGPRS(r1) - /* We need to setup MSR for VSX register save instructions. Here we - * also clear the MSR RI since when we do the treclaim, we won't have a - * valid kernel pointer for a while. We clear RI here as it avoids - * adding another mtmsr closer to the treclaim. This makes the region - * maked as non-recoverable wider than it needs to be but it saves on - * inserting another mtmsrd later. - */ + /* We need to setup MSR for VSX register save instructions. */ mfmsr r14 mr r15, r14 ori r15, r15, MSR_FP - li r16, MSR_RI + li r16, 0 ori r16, r16, MSR_EE /* IRQs hard off */ andc r15, r15, r16 oris r15, r15, MSR_VEC@h @@ -176,7 +170,17 @@ dont_backup_fp: 1: tdeqi r6, 0 EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0 - /* The moment we treclaim, ALL of our GPRs will switch + /* Clear MSR RI since we are about to change r1, EE is already off. */ + li r4, 0 + mtmsrd r4, 1 + + /* + * BE CAREFUL HERE: + * At this point we can't take an SLB miss since we have MSR_RI + * off. Load only to/from the stack/paca which are in SLB bolted regions + * until we turn MSR RI back on. + * + * The moment we treclaim, ALL of our GPRs will switch * to user register state. (FPRs, CCR etc. also!) * Use an sprg and a tm_scratch in the PACA to shuffle. */ @@ -197,6 +201,11 @@ dont_backup_fp: /* Store the PPR in r11 and reset to decent value */ std r11, GPR11(r1) /* Temporary stash */ + + /* Reset MSR RI so we can take SLB faults again */ + li r11, MSR_RI + mtmsrd r11, 1 + mfspr r11, SPRN_PPR HMT_MEDIUM @@ -397,11 +406,6 @@ restore_gprs: ld r5, THREAD_TM_DSCR(r3) ld r6, THREAD_TM_PPR(r3) - /* Clear the MSR RI since we are about to change R1. 
EE is already off - */ - li r4, 0 - mtmsrd r4, 1 - REST_GPR(0, r7) /* GPR0 */ REST_2GPRS(2, r7) /* GPR2-3 */ REST_GPR(4, r7) /* GPR4 */ @@ -439,10 +443,33 @@ restore_gprs: ld r6, _CCR(r7) mtcr r6 - REST_GPR(1, r7) /* GPR1 */ - REST_GPR(5, r7) /* GPR5-7 */ REST_GPR(6, r7) - ld r7, GPR7(r7) + + /* + * Store r1 and r5 on the stack so that we can access them + * after we clear MSR RI. + */ + + REST_GPR(5, r7) + std r5, -8(r1) + ld r5, GPR1(r7) + std r5, -16(r1) + + REST_GPR(7, r7) + + /* Clear MSR RI since we are about to change r1. EE is already off */ + li r5, 0 + mtmsrd r5, 1 + + /* + * BE CAREFUL HERE: + * At this point we can't take an SLB miss since we have MSR_RI + * off. Load only to/from the stack/paca which are in SLB bolted regions + * until we turn MSR RI back on. + */ + + ld r5, -8(r1) + ld r1, -16(r1) /* Commit register state as checkpointed state: */ TRECHKPT diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 5b22ba0b58bc..2971ea18c768 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -922,6 +922,10 @@ void __init hash__early_init_mmu(void) vmemmap = (struct page *)H_VMEMMAP_BASE; ioremap_bot = IOREMAP_BASE; +#ifdef CONFIG_PCI + pci_io_base = ISA_IO_BASE; +#endif + /* Initialize the MMU Hash table and create the linear mapping * of memory. Has to be done before SLB initialization as this is * currently where the page size encoding is obtained. diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c index e58707deef5c..7931e1496f0d 100644 --- a/arch/powerpc/mm/pgtable-radix.c +++ b/arch/powerpc/mm/pgtable-radix.c @@ -328,6 +328,11 @@ void __init radix__early_init_mmu(void) __vmalloc_end = RADIX_VMALLOC_END; vmemmap = (struct page *)RADIX_VMEMMAP_BASE; ioremap_bot = IOREMAP_BASE; + +#ifdef CONFIG_PCI + pci_io_base = ISA_IO_BASE; +#endif + /* * For now radix also use the same frag size */ diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h index fdcc04020636..7c1c89598688 100644 --- a/arch/x86/include/asm/pvclock.h +++ b/arch/x86/include/asm/pvclock.h @@ -69,29 +69,22 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift) } static __always_inline -u64 pvclock_get_nsec_offset(const struct pvclock_vcpu_time_info *src) -{ - u64 delta = rdtsc_ordered() - src->tsc_timestamp; - return pvclock_scale_delta(delta, src->tsc_to_system_mul, - src->tsc_shift); -} - -static __always_inline unsigned __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src, cycle_t *cycles, u8 *flags) { unsigned version; - cycle_t ret, offset; - u8 ret_flags; + cycle_t offset; + u64 delta; version = src->version; + /* Make the latest version visible */ + smp_rmb(); - offset = pvclock_get_nsec_offset(src); - ret = src->system_time + offset; - ret_flags = src->flags; - - *cycles = ret; - *flags = ret_flags; + delta = rdtsc_ordered() - src->tsc_timestamp; + offset = pvclock_scale_delta(delta, src->tsc_to_system_mul, + src->tsc_shift); + *cycles = src->system_time + offset; + *flags = src->flags; return version; } diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c index 99bfc025111d..06c58ce46762 100644 --- a/arch/x86/kernel/pvclock.c +++ b/arch/x86/kernel/pvclock.c @@ -61,11 +61,16 @@ void pvclock_resume(void) u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src) { unsigned version; - cycle_t ret; u8 flags; do { - version = __pvclock_read_cycles(src, &ret, &flags); + version = src->version; + /* Make the latest version visible */ + smp_rmb(); + + flags = 
src->flags; + /* Make sure that the version double-check is last. */ + smp_rmb(); } while ((src->version & 1) || version != src->version); return flags & valid_flags; @@ -80,6 +85,8 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src) do { version = __pvclock_read_cycles(src, &ret, &flags); + /* Make sure that the version double-check is last. */ + smp_rmb(); } while ((src->version & 1) || version != src->version); if (unlikely((flags & PVCLOCK_GUEST_STOPPED) != 0)) { diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index bbb5b283ff63..a397200281c1 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -1310,7 +1310,8 @@ void wait_lapic_expire(struct kvm_vcpu *vcpu) /* __delay is delay_tsc whenever the hardware has TSC, thus always. */ if (guest_tsc < tsc_deadline) - __delay(tsc_deadline - guest_tsc); + __delay(min(tsc_deadline - guest_tsc, + nsec_to_cycles(vcpu, lapic_timer_advance_ns))); } static void start_apic_timer(struct kvm_lapic *apic) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 003618e324ce..64a79f271276 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -6671,7 +6671,13 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu, /* Checks for #GP/#SS exceptions. */ exn = false; - if (is_protmode(vcpu)) { + if (is_long_mode(vcpu)) { + /* Long mode: #GP(0)/#SS(0) if the memory address is in a + * non-canonical form. This is the only check on the memory + * destination for long mode! + */ + exn = is_noncanonical_address(*ret); + } else if (is_protmode(vcpu)) { /* Protected mode: apply checks for segment validity in the * following order: * - segment type check (#GP(0) may be thrown) @@ -6688,17 +6694,10 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu, * execute-only code segment */ exn = ((s.type & 0xa) == 8); - } - if (exn) { - kvm_queue_exception_e(vcpu, GP_VECTOR, 0); - return 1; - } - if (is_long_mode(vcpu)) { - /* Long mode: #GP(0)/#SS(0) if the memory address is in a - * non-canonical form. This is an only check for long mode. - */ - exn = is_noncanonical_address(*ret); - } else if (is_protmode(vcpu)) { + if (exn) { + kvm_queue_exception_e(vcpu, GP_VECTOR, 0); + return 1; + } /* Protected mode: #GP(0)/#SS(0) if the segment is unusable. 
*/ exn = (s.unusable != 0); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 902d9da12392..7da5dd2057a9 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1244,12 +1244,6 @@ static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0); static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz); static unsigned long max_tsc_khz; -static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec) -{ - return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult, - vcpu->arch.virtual_tsc_shift); -} - static u32 adjust_tsc_khz(u32 khz, s32 ppm) { u64 v = (u64)khz * (1000000 + ppm); diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index 7ce3634ab5fe..a82ca466b62e 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -2,6 +2,7 @@ #define ARCH_X86_KVM_X86_H #include <linux/kvm_host.h> +#include <asm/pvclock.h> #include "kvm_cache_regs.h" #define MSR_IA32_CR_PAT_DEFAULT 0x0007040600070406ULL @@ -195,6 +196,12 @@ extern unsigned int lapic_timer_advance_ns; extern struct static_key kvm_no_apic_vcpu; +static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec) +{ + return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult, + vcpu->arch.virtual_tsc_shift); +} + /* Same "calling convention" as do_div: * - divide (n << 32) by base * - put result in n diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c index 2215fc847fa9..ac6ddcc080d4 100644 --- a/drivers/acpi/nfit.c +++ b/drivers/acpi/nfit.c @@ -928,7 +928,7 @@ static ssize_t format_show(struct device *dev, { struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); - return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->code)); + return sprintf(buf, "0x%04x\n", le16_to_cpu(dcr->code)); } static DEVICE_ATTR_RO(format); @@ -961,8 +961,8 @@ static ssize_t format1_show(struct device *dev, continue; if (nfit_dcr->dcr->code == dcr->code) continue; - rc = sprintf(buf, "%#x\n", - be16_to_cpu(nfit_dcr->dcr->code)); + rc = sprintf(buf, "0x%04x\n", + le16_to_cpu(nfit_dcr->dcr->code)); break; } if (rc != ENXIO) @@ -1131,11 +1131,11 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, /* * Until standardization materializes we need to consider up to 3 - * different command sets. Note, that checking for function0 (bit0) - * tells us if any commands are reachable through this uuid. + * different command sets. Note, that checking for zero functions + * tells us if any commands might be reachable through this uuid. */ for (i = NVDIMM_FAMILY_INTEL; i <= NVDIMM_FAMILY_HPE2; i++) - if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1)) + if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 0)) break; /* limit the supported commands to those that are publicly documented */ diff --git a/drivers/acpi/nfit.h b/drivers/acpi/nfit.h index 11cb38348aef..02b9ea1e8d2e 100644 --- a/drivers/acpi/nfit.h +++ b/drivers/acpi/nfit.h @@ -53,12 +53,12 @@ enum nfit_uuids { }; /* - * Region format interface codes are stored as an array of bytes in the - * NFIT DIMM Control Region structure + * Region format interface codes are stored with the interface as the + * LSB and the function as the MSB. 
*/ -#define NFIT_FIC_BYTE cpu_to_be16(0x101) /* byte-addressable energy backed */ -#define NFIT_FIC_BLK cpu_to_be16(0x201) /* block-addressable non-energy backed */ -#define NFIT_FIC_BYTEN cpu_to_be16(0x301) /* byte-addressable non-energy backed */ +#define NFIT_FIC_BYTE cpu_to_le16(0x101) /* byte-addressable energy backed */ +#define NFIT_FIC_BLK cpu_to_le16(0x201) /* block-addressable non-energy backed */ +#define NFIT_FIC_BYTEN cpu_to_le16(0x301) /* byte-addressable non-energy backed */ enum { NFIT_BLK_READ_FLUSH = 1, diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c index 8fc7323ed3e8..4ed4061813e6 100644 --- a/drivers/acpi/pci_link.c +++ b/drivers/acpi/pci_link.c @@ -839,7 +839,7 @@ void acpi_penalize_isa_irq(int irq, int active) { if ((irq >= 0) && (irq < ARRAY_SIZE(acpi_isa_irq_penalty))) acpi_isa_irq_penalty[irq] = acpi_irq_get_penalty(irq) + - active ? PIRQ_PENALTY_ISA_USED : PIRQ_PENALTY_PCI_USING; + (active ? PIRQ_PENALTY_ISA_USED : PIRQ_PENALTY_PCI_USING); } bool acpi_isa_irq_available(int irq) diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c index 22c09952e177..b4de130f2d57 100644 --- a/drivers/acpi/utils.c +++ b/drivers/acpi/utils.c @@ -680,9 +680,6 @@ bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, u64 rev, u64 funcs) u64 mask = 0; union acpi_object *obj; - if (funcs == 0) - return false; - obj = acpi_evaluate_dsm(handle, uuid, rev, 0, NULL); if (!obj) return false; @@ -695,6 +692,9 @@ bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, u64 rev, u64 funcs) mask |= (((u64)obj->buffer.pointer[i]) << (i * 8)); ACPI_FREE(obj); + if (funcs == 0) + return true; + /* * Bit 0 indicates whether there's support for any functions other than * function 0 for the specified UUID and revision. diff --git a/drivers/clk/clk-oxnas.c b/drivers/clk/clk-oxnas.c index efba7d4dbcfc..79bcb2e42060 100644 --- a/drivers/clk/clk-oxnas.c +++ b/drivers/clk/clk-oxnas.c @@ -144,9 +144,9 @@ static int oxnas_stdclk_probe(struct platform_device *pdev) return -ENOMEM; regmap = syscon_node_to_regmap(of_get_parent(np)); - if (!regmap) { + if (IS_ERR(regmap)) { dev_err(&pdev->dev, "failed to have parent regmap\n"); - return -EINVAL; + return PTR_ERR(regmap); } for (i = 0; i < ARRAY_SIZE(clk_oxnas_init); i++) { diff --git a/drivers/clk/rockchip/clk-cpu.c b/drivers/clk/rockchip/clk-cpu.c index 4bb130cd0062..05b3d73bfefa 100644 --- a/drivers/clk/rockchip/clk-cpu.c +++ b/drivers/clk/rockchip/clk-cpu.c @@ -321,9 +321,9 @@ struct clk *rockchip_clk_register_cpuclk(const char *name, } cclk = clk_register(NULL, &cpuclk->hw); - if (IS_ERR(clk)) { + if (IS_ERR(cclk)) { pr_err("%s: could not register cpuclk %s\n", __func__, name); - ret = PTR_ERR(clk); + ret = PTR_ERR(cclk); goto free_rate_table; } diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c index bc856f21f6b2..077fcdc7908b 100644 --- a/drivers/clk/rockchip/clk-mmc-phase.c +++ b/drivers/clk/rockchip/clk-mmc-phase.c @@ -41,8 +41,6 @@ static unsigned long rockchip_mmc_recalc(struct clk_hw *hw, #define ROCKCHIP_MMC_DEGREE_MASK 0x3 #define ROCKCHIP_MMC_DELAYNUM_OFFSET 2 #define ROCKCHIP_MMC_DELAYNUM_MASK (0xff << ROCKCHIP_MMC_DELAYNUM_OFFSET) -#define ROCKCHIP_MMC_INIT_STATE_RESET 0x1 -#define ROCKCHIP_MMC_INIT_STATE_SHIFT 1 #define PSECS_PER_SEC 1000000000000LL @@ -154,6 +152,7 @@ struct clk *rockchip_clk_register_mmc(const char *name, return ERR_PTR(-ENOMEM); init.name = name; + init.flags = 0; init.num_parents = num_parents; init.parent_names = parent_names; init.ops = &rockchip_mmc_clk_ops; @@ 
-162,15 +161,6 @@ struct clk *rockchip_clk_register_mmc(const char *name, mmc_clock->reg = reg; mmc_clock->shift = shift; - /* - * Assert init_state to soft reset the CLKGEN - * for mmc tuning phase and degree - */ - if (mmc_clock->shift == ROCKCHIP_MMC_INIT_STATE_SHIFT) - writel(HIWORD_UPDATE(ROCKCHIP_MMC_INIT_STATE_RESET, - ROCKCHIP_MMC_INIT_STATE_RESET, - mmc_clock->shift), mmc_clock->reg); - clk = clk_register(NULL, &mmc_clock->hw); if (IS_ERR(clk)) kfree(mmc_clock); diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c index 291543f52caa..8059a8d3ea36 100644 --- a/drivers/clk/rockchip/clk-rk3399.c +++ b/drivers/clk/rockchip/clk-rk3399.c @@ -832,9 +832,9 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = { RK3399_CLKGATE_CON(13), 1, GFLAGS), /* perihp */ - GATE(0, "cpll_aclk_perihp_src", "gpll", CLK_IGNORE_UNUSED, + GATE(0, "cpll_aclk_perihp_src", "cpll", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(5), 0, GFLAGS), - GATE(0, "gpll_aclk_perihp_src", "cpll", CLK_IGNORE_UNUSED, + GATE(0, "gpll_aclk_perihp_src", "gpll", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(5), 1, GFLAGS), COMPOSITE(ACLK_PERIHP, "aclk_perihp", mux_aclk_perihp_p, CLK_IGNORE_UNUSED, RK3399_CLKSEL_CON(14), 7, 1, MFLAGS, 0, 5, DFLAGS, @@ -1466,6 +1466,8 @@ static struct rockchip_clk_branch rk3399_clk_pmu_branches[] __initdata = { static const char *const rk3399_cru_critical_clocks[] __initconst = { "aclk_cci_pre", + "aclk_gic", + "aclk_gic_noc", "pclk_perilp0", "pclk_perilp0", "hclk_perilp0", @@ -1508,6 +1510,7 @@ static void __init rk3399_clk_init(struct device_node *np) ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS); if (IS_ERR(ctx)) { pr_err("%s: rockchip clk init failed\n", __func__); + iounmap(reg_base); return; } @@ -1553,6 +1556,7 @@ static void __init rk3399_pmu_clk_init(struct device_node *np) ctx = rockchip_clk_init(np, reg_base, CLKPMU_NR_CLKS); if (IS_ERR(ctx)) { pr_err("%s: rockchip pmu clk init failed\n", __func__); + iounmap(reg_base); return; } diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index 3646b143bbf5..0bb44d5b5df4 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -79,15 +79,16 @@ static const struct of_device_id machines[] __initconst = { static int __init cpufreq_dt_platdev_init(void) { struct device_node *np = of_find_node_by_path("/"); + const struct of_device_id *match; if (!np) return -ENODEV; - if (!of_match_node(machines, np)) + match = of_match_node(machines, np); + of_node_put(np); + if (!match) return -ENODEV; - of_node_put(of_root); - return PTR_ERR_OR_ZERO(platform_device_register_simple("cpufreq-dt", -1, NULL, 0)); } diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 9009295f5134..5617c7087d77 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -2261,6 +2261,10 @@ int cpufreq_update_policy(unsigned int cpu) * -> ask driver for current freq and notify governors about a change */ if (cpufreq_driver->get && !cpufreq_driver->setpolicy) { + if (cpufreq_suspended) { + ret = -EAGAIN; + goto unlock; + } new_policy.cur = cpufreq_update_current_freq(policy); if (WARN_ON(!new_policy.cur)) { ret = -EIO; diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index fe9dc17ea873..1fa1a32928d7 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -1400,6 +1400,9 @@ static void intel_pstate_set_update_util_hook(unsigned int cpu_num) { struct cpudata *cpu = all_cpu_data[cpu_num]; 
+ if (cpu->update_util_set) + return; + /* Prevent intel_pstate_update_util() from using stale data. */ cpu->sample.time = 0; cpufreq_add_update_util_hook(cpu_num, &cpu->update_util, @@ -1440,8 +1443,6 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) if (!policy->cpuinfo.max_freq) return -ENODEV; - intel_pstate_clear_update_util_hook(policy->cpu); - pr_debug("set_policy cpuinfo.max %u policy->max %u\n", policy->cpuinfo.max_freq, policy->max); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index e19520c4b4b6..d9c88d13f8db 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -1106,6 +1106,10 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work) if (fences == 0 && handles == 0) { if (adev->pm.dpm_enabled) { amdgpu_dpm_enable_uvd(adev, false); + /* just work around for uvd clock remain high even + * when uvd dpm disabled on Polaris10 */ + if (adev->asic_type == CHIP_POLARIS10) + amdgpu_asic_set_uvd_clocks(adev, 0, 0); } else { amdgpu_asic_set_uvd_clocks(adev, 0, 0); } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 1a5cbaff1e34..b2ebd4fef6cf 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -47,6 +47,8 @@ #include "dce/dce_10_0_d.h" #include "dce/dce_10_0_sh_mask.h" +#include "smu/smu_7_1_3_d.h" + #define GFX8_NUM_GFX_RINGS 1 #define GFX8_NUM_COMPUTE_RINGS 8 @@ -693,6 +695,7 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev) amdgpu_program_register_sequence(adev, polaris10_golden_common_all, (const u32)ARRAY_SIZE(polaris10_golden_common_all)); + WREG32_SMC(ixCG_ACLK_CNTL, 0x0000001C); break; case CHIP_CARRIZO: amdgpu_program_register_sequence(adev, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c index 64ee78f7d41e..ec2a7ada346a 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c @@ -98,6 +98,7 @@ #define PCIE_BUS_CLK 10000 #define TCLK (PCIE_BUS_CLK / 10) +#define CEILING_UCHAR(double) ((double-(uint8_t)(double)) > 0 ? 
(uint8_t)(double+1) : (uint8_t)(double)) static const uint16_t polaris10_clock_stretcher_lookup_table[2][4] = { {600, 1050, 3, 0}, {600, 1050, 6, 1} }; @@ -1422,22 +1423,19 @@ static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr, table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC; - if (!data->sclk_dpm_key_disabled) { - /* Get MinVoltage and Frequency from DPM0, - * already converted to SMC_UL */ - sclk_frequency = data->dpm_table.sclk_table.dpm_levels[0].value; - result = polaris10_get_dependency_volt_by_clk(hwmgr, - table_info->vdd_dep_on_sclk, - table->ACPILevel.SclkFrequency, - &table->ACPILevel.MinVoltage, &mvdd); - PP_ASSERT_WITH_CODE((0 == result), - "Cannot find ACPI VDDC voltage value " - "in Clock Dependency Table", ); - } else { - sclk_frequency = data->vbios_boot_state.sclk_bootup_value; - table->ACPILevel.MinVoltage = - data->vbios_boot_state.vddc_bootup_value * VOLTAGE_SCALE; - } + + /* Get MinVoltage and Frequency from DPM0, + * already converted to SMC_UL */ + sclk_frequency = data->dpm_table.sclk_table.dpm_levels[0].value; + result = polaris10_get_dependency_volt_by_clk(hwmgr, + table_info->vdd_dep_on_sclk, + sclk_frequency, + &table->ACPILevel.MinVoltage, &mvdd); + PP_ASSERT_WITH_CODE((0 == result), + "Cannot find ACPI VDDC voltage value " + "in Clock Dependency Table", + ); + result = polaris10_calculate_sclk_params(hwmgr, sclk_frequency, &(table->ACPILevel.SclkSetting)); PP_ASSERT_WITH_CODE(result == 0, "Error retrieving Engine Clock dividers from VBIOS.", return result); @@ -1462,24 +1460,18 @@ static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr, CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_frac); CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_ss_slew_rate); - if (!data->mclk_dpm_key_disabled) { - /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */ - table->MemoryACPILevel.MclkFrequency = - data->dpm_table.mclk_table.dpm_levels[0].value; - result = polaris10_get_dependency_volt_by_clk(hwmgr, - table_info->vdd_dep_on_mclk, - table->MemoryACPILevel.MclkFrequency, - &table->MemoryACPILevel.MinVoltage, &mvdd); - PP_ASSERT_WITH_CODE((0 == result), - "Cannot find ACPI VDDCI voltage value " - "in Clock Dependency Table", - ); - } else { - table->MemoryACPILevel.MclkFrequency = - data->vbios_boot_state.mclk_bootup_value; - table->MemoryACPILevel.MinVoltage = - data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE; - } + + /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */ + table->MemoryACPILevel.MclkFrequency = + data->dpm_table.mclk_table.dpm_levels[0].value; + result = polaris10_get_dependency_volt_by_clk(hwmgr, + table_info->vdd_dep_on_mclk, + table->MemoryACPILevel.MclkFrequency, + &table->MemoryACPILevel.MinVoltage, &mvdd); + PP_ASSERT_WITH_CODE((0 == result), + "Cannot find ACPI VDDCI voltage value " + "in Clock Dependency Table", + ); us_mvdd = 0; if ((POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control) || @@ -1524,6 +1516,7 @@ static int polaris10_populate_smc_vce_level(struct pp_hwmgr *hwmgr, struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = table_info->mm_dep_table; struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); + uint32_t vddci; table->VceLevelCount = (uint8_t)(mm_table->count); table->VceBootLevel = 0; @@ -1533,9 +1526,18 @@ static int polaris10_populate_smc_vce_level(struct pp_hwmgr *hwmgr, table->VceLevel[count].MinVoltage = 0; table->VceLevel[count].MinVoltage |= (mm_table->entries[count].vddc * VOLTAGE_SCALE) 
<< VDDC_SHIFT; + + if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) + vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table), + mm_table->entries[count].vddc - VDDC_VDDCI_DELTA); + else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) + vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA; + else + vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT; + + table->VceLevel[count].MinVoltage |= - ((mm_table->entries[count].vddc - data->vddc_vddci_delta) * - VOLTAGE_SCALE) << VDDCI_SHIFT; + (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT; /*retrieve divider value for VBIOS */ @@ -1564,6 +1566,7 @@ static int polaris10_populate_smc_samu_level(struct pp_hwmgr *hwmgr, struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = table_info->mm_dep_table; struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); + uint32_t vddci; table->SamuBootLevel = 0; table->SamuLevelCount = (uint8_t)(mm_table->count); @@ -1574,8 +1577,16 @@ static int polaris10_populate_smc_samu_level(struct pp_hwmgr *hwmgr, table->SamuLevel[count].Frequency = mm_table->entries[count].samclock; table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT; - table->SamuLevel[count].MinVoltage |= ((mm_table->entries[count].vddc - - data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT; + + if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) + vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table), + mm_table->entries[count].vddc - VDDC_VDDCI_DELTA); + else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) + vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA; + else + vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT; + + table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT; /* retrieve divider value for VBIOS */ @@ -1658,6 +1669,7 @@ static int polaris10_populate_smc_uvd_level(struct pp_hwmgr *hwmgr, struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = table_info->mm_dep_table; struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); + uint32_t vddci; table->UvdLevelCount = (uint8_t)(mm_table->count); table->UvdBootLevel = 0; @@ -1668,8 +1680,16 @@ static int polaris10_populate_smc_uvd_level(struct pp_hwmgr *hwmgr, table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk; table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT; - table->UvdLevel[count].MinVoltage |= ((mm_table->entries[count].vddc - - data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT; + + if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) + vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table), + mm_table->entries[count].vddc - VDDC_VDDCI_DELTA); + else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) + vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA; + else + vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT; + + table->UvdLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT; /* retrieve divider value for VBIOS */ @@ -1690,8 +1710,8 @@ static int polaris10_populate_smc_uvd_level(struct pp_hwmgr *hwmgr, CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency); 
CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency); CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage); - } + return result; } @@ -1787,24 +1807,32 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) ro = efuse * (max -min)/255 + min; - /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */ + /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset + * there is a little difference in calculating + * volt_with_cks with windows */ for (i = 0; i < sclk_table->count; i++) { data->smc_state_table.Sclk_CKS_masterEn0_7 |= sclk_table->entries[i].cks_enable << i; - - volt_without_cks = (uint32_t)(((ro - 40) * 1000 - 2753594 - sclk_table->entries[i].clk/100 * 136418 /1000) / \ - (sclk_table->entries[i].clk/100 * 1132925 /10000 - 242418)/100); - - volt_with_cks = (uint32_t)((ro * 1000 -2396351 - sclk_table->entries[i].clk/100 * 329021/1000) / \ - (sclk_table->entries[i].clk/10000 * 649434 /1000 - 18005)/10); + if (hwmgr->chip_id == CHIP_POLARIS10) { + volt_without_cks = (uint32_t)((2753594000 + (sclk_table->entries[i].clk/100) * 136418 -(ro - 70) * 1000000) / \ + (2424180 - (sclk_table->entries[i].clk/100) * 1132925/1000)); + volt_with_cks = (uint32_t)((279720200 + sclk_table->entries[i].clk * 3232 - (ro - 65) * 100000000) / \ + (252248000 - sclk_table->entries[i].clk/100 * 115764)); + } else { + volt_without_cks = (uint32_t)((2416794800 + (sclk_table->entries[i].clk/100) * 1476925/10 -(ro - 50) * 1000000) / \ + (2625416 - (sclk_table->entries[i].clk/100) * 12586807/10000)); + volt_with_cks = (uint32_t)((2999656000 + sclk_table->entries[i].clk * 392803/100 - (ro - 44) * 1000000) / \ + (3422454 - sclk_table->entries[i].clk/100 * 18886376/10000)); + } if (volt_without_cks >= volt_with_cks) - volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks + - sclk_table->entries[i].cks_voffset) * 100 / 625) + 1); + volt_offset = (uint8_t)CEILING_UCHAR((volt_without_cks - volt_with_cks + + sclk_table->entries[i].cks_voffset) * 100 / 625); data->smc_state_table.Sclk_voltageOffset[i] = volt_offset; } + data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? 
table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6; /* Populate CKS Lookup Table */ if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5) stretch_amount2 = 0; @@ -2487,6 +2515,8 @@ int polaris10_enable_dpm_tasks(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to enable VR hot GPIO interrupt!", result = tmp_result); + smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay); + tmp_result = polaris10_enable_sclk_control(hwmgr); PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to enable SCLK control!", result = tmp_result); @@ -2913,6 +2943,31 @@ static int polaris10_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr) return 0; } +int polaris10_patch_voltage_workaround(struct pp_hwmgr *hwmgr) +{ + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table = + table_info->vdd_dep_on_mclk; + struct phm_ppt_v1_voltage_lookup_table *lookup_table = + table_info->vddc_lookup_table; + uint32_t i; + + if (hwmgr->chip_id == CHIP_POLARIS10 && hwmgr->hw_revision == 0xC7) { + if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000) + return 0; + + for (i = 0; i < lookup_table->count; i++) { + if (lookup_table->entries[i].us_vdd < 0xff01 && lookup_table->entries[i].us_vdd >= 1000) { + dep_mclk_table->entries[dep_mclk_table->count-1].vddInd = (uint8_t) i; + return 0; + } + } + } + return 0; +} + + int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr) { struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); @@ -2990,6 +3045,7 @@ int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr) polaris10_set_features_platform_caps(hwmgr); + polaris10_patch_voltage_workaround(hwmgr); polaris10_init_dpm_defaults(hwmgr); /* Get leakage voltage based on leakage ID. */ @@ -4359,6 +4415,15 @@ static int polaris10_notify_link_speed_change_after_state_change( return 0; } +static int polaris10_notify_smc_display(struct pp_hwmgr *hwmgr) +{ + struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); + + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2); + return (smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL; +} + static int polaris10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input) { int tmp_result, result = 0; @@ -4407,6 +4472,11 @@ static int polaris10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *i "Failed to program memory timing parameters!", result = tmp_result); + tmp_result = polaris10_notify_smc_display(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to notify smc display settings!", + result = tmp_result); + tmp_result = polaris10_unfreeze_sclk_mclk_dpm(hwmgr); PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to unfreeze SCLK MCLK DPM!", @@ -4441,6 +4511,7 @@ static int polaris10_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_ PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm); } + int polaris10_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display) { PPSMC_Msg msg = has_display ? 
(PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay; @@ -4460,8 +4531,6 @@ int polaris10_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwm if (num_active_displays > 1) /* to do && (pHwMgr->pPECI->displayConfiguration.bMultiMonitorInSync != TRUE)) */ polaris10_notify_smc_display_change(hwmgr, false); - else - polaris10_notify_smc_display_change(hwmgr, true); return 0; } @@ -4502,6 +4571,8 @@ int polaris10_program_display_gap(struct pp_hwmgr *hwmgr) frame_time_in_us = 1000000 / refresh_rate; pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us; + data->frame_time_x2 = frame_time_in_us * 2 / 100; + display_gap2 = pre_vbi_time_in_us * (ref_clock / 100); cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2); @@ -4510,8 +4581,6 @@ int polaris10_program_display_gap(struct pp_hwmgr *hwmgr) cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU74_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us)); - polaris10_notify_smc_display_change(hwmgr, num_active_displays != 0); - return 0; } @@ -4623,7 +4692,7 @@ int polaris10_upload_mc_firmware(struct pp_hwmgr *hwmgr) return 0; } - data->need_long_memory_training = true; + data->need_long_memory_training = false; /* * PPMCME_FirmwareDescriptorEntry *pfd = NULL; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h index d717789441f5..afc3434822d1 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h @@ -315,6 +315,7 @@ struct polaris10_hwmgr { uint32_t avfs_vdroop_override_setting; bool apply_avfs_cks_off_voltage; + uint32_t frame_time_x2; }; /* To convert to Q8.8 format for firmware */ diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index 28f571449495..77e8e33d5870 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -411,6 +411,8 @@ struct phm_cac_tdp_table { uint8_t ucVr_I2C_Line; uint8_t ucPlx_I2C_address; uint8_t ucPlx_I2C_Line; + uint32_t usBoostPowerLimit; + uint8_t ucCKS_LDO_REFSEL; }; struct phm_ppm_table { diff --git a/drivers/gpu/drm/amd/powerplay/inc/polaris10_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/polaris10_ppsmc.h index d41d37ab5b7c..b8f4b73c322e 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/polaris10_ppsmc.h +++ b/drivers/gpu/drm/amd/powerplay/inc/polaris10_ppsmc.h @@ -392,6 +392,8 @@ typedef uint16_t PPSMC_Result; #define PPSMC_MSG_SetGpuPllDfsForSclk ((uint16_t) 0x300) #define PPSMC_MSG_Didt_Block_Function ((uint16_t) 0x301) +#define PPSMC_MSG_SetVBITimeout ((uint16_t) 0x306) + #define PPSMC_MSG_SecureSRBMWrite ((uint16_t) 0x600) #define PPSMC_MSG_SecureSRBMRead ((uint16_t) 0x601) #define PPSMC_MSG_SetAddress ((uint16_t) 0x800) diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu74_discrete.h b/drivers/gpu/drm/amd/powerplay/inc/smu74_discrete.h index b85ff5400e57..899d6d8108c2 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu74_discrete.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu74_discrete.h @@ -270,7 +270,8 @@ struct SMU74_Discrete_DpmTable { uint8_t BootPhases; uint8_t VRHotLevel; - uint8_t Reserved1[3]; + uint8_t LdoRefSel; + uint8_t Reserved1[2]; uint16_t FanStartTemperature; uint16_t FanStopTemperature; uint16_t MaxVoltage; diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 32690332d441..103546834b60 100644 
--- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -2365,16 +2365,16 @@ static int i915_ppgtt_info(struct seq_file *m, void *data) task = get_pid_task(file->pid, PIDTYPE_PID); if (!task) { ret = -ESRCH; - goto out_put; + goto out_unlock; } seq_printf(m, "\nproc: %s\n", task->comm); put_task_struct(task); idr_for_each(&file_priv->context_idr, per_file_ctx, (void *)(unsigned long)m); } +out_unlock: mutex_unlock(&dev->filelist_mutex); -out_put: intel_runtime_pm_put(dev_priv); mutex_unlock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 56a1637c864f..04452cf3eae8 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -8447,16 +8447,16 @@ static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv) tmp |= FDI_MPHY_IOSFSB_RESET_CTL; I915_WRITE(SOUTH_CHICKEN2, tmp); - if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) & - FDI_MPHY_IOSFSB_RESET_STATUS, 100)) + if (wait_for_us(I915_READ(SOUTH_CHICKEN2) & + FDI_MPHY_IOSFSB_RESET_STATUS, 100)) DRM_ERROR("FDI mPHY reset assert timeout\n"); tmp = I915_READ(SOUTH_CHICKEN2); tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL; I915_WRITE(SOUTH_CHICKEN2, tmp); - if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) & - FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100)) + if (wait_for_us((I915_READ(SOUTH_CHICKEN2) & + FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100)) DRM_ERROR("FDI mPHY reset de-assert timeout\n"); } @@ -9440,8 +9440,8 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv, val |= LCPLL_CD_SOURCE_FCLK; I915_WRITE(LCPLL_CTL, val); - if (wait_for_atomic_us(I915_READ(LCPLL_CTL) & - LCPLL_CD_SOURCE_FCLK_DONE, 1)) + if (wait_for_us(I915_READ(LCPLL_CTL) & + LCPLL_CD_SOURCE_FCLK_DONE, 1)) DRM_ERROR("Switching to FCLK failed\n"); val = I915_READ(LCPLL_CTL); @@ -9514,8 +9514,8 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) val &= ~LCPLL_CD_SOURCE_FCLK; I915_WRITE(LCPLL_CTL, val); - if (wait_for_atomic_us((I915_READ(LCPLL_CTL) & - LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) + if (wait_for_us((I915_READ(LCPLL_CTL) & + LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) DRM_ERROR("Switching back to LCPLL failed\n"); } diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 79cf2d5f5a20..40745e38d438 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -663,7 +663,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq) done = wait_event_timeout(dev_priv->gmbus_wait_queue, C, msecs_to_jiffies_timeout(10)); else - done = wait_for_atomic(C, 10) == 0; + done = wait_for(C, 10) == 0; if (!done) DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n", has_aux_irq); @@ -4899,13 +4899,15 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp) void intel_dp_encoder_reset(struct drm_encoder *encoder) { - struct intel_dp *intel_dp; + struct drm_i915_private *dev_priv = to_i915(encoder->dev); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + + if (!HAS_DDI(dev_priv)) + intel_dp->DP = I915_READ(intel_dp->output_reg); if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP) return; - intel_dp = enc_to_intel_dp(encoder); - pps_lock(intel_dp); /* diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c index baf6f5584cbd..58f60b27837e 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c @@ -1377,8 +1377,8 @@ static void bxt_ddi_pll_enable(struct drm_i915_private 
*dev_priv, I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp); POSTING_READ(BXT_PORT_PLL_ENABLE(port)); - if (wait_for_atomic_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) & - PORT_PLL_LOCK), 200)) + if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK), + 200)) DRM_ERROR("PLL %d not locked\n", port); /* diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c index 923f56598d4b..3a9f106787d2 100644 --- a/drivers/iio/accel/kxsd9.c +++ b/drivers/iio/accel/kxsd9.c @@ -81,7 +81,7 @@ static int kxsd9_write_scale(struct iio_dev *indio_dev, int micro) mutex_lock(&st->buf_lock); ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C)); - if (ret) + if (ret < 0) goto error_ret; st->tx[0] = KXSD9_WRITE(KXSD9_REG_CTRL_C); st->tx[1] = (ret & ~KXSD9_FS_MASK) | i; @@ -163,7 +163,7 @@ static int kxsd9_read_raw(struct iio_dev *indio_dev, break; case IIO_CHAN_INFO_SCALE: ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C)); - if (ret) + if (ret < 0) goto error_ret; *val2 = kxsd9_micro_scales[ret & KXSD9_FS_MASK]; ret = IIO_VAL_INT_PLUS_MICRO; diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c index 21e19b60e2b9..2123f0ac2e2a 100644 --- a/drivers/iio/adc/ad7266.c +++ b/drivers/iio/adc/ad7266.c @@ -396,8 +396,8 @@ static int ad7266_probe(struct spi_device *spi) st = iio_priv(indio_dev); - st->reg = devm_regulator_get(&spi->dev, "vref"); - if (!IS_ERR_OR_NULL(st->reg)) { + st->reg = devm_regulator_get_optional(&spi->dev, "vref"); + if (!IS_ERR(st->reg)) { ret = regulator_enable(st->reg); if (ret) return ret; @@ -408,6 +408,9 @@ static int ad7266_probe(struct spi_device *spi) st->vref_mv = ret / 1000; } else { + /* Any other error indicates that the regulator does exist */ + if (PTR_ERR(st->reg) != -ENODEV) + return PTR_ERR(st->reg); /* Use internal reference */ st->vref_mv = 2500; } diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c index f62b8bd9ad7e..dd6fc6d21f9d 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c @@ -56,6 +56,7 @@ static int asus_acpi_get_sensor_info(struct acpi_device *adev, int i; acpi_status status; union acpi_object *cpm; + int ret; status = acpi_evaluate_object(adev->handle, "CNF0", NULL, &buffer); if (ACPI_FAILURE(status)) @@ -82,10 +83,10 @@ static int asus_acpi_get_sensor_info(struct acpi_device *adev, } } } - + ret = cpm->package.count; kfree(buffer.pointer); - return cpm->package.count; + return ret; } static int acpi_i2c_check_resource(struct acpi_resource *ares, void *data) diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 9e0034196e10..d091defc3426 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -1107,13 +1107,13 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu, break; } + devid = e->devid; DUMP_printk(" DEV_ACPI_HID(%s[%s])\t\tdevid: %02x:%02x.%x\n", hid, uid, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid)); - devid = e->devid; flags = e->flags; ret = add_acpi_hid_device(hid, uid, &devid, false); diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 10700945994e..cfe410eedaf0 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -4607,7 +4607,7 @@ static void free_all_cpu_cached_iovas(unsigned int cpu) if (!iommu) continue; - for (did = 0; did < 0xffff; did++) { + for (did = 0; did < cap_ndoms(iommu->cap); did++) { domain = get_iommu_domain(iommu, did); if (!domain) diff --git a/drivers/iommu/iova.c 
b/drivers/iommu/iova.c index ba764a0835d3..e23001bfcfee 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c @@ -420,8 +420,10 @@ retry: /* Try replenishing IOVAs by flushing rcache. */ flushed_rcache = true; + preempt_disable(); for_each_online_cpu(cpu) free_cpu_cached_iovas(cpu, iovad); + preempt_enable(); goto retry; } @@ -749,7 +751,7 @@ static bool __iova_rcache_insert(struct iova_domain *iovad, bool can_insert = false; unsigned long flags; - cpu_rcache = this_cpu_ptr(rcache->cpu_rcaches); + cpu_rcache = get_cpu_ptr(rcache->cpu_rcaches); spin_lock_irqsave(&cpu_rcache->lock, flags); if (!iova_magazine_full(cpu_rcache->loaded)) { @@ -779,6 +781,7 @@ static bool __iova_rcache_insert(struct iova_domain *iovad, iova_magazine_push(cpu_rcache->loaded, iova_pfn); spin_unlock_irqrestore(&cpu_rcache->lock, flags); + put_cpu_ptr(rcache->cpu_rcaches); if (mag_to_free) { iova_magazine_free_pfns(mag_to_free, iovad); @@ -812,7 +815,7 @@ static unsigned long __iova_rcache_get(struct iova_rcache *rcache, bool has_pfn = false; unsigned long flags; - cpu_rcache = this_cpu_ptr(rcache->cpu_rcaches); + cpu_rcache = get_cpu_ptr(rcache->cpu_rcaches); spin_lock_irqsave(&cpu_rcache->lock, flags); if (!iova_magazine_empty(cpu_rcache->loaded)) { @@ -834,6 +837,7 @@ static unsigned long __iova_rcache_get(struct iova_rcache *rcache, iova_pfn = iova_magazine_pop(cpu_rcache->loaded, limit_pfn); spin_unlock_irqrestore(&cpu_rcache->lock, flags); + put_cpu_ptr(rcache->cpu_rcaches); return iova_pfn; } diff --git a/drivers/mfd/max77620.c b/drivers/mfd/max77620.c index 199d261990be..f32fbb8e8129 100644 --- a/drivers/mfd/max77620.c +++ b/drivers/mfd/max77620.c @@ -203,6 +203,7 @@ static int max77620_get_fps_period_reg_value(struct max77620_chip *chip, break; case MAX77620: fps_min_period = MAX77620_FPS_PERIOD_MIN_US; + break; default: return -EINVAL; } @@ -236,6 +237,7 @@ static int max77620_config_fps(struct max77620_chip *chip, break; case MAX77620: fps_max_period = MAX77620_FPS_PERIOD_MAX_US; + break; default: return -EINVAL; } diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index ca81f46ea1aa..edc70ffad660 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -101,11 +101,14 @@ enum ad_link_speed_type { #define MAC_ADDRESS_EQUAL(A, B) \ ether_addr_equal_64bits((const u8 *)A, (const u8 *)B) -static struct mac_addr null_mac_addr = { { 0, 0, 0, 0, 0, 0 } }; +static const u8 null_mac_addr[ETH_ALEN + 2] __long_aligned = { + 0, 0, 0, 0, 0, 0 +}; static u16 ad_ticks_per_sec; static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000; -static const u8 lacpdu_mcast_addr[ETH_ALEN] = MULTICAST_LACPDU_ADDR; +static const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned = + MULTICAST_LACPDU_ADDR; /* ================= main 802.3ad protocol functions ================== */ static int ad_lacpdu_send(struct port *port); @@ -1739,7 +1742,7 @@ static void ad_clear_agg(struct aggregator *aggregator) aggregator->is_individual = false; aggregator->actor_admin_aggregator_key = 0; aggregator->actor_oper_aggregator_key = 0; - aggregator->partner_system = null_mac_addr; + eth_zero_addr(aggregator->partner_system.mac_addr_value); aggregator->partner_system_priority = 0; aggregator->partner_oper_aggregator_key = 0; aggregator->receive_state = 0; @@ -1761,7 +1764,7 @@ static void ad_initialize_agg(struct aggregator *aggregator) if (aggregator) { ad_clear_agg(aggregator); - aggregator->aggregator_mac_address = null_mac_addr; + 
eth_zero_addr(aggregator->aggregator_mac_address.mac_addr_value); aggregator->aggregator_identifier = 0; aggregator->slave = NULL; } diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index c5ac160a8ae9..551f0f8dead3 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -42,13 +42,10 @@ -#ifndef __long_aligned -#define __long_aligned __attribute__((aligned((sizeof(long))))) -#endif -static const u8 mac_bcast[ETH_ALEN] __long_aligned = { +static const u8 mac_bcast[ETH_ALEN + 2] __long_aligned = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; -static const u8 mac_v6_allmcast[ETH_ALEN] __long_aligned = { +static const u8 mac_v6_allmcast[ETH_ALEN + 2] __long_aligned = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x01 }; static const int alb_delta_in_ticks = HZ / ALB_TIMER_TICKS_PER_SEC; diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 480d73ac7d1b..b571ed9fd63d 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1584,6 +1584,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) } /* check for initial state */ + new_slave->link = BOND_LINK_NOCHANGE; if (bond->params.miimon) { if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) { if (bond->params.updelay) { diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 834afbb51aff..b2d30863caeb 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -370,7 +370,7 @@ static void bcm_sysport_get_stats(struct net_device *dev, else p = (char *)priv; p += s->stat_offset; - data[i] = *(u32 *)p; + data[i] = *(unsigned long *)p; } } diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h index c4b262ca7d43..2accab386323 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h @@ -36,8 +36,8 @@ #define __T4FW_VERSION_H__ #define T4FW_VERSION_MAJOR 0x01 -#define T4FW_VERSION_MINOR 0x0E -#define T4FW_VERSION_MICRO 0x04 +#define T4FW_VERSION_MINOR 0x0F +#define T4FW_VERSION_MICRO 0x25 #define T4FW_VERSION_BUILD 0x00 #define T4FW_MIN_VERSION_MAJOR 0x01 @@ -45,8 +45,8 @@ #define T4FW_MIN_VERSION_MICRO 0x00 #define T5FW_VERSION_MAJOR 0x01 -#define T5FW_VERSION_MINOR 0x0E -#define T5FW_VERSION_MICRO 0x04 +#define T5FW_VERSION_MINOR 0x0F +#define T5FW_VERSION_MICRO 0x25 #define T5FW_VERSION_BUILD 0x00 #define T5FW_MIN_VERSION_MAJOR 0x00 @@ -54,8 +54,8 @@ #define T5FW_MIN_VERSION_MICRO 0x00 #define T6FW_VERSION_MAJOR 0x01 -#define T6FW_VERSION_MINOR 0x0E -#define T6FW_VERSION_MICRO 0x04 +#define T6FW_VERSION_MINOR 0x0F +#define T6FW_VERSION_MICRO 0x25 #define T6FW_VERSION_BUILD 0x00 #define T6FW_MIN_VERSION_MAJOR 0x00 diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 8294c9a10bd2..41f32c0b341e 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -154,16 +154,6 @@ void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val) writel(val, hw->hw_addr + reg); } -static bool e1000e_vlan_used(struct e1000_adapter *adapter) -{ - u16 vid; - - for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) - return true; - - return false; -} - /** * e1000_regdump - register printout routine * @hw: pointer to the HW structure @@ -3453,8 +3443,7 @@ static void e1000e_set_rx_mode(struct net_device *netdev) ew32(RCTL, 
rctl); - if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX || - e1000e_vlan_used(adapter)) + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) e1000e_vlan_strip_enable(adapter); else e1000e_vlan_strip_disable(adapter); @@ -6927,6 +6916,14 @@ static netdev_features_t e1000_fix_features(struct net_device *netdev, if ((hw->mac.type >= e1000_pch2lan) && (netdev->mtu > ETH_DATA_LEN)) features &= ~NETIF_F_RXFCS; + /* Since there is no support for separate Rx/Tx vlan accel + * enable/disable make sure Tx flag is always in same state as Rx. + */ + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_CTAG_TX; + else + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + return features; } diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.c b/drivers/net/ethernet/intel/ixgbevf/mbx.c index 61a80da8b6f0..2819abc454c7 100644 --- a/drivers/net/ethernet/intel/ixgbevf/mbx.c +++ b/drivers/net/ethernet/intel/ixgbevf/mbx.c @@ -85,7 +85,7 @@ static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw) static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size) { struct ixgbe_mbx_info *mbx = &hw->mbx; - s32 ret_val = -IXGBE_ERR_MBX; + s32 ret_val = IXGBE_ERR_MBX; if (!mbx->ops.read) goto out; @@ -111,7 +111,7 @@ out: static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size) { struct ixgbe_mbx_info *mbx = &hw->mbx; - s32 ret_val = -IXGBE_ERR_MBX; + s32 ret_val = IXGBE_ERR_MBX; /* exit if either we can't write or there isn't a defined timeout */ if (!mbx->ops.write || !mbx->timeout) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index a6d26d351dfc..d5d263bda333 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -3458,6 +3458,8 @@ static int mvneta_open(struct net_device *dev) return 0; err_free_irq: + unregister_cpu_notifier(&pp->cpu_notifier); + on_each_cpu(mvneta_percpu_disable, pp, true); free_percpu_irq(pp->dev->irq, pp->ports); err_cleanup_txqs: mvneta_cleanup_txqs(pp); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 0b4986268cc9..d6e2a1cae19a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -295,6 +295,12 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_DESTROY_FLOW_GROUP: case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY: case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER: + case MLX5_CMD_OP_2ERR_QP: + case MLX5_CMD_OP_2RST_QP: + case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT: + case MLX5_CMD_OP_MODIFY_FLOW_TABLE: + case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY: + case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT: return MLX5_CMD_STAT_OK; case MLX5_CMD_OP_QUERY_HCA_CAP: @@ -321,8 +327,6 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_RTR2RTS_QP: case MLX5_CMD_OP_RTS2RTS_QP: case MLX5_CMD_OP_SQERR2RTS_QP: - case MLX5_CMD_OP_2ERR_QP: - case MLX5_CMD_OP_2RST_QP: case MLX5_CMD_OP_QUERY_QP: case MLX5_CMD_OP_SQD_RTS_QP: case MLX5_CMD_OP_INIT2INIT_QP: @@ -342,7 +346,6 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT: case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT: case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT: - case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT: case MLX5_CMD_OP_QUERY_ROCE_ADDRESS: case MLX5_CMD_OP_SET_ROCE_ADDRESS: case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT: @@ -390,11 +393,12 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case 
MLX5_CMD_OP_CREATE_RQT: case MLX5_CMD_OP_MODIFY_RQT: case MLX5_CMD_OP_QUERY_RQT: + case MLX5_CMD_OP_CREATE_FLOW_TABLE: case MLX5_CMD_OP_QUERY_FLOW_TABLE: case MLX5_CMD_OP_CREATE_FLOW_GROUP: case MLX5_CMD_OP_QUERY_FLOW_GROUP: - case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY: + case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY: case MLX5_CMD_OP_ALLOC_FLOW_COUNTER: case MLX5_CMD_OP_QUERY_FLOW_COUNTER: @@ -602,11 +606,36 @@ static void dump_command(struct mlx5_core_dev *dev, pr_debug("\n"); } +static u16 msg_to_opcode(struct mlx5_cmd_msg *in) +{ + struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data); + + return be16_to_cpu(hdr->opcode); +} + +static void cb_timeout_handler(struct work_struct *work) +{ + struct delayed_work *dwork = container_of(work, struct delayed_work, + work); + struct mlx5_cmd_work_ent *ent = container_of(dwork, + struct mlx5_cmd_work_ent, + cb_timeout_work); + struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev, + cmd); + + ent->ret = -ETIMEDOUT; + mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n", + mlx5_command_str(msg_to_opcode(ent->in)), + msg_to_opcode(ent->in)); + mlx5_cmd_comp_handler(dev, 1UL << ent->idx); +} + static void cmd_work_handler(struct work_struct *work) { struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work); struct mlx5_cmd *cmd = ent->cmd; struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd); + unsigned long cb_timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC); struct mlx5_cmd_layout *lay; struct semaphore *sem; unsigned long flags; @@ -647,6 +676,9 @@ static void cmd_work_handler(struct work_struct *work) dump_command(dev, ent, 1); ent->ts1 = ktime_get_ns(); + if (ent->callback) + schedule_delayed_work(&ent->cb_timeout_work, cb_timeout); + /* ring doorbell after the descriptor is valid */ mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx); wmb(); @@ -691,13 +723,6 @@ static const char *deliv_status_to_str(u8 status) } } -static u16 msg_to_opcode(struct mlx5_cmd_msg *in) -{ - struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data); - - return be16_to_cpu(hdr->opcode); -} - static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent) { unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC); @@ -706,13 +731,13 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent) if (cmd->mode == CMD_MODE_POLLING) { wait_for_completion(&ent->done); - err = ent->ret; - } else { - if (!wait_for_completion_timeout(&ent->done, timeout)) - err = -ETIMEDOUT; - else - err = 0; + } else if (!wait_for_completion_timeout(&ent->done, timeout)) { + ent->ret = -ETIMEDOUT; + mlx5_cmd_comp_handler(dev, 1UL << ent->idx); } + + err = ent->ret; + if (err == -ETIMEDOUT) { mlx5_core_warn(dev, "%s(0x%x) timeout. 
Will cause a leak of a command resource\n", mlx5_command_str(msg_to_opcode(ent->in)), @@ -761,6 +786,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, if (!callback) init_completion(&ent->done); + INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler); INIT_WORK(&ent->work, cmd_work_handler); if (page_queue) { cmd_work_handler(&ent->work); @@ -770,28 +796,26 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, goto out_free; } - if (!callback) { - err = wait_func(dev, ent); - if (err == -ETIMEDOUT) - goto out; - - ds = ent->ts2 - ent->ts1; - op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode); - if (op < ARRAY_SIZE(cmd->stats)) { - stats = &cmd->stats[op]; - spin_lock_irq(&stats->lock); - stats->sum += ds; - ++stats->n; - spin_unlock_irq(&stats->lock); - } - mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME, - "fw exec time for %s is %lld nsec\n", - mlx5_command_str(op), ds); - *status = ent->status; - free_cmd(ent); - } + if (callback) + goto out; - return err; + err = wait_func(dev, ent); + if (err == -ETIMEDOUT) + goto out_free; + + ds = ent->ts2 - ent->ts1; + op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode); + if (op < ARRAY_SIZE(cmd->stats)) { + stats = &cmd->stats[op]; + spin_lock_irq(&stats->lock); + stats->sum += ds; + ++stats->n; + spin_unlock_irq(&stats->lock); + } + mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME, + "fw exec time for %s is %lld nsec\n", + mlx5_command_str(op), ds); + *status = ent->status; out_free: free_cmd(ent); @@ -1181,41 +1205,30 @@ err_dbg: return err; } -void mlx5_cmd_use_events(struct mlx5_core_dev *dev) +static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode) { struct mlx5_cmd *cmd = &dev->cmd; int i; for (i = 0; i < cmd->max_reg_cmds; i++) down(&cmd->sem); - down(&cmd->pages_sem); - flush_workqueue(cmd->wq); - - cmd->mode = CMD_MODE_EVENTS; + cmd->mode = mode; up(&cmd->pages_sem); for (i = 0; i < cmd->max_reg_cmds; i++) up(&cmd->sem); } -void mlx5_cmd_use_polling(struct mlx5_core_dev *dev) +void mlx5_cmd_use_events(struct mlx5_core_dev *dev) { - struct mlx5_cmd *cmd = &dev->cmd; - int i; - - for (i = 0; i < cmd->max_reg_cmds; i++) - down(&cmd->sem); - - down(&cmd->pages_sem); - - flush_workqueue(cmd->wq); - cmd->mode = CMD_MODE_POLLING; + mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS); +} - up(&cmd->pages_sem); - for (i = 0; i < cmd->max_reg_cmds; i++) - up(&cmd->sem); +void mlx5_cmd_use_polling(struct mlx5_core_dev *dev) +{ + mlx5_cmd_change_mod(dev, CMD_MODE_POLLING); } static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg) @@ -1251,6 +1264,8 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec) struct semaphore *sem; ent = cmd->ent_arr[i]; + if (ent->callback) + cancel_delayed_work(&ent->cb_timeout_work); if (ent->page_queue) sem = &cmd->pages_sem; else diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 1365cdc81838..4cbd452fec25 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -164,7 +164,6 @@ enum mlx5e_priv_flag { #ifdef CONFIG_MLX5_CORE_EN_DCB #define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */ -#define MLX5E_MIN_BW_ALLOC 1 /* Min percentage of BW allocation */ #endif struct mlx5e_cq_moder { @@ -215,6 +214,7 @@ struct mlx5e_tstamp { enum { MLX5E_RQ_STATE_POST_WQES_ENABLE, MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, + MLX5E_RQ_STATE_FLUSH_TIMEOUT, MLX5E_RQ_STATE_AM, }; @@ -246,6 +246,8 @@ 
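The mlx5_cmd_change_mod() helper introduced above switches the command interface between polling and event mode only after taking every command semaphore slot plus the pages slot, so no command can be mid-flight while cmd->mode changes. Below is a minimal userspace sketch of that drain-then-switch idiom; the struct, the slot count and the POSIX semaphores are stand-ins for the driver's types, not the real implementation.

#include <semaphore.h>
#include <stdio.h>

#define MAX_REG_CMDS 8

struct cmd_iface {
	sem_t sem;        /* counting semaphore, one unit per command slot */
	sem_t pages_sem;  /* dedicated slot for page-manager commands */
	int mode;         /* 0 = polling, 1 = events */
};

static void cmd_change_mode(struct cmd_iface *c, int mode)
{
	int i;

	/* taking every slot means no command can be in flight ... */
	for (i = 0; i < MAX_REG_CMDS; i++)
		sem_wait(&c->sem);
	sem_wait(&c->pages_sem);

	c->mode = mode;   /* ... so the mode can be switched safely */

	sem_post(&c->pages_sem);
	for (i = 0; i < MAX_REG_CMDS; i++)
		sem_post(&c->sem);
}

int main(void)
{
	struct cmd_iface c = { .mode = 0 };

	sem_init(&c.sem, 0, MAX_REG_CMDS);
	sem_init(&c.pages_sem, 0, 1);

	cmd_change_mode(&c, 1);
	printf("command interface now in mode %d\n", c.mode);
	return 0;
}

Build with -pthread; the point is only the ordering, not the semaphore API itself.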
typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq *rq, typedef int (*mlx5e_fp_alloc_wqe)(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix); +typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq *rq, u16 ix); + struct mlx5e_dma_info { struct page *page; dma_addr_t addr; @@ -291,6 +293,7 @@ struct mlx5e_rq { struct mlx5e_cq cq; mlx5e_fp_handle_rx_cqe handle_rx_cqe; mlx5e_fp_alloc_wqe alloc_wqe; + mlx5e_fp_dealloc_wqe dealloc_wqe; unsigned long state; int ix; @@ -357,6 +360,7 @@ struct mlx5e_sq_dma { enum { MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, MLX5E_SQ_STATE_BF_ENABLE, + MLX5E_SQ_STATE_TX_TIMEOUT, }; struct mlx5e_ico_wqe_info { @@ -626,6 +630,7 @@ struct mlx5e_priv { struct workqueue_struct *wq; struct work_struct update_carrier_work; struct work_struct set_rx_mode_work; + struct work_struct tx_timeout_work; struct delayed_work update_stats_work; u32 pflags; @@ -684,12 +689,16 @@ void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event); int mlx5e_napi_poll(struct napi_struct *napi, int budget); bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget); int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget); +void mlx5e_free_tx_descs(struct mlx5e_sq *sq); +void mlx5e_free_rx_descs(struct mlx5e_rq *rq); void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq); int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix); int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix); +void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix); +void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix); void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq); void mlx5e_complete_rx_linear_mpwqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index e6883132b555..caa9a3ccc3f3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c @@ -96,7 +96,7 @@ static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw, tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC; break; case IEEE_8021QAZ_TSA_ETS: - tc_tx_bw[i] = ets->tc_tx_bw[i] ?: MLX5E_MIN_BW_ALLOC; + tc_tx_bw[i] = ets->tc_tx_bw[i]; break; } } @@ -140,8 +140,12 @@ static int mlx5e_dbcnl_validate_ets(struct ieee_ets *ets) /* Validate Bandwidth Sum */ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { - if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) + if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) { + if (!ets->tc_tx_bw[i]) + return -EINVAL; + bw_sum += ets->tc_tx_bw[i]; + } } if (bw_sum != 0 && bw_sum != 100) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 96ec53a6a595..611ab550136e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -39,6 +39,13 @@ #include "eswitch.h" #include "vxlan.h" +enum { + MLX5_EN_QP_FLUSH_TIMEOUT_MS = 5000, + MLX5_EN_QP_FLUSH_MSLEEP_QUANT = 20, + MLX5_EN_QP_FLUSH_MAX_ITER = MLX5_EN_QP_FLUSH_TIMEOUT_MS / + MLX5_EN_QP_FLUSH_MSLEEP_QUANT, +}; + struct mlx5e_rq_param { u32 rqc[MLX5_ST_SZ_DW(rqc)]; struct mlx5_wq_param wq; @@ -76,10 +83,13 @@ static void mlx5e_update_carrier(struct mlx5e_priv *priv) port_state = mlx5_query_vport_state(mdev, MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0); - if (port_state == VPORT_STATE_UP) + if (port_state == 
VPORT_STATE_UP) { + netdev_info(priv->netdev, "Link up\n"); netif_carrier_on(priv->netdev); - else + } else { + netdev_info(priv->netdev, "Link down\n"); netif_carrier_off(priv->netdev); + } } static void mlx5e_update_carrier_work(struct work_struct *work) @@ -93,6 +103,26 @@ static void mlx5e_update_carrier_work(struct work_struct *work) mutex_unlock(&priv->state_lock); } +static void mlx5e_tx_timeout_work(struct work_struct *work) +{ + struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv, + tx_timeout_work); + int err; + + rtnl_lock(); + mutex_lock(&priv->state_lock); + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) + goto unlock; + mlx5e_close_locked(priv->netdev); + err = mlx5e_open_locked(priv->netdev); + if (err) + netdev_err(priv->netdev, "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n", + err); +unlock: + mutex_unlock(&priv->state_lock); + rtnl_unlock(); +} + static void mlx5e_update_sw_counters(struct mlx5e_priv *priv) { struct mlx5e_sw_stats *s = &priv->stats.sw; @@ -307,6 +337,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c, } rq->handle_rx_cqe = mlx5e_handle_rx_cqe_mpwrq; rq->alloc_wqe = mlx5e_alloc_rx_mpwqe; + rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe; rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz); rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides); @@ -322,6 +353,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c, } rq->handle_rx_cqe = mlx5e_handle_rx_cqe; rq->alloc_wqe = mlx5e_alloc_rx_wqe; + rq->dealloc_wqe = mlx5e_dealloc_rx_wqe; rq->wqe_sz = (priv->params.lro_en) ? priv->params.lro_wqe_sz : @@ -533,12 +565,19 @@ err_destroy_rq: static void mlx5e_close_rq(struct mlx5e_rq *rq) { + int tout = 0; + int err; + clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state); napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */ - mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR); - while (!mlx5_wq_ll_is_empty(&rq->wq)) - msleep(20); + err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR); + while (!mlx5_wq_ll_is_empty(&rq->wq) && !err && + tout++ < MLX5_EN_QP_FLUSH_MAX_ITER) + msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT); + + if (err || tout == MLX5_EN_QP_FLUSH_MAX_ITER) + set_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state); /* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */ napi_synchronize(&rq->channel->napi); @@ -546,6 +585,7 @@ static void mlx5e_close_rq(struct mlx5e_rq *rq) cancel_work_sync(&rq->am.work); mlx5e_disable_rq(rq); + mlx5e_free_rx_descs(rq); mlx5e_destroy_rq(rq); } @@ -800,6 +840,9 @@ static inline void netif_tx_disable_queue(struct netdev_queue *txq) static void mlx5e_close_sq(struct mlx5e_sq *sq) { + int tout = 0; + int err; + if (sq->txq) { clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state); /* prevent netif_tx_wake_queue */ @@ -810,16 +853,24 @@ static void mlx5e_close_sq(struct mlx5e_sq *sq) if (mlx5e_sq_has_room_for(sq, 1)) mlx5e_send_nop(sq, true); - mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR, - false, 0); + err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, + MLX5_SQC_STATE_ERR, false, 0); + if (err) + set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state); } - while (sq->cc != sq->pc) /* wait till sq is empty */ - msleep(20); + /* wait till sq is empty, unless a TX timeout occurred on this SQ */ + while (sq->cc != sq->pc && + !test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state)) { + msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT); + if (tout++ > MLX5_EN_QP_FLUSH_MAX_ITER) + set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state); + } /* avoid 
destroying sq before mlx5e_poll_tx_cq() is done with it */ napi_synchronize(&sq->channel->napi); + mlx5e_free_tx_descs(sq); mlx5e_disable_sq(sq); mlx5e_destroy_sq(sq); } @@ -1736,8 +1787,11 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev) netdev_set_num_tc(netdev, ntc); + /* Map netdev TCs to offset 0 + * We have our own UP to TXQ mapping for QoS + */ for (tc = 0; tc < ntc; tc++) - netdev_set_tc_queue(netdev, tc, nch, tc * nch); + netdev_set_tc_queue(netdev, tc, nch, 0); } int mlx5e_open_locked(struct net_device *netdev) @@ -2709,6 +2763,29 @@ static netdev_features_t mlx5e_features_check(struct sk_buff *skb, return features; } +static void mlx5e_tx_timeout(struct net_device *dev) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + bool sched_work = false; + int i; + + netdev_err(dev, "TX timeout detected\n"); + + for (i = 0; i < priv->params.num_channels * priv->params.num_tc; i++) { + struct mlx5e_sq *sq = priv->txq_to_sq_map[i]; + + if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i))) + continue; + sched_work = true; + set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state); + netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n", + i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc); + } + + if (sched_work && test_bit(MLX5E_STATE_OPENED, &priv->state)) + schedule_work(&priv->tx_timeout_work); +} + static const struct net_device_ops mlx5e_netdev_ops_basic = { .ndo_open = mlx5e_open, .ndo_stop = mlx5e_close, @@ -2727,6 +2804,7 @@ static const struct net_device_ops mlx5e_netdev_ops_basic = { #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = mlx5e_rx_flow_steer, #endif + .ndo_tx_timeout = mlx5e_tx_timeout, }; static const struct net_device_ops mlx5e_netdev_ops_sriov = { @@ -2757,6 +2835,7 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = { .ndo_get_vf_config = mlx5e_get_vf_config, .ndo_set_vf_link_state = mlx5e_set_vf_link_state, .ndo_get_vf_stats = mlx5e_get_vf_stats, + .ndo_tx_timeout = mlx5e_tx_timeout, }; static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev) @@ -2983,6 +3062,7 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev, INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work); INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work); + INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work); INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 022acc2e8922..9f2a16a507e0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -212,6 +212,20 @@ err_free_skb: return -ENOMEM; } +void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix) +{ + struct sk_buff *skb = rq->skb[ix]; + + if (skb) { + rq->skb[ix] = NULL; + dma_unmap_single(rq->pdev, + *((dma_addr_t *)skb->cb), + rq->wqe_sz, + DMA_FROM_DEVICE); + dev_kfree_skb(skb); + } +} + static inline int mlx5e_mpwqe_strides_per_page(struct mlx5e_rq *rq) { return rq->mpwqe_num_strides >> MLX5_MPWRQ_WQE_PAGE_ORDER; @@ -574,6 +588,30 @@ int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix) return 0; } +void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) +{ + struct mlx5e_mpw_info *wi = &rq->wqe_info[ix]; + + wi->free_wqe(rq, wi); +} + +void mlx5e_free_rx_descs(struct mlx5e_rq *rq) +{ + struct mlx5_wq_ll *wq = &rq->wq; + struct mlx5e_rx_wqe *wqe; + __be16 wqe_ix_be; + u16 wqe_ix; + + while (!mlx5_wq_ll_is_empty(wq)) { + wqe_ix_be 
= *wq->tail_next; + wqe_ix = be16_to_cpu(wqe_ix_be); + wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix); + rq->dealloc_wqe(rq, wqe_ix); + mlx5_wq_ll_pop(&rq->wq, wqe_ix_be, + &wqe->next.next_wqe_index); + } +} + #define RQ_CANNOT_POST(rq) \ (!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state) || \ test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state)) @@ -878,6 +916,9 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq); int work_done = 0; + if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state))) + return 0; + if (cq->decmprs_left) work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 5a750b9cd006..5740b465ef84 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -110,8 +110,20 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, { struct mlx5e_priv *priv = netdev_priv(dev); int channel_ix = fallback(dev, skb); - int up = (netdev_get_num_tc(dev) && skb_vlan_tag_present(skb)) ? - skb->vlan_tci >> VLAN_PRIO_SHIFT : 0; + int up = 0; + + if (!netdev_get_num_tc(dev)) + return channel_ix; + + if (skb_vlan_tag_present(skb)) + up = skb->vlan_tci >> VLAN_PRIO_SHIFT; + + /* channel_ix can be larger than num_channels since + * dev->num_real_tx_queues = num_channels * num_tc + */ + if (channel_ix >= priv->params.num_channels) + channel_ix = reciprocal_scale(channel_ix, + priv->params.num_channels); return priv->channeltc_to_txq_map[channel_ix][up]; } @@ -123,7 +135,7 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq, * headers and occur before the data gather. * Therefore these headers must be copied into the WQE */ -#define MLX5E_MIN_INLINE ETH_HLEN +#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN) if (bf) { u16 ihs = skb_headlen(skb); @@ -135,7 +147,7 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq, return skb_headlen(skb); } - return MLX5E_MIN_INLINE; + return max(skb_network_offset(skb), MLX5E_MIN_INLINE); } static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data, @@ -341,6 +353,35 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev) return mlx5e_sq_xmit(sq, skb); } +void mlx5e_free_tx_descs(struct mlx5e_sq *sq) +{ + struct mlx5e_tx_wqe_info *wi; + struct sk_buff *skb; + u16 ci; + int i; + + while (sq->cc != sq->pc) { + ci = sq->cc & sq->wq.sz_m1; + skb = sq->skb[ci]; + wi = &sq->wqe_info[ci]; + + if (!skb) { /* nop */ + sq->cc++; + continue; + } + + for (i = 0; i < wi->num_dma; i++) { + struct mlx5e_sq_dma *dma = + mlx5e_dma_get(sq, sq->dma_fifo_cc++); + + mlx5e_tx_dma_unmap(sq->pdev, dma); + } + + dev_kfree_skb_any(skb); + sq->cc += wi->num_wqebbs; + } +} + bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) { struct mlx5e_sq *sq; @@ -352,6 +393,9 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) sq = container_of(cq, struct mlx5e_sq, cq); + if (unlikely(test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state))) + return false; + npkts = 0; nbytes = 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 42d16b9458e4..96a59463ae65 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -108,15 +108,21 @@ static int in_fatal(struct mlx5_core_dev *dev) void mlx5_enter_error_state(struct mlx5_core_dev *dev) { + 
mutex_lock(&dev->intf_state_mutex); if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) - return; + goto unlock; mlx5_core_err(dev, "start\n"); - if (pci_channel_offline(dev->pdev) || in_fatal(dev)) + if (pci_channel_offline(dev->pdev) || in_fatal(dev)) { dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; + trigger_cmd_completions(dev); + } mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 0); mlx5_core_err(dev, "end\n"); + +unlock: + mutex_unlock(&dev->intf_state_mutex); } static void mlx5_handle_bad_state(struct mlx5_core_dev *dev) @@ -245,7 +251,6 @@ static void poll_health(unsigned long data) u32 count; if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { - trigger_cmd_completions(dev); mod_timer(&health->timer, get_next_poll_jiffies()); return; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 1fb3c681df97..4f491d43e77d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1450,46 +1450,31 @@ void mlx5_disable_device(struct mlx5_core_dev *dev) mlx5_pci_err_detected(dev->pdev, 0); } -/* wait for the device to show vital signs. For now we check - * that we can read the device ID and that the health buffer - * shows a non zero value which is different than 0xffffffff +/* wait for the device to show vital signs by waiting + * for the health counter to start counting. */ -static void wait_vital(struct pci_dev *pdev) +static int wait_vital(struct pci_dev *pdev) { struct mlx5_core_dev *dev = pci_get_drvdata(pdev); struct mlx5_core_health *health = &dev->priv.health; const int niter = 100; + u32 last_count = 0; u32 count; - u16 did; int i; - /* Wait for firmware to be ready after reset */ - msleep(1000); - for (i = 0; i < niter; i++) { - if (pci_read_config_word(pdev, 2, &did)) { - dev_warn(&pdev->dev, "failed reading config word\n"); - break; - } - if (did == pdev->device) { - dev_info(&pdev->dev, "device ID correctly read after %d iterations\n", i); - break; - } - msleep(50); - } - if (i == niter) - dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__); - for (i = 0; i < niter; i++) { count = ioread32be(health->health_counter); if (count && count != 0xffffffff) { - dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i); - break; + if (last_count && last_count != count) { + dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i); + return 0; + } + last_count = count; } msleep(50); } - if (i == niter) - dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__); + return -ETIMEDOUT; } static void mlx5_pci_resume(struct pci_dev *pdev) @@ -1501,7 +1486,11 @@ static void mlx5_pci_resume(struct pci_dev *pdev) dev_info(&pdev->dev, "%s was called\n", __func__); pci_save_state(pdev); - wait_vital(pdev); + err = wait_vital(pdev); + if (err) { + dev_err(&pdev->dev, "%s: wait_vital timed out\n", __func__); + return; + } err = mlx5_load_one(dev, priv); if (err) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index 9eeee0545f1c..32dea3524cee 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -345,7 +345,6 @@ retry: func_id, npages, err); goto out_4k; } - dev->priv.fw_pages += npages; err = mlx5_cmd_status_to_err(&out.hdr); if (err) { @@ -373,6 +372,33 @@ out_free: return err; } +static int reclaim_pages_cmd(struct mlx5_core_dev *dev, + struct 
mlx5_manage_pages_inbox *in, int in_size, + struct mlx5_manage_pages_outbox *out, int out_size) +{ + struct fw_page *fwp; + struct rb_node *p; + u32 npages; + u32 i = 0; + + if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) + return mlx5_cmd_exec_check_status(dev, (u32 *)in, in_size, + (u32 *)out, out_size); + + npages = be32_to_cpu(in->num_entries); + + p = rb_first(&dev->priv.page_root); + while (p && i < npages) { + fwp = rb_entry(p, struct fw_page, rb_node); + out->pas[i] = cpu_to_be64(fwp->addr); + p = rb_next(p); + i++; + } + + out->num_entries = cpu_to_be32(i); + return 0; +} + static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, int *nclaimed) { @@ -398,15 +424,9 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, in.func_id = cpu_to_be16(func_id); in.num_entries = cpu_to_be32(npages); mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen); - err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); + err = reclaim_pages_cmd(dev, &in, sizeof(in), out, outlen); if (err) { - mlx5_core_err(dev, "failed reclaiming pages\n"); - goto out_free; - } - dev->priv.fw_pages -= npages; - - if (out->hdr.status) { - err = mlx5_cmd_status_to_err(&out->hdr); + mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err); goto out_free; } @@ -417,13 +437,15 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, err = -EINVAL; goto out_free; } - if (nclaimed) - *nclaimed = num_claimed; for (i = 0; i < num_claimed; i++) { addr = be64_to_cpu(out->pas[i]); free_4k(dev, addr); } + + if (nclaimed) + *nclaimed = num_claimed; + dev->priv.fw_pages -= num_claimed; if (func_id) dev->priv.vfs_pages -= num_claimed; @@ -514,14 +536,10 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev) p = rb_first(&dev->priv.page_root); if (p) { fwp = rb_entry(p, struct fw_page, rb_node); - if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { - free_4k(dev, fwp->addr); - nclaimed = 1; - } else { - err = reclaim_pages(dev, fwp->func_id, - optimal_reclaimed_pages(), - &nclaimed); - } + err = reclaim_pages(dev, fwp->func_id, + optimal_reclaimed_pages(), + &nclaimed); + if (err) { mlx5_core_warn(dev, "failed reclaiming pages (%d)\n", err); @@ -536,6 +554,13 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev) } } while (p); + WARN(dev->priv.fw_pages, + "FW pages counter is %d after reclaiming all pages\n", + dev->priv.fw_pages); + WARN(dev->priv.vfs_pages, + "VFs FW pages counter is %d after reclaiming all pages\n", + dev->priv.vfs_pages); + return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index daf44cd4c566..91846dfcbe9c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -513,7 +513,6 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev, { int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in); void *nic_vport_context; - u8 *guid; void *in; int err; @@ -535,8 +534,6 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev, nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context); - guid = MLX5_ADDR_OF(nic_vport_context, nic_vport_context, - node_guid); MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid); err = mlx5_modify_nic_vport_context(mdev, in, inlen); diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c index 7066954c39d6..0a26b11ca8f6 100644 --- 
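The reclaim path above has two noteworthy points: when the device is in an internal error state, reclaim_pages_cmd() answers the request from the driver's own page tree instead of asking the firmware, and the fw_pages counter is now adjusted by the number of pages actually claimed rather than the number requested. The sketch below mirrors both points in plain C; the flat array standing in for the rb-tree and all names are invented for illustration.

#include <stdint.h>
#include <stdio.h>

#define NUM_FW_PAGES 5

/* stand-in for the tree of pages currently handed to firmware */
static uint64_t fw_page_addrs[NUM_FW_PAGES] = {
	0x1000, 0x2000, 0x3000, 0x4000, 0x5000
};
static int fw_pages = NUM_FW_PAGES;

/* Answer a reclaim request locally: copy at most 'npages' addresses
 * into 'out' and report how many were actually returned. */
static int reclaim_pages_local(uint64_t *out, int npages)
{
	int i = 0;

	while (i < NUM_FW_PAGES && i < npages) {
		out[i] = fw_page_addrs[i];
		i++;
	}
	return i;
}

int main(void)
{
	uint64_t out[8];
	int requested = 8;
	int claimed = reclaim_pages_local(out, requested);

	/* account for what was actually claimed, not for what was asked */
	fw_pages -= claimed;
	printf("requested %d, claimed %d, fw_pages now %d\n",
	       requested, claimed, fw_pages);
	return 0;
}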
a/drivers/net/ethernet/microchip/enc28j60.c +++ b/drivers/net/ethernet/microchip/enc28j60.c @@ -1151,7 +1151,8 @@ static void enc28j60_irq_work_handler(struct work_struct *work) enc28j60_phy_read(priv, PHIR); } /* TX complete handler */ - if ((intflags & EIR_TXIF) != 0) { + if (((intflags & EIR_TXIF) != 0) && + ((intflags & EIR_TXERIF) == 0)) { bool err = false; loop++; if (netif_msg_intr(priv)) @@ -1203,7 +1204,7 @@ static void enc28j60_irq_work_handler(struct work_struct *work) enc28j60_tx_clear(ndev, true); } else enc28j60_tx_clear(ndev, true); - locked_reg_bfclr(priv, EIR, EIR_TXERIF); + locked_reg_bfclr(priv, EIR, EIR_TXERIF | EIR_TXIF); } /* RX Error handler */ if ((intflags & EIR_RXERIF) != 0) { @@ -1238,6 +1239,8 @@ static void enc28j60_irq_work_handler(struct work_struct *work) */ static void enc28j60_hw_tx(struct enc28j60_net *priv) { + BUG_ON(!priv->tx_skb); + if (netif_msg_tx_queued(priv)) printk(KERN_DEBUG DRV_NAME ": Tx Packet Len:%d\n", priv->tx_skb->len); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index 607bb7d4514d..87c642d3b075 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c @@ -772,6 +772,8 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) tx_ring->tx_stats.tx_bytes += skb->len; tx_ring->tx_stats.xmit_called++; + /* Ensure writes are complete before HW fetches Tx descriptors */ + wmb(); qlcnic_update_cmd_producer(tx_ring); return NETDEV_TX_OK; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index aab777c1ba33..c23ccabc2d8a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -2819,7 +2819,7 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id) priv->tx_path_in_lpi_mode = true; if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE) priv->tx_path_in_lpi_mode = false; - if (status & CORE_IRQ_MTL_RX_OVERFLOW) + if (status & CORE_IRQ_MTL_RX_OVERFLOW && priv->hw->dma->set_rx_tail_ptr) priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, priv->rx_tail_addr, STMMAC_CHAN0); diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 310e0b9c2657..5de892f3c0e0 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -1036,12 +1036,17 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev) static int __geneve_change_mtu(struct net_device *dev, int new_mtu, bool strict) { + struct geneve_dev *geneve = netdev_priv(dev); /* The max_mtu calculation does not take account of GENEVE * options, to avoid excluding potentially valid * configurations. 
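The qlcnic change above inserts wmb() so the Tx descriptor contents are globally visible before the producer index that tells the hardware to fetch them. The sketch below shows the same publish ordering in portable C11, with a release store standing in for the kernel's write barrier; the ring layout and names are made up.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct tx_desc {
	uint64_t addr;
	uint32_t len;
};

static struct tx_desc ring[256];
static _Atomic uint32_t producer;   /* index the consumer (the NIC, in the
				     * real driver) polls to find new work */

static void post_tx(uint64_t addr, uint32_t len)
{
	uint32_t idx = atomic_load_explicit(&producer, memory_order_relaxed);

	ring[idx % 256].addr = addr;    /* fill the descriptor first ... */
	ring[idx % 256].len  = len;

	/* ... then publish: the release store plays the role of wmb()
	 * followed by the producer update in the hunk above */
	atomic_store_explicit(&producer, idx + 1, memory_order_release);
}

int main(void)
{
	post_tx(0xdeadbeef, 1500);
	printf("producer index is now %u\n",
	       atomic_load_explicit(&producer, memory_order_acquire));
	return 0;
}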
*/ - int max_mtu = IP_MAX_MTU - GENEVE_BASE_HLEN - sizeof(struct iphdr) - - dev->hard_header_len; + int max_mtu = IP_MAX_MTU - GENEVE_BASE_HLEN - dev->hard_header_len; + + if (geneve->remote.sa.sa_family == AF_INET6) + max_mtu -= sizeof(struct ipv6hdr); + else + max_mtu -= sizeof(struct iphdr); if (new_mtu < 68) return -EINVAL; diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 0e7eff7f1cd2..8bcd78f94966 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -2640,6 +2640,7 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb, u64_stats_update_begin(&secy_stats->syncp); secy_stats->stats.OutPktsUntagged++; u64_stats_update_end(&secy_stats->syncp); + skb->dev = macsec->real_dev; len = skb->len; ret = dev_queue_xmit(skb); count_tx(dev, ret, len); diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index 2afa61b51d41..91177a4a32ad 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -57,6 +57,7 @@ /* PHY CTRL bits */ #define DP83867_PHYCR_FIFO_DEPTH_SHIFT 14 +#define DP83867_PHYCR_FIFO_DEPTH_MASK (3 << 14) /* RGMIIDCTL bits */ #define DP83867_RGMII_TX_CLK_DELAY_SHIFT 4 @@ -133,8 +134,8 @@ static int dp83867_of_init(struct phy_device *phydev) static int dp83867_config_init(struct phy_device *phydev) { struct dp83867_private *dp83867; - int ret; - u16 val, delay; + int ret, val; + u16 delay; if (!phydev->priv) { dp83867 = devm_kzalloc(&phydev->mdio.dev, sizeof(*dp83867), @@ -151,8 +152,12 @@ static int dp83867_config_init(struct phy_device *phydev) } if (phy_interface_is_rgmii(phydev)) { - ret = phy_write(phydev, MII_DP83867_PHYCTRL, - (dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT)); + val = phy_read(phydev, MII_DP83867_PHYCTRL); + if (val < 0) + return val; + val &= ~DP83867_PHYCR_FIFO_DEPTH_MASK; + val |= (dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT); + ret = phy_write(phydev, MII_DP83867_PHYCTRL, val); if (ret) return ret; } diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 53759c315b97..877c9516e781 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -854,6 +854,13 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_ if (cdc_ncm_init(dev)) goto error2; + /* Some firmwares need a pause here or they will silently fail + * to set up the interface properly. This value was decided + * empirically on a Sierra Wireless MC7455 running 02.08.02.00 + * firmware. + */ + usleep_range(10000, 20000); + /* configure data interface */ temp = usb_set_interface(dev->udev, iface_no, data_altsetting); if (temp) { diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 24d367280ecf..b225bc27fbe2 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -31,7 +31,7 @@ #define NETNEXT_VERSION "08" /* Information for net */ -#define NET_VERSION "4" +#define NET_VERSION "5" #define DRIVER_VERSION "v1." NETNEXT_VERSION "." 
NET_VERSION #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" @@ -625,6 +625,7 @@ struct r8152 { int (*eee_set)(struct r8152 *, struct ethtool_eee *); bool (*in_nway)(struct r8152 *); void (*hw_phy_cfg)(struct r8152 *); + void (*autosuspend_en)(struct r8152 *tp, bool enable); } rtl_ops; int intr_interval; @@ -2412,9 +2413,6 @@ static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable) if (enable) { u32 ocp_data; - r8153_u1u2en(tp, false); - r8153_u2p3en(tp, false); - __rtl_set_wol(tp, WAKE_ANY); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG); @@ -2425,7 +2423,28 @@ static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable) ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); } else { + u32 ocp_data; + __rtl_set_wol(tp, tp->saved_wolopts); + + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34); + ocp_data &= ~LINK_OFF_WAKE_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data); + + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); + } +} + +static void rtl8153_runtime_enable(struct r8152 *tp, bool enable) +{ + rtl_runtime_suspend_enable(tp, enable); + + if (enable) { + r8153_u1u2en(tp, false); + r8153_u2p3en(tp, false); + } else { r8153_u2p3en(tp, true); r8153_u1u2en(tp, true); } @@ -3530,7 +3549,7 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message) napi_disable(&tp->napi); if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { rtl_stop_rx(tp); - rtl_runtime_suspend_enable(tp, true); + tp->rtl_ops.autosuspend_en(tp, true); } else { cancel_delayed_work_sync(&tp->schedule); tp->rtl_ops.down(tp); @@ -3557,7 +3576,7 @@ static int rtl8152_resume(struct usb_interface *intf) if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) { if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { - rtl_runtime_suspend_enable(tp, false); + tp->rtl_ops.autosuspend_en(tp, false); clear_bit(SELECTIVE_SUSPEND, &tp->flags); napi_disable(&tp->napi); set_bit(WORK_ENABLE, &tp->flags); @@ -3572,7 +3591,7 @@ static int rtl8152_resume(struct usb_interface *intf) usb_submit_urb(tp->intr_urb, GFP_KERNEL); } else if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { if (tp->netdev->flags & IFF_UP) - rtl_runtime_suspend_enable(tp, false); + tp->rtl_ops.autosuspend_en(tp, false); clear_bit(SELECTIVE_SUSPEND, &tp->flags); } @@ -4158,6 +4177,7 @@ static int rtl_ops_init(struct r8152 *tp) ops->eee_set = r8152_set_eee; ops->in_nway = rtl8152_in_nway; ops->hw_phy_cfg = r8152b_hw_phy_cfg; + ops->autosuspend_en = rtl_runtime_suspend_enable; break; case RTL_VER_03: @@ -4174,6 +4194,7 @@ static int rtl_ops_init(struct r8152 *tp) ops->eee_set = r8153_set_eee; ops->in_nway = rtl8153_in_nway; ops->hw_phy_cfg = r8153_hw_phy_cfg; + ops->autosuspend_en = rtl8153_runtime_enable; break; default: diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 61ba46404937..6086a0163249 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -395,8 +395,11 @@ int usbnet_change_mtu (struct net_device *net, int new_mtu) dev->hard_mtu = net->mtu + net->hard_header_len; if (dev->rx_urb_size == old_hard_mtu) { dev->rx_urb_size = dev->hard_mtu; - if (dev->rx_urb_size > old_rx_urb_size) + if (dev->rx_urb_size > old_rx_urb_size) { + usbnet_pause_rx(dev); usbnet_unlink_rx_urbs(dev); + usbnet_resume_rx(dev); + } } /* max qlen depend on hard_mtu and rx_urb_size */ @@ -1508,8 +1511,9 @@ static void usbnet_bh (unsigned long param) } else if (netif_running (dev->net) 
&& netif_device_present (dev->net) && netif_carrier_ok(dev->net) && - !timer_pending (&dev->delay) && - !test_bit (EVENT_RX_HALT, &dev->flags)) { + !timer_pending(&dev->delay) && + !test_bit(EVENT_RX_PAUSED, &dev->flags) && + !test_bit(EVENT_RX_HALT, &dev->flags)) { int temp = dev->rxq.qlen; if (temp < RX_QLEN(dev)) { diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c index f7718ec685fa..cea8350fbc7e 100644 --- a/drivers/nvdimm/pfn_devs.c +++ b/drivers/nvdimm/pfn_devs.c @@ -344,6 +344,8 @@ struct device *nd_pfn_create(struct nd_region *nd_region) int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig) { u64 checksum, offset; + unsigned long align; + enum nd_pfn_mode mode; struct nd_namespace_io *nsio; struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb; struct nd_namespace_common *ndns = nd_pfn->ndns; @@ -386,22 +388,50 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig) return -ENXIO; } + align = le32_to_cpu(pfn_sb->align); + offset = le64_to_cpu(pfn_sb->dataoff); + if (align == 0) + align = 1UL << ilog2(offset); + mode = le32_to_cpu(pfn_sb->mode); + if (!nd_pfn->uuid) { - /* from probe we allocate */ + /* + * When probing a namepace via nd_pfn_probe() the uuid + * is NULL (see: nd_pfn_devinit()) we init settings from + * pfn_sb + */ nd_pfn->uuid = kmemdup(pfn_sb->uuid, 16, GFP_KERNEL); if (!nd_pfn->uuid) return -ENOMEM; + nd_pfn->align = align; + nd_pfn->mode = mode; } else { - /* from init we validate */ + /* + * When probing a pfn / dax instance we validate the + * live settings against the pfn_sb + */ if (memcmp(nd_pfn->uuid, pfn_sb->uuid, 16) != 0) return -ENODEV; + + /* + * If the uuid validates, but other settings mismatch + * return EINVAL because userspace has managed to change + * the configuration without specifying new + * identification. + */ + if (nd_pfn->align != align || nd_pfn->mode != mode) { + dev_err(&nd_pfn->dev, + "init failed, settings mismatch\n"); + dev_dbg(&nd_pfn->dev, "align: %lx:%lx mode: %d:%d\n", + nd_pfn->align, align, nd_pfn->mode, + mode); + return -EINVAL; + } } - if (nd_pfn->align == 0) - nd_pfn->align = le32_to_cpu(pfn_sb->align); - if (nd_pfn->align > nvdimm_namespace_capacity(ndns)) { + if (align > nvdimm_namespace_capacity(ndns)) { dev_err(&nd_pfn->dev, "alignment: %lx exceeds capacity %llx\n", - nd_pfn->align, nvdimm_namespace_capacity(ndns)); + align, nvdimm_namespace_capacity(ndns)); return -EINVAL; } @@ -411,7 +441,6 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig) * namespace has changed since the pfn superblock was * established. 
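In the nd_pfn_validate() hunk above, a superblock that records align == 0 gets a fallback alignment of 1UL << ilog2(offset), i.e. the largest power of two not exceeding the data offset, and the offset is then checked against it. A self-contained sketch of that arithmetic, with a simple bit-loop standing in for the kernel's ilog2() and an open-coded IS_ALIGNED():

#include <stdint.h>
#include <stdio.h>

static unsigned int ilog2_u64(uint64_t v)
{
	unsigned int l = 0;

	while (v >>= 1)
		l++;
	return l;          /* result for v == 0 is not meaningful */
}

static int is_aligned(uint64_t x, uint64_t a)
{
	return (x & (a - 1)) == 0;   /* 'a' must be a power of two */
}

int main(void)
{
	uint64_t offset = 0x200000;   /* example data offset (2M) */
	uint64_t align = 0;           /* pretend the superblock said 0 */

	if (align == 0)
		align = 1ULL << ilog2_u64(offset);

	printf("derived align %#llx, offset is %s\n",
	       (unsigned long long)align,
	       is_aligned(offset, align) ? "aligned" : "NOT aligned");
	return 0;
}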
*/ - offset = le64_to_cpu(pfn_sb->dataoff); nsio = to_nd_namespace_io(&ndns->dev); if (offset >= resource_size(&nsio->res)) { dev_err(&nd_pfn->dev, "pfn array size exceeds capacity of %s\n", @@ -419,10 +448,11 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig) return -EBUSY; } - if ((nd_pfn->align && !IS_ALIGNED(offset, nd_pfn->align)) + if ((align && !IS_ALIGNED(offset, align)) || !IS_ALIGNED(offset, PAGE_SIZE)) { - dev_err(&nd_pfn->dev, "bad offset: %#llx dax disabled\n", - offset); + dev_err(&nd_pfn->dev, + "bad offset: %#llx dax disabled align: %#lx\n", + offset, align); return -ENXIO; } @@ -502,7 +532,6 @@ static struct vmem_altmap *__nvdimm_setup_pfn(struct nd_pfn *nd_pfn, res->start += start_pad; res->end -= end_trunc; - nd_pfn->mode = le32_to_cpu(nd_pfn->pfn_sb->mode); if (nd_pfn->mode == PFN_MODE_RAM) { if (offset < SZ_8K) return ERR_PTR(-EINVAL); diff --git a/drivers/phy/phy-bcm-ns-usb2.c b/drivers/phy/phy-bcm-ns-usb2.c index 95ab6b2a0de5..58dff80e9386 100644 --- a/drivers/phy/phy-bcm-ns-usb2.c +++ b/drivers/phy/phy-bcm-ns-usb2.c @@ -109,8 +109,8 @@ static int bcm_ns_usb2_probe(struct platform_device *pdev) } usb2->phy = devm_phy_create(dev, NULL, &ops); - if (IS_ERR(dev)) - return PTR_ERR(dev); + if (IS_ERR(usb2->phy)) + return PTR_ERR(usb2->phy); phy_set_drvdata(usb2->phy, usb2); platform_set_drvdata(pdev, usb2); diff --git a/drivers/phy/phy-miphy28lp.c b/drivers/phy/phy-miphy28lp.c index 3acd2a1808df..213e2e15339c 100644 --- a/drivers/phy/phy-miphy28lp.c +++ b/drivers/phy/phy-miphy28lp.c @@ -1143,7 +1143,8 @@ static int miphy28lp_probe_resets(struct device_node *node, struct miphy28lp_dev *miphy_dev = miphy_phy->phydev; int err; - miphy_phy->miphy_rst = of_reset_control_get(node, "miphy-sw-rst"); + miphy_phy->miphy_rst = + of_reset_control_get_shared(node, "miphy-sw-rst"); if (IS_ERR(miphy_phy->miphy_rst)) { dev_err(miphy_dev->dev, diff --git a/drivers/phy/phy-rcar-gen3-usb2.c b/drivers/phy/phy-rcar-gen3-usb2.c index 76bb88f0700a..4be3f5dbbc9f 100644 --- a/drivers/phy/phy-rcar-gen3-usb2.c +++ b/drivers/phy/phy-rcar-gen3-usb2.c @@ -144,12 +144,6 @@ static void rcar_gen3_init_for_peri(struct rcar_gen3_chan *ch) extcon_set_cable_state_(ch->extcon, EXTCON_USB, true); } -static bool rcar_gen3_check_vbus(struct rcar_gen3_chan *ch) -{ - return !!(readl(ch->base + USB2_ADPCTRL) & - USB2_ADPCTRL_OTGSESSVLD); -} - static bool rcar_gen3_check_id(struct rcar_gen3_chan *ch) { return !!(readl(ch->base + USB2_ADPCTRL) & USB2_ADPCTRL_IDDIG); @@ -157,13 +151,7 @@ static bool rcar_gen3_check_id(struct rcar_gen3_chan *ch) static void rcar_gen3_device_recognition(struct rcar_gen3_chan *ch) { - bool is_host = true; - - /* B-device? 
*/ - if (rcar_gen3_check_id(ch) && rcar_gen3_check_vbus(ch)) - is_host = false; - - if (is_host) + if (!rcar_gen3_check_id(ch)) rcar_gen3_init_for_host(ch); else rcar_gen3_init_for_peri(ch); diff --git a/drivers/phy/phy-rockchip-dp.c b/drivers/phy/phy-rockchip-dp.c index 793ecb6d87bc..8b267a746576 100644 --- a/drivers/phy/phy-rockchip-dp.c +++ b/drivers/phy/phy-rockchip-dp.c @@ -90,7 +90,7 @@ static int rockchip_dp_phy_probe(struct platform_device *pdev) return -ENODEV; dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL); - if (IS_ERR(dp)) + if (!dp) return -ENOMEM; dp->dev = dev; diff --git a/drivers/phy/phy-stih407-usb.c b/drivers/phy/phy-stih407-usb.c index 1d5ae5f8ef69..b1f44ab669fb 100644 --- a/drivers/phy/phy-stih407-usb.c +++ b/drivers/phy/phy-stih407-usb.c @@ -105,13 +105,13 @@ static int stih407_usb2_picophy_probe(struct platform_device *pdev) phy_dev->dev = dev; dev_set_drvdata(dev, phy_dev); - phy_dev->rstc = devm_reset_control_get(dev, "global"); + phy_dev->rstc = devm_reset_control_get_shared(dev, "global"); if (IS_ERR(phy_dev->rstc)) { dev_err(dev, "failed to ctrl picoPHY reset\n"); return PTR_ERR(phy_dev->rstc); } - phy_dev->rstport = devm_reset_control_get(dev, "port"); + phy_dev->rstport = devm_reset_control_get_exclusive(dev, "port"); if (IS_ERR(phy_dev->rstport)) { dev_err(dev, "failed to ctrl picoPHY reset\n"); return PTR_ERR(phy_dev->rstport); diff --git a/drivers/phy/phy-sun4i-usb.c b/drivers/phy/phy-sun4i-usb.c index bae54f7a1f48..de3101fbbf40 100644 --- a/drivers/phy/phy-sun4i-usb.c +++ b/drivers/phy/phy-sun4i-usb.c @@ -175,7 +175,7 @@ static void sun4i_usb_phy_write(struct sun4i_usb_phy *phy, u32 addr, u32 data, { struct sun4i_usb_phy_data *phy_data = to_sun4i_usb_phy_data(phy); u32 temp, usbc_bit = BIT(phy->index * 2); - void *phyctl = phy_data->base + phy_data->cfg->phyctl_offset; + void __iomem *phyctl = phy_data->base + phy_data->cfg->phyctl_offset; int i; mutex_lock(&phy_data->mutex); @@ -514,9 +514,9 @@ static int sun4i_usb_phy_remove(struct platform_device *pdev) if (data->vbus_power_nb_registered) power_supply_unreg_notifier(&data->vbus_power_nb); - if (data->id_det_irq >= 0) + if (data->id_det_irq > 0) devm_free_irq(dev, data->id_det_irq, data); - if (data->vbus_det_irq >= 0) + if (data->vbus_det_irq > 0) devm_free_irq(dev, data->vbus_det_irq, data); cancel_delayed_work_sync(&data->detect); @@ -645,11 +645,11 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev) data->id_det_irq = gpiod_to_irq(data->id_det_gpio); data->vbus_det_irq = gpiod_to_irq(data->vbus_det_gpio); - if ((data->id_det_gpio && data->id_det_irq < 0) || - (data->vbus_det_gpio && data->vbus_det_irq < 0)) + if ((data->id_det_gpio && data->id_det_irq <= 0) || + (data->vbus_det_gpio && data->vbus_det_irq <= 0)) data->phy0_poll = true; - if (data->id_det_irq >= 0) { + if (data->id_det_irq > 0) { ret = devm_request_irq(dev, data->id_det_irq, sun4i_usb_phy0_id_vbus_det_irq, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, @@ -660,7 +660,7 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev) } } - if (data->vbus_det_irq >= 0) { + if (data->vbus_det_irq > 0) { ret = devm_request_irq(dev, data->vbus_det_irq, sun4i_usb_phy0_id_vbus_det_irq, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, diff --git a/drivers/platform/chrome/cros_ec_dev.c b/drivers/platform/chrome/cros_ec_dev.c index 6d8ee3b15872..8abd80dbcbed 100644 --- a/drivers/platform/chrome/cros_ec_dev.c +++ b/drivers/platform/chrome/cros_ec_dev.c @@ -151,13 +151,19 @@ static long ec_device_ioctl_xcmd(struct cros_ec_dev *ec, void __user 
*arg) goto exit; } + if (u_cmd.outsize != s_cmd->outsize || + u_cmd.insize != s_cmd->insize) { + ret = -EINVAL; + goto exit; + } + s_cmd->command += ec->cmd_offset; ret = cros_ec_cmd_xfer(ec->ec_dev, s_cmd); /* Only copy data to userland if data was received. */ if (ret < 0) goto exit; - if (copy_to_user(arg, s_cmd, sizeof(*s_cmd) + u_cmd.insize)) + if (copy_to_user(arg, s_cmd, sizeof(*s_cmd) + s_cmd->insize)) ret = -EFAULT; exit: kfree(s_cmd); diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c index 63cd5e68c864..3a6d0290c54c 100644 --- a/drivers/regulator/anatop-regulator.c +++ b/drivers/regulator/anatop-regulator.c @@ -296,7 +296,7 @@ static int anatop_regulator_probe(struct platform_device *pdev) if (!sreg->sel && !strcmp(sreg->name, "vddpu")) sreg->sel = 22; - if (!sreg->sel) { + if (!sreg->bypass && !sreg->sel) { dev_err(&pdev->dev, "Failed to read a valid default voltage selector.\n"); return -EINVAL; } diff --git a/drivers/regulator/max77620-regulator.c b/drivers/regulator/max77620-regulator.c index 321e804aeab0..a1b49a6d538f 100644 --- a/drivers/regulator/max77620-regulator.c +++ b/drivers/regulator/max77620-regulator.c @@ -123,6 +123,9 @@ static int max77620_regulator_set_fps_src(struct max77620_regulator *pmic, unsigned int val; int ret; + if (!rinfo) + return 0; + switch (fps_src) { case MAX77620_FPS_SRC_0: case MAX77620_FPS_SRC_1: @@ -171,6 +174,9 @@ static int max77620_regulator_set_fps_slots(struct max77620_regulator *pmic, int pd = rpdata->active_fps_pd_slot; int ret = 0; + if (!rinfo) + return 0; + if (is_suspend) { pu = rpdata->suspend_fps_pu_slot; pd = rpdata->suspend_fps_pd_slot; @@ -680,7 +686,6 @@ static struct max77620_regulator_info max77620_regs_info[MAX77620_NUM_REGS] = { RAIL_SD(SD1, sd1, "in-sd1", SD1, 600000, 1550000, 12500, 0x22, SD1), RAIL_SD(SD2, sd2, "in-sd2", SDX, 600000, 3787500, 12500, 0xFF, NONE), RAIL_SD(SD3, sd3, "in-sd3", SDX, 600000, 3787500, 12500, 0xFF, NONE), - RAIL_SD(SD4, sd4, "in-sd4", SDX, 600000, 3787500, 12500, 0xFF, NONE), RAIL_LDO(LDO0, ldo0, "in-ldo0-1", N, 800000, 2375000, 25000), RAIL_LDO(LDO1, ldo1, "in-ldo0-1", N, 800000, 2375000, 25000), diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 9fd48de38a4c..7bc20c5188bc 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -1045,6 +1045,7 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev) qeth_l2_set_offline(cgdev); if (card->dev) { + netif_napi_del(&card->napi); unregister_netdev(card->dev); card->dev = NULL; } diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index bcd324e054a9..72934666fedf 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -3167,6 +3167,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev) qeth_l3_set_offline(cgdev); if (card->dev) { + netif_napi_del(&card->napi); unregister_netdev(card->dev); card->dev = NULL; } diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c index cd89682065b9..1026e180eed7 100644 --- a/drivers/spi/spi-rockchip.c +++ b/drivers/spi/spi-rockchip.c @@ -578,7 +578,7 @@ static int rockchip_spi_transfer_one( struct spi_device *spi, struct spi_transfer *xfer) { - int ret = 1; + int ret = 0; struct rockchip_spi *rs = spi_master_get_devdata(master); WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) && @@ -627,6 +627,8 @@ static int rockchip_spi_transfer_one( spi_enable_chip(rs, 1); ret = rockchip_spi_prepare_dma(rs); } + /* successful 
DMA prepare means the transfer is in progress */ + ret = ret ? ret : 1; } else { spi_enable_chip(rs, 1); ret = rockchip_spi_pio_transfer(rs); diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c index 1ddd9e2309b6..cf007f3b83ec 100644 --- a/drivers/spi/spi-sun4i.c +++ b/drivers/spi/spi-sun4i.c @@ -173,13 +173,17 @@ static int sun4i_spi_transfer_one(struct spi_master *master, { struct sun4i_spi *sspi = spi_master_get_devdata(master); unsigned int mclk_rate, div, timeout; + unsigned int start, end, tx_time; unsigned int tx_len = 0; int ret = 0; u32 reg; /* We don't support transfer larger than the FIFO */ if (tfr->len > SUN4I_FIFO_DEPTH) - return -EINVAL; + return -EMSGSIZE; + + if (tfr->tx_buf && tfr->len >= SUN4I_FIFO_DEPTH) + return -EMSGSIZE; reinit_completion(&sspi->done); sspi->tx_buf = tfr->tx_buf; @@ -269,8 +273,12 @@ static int sun4i_spi_transfer_one(struct spi_master *master, sun4i_spi_write(sspi, SUN4I_BURST_CNT_REG, SUN4I_BURST_CNT(tfr->len)); sun4i_spi_write(sspi, SUN4I_XMIT_CNT_REG, SUN4I_XMIT_CNT(tx_len)); - /* Fill the TX FIFO */ - sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH); + /* + * Fill the TX FIFO + * Filling the FIFO fully causes timeout for some reason + * at least on spi2 on A10s + */ + sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH - 1); /* Enable the interrupts */ sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, SUN4I_INT_CTL_TC); @@ -279,9 +287,16 @@ static int sun4i_spi_transfer_one(struct spi_master *master, reg = sun4i_spi_read(sspi, SUN4I_CTL_REG); sun4i_spi_write(sspi, SUN4I_CTL_REG, reg | SUN4I_CTL_XCH); + tx_time = max(tfr->len * 8 * 2 / (tfr->speed_hz / 1000), 100U); + start = jiffies; timeout = wait_for_completion_timeout(&sspi->done, - msecs_to_jiffies(1000)); + msecs_to_jiffies(tx_time)); + end = jiffies; if (!timeout) { + dev_warn(&master->dev, + "%s: timeout transferring %u bytes@%iHz for %i(%i)ms", + dev_name(&spi->dev), tfr->len, tfr->speed_hz, + jiffies_to_msecs(end - start), tx_time); ret = -ETIMEDOUT; goto out; } diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c index 42e2c4bd690a..7fce79a60608 100644 --- a/drivers/spi/spi-sun6i.c +++ b/drivers/spi/spi-sun6i.c @@ -160,6 +160,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master, { struct sun6i_spi *sspi = spi_master_get_devdata(master); unsigned int mclk_rate, div, timeout; + unsigned int start, end, tx_time; unsigned int tx_len = 0; int ret = 0; u32 reg; @@ -269,9 +270,16 @@ static int sun6i_spi_transfer_one(struct spi_master *master, reg = sun6i_spi_read(sspi, SUN6I_TFR_CTL_REG); sun6i_spi_write(sspi, SUN6I_TFR_CTL_REG, reg | SUN6I_TFR_CTL_XCH); + tx_time = max(tfr->len * 8 * 2 / (tfr->speed_hz / 1000), 100U); + start = jiffies; timeout = wait_for_completion_timeout(&sspi->done, - msecs_to_jiffies(1000)); + msecs_to_jiffies(tx_time)); + end = jiffies; if (!timeout) { + dev_warn(&master->dev, + "%s: timeout transferring %u bytes@%iHz for %i(%i)ms", + dev_name(&spi->dev), tfr->len, tfr->speed_hz, + jiffies_to_msecs(end - start), tx_time); ret = -ETIMEDOUT; goto out; } diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c index 443f664534e1..29ea8d2f9824 100644 --- a/drivers/spi/spi-ti-qspi.c +++ b/drivers/spi/spi-ti-qspi.c @@ -646,6 +646,13 @@ free_master: static int ti_qspi_remove(struct platform_device *pdev) { + struct ti_qspi *qspi = platform_get_drvdata(pdev); + int rc; + + rc = spi_master_suspend(qspi->master); + if (rc) + return rc; + pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); diff --git a/drivers/staging/iio/accel/sca3000_core.c 
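The sun4i/sun6i SPI hunks above replace the fixed 1000 ms completion wait with a bound scaled to the transfer: twice the nominal transfer time in milliseconds, floored at 100 ms. The arithmetic is small enough to check standalone; the lengths and clock rates below are only examples.

#include <stdio.h>

static unsigned int tx_time_ms(unsigned int len, unsigned int speed_hz)
{
	/* len bytes * 8 bits * 2x margin, divided by the clock in kHz */
	unsigned int t = len * 8 * 2 / (speed_hz / 1000);

	return t > 100u ? t : 100u;     /* max(t, 100U) from the hunk */
}

int main(void)
{
	printf("64 bytes  @ 100 kHz -> %u ms\n", tx_time_ms(64, 100000));
	printf("128 bytes @ 10 kHz  -> %u ms\n", tx_time_ms(128, 10000));
	return 0;
}

The first case hits the 100 ms floor; the second shows the bound growing with slow clocks and long transfers instead of timing out prematurely.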
b/drivers/staging/iio/accel/sca3000_core.c index a8f533af9eca..ec12181822e6 100644 --- a/drivers/staging/iio/accel/sca3000_core.c +++ b/drivers/staging/iio/accel/sca3000_core.c @@ -594,7 +594,7 @@ static ssize_t sca3000_read_frequency(struct device *dev, goto error_ret_mut; ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL); mutex_unlock(&st->lock); - if (ret) + if (ret < 0) goto error_ret; val = ret; if (base_freq > 0) diff --git a/drivers/staging/iio/adc/ad7606_spi.c b/drivers/staging/iio/adc/ad7606_spi.c index 825da0769936..9587fa86dc69 100644 --- a/drivers/staging/iio/adc/ad7606_spi.c +++ b/drivers/staging/iio/adc/ad7606_spi.c @@ -21,7 +21,7 @@ static int ad7606_spi_read_block(struct device *dev, { struct spi_device *spi = to_spi_device(dev); int i, ret; - unsigned short *data; + unsigned short *data = buf; __be16 *bdata = buf; ret = spi_read(spi, buf, count * 2); diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c index 9f43976f4ef2..170ac980abcb 100644 --- a/drivers/staging/iio/impedance-analyzer/ad5933.c +++ b/drivers/staging/iio/impedance-analyzer/ad5933.c @@ -444,10 +444,10 @@ static ssize_t ad5933_store(struct device *dev, st->settling_cycles = val; /* 2x, 4x handling, see datasheet */ - if (val > 511) - val = (val >> 1) | (1 << 9); - else if (val > 1022) + if (val > 1022) val = (val >> 2) | (3 << 9); + else if (val > 511) + val = (val >> 1) | (1 << 9); dat = cpu_to_be16(val); ret = ad5933_i2c_write(st->client, diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c index f856c4544eea..51e0d32883ba 100644 --- a/drivers/tty/pty.c +++ b/drivers/tty/pty.c @@ -667,8 +667,11 @@ static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty) fsi = tty->driver_data; else fsi = tty->link->driver_data; - devpts_kill_index(fsi, tty->index); - devpts_release(fsi); + + if (fsi) { + devpts_kill_index(fsi, tty->index); + devpts_release(fsi); + } } static const struct tty_operations ptm_unix98_ops = { diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index dc125322f48f..5b0fe97c46ca 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -750,6 +750,7 @@ static void visual_init(struct vc_data *vc, int num, int init) vc->vc_complement_mask = 0; vc->vc_can_do_color = 0; vc->vc_panic_force_write = false; + vc->vc_cur_blink_ms = DEFAULT_CURSOR_BLINK_MS; vc->vc_sw->con_init(vc, init); if (!vc->vc_complement_mask) vc->vc_complement_mask = vc->vc_can_do_color ? 0x7700 : 0x0800; diff --git a/drivers/usb/common/usb-otg-fsm.c b/drivers/usb/common/usb-otg-fsm.c index 9059b7dc185e..2f537bbdda09 100644 --- a/drivers/usb/common/usb-otg-fsm.c +++ b/drivers/usb/common/usb-otg-fsm.c @@ -21,6 +21,7 @@ * 675 Mass Ave, Cambridge, MA 02139, USA. */ +#include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/mutex.h> @@ -450,3 +451,4 @@ int otg_statemachine(struct otg_fsm *fsm) return fsm->state_changed; } EXPORT_SYMBOL_GPL(otg_statemachine); +MODULE_LICENSE("GPL"); diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 34b837ae1ed7..d2e3f655c26f 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -2598,26 +2598,23 @@ EXPORT_SYMBOL_GPL(usb_create_hcd); * Don't deallocate the bandwidth_mutex until the last shared usb_hcd is * deallocated. * - * Make sure to only deallocate the bandwidth_mutex when the primary HCD is - * freed. 
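The ad5933 reorder above fixes dead code: with the old order, any value above 1022 also satisfied the earlier val > 511 test, so the 4x-multiplier branch could never run. A tiny encoder following the corrected order, with the bit layout copied from the hunk and two sample inputs exercising both ranges:

#include <stdint.h>
#include <stdio.h>

static uint16_t encode_settling_cycles(uint16_t val)
{
	if (val > 1022)                 /* 4x multiplier range */
		val = (val >> 2) | (3 << 9);
	else if (val > 511)             /* 2x multiplier range */
		val = (val >> 1) | (1 << 9);
	return val;                     /* values <= 511 encode directly */
}

int main(void)
{
	printf("600  -> 0x%04x\n", encode_settling_cycles(600));   /* 2x path */
	printf("2000 -> 0x%04x\n", encode_settling_cycles(2000));  /* 4x path */
	return 0;
}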
When hcd_release() is called for either hcd in a peer set - * invalidate the peer's ->shared_hcd and ->primary_hcd pointers to - * block new peering attempts + * Make sure to deallocate the bandwidth_mutex only when the last HCD is + * freed. When hcd_release() is called for either hcd in a peer set, + * invalidate the peer's ->shared_hcd and ->primary_hcd pointers. */ static void hcd_release(struct kref *kref) { struct usb_hcd *hcd = container_of (kref, struct usb_hcd, kref); mutex_lock(&usb_port_peer_mutex); - if (usb_hcd_is_primary_hcd(hcd)) { - kfree(hcd->address0_mutex); - kfree(hcd->bandwidth_mutex); - } if (hcd->shared_hcd) { struct usb_hcd *peer = hcd->shared_hcd; peer->shared_hcd = NULL; - if (peer->primary_hcd == hcd) - peer->primary_hcd = NULL; + peer->primary_hcd = NULL; + } else { + kfree(hcd->address0_mutex); + kfree(hcd->bandwidth_mutex); } mutex_unlock(&usb_port_peer_mutex); kfree(hcd); diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c index 50d6ae6f88bc..89a2f712fdfe 100644 --- a/drivers/usb/dwc3/dwc3-st.c +++ b/drivers/usb/dwc3/dwc3-st.c @@ -233,7 +233,8 @@ static int st_dwc3_probe(struct platform_device *pdev) dev_vdbg(&pdev->dev, "glue-logic addr 0x%p, syscfg-reg offset 0x%x\n", dwc3_data->glue_base, dwc3_data->syscfg_reg_off); - dwc3_data->rstc_pwrdn = devm_reset_control_get(dev, "powerdown"); + dwc3_data->rstc_pwrdn = + devm_reset_control_get_exclusive(dev, "powerdown"); if (IS_ERR(dwc3_data->rstc_pwrdn)) { dev_err(&pdev->dev, "could not get power controller\n"); ret = PTR_ERR(dwc3_data->rstc_pwrdn); @@ -243,7 +244,8 @@ static int st_dwc3_probe(struct platform_device *pdev) /* Manage PowerDown */ reset_control_deassert(dwc3_data->rstc_pwrdn); - dwc3_data->rstc_rst = devm_reset_control_get(dev, "softreset"); + dwc3_data->rstc_rst = + devm_reset_control_get_shared(dev, "softreset"); if (IS_ERR(dwc3_data->rstc_rst)) { dev_err(&pdev->dev, "could not get reset controller\n"); ret = PTR_ERR(dwc3_data->rstc_rst); diff --git a/drivers/usb/host/ehci-st.c b/drivers/usb/host/ehci-st.c index a94ed677d937..be4a2788fc58 100644 --- a/drivers/usb/host/ehci-st.c +++ b/drivers/usb/host/ehci-st.c @@ -206,7 +206,8 @@ static int st_ehci_platform_probe(struct platform_device *dev) priv->clk48 = NULL; } - priv->pwr = devm_reset_control_get_optional(&dev->dev, "power"); + priv->pwr = + devm_reset_control_get_optional_shared(&dev->dev, "power"); if (IS_ERR(priv->pwr)) { err = PTR_ERR(priv->pwr); if (err == -EPROBE_DEFER) @@ -214,7 +215,8 @@ static int st_ehci_platform_probe(struct platform_device *dev) priv->pwr = NULL; } - priv->rst = devm_reset_control_get_optional(&dev->dev, "softreset"); + priv->rst = + devm_reset_control_get_optional_shared(&dev->dev, "softreset"); if (IS_ERR(priv->rst)) { err = PTR_ERR(priv->rst); if (err == -EPROBE_DEFER) diff --git a/drivers/usb/host/ohci-st.c b/drivers/usb/host/ohci-st.c index acf2eb2a5676..02816a1515a1 100644 --- a/drivers/usb/host/ohci-st.c +++ b/drivers/usb/host/ohci-st.c @@ -188,13 +188,15 @@ static int st_ohci_platform_probe(struct platform_device *dev) priv->clk48 = NULL; } - priv->pwr = devm_reset_control_get_optional(&dev->dev, "power"); + priv->pwr = + devm_reset_control_get_optional_shared(&dev->dev, "power"); if (IS_ERR(priv->pwr)) { err = PTR_ERR(priv->pwr); goto err_put_clks; } - priv->rst = devm_reset_control_get_optional(&dev->dev, "softreset"); + priv->rst = + devm_reset_control_get_optional_shared(&dev->dev, "softreset"); if (IS_ERR(priv->rst)) { err = PTR_ERR(priv->rst); goto err_put_clks; diff --git 
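The hcd_release() rework above moves the kfree of the shared address0/bandwidth mutexes to the branch where no peer HCD remains: the first peer released only severs the pointers, the last one frees what the pair shared. The sketch below reproduces that last-one-out ownership rule with invented types and plain malloc/free.

#include <stdio.h>
#include <stdlib.h>

struct hcd {
	struct hcd *peer;       /* other half of the pair, or NULL */
	int *shared_state;      /* allocation owned jointly by the pair */
};

static void hcd_release(struct hcd *hcd)
{
	if (hcd->peer) {
		/* a peer remains: detach, it will free the shared state */
		hcd->peer->peer = NULL;
	} else {
		/* last one out frees what the pair shared */
		free(hcd->shared_state);
	}
	free(hcd);
}

int main(void)
{
	struct hcd *a = calloc(1, sizeof(*a));
	struct hcd *b = calloc(1, sizeof(*b));

	a->shared_state = b->shared_state = malloc(sizeof(int));
	a->peer = b;
	b->peer = a;

	hcd_release(a);         /* b still holds the shared state */
	hcd_release(b);         /* now the shared state is freed */
	printf("released both peers without leaking or double-freeing\n");
	return 0;
}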
a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c index b84c291ba1eb..d7b78d531e63 100644 --- a/fs/9p/vfs_file.c +++ b/fs/9p/vfs_file.c @@ -74,7 +74,7 @@ int v9fs_file_open(struct inode *inode, struct file *file) v9fs_proto_dotu(v9ses)); fid = file->private_data; if (!fid) { - fid = v9fs_fid_clone(file->f_path.dentry); + fid = v9fs_fid_clone(file_dentry(file)); if (IS_ERR(fid)) return PTR_ERR(fid); @@ -100,7 +100,7 @@ int v9fs_file_open(struct inode *inode, struct file *file) * because we want write after unlink usecase * to work. */ - fid = v9fs_writeback_fid(file->f_path.dentry); + fid = v9fs_writeback_fid(file_dentry(file)); if (IS_ERR(fid)) { err = PTR_ERR(fid); mutex_unlock(&v9inode->v_mutex); @@ -516,7 +516,7 @@ v9fs_mmap_file_mmap(struct file *filp, struct vm_area_struct *vma) * because we want write after unlink usecase * to work. */ - fid = v9fs_writeback_fid(filp->f_path.dentry); + fid = v9fs_writeback_fid(file_dentry(filp)); if (IS_ERR(fid)) { retval = PTR_ERR(fid); mutex_unlock(&v9inode->v_mutex); diff --git a/fs/ceph/export.c b/fs/ceph/export.c index 6e72c98162d5..1780218a48f0 100644 --- a/fs/ceph/export.c +++ b/fs/ceph/export.c @@ -95,10 +95,8 @@ static struct dentry *__fh_to_dentry(struct super_block *sb, u64 ino) } dentry = d_obtain_alias(inode); - if (IS_ERR(dentry)) { - iput(inode); + if (IS_ERR(dentry)) return dentry; - } err = ceph_init_dentry(dentry); if (err < 0) { dput(dentry); @@ -167,10 +165,8 @@ static struct dentry *__get_parent(struct super_block *sb, return ERR_PTR(-ENOENT); dentry = d_obtain_alias(inode); - if (IS_ERR(dentry)) { - iput(inode); + if (IS_ERR(dentry)) return dentry; - } err = ceph_init_dentry(dentry); if (err < 0) { dput(dentry); @@ -210,7 +206,7 @@ static struct dentry *ceph_fh_to_parent(struct super_block *sb, dout("fh_to_parent %llx\n", cfh->parent_ino); dentry = __get_parent(sb, NULL, cfh->ino); - if (IS_ERR(dentry) && PTR_ERR(dentry) == -ENOENT) + if (unlikely(dentry == ERR_PTR(-ENOENT))) dentry = __fh_to_dentry(sb, cfh->parent_ino); return dentry; } @@ -208,7 +208,12 @@ static ssize_t dax_io(struct inode *inode, struct iov_iter *iter, dax.addr += first; size = map_len - first; } - max = min(pos + size, end); + /* + * pos + size is one past the last offset for IO, + * so pos + size can overflow loff_t at extreme offsets. + * Cast to u64 to catch this and get the true minimum. 
+ */ + max = min_t(u64, pos + size, end); } if (iov_iter_rw(iter) == WRITE) { diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index ccd4971cc6c1..264f07c7754e 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -341,8 +341,10 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry, struct dentry *newent; bool outarg_valid = true; + fuse_lock_inode(dir); err = fuse_lookup_name(dir->i_sb, get_node_id(dir), &entry->d_name, &outarg, &inode); + fuse_unlock_inode(dir); if (err == -ENOENT) { outarg_valid = false; err = 0; @@ -1341,7 +1343,9 @@ static int fuse_readdir(struct file *file, struct dir_context *ctx) fuse_read_fill(req, file, ctx->pos, PAGE_SIZE, FUSE_READDIR); } + fuse_lock_inode(inode); fuse_request_send(fc, req); + fuse_unlock_inode(inode); nbytes = req->out.args[0].size; err = req->out.h.error; fuse_put_request(fc, req); diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index eddbe02c4028..929c383432b0 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -110,6 +110,9 @@ struct fuse_inode { /** Miscellaneous bits describing inode state */ unsigned long state; + + /** Lock for serializing lookup and readdir for back compatibility*/ + struct mutex mutex; }; /** FUSE inode state bits */ @@ -540,6 +543,9 @@ struct fuse_conn { /** write-back cache policy (default is write-through) */ unsigned writeback_cache:1; + /** allow parallel lookups and readdir (default is serialized) */ + unsigned parallel_dirops:1; + /* * The following bitfields are only for optimization purposes * and hence races in setting them will not cause malfunction @@ -956,4 +962,7 @@ int fuse_do_setattr(struct inode *inode, struct iattr *attr, void fuse_set_initialized(struct fuse_conn *fc); +void fuse_unlock_inode(struct inode *inode); +void fuse_lock_inode(struct inode *inode); + #endif /* _FS_FUSE_I_H */ diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 1ce67668a8e1..9961d8432ce3 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -97,6 +97,7 @@ static struct inode *fuse_alloc_inode(struct super_block *sb) INIT_LIST_HEAD(&fi->queued_writes); INIT_LIST_HEAD(&fi->writepages); init_waitqueue_head(&fi->page_waitq); + mutex_init(&fi->mutex); fi->forget = fuse_alloc_forget(); if (!fi->forget) { kmem_cache_free(fuse_inode_cachep, inode); @@ -117,6 +118,7 @@ static void fuse_destroy_inode(struct inode *inode) struct fuse_inode *fi = get_fuse_inode(inode); BUG_ON(!list_empty(&fi->write_files)); BUG_ON(!list_empty(&fi->queued_writes)); + mutex_destroy(&fi->mutex); kfree(fi->forget); call_rcu(&inode->i_rcu, fuse_i_callback); } @@ -351,6 +353,18 @@ int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid, return 0; } +void fuse_lock_inode(struct inode *inode) +{ + if (!get_fuse_conn(inode)->parallel_dirops) + mutex_lock(&get_fuse_inode(inode)->mutex); +} + +void fuse_unlock_inode(struct inode *inode) +{ + if (!get_fuse_conn(inode)->parallel_dirops) + mutex_unlock(&get_fuse_inode(inode)->mutex); +} + static void fuse_umount_begin(struct super_block *sb) { fuse_abort_conn(get_fuse_conn_super(sb)); @@ -898,6 +912,8 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req) fc->async_dio = 1; if (arg->flags & FUSE_WRITEBACK_CACHE) fc->writeback_cache = 1; + if (arg->flags & FUSE_PARALLEL_DIROPS) + fc->parallel_dirops = 1; if (arg->time_gran && arg->time_gran <= 1000000000) fc->sb->s_time_gran = arg->time_gran; } else { @@ -928,7 +944,8 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req) FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ | FUSE_FLOCK_LOCKS | 
FUSE_IOCTL_DIR | FUSE_AUTO_INVAL_DATA | FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO | - FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT; + FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT | + FUSE_PARALLEL_DIROPS; req->in.h.opcode = FUSE_INIT; req->in.numargs = 1; req->in.args[0].size = sizeof(*arg); diff --git a/fs/libfs.c b/fs/libfs.c index cedeacbae303..74dc8b9e7f53 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -84,6 +84,61 @@ int dcache_dir_close(struct inode *inode, struct file *file) } EXPORT_SYMBOL(dcache_dir_close); +/* parent is locked at least shared */ +static struct dentry *next_positive(struct dentry *parent, + struct list_head *from, + int count) +{ + unsigned *seq = &parent->d_inode->i_dir_seq, n; + struct dentry *res; + struct list_head *p; + bool skipped; + int i; + +retry: + i = count; + skipped = false; + n = smp_load_acquire(seq) & ~1; + res = NULL; + rcu_read_lock(); + for (p = from->next; p != &parent->d_subdirs; p = p->next) { + struct dentry *d = list_entry(p, struct dentry, d_child); + if (!simple_positive(d)) { + skipped = true; + } else if (!--i) { + res = d; + break; + } + } + rcu_read_unlock(); + if (skipped) { + smp_rmb(); + if (unlikely(*seq != n)) + goto retry; + } + return res; +} + +static void move_cursor(struct dentry *cursor, struct list_head *after) +{ + struct dentry *parent = cursor->d_parent; + unsigned n, *seq = &parent->d_inode->i_dir_seq; + spin_lock(&parent->d_lock); + for (;;) { + n = *seq; + if (!(n & 1) && cmpxchg(seq, n, n + 1) == n) + break; + cpu_relax(); + } + __list_del(cursor->d_child.prev, cursor->d_child.next); + if (after) + list_add(&cursor->d_child, after); + else + list_add_tail(&cursor->d_child, &parent->d_subdirs); + smp_store_release(seq, n + 2); + spin_unlock(&parent->d_lock); +} + loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence) { struct dentry *dentry = file->f_path.dentry; @@ -99,25 +154,14 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence) if (offset != file->f_pos) { file->f_pos = offset; if (file->f_pos >= 2) { - struct list_head *p; struct dentry *cursor = file->private_data; + struct dentry *to; loff_t n = file->f_pos - 2; - spin_lock(&dentry->d_lock); - /* d_lock not required for cursor */ - list_del(&cursor->d_child); - p = dentry->d_subdirs.next; - while (n && p != &dentry->d_subdirs) { - struct dentry *next; - next = list_entry(p, struct dentry, d_child); - spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED); - if (simple_positive(next)) - n--; - spin_unlock(&next->d_lock); - p = p->next; - } - list_add_tail(&cursor->d_child, p); - spin_unlock(&dentry->d_lock); + inode_lock_shared(dentry->d_inode); + to = next_positive(dentry, &dentry->d_subdirs, n); + move_cursor(cursor, to ? 
&to->d_child : NULL); + inode_unlock_shared(dentry->d_inode); } } return offset; @@ -140,36 +184,25 @@ int dcache_readdir(struct file *file, struct dir_context *ctx) { struct dentry *dentry = file->f_path.dentry; struct dentry *cursor = file->private_data; - struct list_head *p, *q = &cursor->d_child; + struct list_head *p = &cursor->d_child; + struct dentry *next; + bool moved = false; if (!dir_emit_dots(file, ctx)) return 0; - spin_lock(&dentry->d_lock); - if (ctx->pos == 2) - list_move(q, &dentry->d_subdirs); - for (p = q->next; p != &dentry->d_subdirs; p = p->next) { - struct dentry *next = list_entry(p, struct dentry, d_child); - spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED); - if (!simple_positive(next)) { - spin_unlock(&next->d_lock); - continue; - } - - spin_unlock(&next->d_lock); - spin_unlock(&dentry->d_lock); + if (ctx->pos == 2) + p = &dentry->d_subdirs; + while ((next = next_positive(dentry, p, 1)) != NULL) { if (!dir_emit(ctx, next->d_name.name, next->d_name.len, d_inode(next)->i_ino, dt_type(d_inode(next)))) - return 0; - spin_lock(&dentry->d_lock); - spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED); - /* next is still alive */ - list_move(q, p); - spin_unlock(&next->d_lock); - p = q; + break; + moved = true; + p = &next->d_child; ctx->pos++; } - spin_unlock(&dentry->d_lock); + if (moved) + move_cursor(cursor, p); return 0; } EXPORT_SYMBOL(dcache_readdir); diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c index 154a107cd376..fc4084ef4736 100644 --- a/fs/lockd/svc.c +++ b/fs/lockd/svc.c @@ -335,12 +335,17 @@ static struct notifier_block lockd_inet6addr_notifier = { }; #endif -static void lockd_svc_exit_thread(void) +static void lockd_unregister_notifiers(void) { unregister_inetaddr_notifier(&lockd_inetaddr_notifier); #if IS_ENABLED(CONFIG_IPV6) unregister_inet6addr_notifier(&lockd_inet6addr_notifier); #endif +} + +static void lockd_svc_exit_thread(void) +{ + lockd_unregister_notifiers(); svc_exit_thread(nlmsvc_rqst); } @@ -462,7 +467,7 @@ int lockd_up(struct net *net) * Note: svc_serv structures have an initial use count of 1, * so we exit through here on both success and failure. 
*/ -err_net: +err_put: svc_destroy(serv); err_create: mutex_unlock(&nlmsvc_mutex); @@ -470,7 +475,9 @@ err_create: err_start: lockd_down_net(serv, net); - goto err_net; +err_net: + lockd_unregister_notifiers(); + goto err_put; } EXPORT_SYMBOL_GPL(lockd_up); diff --git a/fs/locks.c b/fs/locks.c index 7c5f91be9b65..ee1b15f6fc13 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -1628,7 +1628,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr { struct file_lock *fl, *my_fl = NULL, *lease; struct dentry *dentry = filp->f_path.dentry; - struct inode *inode = dentry->d_inode; + struct inode *inode = file_inode(filp); struct file_lock_context *ctx; bool is_deleg = (*flp)->fl_flags & FL_DELEG; int error; diff --git a/fs/namespace.c b/fs/namespace.c index 783004af5707..419f746d851d 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -1562,6 +1562,7 @@ void __detach_mounts(struct dentry *dentry) goto out_unlock; lock_mount_hash(); + event++; while (!hlist_empty(&mp->m_list)) { mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list); if (mnt->mnt.mnt_flags & MNT_UMOUNT) { diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c index 1dbeab6cf96e..c831c2e5f803 100644 --- a/fs/overlayfs/inode.c +++ b/fs/overlayfs/inode.c @@ -59,16 +59,37 @@ int ovl_setattr(struct dentry *dentry, struct iattr *attr) if (err) goto out; + if (attr->ia_valid & ATTR_SIZE) { + struct inode *realinode = d_inode(ovl_dentry_real(dentry)); + + err = -ETXTBSY; + if (atomic_read(&realinode->i_writecount) < 0) + goto out_drop_write; + } + err = ovl_copy_up(dentry); if (!err) { + struct inode *winode = NULL; + upperdentry = ovl_dentry_upper(dentry); + if (attr->ia_valid & ATTR_SIZE) { + winode = d_inode(upperdentry); + err = get_write_access(winode); + if (err) + goto out_drop_write; + } + inode_lock(upperdentry->d_inode); err = notify_change(upperdentry, attr, NULL); if (!err) ovl_copyattr(upperdentry->d_inode, dentry->d_inode); inode_unlock(upperdentry->d_inode); + + if (winode) + put_write_access(winode); } +out_drop_write: ovl_drop_write(dentry); out: return err; @@ -121,16 +142,18 @@ int ovl_permission(struct inode *inode, int mask) err = vfs_getattr(&realpath, &stat); if (err) - return err; + goto out_dput; + err = -ESTALE; if ((stat.mode ^ inode->i_mode) & S_IFMT) - return -ESTALE; + goto out_dput; inode->i_mode = stat.mode; inode->i_uid = stat.uid; inode->i_gid = stat.gid; - return generic_permission(inode, mask); + err = generic_permission(inode, mask); + goto out_dput; } /* Careful in RCU walk mode */ diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index ce02f46029da..9a7693d5f8ff 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -1082,11 +1082,13 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) if (err < 0) goto out_put_workdir; - if (!err) { - pr_err("overlayfs: upper fs needs to support d_type.\n"); - err = -EINVAL; - goto out_put_workdir; - } + /* + * We allowed this configuration and don't want to + * break users over kernel upgrade. So warn instead + * of erroring out. 
+ */ + if (!err) + pr_warn("overlayfs: upper fs needs to support d_type.\n"); } } diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h index 9094599a1150..33466bfc6440 100644 --- a/include/drm/i915_pciids.h +++ b/include/drm/i915_pciids.h @@ -309,6 +309,7 @@ INTEL_VGA_DEVICE(0x5906, info), /* ULT GT1 */ \ INTEL_VGA_DEVICE(0x590E, info), /* ULX GT1 */ \ INTEL_VGA_DEVICE(0x5902, info), /* DT GT1 */ \ + INTEL_VGA_DEVICE(0x5908, info), /* Halo GT1 */ \ INTEL_VGA_DEVICE(0x590B, info), /* Halo GT1 */ \ INTEL_VGA_DEVICE(0x590A, info) /* SRV GT1 */ @@ -322,15 +323,12 @@ INTEL_VGA_DEVICE(0x591D, info) /* WKS GT2 */ #define INTEL_KBL_GT3_IDS(info) \ + INTEL_VGA_DEVICE(0x5923, info), /* ULT GT3 */ \ INTEL_VGA_DEVICE(0x5926, info), /* ULT GT3 */ \ - INTEL_VGA_DEVICE(0x592B, info), /* Halo GT3 */ \ - INTEL_VGA_DEVICE(0x592A, info) /* SRV GT3 */ + INTEL_VGA_DEVICE(0x5927, info) /* ULT GT3 */ #define INTEL_KBL_GT4_IDS(info) \ - INTEL_VGA_DEVICE(0x5932, info), /* DT GT4 */ \ - INTEL_VGA_DEVICE(0x593B, info), /* Halo GT4 */ \ - INTEL_VGA_DEVICE(0x593A, info), /* SRV GT4 */ \ - INTEL_VGA_DEVICE(0x593D, info) /* WKS GT4 */ + INTEL_VGA_DEVICE(0x593B, info) /* Halo GT4 */ #define INTEL_KBL_IDS(info) \ INTEL_KBL_GT1_IDS(info), \ diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h index fe389ac31489..92e7e97ca8ff 100644 --- a/include/kvm/arm_pmu.h +++ b/include/kvm/arm_pmu.h @@ -18,13 +18,13 @@ #ifndef __ASM_ARM_KVM_PMU_H #define __ASM_ARM_KVM_PMU_H -#ifdef CONFIG_KVM_ARM_PMU - #include <linux/perf_event.h> #include <asm/perf_event.h> #define ARMV8_PMU_CYCLE_IDX (ARMV8_PMU_MAX_COUNTERS - 1) +#ifdef CONFIG_KVM_ARM_PMU + struct kvm_pmc { u8 idx; /* index into the pmu->pmc array */ struct perf_event *perf_event; diff --git a/include/linux/mfd/da9052/da9052.h b/include/linux/mfd/da9052/da9052.h index c18a4c19d6fc..ce9230af09c2 100644 --- a/include/linux/mfd/da9052/da9052.h +++ b/include/linux/mfd/da9052/da9052.h @@ -171,7 +171,7 @@ static inline int da9052_group_read(struct da9052 *da9052, unsigned char reg, static inline int da9052_group_write(struct da9052 *da9052, unsigned char reg, unsigned reg_cnt, unsigned char *val) { - int ret; + int ret = 0; int i; for (i = 0; i < reg_cnt; i++) { diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index f21c45941887..81e8396574f4 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -654,6 +654,7 @@ struct mlx5_cmd_work_ent { void *uout; int uout_size; mlx5_cmd_cbk_t callback; + struct delayed_work cb_timeout_work; void *context; int idx; struct completion done; diff --git a/include/linux/pwm.h b/include/linux/pwm.h index 908b67c847cd..c038ae36b10e 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -464,6 +464,8 @@ static inline bool pwm_can_sleep(struct pwm_device *pwm) static inline void pwm_apply_args(struct pwm_device *pwm) { + struct pwm_state state = { }; + /* * PWM users calling pwm_apply_args() expect to have a fresh config * where the polarity and period are set according to pwm_args info. @@ -476,18 +478,20 @@ static inline void pwm_apply_args(struct pwm_device *pwm) * at startup (even if they are actually enabled), thus authorizing * polarity setting. * - * Instead of setting ->enabled to false, we call pwm_disable() - * before pwm_set_polarity() to ensure that everything is configured - * as expected, and the PWM is really disabled when the user request - * it. 
+ * To fulfill this requirement, we apply a new state which disables + * the PWM device and set the reference period and polarity config. * * Note that PWM users requiring a smooth handover between the * bootloader and the kernel (like critical regulators controlled by * PWM devices) will have to switch to the atomic API and avoid calling * pwm_apply_args(). */ - pwm_disable(pwm); - pwm_set_polarity(pwm, pwm->args.polarity); + + state.enabled = false; + state.polarity = pwm->args.polarity; + state.period = pwm->args.period; + + pwm_apply_state(pwm, &state); } struct pwm_lookup { diff --git a/include/linux/reset.h b/include/linux/reset.h index ec0306ce7b92..45a4abeb6acb 100644 --- a/include/linux/reset.h +++ b/include/linux/reset.h @@ -84,8 +84,8 @@ static inline struct reset_control *__devm_reset_control_get( #endif /* CONFIG_RESET_CONTROLLER */ /** - * reset_control_get - Lookup and obtain an exclusive reference to a - * reset controller. + * reset_control_get_exclusive - Lookup and obtain an exclusive reference + * to a reset controller. * @dev: device to be reset by the controller * @id: reset line name * @@ -98,8 +98,8 @@ static inline struct reset_control *__devm_reset_control_get( * * Use of id names is optional. */ -static inline struct reset_control *__must_check reset_control_get( - struct device *dev, const char *id) +static inline struct reset_control * +__must_check reset_control_get_exclusive(struct device *dev, const char *id) { #ifndef CONFIG_RESET_CONTROLLER WARN_ON(1); @@ -107,12 +107,6 @@ static inline struct reset_control *__must_check reset_control_get( return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 0); } -static inline struct reset_control *reset_control_get_optional( - struct device *dev, const char *id) -{ - return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 0); -} - /** * reset_control_get_shared - Lookup and obtain a shared reference to a * reset controller. @@ -141,9 +135,21 @@ static inline struct reset_control *reset_control_get_shared( return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 1); } +static inline struct reset_control *reset_control_get_optional_exclusive( + struct device *dev, const char *id) +{ + return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 0); +} + +static inline struct reset_control *reset_control_get_optional_shared( + struct device *dev, const char *id) +{ + return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 1); +} + /** - * of_reset_control_get - Lookup and obtain an exclusive reference to a - * reset controller. + * of_reset_control_get_exclusive - Lookup and obtain an exclusive reference + * to a reset controller. * @node: device to be reset by the controller * @id: reset line name * @@ -151,15 +157,41 @@ static inline struct reset_control *reset_control_get_shared( * * Use of id names is optional. */ -static inline struct reset_control *of_reset_control_get( +static inline struct reset_control *of_reset_control_get_exclusive( struct device_node *node, const char *id) { return __of_reset_control_get(node, id, 0, 0); } /** - * of_reset_control_get_by_index - Lookup and obtain an exclusive reference to - * a reset controller by index. + * of_reset_control_get_shared - Lookup and obtain an shared reference + * to a reset controller. 
+ * @node: device to be reset by the controller + * @id: reset line name + * + * When a reset-control is shared, the behavior of reset_control_assert / + * deassert is changed, the reset-core will keep track of a deassert_count + * and only (re-)assert the reset after reset_control_assert has been called + * as many times as reset_control_deassert was called. Also see the remark + * about shared reset-controls in the reset_control_assert docs. + * + * Calling reset_control_assert without first calling reset_control_deassert + * is not allowed on a shared reset control. Calling reset_control_reset is + * also not allowed on a shared reset control. + * Returns a struct reset_control or IS_ERR() condition containing errno. + * + * Use of id names is optional. + */ +static inline struct reset_control *of_reset_control_get_shared( + struct device_node *node, const char *id) +{ + return __of_reset_control_get(node, id, 0, 1); +} + +/** + * of_reset_control_get_exclusive_by_index - Lookup and obtain an exclusive + * reference to a reset controller + * by index. * @node: device to be reset by the controller * @index: index of the reset controller * @@ -167,49 +199,60 @@ static inline struct reset_control *of_reset_control_get( * in whatever order. Returns a struct reset_control or IS_ERR() condition * containing errno. */ -static inline struct reset_control *of_reset_control_get_by_index( +static inline struct reset_control *of_reset_control_get_exclusive_by_index( struct device_node *node, int index) { return __of_reset_control_get(node, NULL, index, 0); } /** - * devm_reset_control_get - resource managed reset_control_get() - * @dev: device to be reset by the controller - * @id: reset line name + * of_reset_control_get_shared_by_index - Lookup and obtain an shared + * reference to a reset controller + * by index. + * @node: device to be reset by the controller + * @index: index of the reset controller + * + * When a reset-control is shared, the behavior of reset_control_assert / + * deassert is changed, the reset-core will keep track of a deassert_count + * and only (re-)assert the reset after reset_control_assert has been called + * as many times as reset_control_deassert was called. Also see the remark + * about shared reset-controls in the reset_control_assert docs. + * + * Calling reset_control_assert without first calling reset_control_deassert + * is not allowed on a shared reset control. Calling reset_control_reset is + * also not allowed on a shared reset control. + * Returns a struct reset_control or IS_ERR() condition containing errno. * - * Managed reset_control_get(). For reset controllers returned from this - * function, reset_control_put() is called automatically on driver detach. - * See reset_control_get() for more information. + * This is to be used to perform a list of resets for a device or power domain + * in whatever order. Returns a struct reset_control or IS_ERR() condition + * containing errno. 
*/ -static inline struct reset_control *__must_check devm_reset_control_get( - struct device *dev, const char *id) -{ -#ifndef CONFIG_RESET_CONTROLLER - WARN_ON(1); -#endif - return __devm_reset_control_get(dev, id, 0, 0); -} - -static inline struct reset_control *devm_reset_control_get_optional( - struct device *dev, const char *id) +static inline struct reset_control *of_reset_control_get_shared_by_index( + struct device_node *node, int index) { - return __devm_reset_control_get(dev, id, 0, 0); + return __of_reset_control_get(node, NULL, index, 1); } /** - * devm_reset_control_get_by_index - resource managed reset_control_get + * devm_reset_control_get_exclusive - resource managed + * reset_control_get_exclusive() * @dev: device to be reset by the controller - * @index: index of the reset controller + * @id: reset line name * - * Managed reset_control_get(). For reset controllers returned from this - * function, reset_control_put() is called automatically on driver detach. - * See reset_control_get() for more information. + * Managed reset_control_get_exclusive(). For reset controllers returned + * from this function, reset_control_put() is called automatically on driver + * detach. + * + * See reset_control_get_exclusive() for more information. */ -static inline struct reset_control *devm_reset_control_get_by_index( - struct device *dev, int index) +static inline struct reset_control * +__must_check devm_reset_control_get_exclusive(struct device *dev, + const char *id) { - return __devm_reset_control_get(dev, NULL, index, 0); +#ifndef CONFIG_RESET_CONTROLLER + WARN_ON(1); +#endif + return __devm_reset_control_get(dev, id, 0, 0); } /** @@ -227,6 +270,36 @@ static inline struct reset_control *devm_reset_control_get_shared( return __devm_reset_control_get(dev, id, 0, 1); } +static inline struct reset_control *devm_reset_control_get_optional_exclusive( + struct device *dev, const char *id) +{ + return __devm_reset_control_get(dev, id, 0, 0); +} + +static inline struct reset_control *devm_reset_control_get_optional_shared( + struct device *dev, const char *id) +{ + return __devm_reset_control_get(dev, id, 0, 1); +} + +/** + * devm_reset_control_get_exclusive_by_index - resource managed + * reset_control_get_exclusive() + * @dev: device to be reset by the controller + * @index: index of the reset controller + * + * Managed reset_control_get_exclusive(). For reset controllers returned from + * this function, reset_control_put() is called automatically on driver + * detach. + * + * See reset_control_get_exclusive() for more information. + */ +static inline struct reset_control * +devm_reset_control_get_exclusive_by_index(struct device *dev, int index) +{ + return __devm_reset_control_get(dev, NULL, index, 0); +} + /** * devm_reset_control_get_shared_by_index - resource managed * reset_control_get_shared @@ -237,10 +310,60 @@ static inline struct reset_control *devm_reset_control_get_shared( * this function, reset_control_put() is called automatically on driver detach. * See reset_control_get_shared() for more information. 
*/ -static inline struct reset_control *devm_reset_control_get_shared_by_index( - struct device *dev, int index) +static inline struct reset_control * +devm_reset_control_get_shared_by_index(struct device *dev, int index) { return __devm_reset_control_get(dev, NULL, index, 1); } +/* + * TEMPORARY calls to use during transition: + * + * of_reset_control_get() => of_reset_control_get_exclusive() + * + * These inline function calls will be removed once all consumers + * have been moved over to the new explicit API. + */ +static inline struct reset_control *reset_control_get( + struct device *dev, const char *id) +{ + return reset_control_get_exclusive(dev, id); +} + +static inline struct reset_control *reset_control_get_optional( + struct device *dev, const char *id) +{ + return reset_control_get_optional_exclusive(dev, id); +} + +static inline struct reset_control *of_reset_control_get( + struct device_node *node, const char *id) +{ + return of_reset_control_get_exclusive(node, id); +} + +static inline struct reset_control *of_reset_control_get_by_index( + struct device_node *node, int index) +{ + return of_reset_control_get_exclusive_by_index(node, index); +} + +static inline struct reset_control *devm_reset_control_get( + struct device *dev, const char *id) +{ + return devm_reset_control_get_exclusive(dev, id); +} + +static inline struct reset_control *devm_reset_control_get_optional( + struct device *dev, const char *id) +{ + return devm_reset_control_get_optional_exclusive(dev, id); + +} + +static inline struct reset_control *devm_reset_control_get_by_index( + struct device *dev, int index) +{ + return devm_reset_control_get_exclusive_by_index(dev, index); +} #endif diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 638b0e004310..6f0b3e0adc73 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1079,6 +1079,7 @@ __skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4) } void __skb_get_hash(struct sk_buff *skb); +u32 __skb_get_hash_symmetric(struct sk_buff *skb); u32 skb_get_poff(const struct sk_buff *skb); u32 __skb_get_poff(const struct sk_buff *skb, void *data, const struct flow_keys *keys, int hlen); @@ -2887,6 +2888,25 @@ static inline void skb_postpush_rcsum(struct sk_buff *skb, } /** + * skb_push_rcsum - push skb and update receive checksum + * @skb: buffer to update + * @len: length of data pulled + * + * This function performs an skb_push on the packet and updates + * the CHECKSUM_COMPLETE checksum. It should be used on + * receive path processing instead of skb_push unless you know + * that the checksum difference is zero (e.g., a valid IP header) + * or you are setting ip_summed to CHECKSUM_NONE. 
+ */ +static inline unsigned char *skb_push_rcsum(struct sk_buff *skb, + unsigned int len) +{ + skb_push(skb, len); + skb_postpush_rcsum(skb, skb->data, len); + return skb->data; +} + +/** * pskb_trim_rcsum - trim received skb and update checksum * @skb: buffer to trim * @len: new length diff --git a/include/linux/usb/ehci_def.h b/include/linux/usb/ehci_def.h index 966889a20ea3..e479033bd782 100644 --- a/include/linux/usb/ehci_def.h +++ b/include/linux/usb/ehci_def.h @@ -180,11 +180,11 @@ struct ehci_regs { * PORTSCx */ /* HOSTPC: offset 0x84 */ - u32 hostpc[1]; /* HOSTPC extension */ + u32 hostpc[0]; /* HOSTPC extension */ #define HOSTPC_PHCD (1<<22) /* Phy clock disable */ #define HOSTPC_PSPD (3<<25) /* Port speed detection */ - u32 reserved5[16]; + u32 reserved5[17]; /* USBMODE_EX: offset 0xc8 */ u32 usbmode_ex; /* USB Device mode extension */ diff --git a/include/net/bonding.h b/include/net/bonding.h index 791800ddd6d9..6360c259da6d 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -34,6 +34,9 @@ #define BOND_DEFAULT_MIIMON 100 +#ifndef __long_aligned +#define __long_aligned __attribute__((aligned((sizeof(long))))) +#endif /* * Less bad way to call ioctl from within the kernel; this needs to be * done some other way to get the call out of interrupt context. @@ -138,7 +141,9 @@ struct bond_params { struct reciprocal_value reciprocal_packets_per_slave; u16 ad_actor_sys_prio; u16 ad_user_port_key; - u8 ad_actor_system[ETH_ALEN]; + + /* 2 bytes of padding : see ether_addr_equal_64bits() */ + u8 ad_actor_system[ETH_ALEN + 2]; }; struct bond_parm_tbl { diff --git a/include/net/ip.h b/include/net/ip.h index 37165fba3741..08f36cd2b874 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -313,10 +313,9 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst, return min(dst->dev->mtu, IP_MAX_MTU); } -static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb) +static inline unsigned int ip_skb_dst_mtu(struct sock *sk, + const struct sk_buff *skb) { - struct sock *sk = skb->sk; - if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) { bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED; diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h index 5974fae54e12..27e17363263a 100644 --- a/include/uapi/linux/fuse.h +++ b/include/uapi/linux/fuse.h @@ -105,6 +105,9 @@ * * 7.24 * - add FUSE_LSEEK for SEEK_HOLE and SEEK_DATA support + * + * 7.25 + * - add FUSE_PARALLEL_DIROPS */ #ifndef _LINUX_FUSE_H @@ -140,7 +143,7 @@ #define FUSE_KERNEL_VERSION 7 /** Minor version number of this interface */ -#define FUSE_KERNEL_MINOR_VERSION 24 +#define FUSE_KERNEL_MINOR_VERSION 25 /** The node ID of the root inode */ #define FUSE_ROOT_ID 1 @@ -234,6 +237,7 @@ struct fuse_file_lock { * FUSE_ASYNC_DIO: asynchronous direct I/O submission * FUSE_WRITEBACK_CACHE: use writeback cache for buffered writes * FUSE_NO_OPEN_SUPPORT: kernel supports zero-message opens + * FUSE_PARALLEL_DIROPS: allow parallel lookups and readdir */ #define FUSE_ASYNC_READ (1 << 0) #define FUSE_POSIX_LOCKS (1 << 1) @@ -253,6 +257,7 @@ struct fuse_file_lock { #define FUSE_ASYNC_DIO (1 << 15) #define FUSE_WRITEBACK_CACHE (1 << 16) #define FUSE_NO_OPEN_SUPPORT (1 << 17) +#define FUSE_PARALLEL_DIROPS (1 << 18) /** * CUSE INIT request/reply flags diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c index 2d25979273a6..77e7f69bf80d 100644 --- a/net/bridge/br_netfilter_hooks.c +++ b/net/bridge/br_netfilter_hooks.c @@ -700,7 +700,7 @@ static int 
br_nf_ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, int (*output)(struct net *, struct sock *, struct sk_buff *)) { - unsigned int mtu = ip_skb_dst_mtu(skb); + unsigned int mtu = ip_skb_dst_mtu(sk, skb); struct iphdr *iph = ip_hdr(skb); if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) || diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index a669dea146c6..61ad43f61c5e 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -651,6 +651,23 @@ void make_flow_keys_digest(struct flow_keys_digest *digest, } EXPORT_SYMBOL(make_flow_keys_digest); +static struct flow_dissector flow_keys_dissector_symmetric __read_mostly; + +u32 __skb_get_hash_symmetric(struct sk_buff *skb) +{ + struct flow_keys keys; + + __flow_hash_secret_init(); + + memset(&keys, 0, sizeof(keys)); + __skb_flow_dissect(skb, &flow_keys_dissector_symmetric, &keys, + NULL, 0, 0, 0, + FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL); + + return __flow_hash_from_keys(&keys, hashrnd); +} +EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric); + /** * __skb_get_hash: calculate a flow hash * @skb: sk_buff to calculate flow hash from @@ -868,6 +885,29 @@ static const struct flow_dissector_key flow_keys_dissector_keys[] = { }, }; +static const struct flow_dissector_key flow_keys_dissector_symmetric_keys[] = { + { + .key_id = FLOW_DISSECTOR_KEY_CONTROL, + .offset = offsetof(struct flow_keys, control), + }, + { + .key_id = FLOW_DISSECTOR_KEY_BASIC, + .offset = offsetof(struct flow_keys, basic), + }, + { + .key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS, + .offset = offsetof(struct flow_keys, addrs.v4addrs), + }, + { + .key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS, + .offset = offsetof(struct flow_keys, addrs.v6addrs), + }, + { + .key_id = FLOW_DISSECTOR_KEY_PORTS, + .offset = offsetof(struct flow_keys, ports), + }, +}; + static const struct flow_dissector_key flow_keys_buf_dissector_keys[] = { { .key_id = FLOW_DISSECTOR_KEY_CONTROL, @@ -889,6 +929,9 @@ static int __init init_default_flow_dissectors(void) skb_flow_dissector_init(&flow_keys_dissector, flow_keys_dissector_keys, ARRAY_SIZE(flow_keys_dissector_keys)); + skb_flow_dissector_init(&flow_keys_dissector_symmetric, + flow_keys_dissector_symmetric_keys, + ARRAY_SIZE(flow_keys_dissector_symmetric_keys)); skb_flow_dissector_init(&flow_keys_buf_dissector, flow_keys_buf_dissector_keys, ARRAY_SIZE(flow_keys_buf_dissector_keys)); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index e7ec6d3ad5f0..3864b4b68fa1 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -3017,24 +3017,6 @@ int skb_append_pagefrags(struct sk_buff *skb, struct page *page, EXPORT_SYMBOL_GPL(skb_append_pagefrags); /** - * skb_push_rcsum - push skb and update receive checksum - * @skb: buffer to update - * @len: length of data pulled - * - * This function performs an skb_push on the packet and updates - * the CHECKSUM_COMPLETE checksum. It should be used on - * receive path processing instead of skb_push unless you know - * that the checksum difference is zero (e.g., a valid IP header) - * or you are setting ip_summed to CHECKSUM_NONE. 
- */ -static unsigned char *skb_push_rcsum(struct sk_buff *skb, unsigned len) -{ - skb_push(skb, len); - skb_postpush_rcsum(skb, skb->data, len); - return skb->data; -} - -/** * skb_pull_rcsum - pull skb and update receive checksum * @skb: buffer to update * @len: length of data pulled diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c index df4803437888..a796fc7cbc35 100644 --- a/net/decnet/dn_fib.c +++ b/net/decnet/dn_fib.c @@ -41,6 +41,7 @@ #include <net/dn_fib.h> #include <net/dn_neigh.h> #include <net/dn_dev.h> +#include <net/nexthop.h> #define RT_MIN_TABLE 1 @@ -150,14 +151,13 @@ static int dn_fib_count_nhs(const struct nlattr *attr) struct rtnexthop *nhp = nla_data(attr); int nhs = 0, nhlen = nla_len(attr); - while(nhlen >= (int)sizeof(struct rtnexthop)) { - if ((nhlen -= nhp->rtnh_len) < 0) - return 0; + while (rtnh_ok(nhp, nhlen)) { nhs++; - nhp = RTNH_NEXT(nhp); + nhp = rtnh_next(nhp, &nhlen); } - return nhs; + /* leftover implies invalid nexthop configuration, discard it */ + return nhlen > 0 ? 0 : nhs; } static int dn_fib_get_nhs(struct dn_fib_info *fi, const struct nlattr *attr, @@ -167,21 +167,24 @@ static int dn_fib_get_nhs(struct dn_fib_info *fi, const struct nlattr *attr, int nhlen = nla_len(attr); change_nexthops(fi) { - int attrlen = nhlen - sizeof(struct rtnexthop); - if (attrlen < 0 || (nhlen -= nhp->rtnh_len) < 0) + int attrlen; + + if (!rtnh_ok(nhp, nhlen)) return -EINVAL; nh->nh_flags = (r->rtm_flags&~0xFF) | nhp->rtnh_flags; nh->nh_oif = nhp->rtnh_ifindex; nh->nh_weight = nhp->rtnh_hops + 1; - if (attrlen) { + attrlen = rtnh_attrlen(nhp); + if (attrlen > 0) { struct nlattr *gw_attr; gw_attr = nla_find((struct nlattr *) (nhp + 1), attrlen, RTA_GATEWAY); nh->nh_gw = gw_attr ? nla_get_le16(gw_attr) : 0; } - nhp = RTNH_NEXT(nhp); + + nhp = rtnh_next(nhp, &nhlen); } endfor_nexthops(fi); return 0; diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index cbac493c913a..e23f141c9ba5 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -271,7 +271,7 @@ static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *sk return dst_output(net, sk, skb); } #endif - mtu = ip_skb_dst_mtu(skb); + mtu = ip_skb_dst_mtu(sk, skb); if (skb_is_gso(skb)) return ip_finish_output_gso(net, sk, skb, mtu); @@ -541,7 +541,7 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, iph = ip_hdr(skb); - mtu = ip_skb_dst_mtu(skb); + mtu = ip_skb_dst_mtu(sk, skb); if (IPCB(skb)->frag_max_size && IPCB(skb)->frag_max_size < mtu) mtu = IPCB(skb)->frag_max_size; diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 1bcef2369d64..771be1fa4176 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -177,6 +177,7 @@ static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt) } } + free_percpu(non_pcpu_rt->rt6i_pcpu); non_pcpu_rt->rt6i_pcpu = NULL; } diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 48b58957adf4..9d92c4c46871 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -1341,7 +1341,7 @@ static unsigned int fanout_demux_hash(struct packet_fanout *f, struct sk_buff *skb, unsigned int num) { - return reciprocal_scale(skb_get_hash(skb), num); + return reciprocal_scale(__skb_get_hash_symmetric(skb), num); } static unsigned int fanout_demux_lb(struct packet_fanout *f, diff --git a/net/rds/tcp.c b/net/rds/tcp.c index d278432f080b..d24f6c142d03 100644 --- a/net/rds/tcp.c +++ b/net/rds/tcp.c @@ -651,7 +651,7 @@ static int rds_tcp_init(void) ret = rds_tcp_recv_init(); if (ret) - goto out_slab; + goto out_pernet; ret 
= rds_trans_register(&rds_tcp_transport); if (ret) @@ -663,8 +663,9 @@ static int rds_tcp_init(void) out_recv: rds_tcp_recv_exit(); -out_slab: +out_pernet: unregister_pernet_subsys(&rds_tcp_net_ops); +out_slab: kmem_cache_destroy(rds_tcp_conn_slab); out: return ret; diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 5b135d357e1e..70cfbbf96af2 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -181,7 +181,7 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a, if (!(at & AT_EGRESS)) { if (m->tcfm_ok_push) - skb_push(skb2, skb->mac_len); + skb_push_rcsum(skb2, skb->mac_len); } /* mirror is always swallowed */ diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c index 3ad9fab1985f..1fd464764765 100644 --- a/net/tipc/netlink_compat.c +++ b/net/tipc/netlink_compat.c @@ -604,7 +604,7 @@ static int tipc_nl_compat_link_dump(struct tipc_nl_compat_msg *msg, link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]); link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP])); - nla_strlcpy(link_info.str, nla_data(link[TIPC_NLA_LINK_NAME]), + nla_strlcpy(link_info.str, link[TIPC_NLA_LINK_NAME], TIPC_MAX_LINK_NAME); return tipc_add_tlv(msg->rep, TIPC_TLV_LINK_INFO, diff --git a/sound/core/timer.c b/sound/core/timer.c index e722022d325d..9a6157ea6881 100644 --- a/sound/core/timer.c +++ b/sound/core/timer.c @@ -1955,6 +1955,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer, qhead = tu->qhead++; tu->qhead %= tu->queue_size; + tu->qused--; spin_unlock_irq(&tu->qlock); if (tu->tread) { @@ -1968,7 +1969,6 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer, } spin_lock_irq(&tu->qlock); - tu->qused--; if (err < 0) goto _error; result += unit; diff --git a/sound/pci/au88x0/au88x0_core.c b/sound/pci/au88x0/au88x0_core.c index 4a054d720112..d3125c169684 100644 --- a/sound/pci/au88x0/au88x0_core.c +++ b/sound/pci/au88x0/au88x0_core.c @@ -1444,9 +1444,8 @@ static int vortex_wtdma_bufshift(vortex_t * vortex, int wtdma) int page, p, pp, delta, i; page = - (hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2)) & - WT_SUBBUF_MASK) - >> WT_SUBBUF_SHIFT; + (hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2)) + >> WT_SUBBUF_SHIFT) & WT_SUBBUF_MASK; if (dma->nr_periods >= 4) delta = (page - dma->period_real) & 3; else { diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c index 1cb85aeb0cea..286f5e3686a3 100644 --- a/sound/pci/echoaudio/echoaudio.c +++ b/sound/pci/echoaudio/echoaudio.c @@ -2200,11 +2200,11 @@ static int snd_echo_resume(struct device *dev) u32 pipe_alloc_mask; int err; - commpage_bak = kmalloc(sizeof(struct echoaudio), GFP_KERNEL); + commpage_bak = kmalloc(sizeof(*commpage), GFP_KERNEL); if (commpage_bak == NULL) return -ENOMEM; commpage = chip->comm_page; - memcpy(commpage_bak, commpage, sizeof(struct comm_page)); + memcpy(commpage_bak, commpage, sizeof(*commpage)); err = init_hw(chip, chip->pci->device, chip->pci->subsystem_device); if (err < 0) { diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c index 320445f3bf73..79c7b340acc2 100644 --- a/sound/pci/hda/hda_generic.c +++ b/sound/pci/hda/hda_generic.c @@ -3977,6 +3977,8 @@ static hda_nid_t set_path_power(struct hda_codec *codec, hda_nid_t nid, for (n = 0; n < spec->paths.used; n++) { path = snd_array_elem(&spec->paths, n); + if (!path->depth) + continue; if (path->path[0] == nid || path->path[path->depth - 1] == nid) { bool pin_old = path->pin_enabled; diff --git 
a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 94089fc71884..e320c44714b1 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -367,9 +367,10 @@ enum { #define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70) #define IS_KBL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa171) #define IS_KBL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d71) +#define IS_KBL_H(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa2f0) #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98) #define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci)) || \ - IS_KBL(pci) || IS_KBL_LP(pci) + IS_KBL(pci) || IS_KBL_LP(pci) || IS_KBL_H(pci) static char *driver_short_names[] = { [AZX_DRIVER_ICH] = "HDA Intel", @@ -2190,6 +2191,9 @@ static const struct pci_device_id azx_ids[] = { /* Kabylake-LP */ { PCI_DEVICE(0x8086, 0x9d71), .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE }, + /* Kabylake-H */ + { PCI_DEVICE(0x8086, 0xa2f0), + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE }, /* Broxton-P(Apollolake) */ { PCI_DEVICE(0x8086, 0x5a98), .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON }, diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 900bfbc3368c..5fac786e4982 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -5651,6 +5651,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x504a, "ThinkPad X260", ALC292_FIXUP_TPT440_DOCK), SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE), SND_PCI_QUIRK(0x17aa, 0x5050, "Thinkpad T560p", ALC292_FIXUP_TPT460), + SND_PCI_QUIRK(0x17aa, 0x5051, "Thinkpad L460", ALC292_FIXUP_TPT460), SND_PCI_QUIRK(0x17aa, 0x5053, "Thinkpad T460", ALC292_FIXUP_TPT460), SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K), diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig index 4d82a58ff6b0..f3fb98f0a995 100644 --- a/sound/soc/codecs/Kconfig +++ b/sound/soc/codecs/Kconfig @@ -483,9 +483,10 @@ config SND_SOC_DMIC tristate config SND_SOC_HDMI_CODEC - tristate - select SND_PCM_ELD - select SND_PCM_IEC958 + tristate + select SND_PCM_ELD + select SND_PCM_IEC958 + select HDMI config SND_SOC_ES8328 tristate "Everest Semi ES8328 CODEC" diff --git a/sound/soc/codecs/ak4613.c b/sound/soc/codecs/ak4613.c index 647f69de6baa..5013d2ba0c10 100644 --- a/sound/soc/codecs/ak4613.c +++ b/sound/soc/codecs/ak4613.c @@ -146,6 +146,7 @@ static const struct regmap_config ak4613_regmap_cfg = { .max_register = 0x16, .reg_defaults = ak4613_reg, .num_reg_defaults = ARRAY_SIZE(ak4613_reg), + .cache_type = REGCACHE_RBTREE, }; static const struct of_device_id ak4613_of_match[] = { @@ -530,7 +531,6 @@ static int ak4613_i2c_remove(struct i2c_client *client) static struct i2c_driver ak4613_i2c_driver = { .driver = { .name = "ak4613-codec", - .owner = THIS_MODULE, .of_match_table = ak4613_of_match, }, .probe = ak4613_i2c_probe, diff --git a/sound/soc/codecs/cx20442.c b/sound/soc/codecs/cx20442.c index d6f4abbbf8a7..fb3885fe0afb 100644 --- a/sound/soc/codecs/cx20442.c +++ b/sound/soc/codecs/cx20442.c @@ -226,6 +226,7 @@ static int v253_open(struct tty_struct *tty) if (!tty->disc_data) return -ENODEV; + tty->receive_room = 16; if (tty->ops->write(tty, v253_init, len) != len) { ret = -EIO; goto err; diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c index 181cd3bf0b92..2abb742fc47b 
100644 --- a/sound/soc/codecs/hdac_hdmi.c +++ b/sound/soc/codecs/hdac_hdmi.c @@ -1474,6 +1474,11 @@ static int hdmi_codec_probe(struct snd_soc_codec *codec) * exit, we call pm_runtime_suspend() so that will do for us */ hlink = snd_hdac_ext_bus_get_link(edev->ebus, dev_name(&edev->hdac.dev)); + if (!hlink) { + dev_err(&edev->hdac.dev, "hdac link not found\n"); + return -EIO; + } + snd_hdac_ext_bus_link_get(edev->ebus, hlink); ret = create_fill_widget_route_map(dapm); @@ -1634,6 +1639,11 @@ static int hdac_hdmi_dev_probe(struct hdac_ext_device *edev) /* hold the ref while we probe */ hlink = snd_hdac_ext_bus_get_link(edev->ebus, dev_name(&edev->hdac.dev)); + if (!hlink) { + dev_err(&edev->hdac.dev, "hdac link not found\n"); + return -EIO; + } + snd_hdac_ext_bus_link_get(edev->ebus, hlink); hdmi_priv = devm_kzalloc(&codec->dev, sizeof(*hdmi_priv), GFP_KERNEL); @@ -1744,6 +1754,11 @@ static int hdac_hdmi_runtime_suspend(struct device *dev) } hlink = snd_hdac_ext_bus_get_link(ebus, dev_name(dev)); + if (!hlink) { + dev_err(dev, "hdac link not found\n"); + return -EIO; + } + snd_hdac_ext_bus_link_put(ebus, hlink); return 0; @@ -1765,6 +1780,11 @@ static int hdac_hdmi_runtime_resume(struct device *dev) return 0; hlink = snd_hdac_ext_bus_get_link(ebus, dev_name(dev)); + if (!hlink) { + dev_err(dev, "hdac link not found\n"); + return -EIO; + } + snd_hdac_ext_bus_link_get(ebus, hlink); err = snd_hdac_display_power(bus, true); diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c index 3c6594da6c9c..d70847c9eeb0 100644 --- a/sound/soc/codecs/rt5645.c +++ b/sound/soc/codecs/rt5645.c @@ -253,7 +253,7 @@ static const struct reg_default rt5650_reg[] = { { 0x2b, 0x5454 }, { 0x2c, 0xaaa0 }, { 0x2d, 0x0000 }, - { 0x2f, 0x1002 }, + { 0x2f, 0x5002 }, { 0x31, 0x5000 }, { 0x32, 0x0000 }, { 0x33, 0x0000 }, diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c index 49a9e7049e2b..0af5ddbef1da 100644 --- a/sound/soc/codecs/rt5670.c +++ b/sound/soc/codecs/rt5670.c @@ -619,7 +619,7 @@ static const struct snd_kcontrol_new rt5670_snd_controls[] = { RT5670_L_MUTE_SFT, RT5670_R_MUTE_SFT, 1, 1), SOC_DOUBLE_TLV("HP Playback Volume", RT5670_HP_VOL, RT5670_L_VOL_SFT, RT5670_R_VOL_SFT, - 39, 0, out_vol_tlv), + 39, 1, out_vol_tlv), /* OUTPUT Control */ SOC_DOUBLE("OUT Channel Switch", RT5670_LOUT1, RT5670_VOL_L_SFT, RT5670_VOL_R_SFT, 1, 1), diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c index da60e3fe5ee7..e7fe6b7b95b7 100644 --- a/sound/soc/codecs/wm5102.c +++ b/sound/soc/codecs/wm5102.c @@ -1872,7 +1872,7 @@ static struct snd_soc_dai_driver wm5102_dai[] = { .capture = { .stream_name = "Audio Trace CPU", .channels_min = 1, - .channels_max = 6, + .channels_max = 4, .rates = WM5102_RATES, .formats = WM5102_FORMATS, }, diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c index b5820e4d5471..d54f1b46c9ec 100644 --- a/sound/soc/codecs/wm5110.c +++ b/sound/soc/codecs/wm5110.c @@ -1723,6 +1723,7 @@ static const struct snd_soc_dapm_route wm5110_dapm_routes[] = { { "OUT2L", NULL, "SYSCLK" }, { "OUT2R", NULL, "SYSCLK" }, { "OUT3L", NULL, "SYSCLK" }, + { "OUT3R", NULL, "SYSCLK" }, { "OUT4L", NULL, "SYSCLK" }, { "OUT4R", NULL, "SYSCLK" }, { "OUT5L", NULL, "SYSCLK" }, diff --git a/sound/soc/codecs/wm8940.c b/sound/soc/codecs/wm8940.c index f6f9395ea38e..1c600819f768 100644 --- a/sound/soc/codecs/wm8940.c +++ b/sound/soc/codecs/wm8940.c @@ -743,6 +743,7 @@ static const struct regmap_config wm8940_regmap = { .max_register = WM8940_MONOMIX, .reg_defaults = wm8940_reg_defaults, 
.num_reg_defaults = ARRAY_SIZE(wm8940_reg_defaults), + .cache_type = REGCACHE_RBTREE, .readable_reg = wm8940_readable_register, .volatile_reg = wm8940_volatile_register, diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c index 0f66fda2c772..237dc67002ef 100644 --- a/sound/soc/davinci/davinci-mcasp.c +++ b/sound/soc/davinci/davinci-mcasp.c @@ -1513,8 +1513,9 @@ static struct davinci_mcasp_pdata am33xx_mcasp_pdata = { }; static struct davinci_mcasp_pdata dra7_mcasp_pdata = { - .tx_dma_offset = 0x200, - .rx_dma_offset = 0x284, + /* The CFG port offset will be calculated if it is needed */ + .tx_dma_offset = 0, + .rx_dma_offset = 0, .version = MCASP_VERSION_4, }; @@ -1734,6 +1735,52 @@ static int davinci_mcasp_get_dma_type(struct davinci_mcasp *mcasp) return PCM_EDMA; } +static u32 davinci_mcasp_txdma_offset(struct davinci_mcasp_pdata *pdata) +{ + int i; + u32 offset = 0; + + if (pdata->version != MCASP_VERSION_4) + return pdata->tx_dma_offset; + + for (i = 0; i < pdata->num_serializer; i++) { + if (pdata->serial_dir[i] == TX_MODE) { + if (!offset) { + offset = DAVINCI_MCASP_TXBUF_REG(i); + } else { + pr_err("%s: Only one serializer allowed!\n", + __func__); + break; + } + } + } + + return offset; +} + +static u32 davinci_mcasp_rxdma_offset(struct davinci_mcasp_pdata *pdata) +{ + int i; + u32 offset = 0; + + if (pdata->version != MCASP_VERSION_4) + return pdata->rx_dma_offset; + + for (i = 0; i < pdata->num_serializer; i++) { + if (pdata->serial_dir[i] == RX_MODE) { + if (!offset) { + offset = DAVINCI_MCASP_RXBUF_REG(i); + } else { + pr_err("%s: Only one serializer allowed!\n", + __func__); + break; + } + } + } + + return offset; +} + static int davinci_mcasp_probe(struct platform_device *pdev) { struct snd_dmaengine_dai_dma_data *dma_data; @@ -1862,7 +1909,7 @@ static int davinci_mcasp_probe(struct platform_device *pdev) if (dat) dma_data->addr = dat->start; else - dma_data->addr = mem->start + pdata->tx_dma_offset; + dma_data->addr = mem->start + davinci_mcasp_txdma_offset(pdata); dma = &mcasp->dma_request[SNDRV_PCM_STREAM_PLAYBACK]; res = platform_get_resource(pdev, IORESOURCE_DMA, 0); @@ -1883,7 +1930,8 @@ static int davinci_mcasp_probe(struct platform_device *pdev) if (dat) dma_data->addr = dat->start; else - dma_data->addr = mem->start + pdata->rx_dma_offset; + dma_data->addr = + mem->start + davinci_mcasp_rxdma_offset(pdata); dma = &mcasp->dma_request[SNDRV_PCM_STREAM_CAPTURE]; res = platform_get_resource(pdev, IORESOURCE_DMA, 1); diff --git a/sound/soc/davinci/davinci-mcasp.h b/sound/soc/davinci/davinci-mcasp.h index 1e8787fb3fb7..afddc8010c54 100644 --- a/sound/soc/davinci/davinci-mcasp.h +++ b/sound/soc/davinci/davinci-mcasp.h @@ -85,9 +85,9 @@ (n << 2)) /* Transmit Buffer for Serializer n */ -#define DAVINCI_MCASP_TXBUF_REG 0x200 +#define DAVINCI_MCASP_TXBUF_REG(n) (0x200 + (n << 2)) /* Receive Buffer for Serializer n */ -#define DAVINCI_MCASP_RXBUF_REG 0x280 +#define DAVINCI_MCASP_RXBUF_REG(n) (0x280 + (n << 2)) /* McASP FIFO Registers */ #define DAVINCI_MCASP_V2_AFIFO_BASE (0x1010) diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c index 632ecc0e3956..bedec4a32581 100644 --- a/sound/soc/fsl/fsl_ssi.c +++ b/sound/soc/fsl/fsl_ssi.c @@ -952,16 +952,16 @@ static int _fsl_ssi_set_dai_fmt(struct device *dev, ssi_private->i2s_mode = CCSR_SSI_SCR_NET; switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: + regmap_update_bits(regs, CCSR_SSI_STCCR, + CCSR_SSI_SxCCR_DC_MASK, + CCSR_SSI_SxCCR_DC(2)); + regmap_update_bits(regs, 
CCSR_SSI_SRCCR, + CCSR_SSI_SxCCR_DC_MASK, + CCSR_SSI_SxCCR_DC(2)); switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBM_CFS: case SND_SOC_DAIFMT_CBS_CFS: ssi_private->i2s_mode |= CCSR_SSI_SCR_I2S_MODE_MASTER; - regmap_update_bits(regs, CCSR_SSI_STCCR, - CCSR_SSI_SxCCR_DC_MASK, - CCSR_SSI_SxCCR_DC(2)); - regmap_update_bits(regs, CCSR_SSI_SRCCR, - CCSR_SSI_SxCCR_DC_MASK, - CCSR_SSI_SxCCR_DC(2)); break; case SND_SOC_DAIFMT_CBM_CFM: ssi_private->i2s_mode |= CCSR_SSI_SCR_I2S_MODE_SLAVE; diff --git a/sound/soc/intel/atom/sst-mfld-platform-compress.c b/sound/soc/intel/atom/sst-mfld-platform-compress.c index 395168986462..1bead81bb510 100644 --- a/sound/soc/intel/atom/sst-mfld-platform-compress.c +++ b/sound/soc/intel/atom/sst-mfld-platform-compress.c @@ -182,24 +182,29 @@ static int sst_platform_compr_trigger(struct snd_compr_stream *cstream, int cmd) case SNDRV_PCM_TRIGGER_START: if (stream->compr_ops->stream_start) return stream->compr_ops->stream_start(sst->dev, stream->id); + break; case SNDRV_PCM_TRIGGER_STOP: if (stream->compr_ops->stream_drop) return stream->compr_ops->stream_drop(sst->dev, stream->id); + break; case SND_COMPR_TRIGGER_DRAIN: if (stream->compr_ops->stream_drain) return stream->compr_ops->stream_drain(sst->dev, stream->id); + break; case SND_COMPR_TRIGGER_PARTIAL_DRAIN: if (stream->compr_ops->stream_partial_drain) return stream->compr_ops->stream_partial_drain(sst->dev, stream->id); + break; case SNDRV_PCM_TRIGGER_PAUSE_PUSH: if (stream->compr_ops->stream_pause) return stream->compr_ops->stream_pause(sst->dev, stream->id); + break; case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: if (stream->compr_ops->stream_pause_release) return stream->compr_ops->stream_pause_release(sst->dev, stream->id); - default: - return -EINVAL; + break; } + return -EINVAL; } static int sst_platform_compr_pointer(struct snd_compr_stream *cstream, diff --git a/sound/soc/intel/skylake/bxt-sst.c b/sound/soc/intel/skylake/bxt-sst.c index 965ce40ce752..8b95e09e23e8 100644 --- a/sound/soc/intel/skylake/bxt-sst.c +++ b/sound/soc/intel/skylake/bxt-sst.c @@ -291,6 +291,7 @@ int bxt_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq, sst_dsp_mailbox_init(sst, (BXT_ADSP_SRAM0_BASE + SKL_ADSP_W0_STAT_SZ), SKL_ADSP_W0_UP_SZ, BXT_ADSP_SRAM1_BASE, SKL_ADSP_W1_SZ); + INIT_LIST_HEAD(&sst->module_list); ret = skl_ipc_init(dev, skl); if (ret) return ret; diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c index 49354d17ea55..c4c51a4d3c8f 100644 --- a/sound/soc/sh/rcar/adg.c +++ b/sound/soc/sh/rcar/adg.c @@ -518,7 +518,7 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv, } } - rsnd_mod_bset(adg_mod, SSICKR, 0x00FF0000, ckr); + rsnd_mod_bset(adg_mod, SSICKR, 0x80FF0000, ckr); rsnd_mod_write(adg_mod, BRRA, rbga); rsnd_mod_write(adg_mod, BRRB, rbgb); |
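
As a rough illustration of the reset-API split exercised above by the dwc3-st, ehci-st and ohci-st conversions: after this series a consumer states explicitly whether it needs exclusive or shared control of a reset line, instead of the old ambiguous reset_control_get()/devm_reset_control_get() calls, which now remain only as temporary wrappers. The fragment below is a sketch of a hypothetical platform driver with a "powerdown" line it owns exclusively and a "softreset" line shared with other IP blocks; the driver name and line names are assumptions for illustration, not part of the patch.

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

static int foo_probe(struct platform_device *pdev)
{
	struct reset_control *pwrdn, *rst;

	/* Exclusive: this driver alone may assert/deassert/reset the line. */
	pwrdn = devm_reset_control_get_exclusive(&pdev->dev, "powerdown");
	if (IS_ERR(pwrdn))
		return PTR_ERR(pwrdn);

	/*
	 * Shared: the reset core reference-counts deasserts and only
	 * re-asserts once every user has asserted again; unbalanced
	 * asserts and reset_control_reset() are not allowed here.
	 */
	rst = devm_reset_control_get_shared(&pdev->dev, "softreset");
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	reset_control_deassert(pwrdn);
	reset_control_deassert(rst);

	return 0;
}

The same pattern applies to the optional variants (devm_reset_control_get_optional_exclusive/_shared) used by the ST EHCI/OHCI glue in this series, where a missing line is tolerated rather than treated as an error.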