diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml index 29f12d650442..1a5209139e13 100644 --- a/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml +++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml @@ -223,12 +223,6 @@ allOf: - required: - pwms - - oneOf: - - required: - - interrupts - - required: - - io-backends - - if: properties: compatible: diff --git a/Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.yaml b/Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.yaml index d1a6103fc37a..f3242dc0e7e6 100644 --- a/Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.yaml +++ b/Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.yaml @@ -21,7 +21,7 @@ properties: vlogic-supply: true interrupts: - minItems: 1 + maxItems: 1 description: Interrupt mapping for the trigger interrupt from the internal oscillator. diff --git a/Documentation/hwmon/ina238.rst b/Documentation/hwmon/ina238.rst index d1b93cf8627f..9a24da4786a4 100644 --- a/Documentation/hwmon/ina238.rst +++ b/Documentation/hwmon/ina238.rst @@ -65,7 +65,7 @@ Additional sysfs entries for sq52206 ------------------------------------ ======================= ======================================================= -energy1_input Energy measurement (mJ) +energy1_input Energy measurement (uJ) power1_input_highest Peak Power (uW) ======================= ======================================================= diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index 43ed57e048a8..544fb11351d9 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -2008,6 +2008,13 @@ If the KVM_CAP_VM_TSC_CONTROL capability is advertised, this can also be used as a vm ioctl to set the initial tsc frequency of subsequently created vCPUs. +For TSC protected Confidential Computing (CoCo) VMs where TSC frequency +is configured once at VM scope and remains unchanged during VM's +lifetime, the vm ioctl should be used to configure the TSC frequency +and the vcpu ioctl is not supported. + +Example of such CoCo VMs: TDX guests. + 4.56 KVM_GET_TSC_KHZ -------------------- @@ -7230,8 +7237,8 @@ inputs and outputs of the TDVMCALL. Currently the following values of placed in fields from ``r11`` to ``r14`` of the ``get_tdvmcall_info`` field of the union. -* ``TDVMCALL_SETUP_EVENT_NOTIFY_INTERRUPT``: the guest has requested to -set up a notification interrupt for vector ``vector``. + * ``TDVMCALL_SETUP_EVENT_NOTIFY_INTERRUPT``: the guest has requested to + set up a notification interrupt for vector ``vector``. KVM may add support for more values in the future that may cause a userspace exit, even without calls to ``KVM_ENABLE_CAP`` or similar. In this case, diff --git a/Documentation/virt/kvm/review-checklist.rst b/Documentation/virt/kvm/review-checklist.rst index dc01aea4057b..debac54e14e7 100644 --- a/Documentation/virt/kvm/review-checklist.rst +++ b/Documentation/virt/kvm/review-checklist.rst @@ -7,7 +7,7 @@ Review checklist for kvm patches 1. The patch must follow Documentation/process/coding-style.rst and Documentation/process/submitting-patches.rst. -2. Patches should be against kvm.git master branch. +2. Patches should be against kvm.git master or next branches. 3. If the patch introduces or modifies a new userspace API: - the API must be documented in Documentation/virt/kvm/api.rst @@ -18,10 +18,10 @@ Review checklist for kvm patches 5. 
New features must default to off (userspace should explicitly request them). Performance improvements can and should default to on. -6. New cpu features should be exposed via KVM_GET_SUPPORTED_CPUID2 +6. New cpu features should be exposed via KVM_GET_SUPPORTED_CPUID2, + or its equivalent for non-x86 architectures -7. Emulator changes should be accompanied by unit tests for qemu-kvm.git - kvm/test directory. +7. The feature should be testable (see below). 8. Changes should be vendor neutral when possible. Changes to common code are better than duplicating changes to vendor code. @@ -36,6 +36,87 @@ Review checklist for kvm patches 11. New guest visible features must either be documented in a hardware manual or be accompanied by documentation. -12. Features must be robust against reset and kexec - for example, shared - host/guest memory must be unshared to prevent the host from writing to - guest memory that the guest has not reserved for this purpose. +Testing of KVM code +------------------- + +All features contributed to KVM, and in many cases bugfixes too, should be +accompanied by some kind of tests and/or enablement in open source guests +and VMMs. KVM is covered by multiple test suites: + +*Selftests* + These are low level tests that allow granular testing of kernel APIs. + This includes API failure scenarios, invoking APIs after specific + guest instructions, and testing multiple calls to ``KVM_CREATE_VM`` + within a single test. They are included in the kernel tree at + ``tools/testing/selftests/kvm``. + +``kvm-unit-tests`` + A collection of small guests that test CPU and emulated device features + from a guest's perspective. They run under QEMU or ``kvmtool``, and + are generally not KVM-specific: they can be run with any accelerator + that QEMU supports or even on bare metal, making it possible to compare + behavior across hypervisors and processor families. + +Functional test suites + Various sets of functional tests exist, such as QEMU's ``tests/functional`` + suite and `avocado-vt `__. + These typically involve running a full operating system in a virtual + machine. + +The best testing approach depends on the feature's complexity and +operation. Here are some examples and guidelines: + +New instructions (no new registers or APIs) + The corresponding CPU features (if applicable) should be made available + in QEMU. If the instructions require emulation support or other code in + KVM, it is worth adding coverage to ``kvm-unit-tests`` or selftests; + the latter can be a better choice if the instructions relate to an API + that already has good selftest coverage. + +New hardware features (new registers, no new APIs) + These should be tested via ``kvm-unit-tests``; this more or less implies + supporting them in QEMU and/or ``kvmtool``. In some cases selftests + can be used instead, similar to the previous case, or specifically to + test corner cases in guest state save/restore. + +Bug fixes and performance improvements + These usually do not introduce new APIs, but it's worth sharing + any benchmarks and tests that will validate your contribution, + ideally in the form of regression tests. Tests and benchmarks + can be included in either ``kvm-unit-tests`` or selftests, depending + on the specifics of your change. Selftests are especially useful for + regression tests because they are included directly in Linux's tree.
+ +Large scale internal changes + While it's difficult to provide a single policy, you should ensure that + the changed code is covered by either ``kvm-unit-tests`` or selftests. + In some cases the affected code is run for any guest and functional + tests suffice. Explain your testing process in the cover letter, + as that can help identify gaps in existing test suites. + +New APIs + It is important to demonstrate your use case. This can be as simple as + explaining that the feature is already in use on bare metal, or it can be + a proof-of-concept implementation in userspace. The latter need not be + open source, though that is of course preferable for easier testing. + Selftests should test corner cases of the APIs, and should also cover + basic host and guest operation if no open source VMM uses the feature. + +Bigger features, usually spanning host and guest + These should be supported by Linux guests, with limited exceptions for + Hyper-V features that are testable on Windows guests. It is strongly + suggested that the feature be usable with an open source host VMM, such + as at least one of QEMU or crosvm, and guest firmware. Selftests should + test at least API error cases. Guest operation can be covered by + either selftests or ``kvm-unit-tests`` (this is especially important for + paravirtualized and Windows-only features). Strong selftest coverage + can also be a replacement for implementation in an open source VMM, + but this is generally not recommended. + +Following the above suggestions for testing in selftests and +``kvm-unit-tests`` will make it easier for the maintainers to review +and accept your code. In fact, even before you contribute your changes +upstream it will make it easier for you to develop for KVM. + +Of course, the KVM maintainers reserve the right to require more tests, +though they may also waive the requirement from time to time.
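[Editor's illustration, not part of the patch] The testing guidance above, together with the KVM_SET_TSC_KHZ documentation change near the top of this series, is about exercising KVM's low-level UAPI directly. A minimal userspace sketch of that kind of probe (error handling and fd cleanup omitted; the standalone main() and the probe-then-reapply flow are assumptions for illustration, not selftest-framework code):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);        /* system fd */
	int vm = ioctl(kvm, KVM_CREATE_VM, 0);     /* default VM type */

	/* VM-scoped TSC control is optional; probe the capability first. */
	if (ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_VM_TSC_CONTROL) > 0) {
		int khz = ioctl(vm, KVM_GET_TSC_KHZ, 0);

		/*
		 * Re-apply the current value at VM scope; per the api.rst
		 * hunk above, for TSC-protected CoCo guests such as TDX
		 * only this VM-scoped path is supported.
		 */
		if (khz > 0 && ioctl(vm, KVM_SET_TSC_KHZ, khz) < 0)
			perror("KVM_SET_TSC_KHZ");
	}
	return 0;
}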
diff --git a/MAINTAINERS b/MAINTAINERS index 1bc1698bc5ae..255344c1e413 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5581,6 +5581,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git F: drivers/char/ F: drivers/misc/ F: include/linux/miscdevice.h +F: rust/kernel/miscdevice.rs F: samples/rust/rust_misc_device.rs X: drivers/char/agp/ X: drivers/char/hw_random/ @@ -12200,9 +12201,8 @@ F: drivers/dma/idxd/* F: include/uapi/linux/idxd.h INTEL IN FIELD SCAN (IFS) DEVICE -M: Jithu Joseph +M: Tony Luck R: Ashok Raj -R: Tony Luck S: Maintained F: drivers/platform/x86/intel/ifs F: include/trace/events/intel_ifs.h @@ -12542,8 +12542,7 @@ T: git https://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next.git/ F: drivers/net/wireless/intel/iwlwifi/ INTEL WMI SLIM BOOTLOADER (SBL) FIRMWARE UPDATE DRIVER -M: Jithu Joseph -S: Maintained +S: Orphan W: https://slimbootloader.github.io/security/firmware-update.html F: drivers/platform/x86/intel/wmi/sbl-fw-update.c @@ -17405,6 +17404,7 @@ F: include/linux/ethtool.h F: include/linux/framer/framer-provider.h F: include/linux/framer/framer.h F: include/linux/in.h +F: include/linux/in6.h F: include/linux/indirect_call_wrapper.h F: include/linux/inet.h F: include/linux/inet_diag.h @@ -25923,6 +25923,8 @@ F: fs/hostfs/ USERSPACE COPYIN/COPYOUT (UIOVEC) M: Alexander Viro +L: linux-block@vger.kernel.org +L: linux-fsdevel@vger.kernel.org S: Maintained F: include/linux/uio.h F: lib/iov_iter.c diff --git a/Makefile b/Makefile index c09766beb7ef..be33e8c868ae 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ VERSION = 6 PATCHLEVEL = 16 SUBLEVEL = 0 -EXTRAVERSION = -rc6 +EXTRAVERSION = -rc7 NAME = Baby Opossum Posse # *DOCUMENTATION* diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 76c2f0da821f..c20bd6f21e60 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -2624,7 +2624,7 @@ static bool access_mdcr(struct kvm_vcpu *vcpu, */ if (hpmn > vcpu->kvm->arch.nr_pmu_counters) { hpmn = vcpu->kvm->arch.nr_pmu_counters; - u64_replace_bits(val, hpmn, MDCR_EL2_HPMN); + u64p_replace_bits(&val, hpmn, MDCR_EL2_HPMN); } __vcpu_assign_sys_reg(vcpu, MDCR_EL2, val); diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index d71ea0f4466f..1c5544401530 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -98,6 +98,7 @@ config RISCV select CLONE_BACKWARDS select COMMON_CLK select CPU_PM if CPU_IDLE || HIBERNATION || SUSPEND + select DYNAMIC_FTRACE if FUNCTION_TRACER select EDAC_SUPPORT select FRAME_POINTER if PERF_EVENTS || (FUNCTION_TRACER && !DYNAMIC_FTRACE) select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY if DYNAMIC_FTRACE @@ -162,7 +163,7 @@ config RISCV select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL select HAVE_FUNCTION_GRAPH_TRACER if HAVE_DYNAMIC_FTRACE_WITH_ARGS select HAVE_FUNCTION_GRAPH_FREGS - select HAVE_FUNCTION_TRACER if !XIP_KERNEL + select HAVE_FUNCTION_TRACER if !XIP_KERNEL && HAVE_DYNAMIC_FTRACE select HAVE_EBPF_JIT if MMU select HAVE_GUP_FAST if MMU select HAVE_FUNCTION_ARG_ACCESS_API diff --git a/arch/riscv/include/asm/kvm_aia.h b/arch/riscv/include/asm/kvm_aia.h index 3b643b9efc07..5acce285e56e 100644 --- a/arch/riscv/include/asm/kvm_aia.h +++ b/arch/riscv/include/asm/kvm_aia.h @@ -87,6 +87,9 @@ DECLARE_STATIC_KEY_FALSE(kvm_riscv_aia_available); extern struct kvm_device_ops kvm_riscv_aia_device_ops; +bool kvm_riscv_vcpu_aia_imsic_has_interrupt(struct kvm_vcpu *vcpu); +void kvm_riscv_vcpu_aia_imsic_load(struct kvm_vcpu *vcpu, int cpu); +void kvm_riscv_vcpu_aia_imsic_put(struct 
kvm_vcpu *vcpu); void kvm_riscv_vcpu_aia_imsic_release(struct kvm_vcpu *vcpu); int kvm_riscv_vcpu_aia_imsic_update(struct kvm_vcpu *vcpu); @@ -161,7 +164,6 @@ void kvm_riscv_aia_destroy_vm(struct kvm *kvm); int kvm_riscv_aia_alloc_hgei(int cpu, struct kvm_vcpu *owner, void __iomem **hgei_va, phys_addr_t *hgei_pa); void kvm_riscv_aia_free_hgei(int cpu, int hgei); -void kvm_riscv_aia_wakeon_hgei(struct kvm_vcpu *owner, bool enable); void kvm_riscv_aia_enable(void); void kvm_riscv_aia_disable(void); diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h index 85cfebc32e4c..bcbf8b1ec115 100644 --- a/arch/riscv/include/asm/kvm_host.h +++ b/arch/riscv/include/asm/kvm_host.h @@ -306,6 +306,9 @@ static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu) return IS_ENABLED(CONFIG_GUEST_PERF_EVENTS) && !!vcpu; } +static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {} +static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {} + #define KVM_RISCV_GSTAGE_TLB_MIN_ORDER 12 void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid, diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h index 525e50db24f7..b88a6218b7f2 100644 --- a/arch/riscv/include/asm/uaccess.h +++ b/arch/riscv/include/asm/uaccess.h @@ -311,8 +311,8 @@ do { \ do { \ if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && \ !IS_ALIGNED((uintptr_t)__gu_ptr, sizeof(*__gu_ptr))) { \ - __inttype(x) val = (__inttype(x))x; \ - if (__asm_copy_to_user_sum_enabled(__gu_ptr, &(val), sizeof(*__gu_ptr))) \ + __inttype(x) ___val = (__inttype(x))x; \ + if (__asm_copy_to_user_sum_enabled(__gu_ptr, &(___val), sizeof(*__gu_ptr))) \ goto label; \ break; \ } \ diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c index 4c6c24380cfd..8d18d6727f0f 100644 --- a/arch/riscv/kernel/ftrace.c +++ b/arch/riscv/kernel/ftrace.c @@ -14,6 +14,18 @@ #include #ifdef CONFIG_DYNAMIC_FTRACE +void ftrace_arch_code_modify_prepare(void) + __acquires(&text_mutex) +{ + mutex_lock(&text_mutex); +} + +void ftrace_arch_code_modify_post_process(void) + __releases(&text_mutex) +{ + mutex_unlock(&text_mutex); +} + unsigned long ftrace_call_adjust(unsigned long addr) { if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS)) @@ -29,10 +41,8 @@ unsigned long arch_ftrace_get_symaddr(unsigned long fentry_ip) void arch_ftrace_update_code(int command) { - mutex_lock(&text_mutex); command |= FTRACE_MAY_SLEEP; ftrace_modify_all_code(command); - mutex_unlock(&text_mutex); flush_icache_all(); } @@ -149,6 +159,8 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec) unsigned int nops[2], offset; int ret; + guard(mutex)(&text_mutex); + ret = ftrace_rec_set_nop_ops(rec); if (ret) return ret; @@ -157,9 +169,7 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec) nops[0] = to_auipc_t0(offset); nops[1] = RISCV_INSN_NOP4; - mutex_lock(&text_mutex); ret = patch_insn_write((void *)pc, nops, 2 * MCOUNT_INSN_SIZE); - mutex_unlock(&text_mutex); return ret; } diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c index 9c83848797a7..80230de167de 100644 --- a/arch/riscv/kernel/traps.c +++ b/arch/riscv/kernel/traps.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -151,7 +152,9 @@ asmlinkage __visible __trap_section void name(struct pt_regs *regs) \ { \ if (user_mode(regs)) { \ irqentry_enter_from_user_mode(regs); \ + local_irq_enable(); \ do_trap_error(regs, signo, code, regs->epc, "Oops - " str); \ + local_irq_disable(); \ 
irqentry_exit_to_user_mode(regs); \ } else { \ irqentry_state_t state = irqentry_nmi_enter(regs); \ @@ -173,17 +176,14 @@ asmlinkage __visible __trap_section void do_trap_insn_illegal(struct pt_regs *re if (user_mode(regs)) { irqentry_enter_from_user_mode(regs); - local_irq_enable(); handled = riscv_v_first_use_handler(regs); - - local_irq_disable(); - if (!handled) do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->epc, "Oops - illegal instruction"); + local_irq_disable(); irqentry_exit_to_user_mode(regs); } else { irqentry_state_t state = irqentry_nmi_enter(regs); @@ -308,9 +308,11 @@ asmlinkage __visible __trap_section void do_trap_break(struct pt_regs *regs) { if (user_mode(regs)) { irqentry_enter_from_user_mode(regs); + local_irq_enable(); handle_break(regs); + local_irq_disable(); irqentry_exit_to_user_mode(regs); } else { irqentry_state_t state = irqentry_nmi_enter(regs); diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c index 93043924fe6c..f760e4fcc052 100644 --- a/arch/riscv/kernel/traps_misaligned.c +++ b/arch/riscv/kernel/traps_misaligned.c @@ -461,7 +461,7 @@ static int handle_scalar_misaligned_load(struct pt_regs *regs) } if (!fp) - SET_RD(insn, regs, val.data_ulong << shift >> shift); + SET_RD(insn, regs, (long)(val.data_ulong << shift) >> shift); else if (len == 8) set_f64_rd(insn, regs, val.data_u64); else diff --git a/arch/riscv/kvm/aia.c b/arch/riscv/kvm/aia.c index 19afd1f23537..dad318185660 100644 --- a/arch/riscv/kvm/aia.c +++ b/arch/riscv/kvm/aia.c @@ -30,28 +30,6 @@ unsigned int kvm_riscv_aia_nr_hgei; unsigned int kvm_riscv_aia_max_ids; DEFINE_STATIC_KEY_FALSE(kvm_riscv_aia_available); -static int aia_find_hgei(struct kvm_vcpu *owner) -{ - int i, hgei; - unsigned long flags; - struct aia_hgei_control *hgctrl = get_cpu_ptr(&aia_hgei); - - raw_spin_lock_irqsave(&hgctrl->lock, flags); - - hgei = -1; - for (i = 1; i <= kvm_riscv_aia_nr_hgei; i++) { - if (hgctrl->owners[i] == owner) { - hgei = i; - break; - } - } - - raw_spin_unlock_irqrestore(&hgctrl->lock, flags); - - put_cpu_ptr(&aia_hgei); - return hgei; -} - static inline unsigned long aia_hvictl_value(bool ext_irq_pending) { unsigned long hvictl; @@ -95,7 +73,6 @@ void kvm_riscv_vcpu_aia_sync_interrupts(struct kvm_vcpu *vcpu) bool kvm_riscv_vcpu_aia_has_interrupts(struct kvm_vcpu *vcpu, u64 mask) { - int hgei; unsigned long seip; if (!kvm_riscv_aia_available()) @@ -114,11 +91,7 @@ bool kvm_riscv_vcpu_aia_has_interrupts(struct kvm_vcpu *vcpu, u64 mask) if (!kvm_riscv_aia_initialized(vcpu->kvm) || !seip) return false; - hgei = aia_find_hgei(vcpu); - if (hgei > 0) - return !!(ncsr_read(CSR_HGEIP) & BIT(hgei)); - - return false; + return kvm_riscv_vcpu_aia_imsic_has_interrupt(vcpu); } void kvm_riscv_vcpu_aia_update_hvip(struct kvm_vcpu *vcpu) @@ -164,6 +137,9 @@ void kvm_riscv_vcpu_aia_load(struct kvm_vcpu *vcpu, int cpu) csr_write(CSR_HVIPRIO2H, csr->hviprio2h); #endif } + + if (kvm_riscv_aia_initialized(vcpu->kvm)) + kvm_riscv_vcpu_aia_imsic_load(vcpu, cpu); } void kvm_riscv_vcpu_aia_put(struct kvm_vcpu *vcpu) @@ -174,6 +150,9 @@ void kvm_riscv_vcpu_aia_put(struct kvm_vcpu *vcpu) if (!kvm_riscv_aia_available()) return; + if (kvm_riscv_aia_initialized(vcpu->kvm)) + kvm_riscv_vcpu_aia_imsic_put(vcpu); + if (kvm_riscv_nacl_available()) { nsh = nacl_shmem(); csr->vsiselect = nacl_csr_read(nsh, CSR_VSISELECT); @@ -472,22 +451,6 @@ void kvm_riscv_aia_free_hgei(int cpu, int hgei) raw_spin_unlock_irqrestore(&hgctrl->lock, flags); } -void kvm_riscv_aia_wakeon_hgei(struct kvm_vcpu *owner, bool 
enable) -{ - int hgei; - - if (!kvm_riscv_aia_available()) - return; - - hgei = aia_find_hgei(owner); - if (hgei > 0) { - if (enable) - csr_set(CSR_HGEIE, BIT(hgei)); - else - csr_clear(CSR_HGEIE, BIT(hgei)); - } -} - static irqreturn_t hgei_interrupt(int irq, void *dev_id) { int i; diff --git a/arch/riscv/kvm/aia_imsic.c b/arch/riscv/kvm/aia_imsic.c index 29ef9c2133a9..2ff865943ebb 100644 --- a/arch/riscv/kvm/aia_imsic.c +++ b/arch/riscv/kvm/aia_imsic.c @@ -676,6 +676,48 @@ static void imsic_swfile_update(struct kvm_vcpu *vcpu, imsic_swfile_extirq_update(vcpu); } +bool kvm_riscv_vcpu_aia_imsic_has_interrupt(struct kvm_vcpu *vcpu) +{ + struct imsic *imsic = vcpu->arch.aia_context.imsic_state; + unsigned long flags; + bool ret = false; + + /* + * The IMSIC SW-file directly injects interrupt via hvip so + * only check for interrupt when IMSIC VS-file is being used. + */ + + read_lock_irqsave(&imsic->vsfile_lock, flags); + if (imsic->vsfile_cpu > -1) + ret = !!(csr_read(CSR_HGEIP) & BIT(imsic->vsfile_hgei)); + read_unlock_irqrestore(&imsic->vsfile_lock, flags); + + return ret; +} + +void kvm_riscv_vcpu_aia_imsic_load(struct kvm_vcpu *vcpu, int cpu) +{ + /* + * No need to explicitly clear HGEIE CSR bits because the + * hgei interrupt handler (aka hgei_interrupt()) will always + * clear it for us. + */ +} + +void kvm_riscv_vcpu_aia_imsic_put(struct kvm_vcpu *vcpu) +{ + struct imsic *imsic = vcpu->arch.aia_context.imsic_state; + unsigned long flags; + + if (!kvm_vcpu_is_blocking(vcpu)) + return; + + read_lock_irqsave(&imsic->vsfile_lock, flags); + if (imsic->vsfile_cpu > -1) + csr_set(CSR_HGEIE, BIT(imsic->vsfile_hgei)); + read_unlock_irqrestore(&imsic->vsfile_lock, flags); +} + void kvm_riscv_vcpu_aia_imsic_release(struct kvm_vcpu *vcpu) { unsigned long flags; @@ -781,6 +823,9 @@ int kvm_riscv_vcpu_aia_imsic_update(struct kvm_vcpu *vcpu) * producers to the new IMSIC VS-file. */ + /* Ensure HGEIE CSR bit is zero before using the new IMSIC VS-file */ + csr_clear(CSR_HGEIE, BIT(new_vsfile_hgei)); + /* Zero-out new IMSIC VS-file */ imsic_vsfile_local_clear(new_vsfile_hgei, imsic->nr_hw_eix); diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c index e0a01af426ff..0462863206ca 100644 --- a/arch/riscv/kvm/vcpu.c +++ b/arch/riscv/kvm/vcpu.c @@ -207,16 +207,6 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) return kvm_riscv_vcpu_timer_pending(vcpu); } -void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) -{ - kvm_riscv_aia_wakeon_hgei(vcpu, true); -} - -void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) -{ - kvm_riscv_aia_wakeon_hgei(vcpu, false); -} - int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) { return (kvm_riscv_vcpu_has_interrupts(vcpu, -1UL) && diff --git a/arch/riscv/kvm/vcpu_timer.c b/arch/riscv/kvm/vcpu_timer.c index ff672fa71fcc..85a7262115e1 100644 --- a/arch/riscv/kvm/vcpu_timer.c +++ b/arch/riscv/kvm/vcpu_timer.c @@ -345,8 +345,24 @@ void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu) /* * The vstimecmp CSRs are saved by kvm_riscv_vcpu_timer_sync() * upon every VM exit so no need to save here. + * + * If VS-timer expires when no VCPU running on a host CPU then + * WFI executed by such host CPU will be effective NOP resulting + * in no power savings. This is because as-per RISC-V Privileged + * specificaiton: "WFI is also required to resume execution for + * locally enabled interrupts pending at any privilege level, + * regardless of the global interrupt enable at each privilege + * level." 
+ * + * To address the above issue, vstimecmp CSR must be set to -1UL + * over here when VCPU is scheduled-out or exits to user space. */ + csr_write(CSR_VSTIMECMP, -1UL); +#if defined(CONFIG_32BIT) + csr_write(CSR_VSTIMECMPH, -1UL); +#endif + /* timer should be enabled for the remaining operations */ if (unlikely(!t->init_done)) return; diff --git a/arch/riscv/tools/relocs_check.sh b/arch/riscv/tools/relocs_check.sh index baeb2e7b2290..742993e6a8cb 100755 --- a/arch/riscv/tools/relocs_check.sh +++ b/arch/riscv/tools/relocs_check.sh @@ -14,7 +14,9 @@ bad_relocs=$( ${srctree}/scripts/relocs_check.sh "$@" | # These relocations are okay # R_RISCV_RELATIVE - grep -F -w -v 'R_RISCV_RELATIVE' + # R_RISCV_NONE + grep -F -w -v 'R_RISCV_RELATIVE +R_RISCV_NONE' ) if [ -z "$bad_relocs" ]; then diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index c7f8313ba449..0c9a35782c83 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -566,7 +566,15 @@ static void bpf_jit_plt(struct bpf_plt *plt, void *ret, void *target) { memcpy(plt, &bpf_plt, sizeof(*plt)); plt->ret = ret; - plt->target = target; + /* + * (target == NULL) implies that the branch to this PLT entry was + * patched and became a no-op. However, some CPU could have jumped + * to this PLT entry before patching and may be still executing it. + * + * Since the intention in this case is to make the PLT entry a no-op, + * make the target point to the return label instead of NULL. + */ + plt->target = target ?: ret; } /* diff --git a/arch/x86/coco/sev/Makefile b/arch/x86/coco/sev/Makefile index db3255b979bd..342d79f0ab6a 100644 --- a/arch/x86/coco/sev/Makefile +++ b/arch/x86/coco/sev/Makefile @@ -5,5 +5,6 @@ obj-y += core.o sev-nmi.o vc-handle.o # Clang 14 and older may fail to respect __no_sanitize_undefined when inlining UBSAN_SANITIZE_sev-nmi.o := n -# GCC may fail to respect __no_sanitize_address when inlining +# GCC may fail to respect __no_sanitize_address or __no_kcsan when inlining KASAN_SANITIZE_sev-nmi.o := n +KCSAN_SANITIZE_sev-nmi.o := n diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c index 3d1d3547095a..afdbda2dd7b7 100644 --- a/arch/x86/hyperv/hv_init.c +++ b/arch/x86/hyperv/hv_init.c @@ -34,6 +34,7 @@ #include #include #include +#include void *hv_hypercall_pg; EXPORT_SYMBOL_GPL(hv_hypercall_pg); diff --git a/arch/x86/hyperv/irqdomain.c b/arch/x86/hyperv/irqdomain.c index 31f0d29cbc5e..090f5ac9f492 100644 --- a/arch/x86/hyperv/irqdomain.c +++ b/arch/x86/hyperv/irqdomain.c @@ -10,6 +10,7 @@ #include #include +#include #include static int hv_map_interrupt(union hv_device_id device_id, bool level, @@ -46,7 +47,7 @@ static int hv_map_interrupt(union hv_device_id device_id, bool level, if (nr_bank < 0) { local_irq_restore(flags); pr_err("%s: unable to generate VP set\n", __func__); - return EINVAL; + return -EINVAL; } intr_desc->target.flags = HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET; @@ -66,7 +67,7 @@ static int hv_map_interrupt(union hv_device_id device_id, bool level, if (!hv_result_success(status)) hv_status_err(status, "\n"); - return hv_result(status); + return hv_result_to_errno(status); } static int hv_unmap_interrupt(u64 id, struct hv_interrupt_entry *old_entry) @@ -88,7 +89,10 @@ static int hv_unmap_interrupt(u64 id, struct hv_interrupt_entry *old_entry) status = hv_do_hypercall(HVCALL_UNMAP_DEVICE_INTERRUPT, input, NULL); local_irq_restore(flags); - return hv_result(status); + if (!hv_result_success(status)) + hv_status_err(status, "\n"); + + return 
hv_result_to_errno(status); } #ifdef CONFIG_PCI_MSI @@ -169,13 +173,34 @@ static union hv_device_id hv_build_pci_dev_id(struct pci_dev *dev) return dev_id; } -static int hv_map_msi_interrupt(struct pci_dev *dev, int cpu, int vector, - struct hv_interrupt_entry *entry) +/** + * hv_map_msi_interrupt() - "Map" the MSI IRQ in the hypervisor. + * @data: Describes the IRQ + * @out_entry: Hypervisor (MSI) interrupt entry (can be NULL) + * + * Map the IRQ in the hypervisor by issuing a MAP_DEVICE_INTERRUPT hypercall. + * + * Return: 0 on success, -errno on failure + */ +int hv_map_msi_interrupt(struct irq_data *data, + struct hv_interrupt_entry *out_entry) { - union hv_device_id device_id = hv_build_pci_dev_id(dev); + struct irq_cfg *cfg = irqd_cfg(data); + struct hv_interrupt_entry dummy; + union hv_device_id device_id; + struct msi_desc *msidesc; + struct pci_dev *dev; + int cpu; - return hv_map_interrupt(device_id, false, cpu, vector, entry); + msidesc = irq_data_get_msi_desc(data); + dev = msi_desc_to_pci_dev(msidesc); + device_id = hv_build_pci_dev_id(dev); + cpu = cpumask_first(irq_data_get_effective_affinity_mask(data)); + + return hv_map_interrupt(device_id, false, cpu, cfg->vector, + out_entry ? out_entry : &dummy); } +EXPORT_SYMBOL_GPL(hv_map_msi_interrupt); static inline void entry_to_msi_msg(struct hv_interrupt_entry *entry, struct msi_msg *msg) { @@ -188,13 +213,11 @@ static inline void entry_to_msi_msg(struct hv_interrupt_entry *entry, struct msi static int hv_unmap_msi_interrupt(struct pci_dev *dev, struct hv_interrupt_entry *old_entry); static void hv_irq_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) { + struct hv_interrupt_entry *stored_entry; + struct irq_cfg *cfg = irqd_cfg(data); struct msi_desc *msidesc; struct pci_dev *dev; - struct hv_interrupt_entry out_entry, *stored_entry; - struct irq_cfg *cfg = irqd_cfg(data); - const cpumask_t *affinity; - int cpu; - u64 status; + int ret; msidesc = irq_data_get_msi_desc(data); dev = msi_desc_to_pci_dev(msidesc); @@ -204,9 +227,6 @@ static void hv_irq_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) return; } - affinity = irq_data_get_effective_affinity_mask(data); - cpu = cpumask_first_and(affinity, cpu_online_mask); - if (data->chip_data) { /* * This interrupt is already mapped. Let's unmap first. 
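[Editor's illustration, not part of the patch] The irqdomain.c hunks above stop leaking raw Hyper-V status codes (and a bare EINVAL) to callers and instead return negative errnos via hv_result_to_errno(). A sketch of that conversion pattern, for illustration only: hv_result() and the HV_STATUS_* constants are the real ones, but this is not the in-tree helper and the exact mapping shown is an assumption.

#include <linux/errno.h>
#include <asm/mshyperv.h>

/* Illustrative status-to-errno translation, not the actual hv_result_to_errno(). */
static int example_hv_status_to_errno(u64 hv_status)
{
	switch (hv_result(hv_status)) {
	case HV_STATUS_SUCCESS:
		return 0;
	case HV_STATUS_INVALID_PARAMETER:
	case HV_STATUS_INVALID_ALIGNMENT:
		return -EINVAL;
	case HV_STATUS_INSUFFICIENT_MEMORY:
		return -ENOMEM;
	case HV_STATUS_ACCESS_DENIED:
		return -EACCES;
	default:
		return -EIO;
	}
}

With a helper like this, callers such as hv_map_interrupt() can simply propagate the return value, which is what the hv_irq_compose_msi_msg() changes below rely on.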
@@ -219,14 +239,12 @@ static void hv_irq_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) stored_entry = data->chip_data; data->chip_data = NULL; - status = hv_unmap_msi_interrupt(dev, stored_entry); + ret = hv_unmap_msi_interrupt(dev, stored_entry); kfree(stored_entry); - if (status != HV_STATUS_SUCCESS) { - hv_status_debug(status, "failed to unmap\n"); + if (ret) return; - } } stored_entry = kzalloc(sizeof(*stored_entry), GFP_ATOMIC); @@ -235,15 +253,14 @@ static void hv_irq_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) return; } - status = hv_map_msi_interrupt(dev, cpu, cfg->vector, &out_entry); - if (status != HV_STATUS_SUCCESS) { + ret = hv_map_msi_interrupt(data, stored_entry); + if (ret) { kfree(stored_entry); return; } - *stored_entry = out_entry; data->chip_data = stored_entry; - entry_to_msi_msg(&out_entry, msg); + entry_to_msi_msg(data->chip_data, msg); return; } @@ -257,7 +274,6 @@ static void hv_teardown_msi_irq(struct pci_dev *dev, struct irq_data *irqd) { struct hv_interrupt_entry old_entry; struct msi_msg msg; - u64 status; if (!irqd->chip_data) { pr_debug("%s: no chip data\n!", __func__); @@ -270,10 +286,7 @@ static void hv_teardown_msi_irq(struct pci_dev *dev, struct irq_data *irqd) kfree(irqd->chip_data); irqd->chip_data = NULL; - status = hv_unmap_msi_interrupt(dev, &old_entry); - - if (status != HV_STATUS_SUCCESS) - hv_status_err(status, "\n"); + (void)hv_unmap_msi_interrupt(dev, &old_entry); } static void hv_msi_free_irq(struct irq_domain *domain, diff --git a/arch/x86/hyperv/ivm.c b/arch/x86/hyperv/ivm.c index e93a2f488ff7..ade6c665c97e 100644 --- a/arch/x86/hyperv/ivm.c +++ b/arch/x86/hyperv/ivm.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/x86/hyperv/nested.c b/arch/x86/hyperv/nested.c index 1083dc8646f9..8ccbb7c4fc27 100644 --- a/arch/x86/hyperv/nested.c +++ b/arch/x86/hyperv/nested.c @@ -11,6 +11,7 @@ #include +#include #include #include #include diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index e1752ba47e67..abc4659f5809 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h @@ -112,12 +112,6 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output) return hv_status; } -/* Hypercall to the L0 hypervisor */ -static inline u64 hv_do_nested_hypercall(u64 control, void *input, void *output) -{ - return hv_do_hypercall(control | HV_HYPERCALL_NESTED, input, output); -} - /* Fast hypercall with 8 bytes of input and no output */ static inline u64 _hv_do_fast_hypercall8(u64 control, u64 input1) { @@ -165,13 +159,6 @@ static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1) return _hv_do_fast_hypercall8(control, input1); } -static inline u64 hv_do_fast_nested_hypercall8(u16 code, u64 input1) -{ - u64 control = (u64)code | HV_HYPERCALL_FAST_BIT | HV_HYPERCALL_NESTED; - - return _hv_do_fast_hypercall8(control, input1); -} - /* Fast hypercall with 16 bytes of input */ static inline u64 _hv_do_fast_hypercall16(u64 control, u64 input1, u64 input2) { @@ -223,13 +210,6 @@ static inline u64 hv_do_fast_hypercall16(u16 code, u64 input1, u64 input2) return _hv_do_fast_hypercall16(control, input1, input2); } -static inline u64 hv_do_fast_nested_hypercall16(u16 code, u64 input1, u64 input2) -{ - u64 control = (u64)code | HV_HYPERCALL_FAST_BIT | HV_HYPERCALL_NESTED; - - return _hv_do_fast_hypercall16(control, input1, input2); -} - extern struct hv_vp_assist_page **hv_vp_assist_page; static inline struct hv_vp_assist_page 
*hv_get_vp_assist_page(unsigned int cpu) @@ -262,6 +242,8 @@ static inline void hv_apic_init(void) {} struct irq_domain *hv_create_pci_msi_domain(void); +int hv_map_msi_interrupt(struct irq_data *data, + struct hv_interrupt_entry *out_entry); int hv_map_ioapic_interrupt(int ioapic_id, bool level, int vcpu, int vector, struct hv_interrupt_entry *entry); int hv_unmap_ioapic_interrupt(int ioapic_id, struct hv_interrupt_entry *entry); diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c index f31ccdeb905b..ec79aacc446f 100644 --- a/arch/x86/kvm/vmx/tdx.c +++ b/arch/x86/kvm/vmx/tdx.c @@ -173,7 +173,6 @@ static void td_init_cpuid_entry2(struct kvm_cpuid_entry2 *entry, unsigned char i tdx_clear_unsupported_cpuid(entry); } -#define TDVMCALLINFO_GET_QUOTE BIT(0) #define TDVMCALLINFO_SETUP_EVENT_NOTIFY_INTERRUPT BIT(1) static int init_kvm_tdx_caps(const struct tdx_sys_info_td_conf *td_conf, @@ -192,7 +191,6 @@ static int init_kvm_tdx_caps(const struct tdx_sys_info_td_conf *td_conf, caps->cpuid.nent = td_conf->num_cpuid_config; caps->user_tdvmcallinfo_1_r11 = - TDVMCALLINFO_GET_QUOTE | TDVMCALLINFO_SETUP_EVENT_NOTIFY_INTERRUPT; for (i = 0; i < td_conf->num_cpuid_config; i++) @@ -2271,25 +2269,26 @@ static int tdx_get_capabilities(struct kvm_tdx_cmd *cmd) const struct tdx_sys_info_td_conf *td_conf = &tdx_sysinfo->td_conf; struct kvm_tdx_capabilities __user *user_caps; struct kvm_tdx_capabilities *caps = NULL; + u32 nr_user_entries; int ret = 0; /* flags is reserved for future use */ if (cmd->flags) return -EINVAL; - caps = kmalloc(sizeof(*caps) + + caps = kzalloc(sizeof(*caps) + sizeof(struct kvm_cpuid_entry2) * td_conf->num_cpuid_config, GFP_KERNEL); if (!caps) return -ENOMEM; user_caps = u64_to_user_ptr(cmd->data); - if (copy_from_user(caps, user_caps, sizeof(*caps))) { + if (get_user(nr_user_entries, &user_caps->cpuid.nent)) { ret = -EFAULT; goto out; } - if (caps->cpuid.nent < td_conf->num_cpuid_config) { + if (nr_user_entries < td_conf->num_cpuid_config) { ret = -E2BIG; goto out; } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 357b9e3a6cef..93636f77c42d 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -6188,6 +6188,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp, u32 user_tsc_khz; r = -EINVAL; + + if (vcpu->arch.guest_tsc_protected) + goto out; + user_tsc_khz = (u32)arg; if (kvm_caps.has_tsc_control && diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c index 5fa2cca43653..d6b2a665b499 100644 --- a/arch/x86/kvm/xen.c +++ b/arch/x86/kvm/xen.c @@ -1526,7 +1526,7 @@ static bool kvm_xen_schedop_poll(struct kvm_vcpu *vcpu, bool longmode, if (kvm_read_guest_virt(vcpu, (gva_t)sched_poll.ports, ports, sched_poll.nr_ports * sizeof(*ports), &e)) { *r = -EFAULT; - return true; + goto out; } for (i = 0; i < sched_poll.nr_ports; i++) { diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index b2b9b89d6967..c611444480b3 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -960,4 +960,5 @@ void blk_unregister_queue(struct gendisk *disk) elevator_set_none(q); blk_debugfs_remove(disk); + kobject_put(&disk->queue_kobj); } diff --git a/drivers/acpi/riscv/cppc.c b/drivers/acpi/riscv/cppc.c index 4cdff387deff..440cf9fb91aa 100644 --- a/drivers/acpi/riscv/cppc.c +++ b/drivers/acpi/riscv/cppc.c @@ -37,10 +37,8 @@ static int __init sbi_cppc_init(void) { if (sbi_spec_version >= sbi_mk_version(2, 0) && sbi_probe_extension(SBI_EXT_CPPC) > 0) { - pr_info("SBI CPPC extension detected\n"); cppc_ext_present = true; } else { - pr_info("SBI CPPC extension NOT detected!!\n"); cppc_ext_present 
= false; } diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index f2843f814675..1f3f782a04ba 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -1173,6 +1173,8 @@ err_name: err_map: kfree(map); err: + if (bus && bus->free_on_exit) + kfree(bus); return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(__regmap_init); diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 500840e4a74e..8d994cae3b83 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -308,14 +308,13 @@ end_io: static void lo_rw_aio_do_completion(struct loop_cmd *cmd) { struct request *rq = blk_mq_rq_from_pdu(cmd); - struct loop_device *lo = rq->q->queuedata; if (!atomic_dec_and_test(&cmd->ref)) return; kfree(cmd->bvec); cmd->bvec = NULL; if (req_op(rq) == REQ_OP_WRITE) - file_end_write(lo->lo_backing_file); + kiocb_end_write(&cmd->iocb); if (likely(!blk_should_fake_timeout(rq->q))) blk_mq_complete_request(rq); } @@ -391,7 +390,7 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd, } if (rw == ITER_SOURCE) { - file_start_write(lo->lo_backing_file); + kiocb_start_write(&cmd->iocb); ret = file->f_op->write_iter(&cmd->iocb, &iter); } else ret = file->f_op->read_iter(&cmd->iocb, &iter); diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c index 7671bd158545..c1c0a4759c7e 100644 --- a/drivers/bus/fsl-mc/fsl-mc-bus.c +++ b/drivers/bus/fsl-mc/fsl-mc-bus.c @@ -943,6 +943,7 @@ struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev, struct fsl_mc_obj_desc endpoint_desc = {{ 0 }}; struct dprc_endpoint endpoint1 = {{ 0 }}; struct dprc_endpoint endpoint2 = {{ 0 }}; + struct fsl_mc_bus *mc_bus; int state, err; mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent); @@ -966,6 +967,8 @@ struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev, strcpy(endpoint_desc.type, endpoint2.type); endpoint_desc.id = endpoint2.id; endpoint = fsl_mc_device_lookup(&endpoint_desc, mc_bus_dev); + if (endpoint) + return endpoint; /* * We know that the device has an endpoint because we verified by @@ -973,17 +976,13 @@ struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev, * yet discovered by the fsl-mc bus, thus the lookup returned NULL. * Force a rescan of the devices in this container and retry the lookup. 
*/ - if (!endpoint) { - struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev); - - if (mutex_trylock(&mc_bus->scan_mutex)) { - err = dprc_scan_objects(mc_bus_dev, true); - mutex_unlock(&mc_bus->scan_mutex); - } - - if (err < 0) - return ERR_PTR(err); + mc_bus = to_fsl_mc_bus(mc_bus_dev); + if (mutex_trylock(&mc_bus->scan_mutex)) { + err = dprc_scan_objects(mc_bus_dev, true); + mutex_unlock(&mc_bus->scan_mutex); } + if (err < 0) + return ERR_PTR(err); endpoint = fsl_mc_device_lookup(&endpoint_desc, mc_bus_dev); /* diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c index 09549451dd51..2edc13ca184e 100644 --- a/drivers/clocksource/hyperv_timer.c +++ b/drivers/clocksource/hyperv_timer.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/comedi/comedi_fops.c b/drivers/comedi/comedi_fops.c index 3383a7ce27ff..c83fd14dd7ad 100644 --- a/drivers/comedi/comedi_fops.c +++ b/drivers/comedi/comedi_fops.c @@ -1556,21 +1556,27 @@ static int do_insnlist_ioctl(struct comedi_device *dev, } for (i = 0; i < n_insns; ++i) { + unsigned int n = insns[i].n; + if (insns[i].insn & INSN_MASK_WRITE) { if (copy_from_user(data, insns[i].data, - insns[i].n * sizeof(unsigned int))) { + n * sizeof(unsigned int))) { dev_dbg(dev->class_dev, "copy_from_user failed\n"); ret = -EFAULT; goto error; } + if (n < MIN_SAMPLES) { + memset(&data[n], 0, (MIN_SAMPLES - n) * + sizeof(unsigned int)); + } } ret = parse_insn(dev, insns + i, data, file); if (ret < 0) goto error; if (insns[i].insn & INSN_MASK_READ) { if (copy_to_user(insns[i].data, data, - insns[i].n * sizeof(unsigned int))) { + n * sizeof(unsigned int))) { dev_dbg(dev->class_dev, "copy_to_user failed\n"); ret = -EFAULT; @@ -1589,6 +1595,16 @@ error: return i; } +#define MAX_INSNS MAX_SAMPLES +static int check_insnlist_len(struct comedi_device *dev, unsigned int n_insns) +{ + if (n_insns > MAX_INSNS) { + dev_dbg(dev->class_dev, "insnlist length too large\n"); + return -EINVAL; + } + return 0; +} + /* * COMEDI_INSN ioctl * synchronous instruction @@ -1633,6 +1649,10 @@ static int do_insn_ioctl(struct comedi_device *dev, ret = -EFAULT; goto error; } + if (insn->n < MIN_SAMPLES) { + memset(&data[insn->n], 0, + (MIN_SAMPLES - insn->n) * sizeof(unsigned int)); + } } ret = parse_insn(dev, insn, data, file); if (ret < 0) @@ -2239,6 +2259,9 @@ static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd, rc = -EFAULT; break; } + rc = check_insnlist_len(dev, insnlist.n_insns); + if (rc) + break; insns = kcalloc(insnlist.n_insns, sizeof(*insns), GFP_KERNEL); if (!insns) { rc = -ENOMEM; @@ -3142,6 +3165,9 @@ static int compat_insnlist(struct file *file, unsigned long arg) if (copy_from_user(&insnlist32, compat_ptr(arg), sizeof(insnlist32))) return -EFAULT; + rc = check_insnlist_len(dev, insnlist32.n_insns); + if (rc) + return rc; insns = kcalloc(insnlist32.n_insns, sizeof(*insns), GFP_KERNEL); if (!insns) return -ENOMEM; diff --git a/drivers/comedi/drivers.c b/drivers/comedi/drivers.c index 376130bfba8a..9e4b7c840a8f 100644 --- a/drivers/comedi/drivers.c +++ b/drivers/comedi/drivers.c @@ -339,10 +339,10 @@ int comedi_dio_insn_config(struct comedi_device *dev, unsigned int *data, unsigned int mask) { - unsigned int chan_mask = 1 << CR_CHAN(insn->chanspec); + unsigned int chan = CR_CHAN(insn->chanspec); - if (!mask) - mask = chan_mask; + if (!mask && chan < 32) + mask = 1U << chan; switch (data[0]) { case INSN_CONFIG_DIO_INPUT: @@ -382,7 +382,7 @@ EXPORT_SYMBOL_GPL(comedi_dio_insn_config); 
unsigned int comedi_dio_update_state(struct comedi_subdevice *s, unsigned int *data) { - unsigned int chanmask = (s->n_chan < 32) ? ((1 << s->n_chan) - 1) + unsigned int chanmask = (s->n_chan < 32) ? ((1U << s->n_chan) - 1) : 0xffffffff; unsigned int mask = data[0] & chanmask; unsigned int bits = data[1]; @@ -615,6 +615,9 @@ static int insn_rw_emulate_bits(struct comedi_device *dev, unsigned int _data[2]; int ret; + if (insn->n == 0) + return 0; + memset(_data, 0, sizeof(_data)); memset(&_insn, 0, sizeof(_insn)); _insn.insn = INSN_BITS; @@ -625,8 +628,8 @@ static int insn_rw_emulate_bits(struct comedi_device *dev, if (insn->insn == INSN_WRITE) { if (!(s->subdev_flags & SDF_WRITABLE)) return -EINVAL; - _data[0] = 1 << (chan - base_chan); /* mask */ - _data[1] = data[0] ? (1 << (chan - base_chan)) : 0; /* bits */ + _data[0] = 1U << (chan - base_chan); /* mask */ + _data[1] = data[0] ? (1U << (chan - base_chan)) : 0; /* bits */ } ret = s->insn_bits(dev, s, &_insn, _data); @@ -709,7 +712,7 @@ static int __comedi_device_postconfig(struct comedi_device *dev) if (s->type == COMEDI_SUBD_DO) { if (s->n_chan < 32) - s->io_bits = (1 << s->n_chan) - 1; + s->io_bits = (1U << s->n_chan) - 1; else s->io_bits = 0xffffffff; } diff --git a/drivers/comedi/drivers/aio_iiro_16.c b/drivers/comedi/drivers/aio_iiro_16.c index b00fab0b89d4..739cc4db52ac 100644 --- a/drivers/comedi/drivers/aio_iiro_16.c +++ b/drivers/comedi/drivers/aio_iiro_16.c @@ -177,7 +177,8 @@ static int aio_iiro_16_attach(struct comedi_device *dev, * Digital input change of state interrupts are optionally supported * using IRQ 2-7, 10-12, 14, or 15. */ - if ((1 << it->options[1]) & 0xdcfc) { + if (it->options[1] > 0 && it->options[1] < 16 && + (1 << it->options[1]) & 0xdcfc) { ret = request_irq(it->options[1], aio_iiro_16_cos, 0, dev->board_name, dev); if (ret == 0) diff --git a/drivers/comedi/drivers/comedi_test.c b/drivers/comedi/drivers/comedi_test.c index 9747e6d1f6eb..7984950f0f99 100644 --- a/drivers/comedi/drivers/comedi_test.c +++ b/drivers/comedi/drivers/comedi_test.c @@ -792,7 +792,7 @@ static void waveform_detach(struct comedi_device *dev) { struct waveform_private *devpriv = dev->private; - if (devpriv) { + if (devpriv && dev->n_subdevices) { timer_delete_sync(&devpriv->ai_timer); timer_delete_sync(&devpriv->ao_timer); } diff --git a/drivers/comedi/drivers/das16m1.c b/drivers/comedi/drivers/das16m1.c index b8ea737ad3d1..1b638f5b5a4f 100644 --- a/drivers/comedi/drivers/das16m1.c +++ b/drivers/comedi/drivers/das16m1.c @@ -522,7 +522,8 @@ static int das16m1_attach(struct comedi_device *dev, devpriv->extra_iobase = dev->iobase + DAS16M1_8255_IOBASE; /* only irqs 2, 3, 4, 5, 6, 7, 10, 11, 12, 14, and 15 are valid */ - if ((1 << it->options[1]) & 0xdcfc) { + if (it->options[1] >= 2 && it->options[1] <= 15 && + (1 << it->options[1]) & 0xdcfc) { ret = request_irq(it->options[1], das16m1_interrupt, 0, dev->board_name, dev); if (ret == 0) diff --git a/drivers/comedi/drivers/das6402.c b/drivers/comedi/drivers/das6402.c index 68f95330de45..7660487e563c 100644 --- a/drivers/comedi/drivers/das6402.c +++ b/drivers/comedi/drivers/das6402.c @@ -567,7 +567,8 @@ static int das6402_attach(struct comedi_device *dev, das6402_reset(dev); /* IRQs 2,3,5,6,7, 10,11,15 are valid for "enhanced" mode */ - if ((1 << it->options[1]) & 0x8cec) { + if (it->options[1] > 0 && it->options[1] < 16 && + (1 << it->options[1]) & 0x8cec) { ret = request_irq(it->options[1], das6402_interrupt, 0, dev->board_name, dev); if (ret == 0) { diff --git 
a/drivers/comedi/drivers/pcl812.c b/drivers/comedi/drivers/pcl812.c index 0df639c6a595..abca61a72cf7 100644 --- a/drivers/comedi/drivers/pcl812.c +++ b/drivers/comedi/drivers/pcl812.c @@ -1149,7 +1149,8 @@ static int pcl812_attach(struct comedi_device *dev, struct comedi_devconfig *it) if (IS_ERR(dev->pacer)) return PTR_ERR(dev->pacer); - if ((1 << it->options[1]) & board->irq_bits) { + if (it->options[1] > 0 && it->options[1] < 16 && + (1 << it->options[1]) & board->irq_bits) { ret = request_irq(it->options[1], pcl812_interrupt, 0, dev->board_name, dev); if (ret == 0) diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c index 4e1ba35deda9..b19bc60cc627 100644 --- a/drivers/cpuidle/cpuidle-psci.c +++ b/drivers/cpuidle/cpuidle-psci.c @@ -45,7 +45,6 @@ struct psci_cpuidle_domain_state { static DEFINE_PER_CPU_READ_MOSTLY(struct psci_cpuidle_data, psci_cpuidle_data); static DEFINE_PER_CPU(struct psci_cpuidle_domain_state, psci_domain_state); static bool psci_cpuidle_use_syscore; -static bool psci_cpuidle_use_cpuhp; void psci_set_domain_state(struct generic_pm_domain *pd, unsigned int state_idx, u32 state) @@ -124,8 +123,12 @@ static int psci_idle_cpuhp_up(unsigned int cpu) { struct device *pd_dev = __this_cpu_read(psci_cpuidle_data.dev); - if (pd_dev) - pm_runtime_get_sync(pd_dev); + if (pd_dev) { + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) + pm_runtime_get_sync(pd_dev); + else + dev_pm_genpd_resume(pd_dev); + } return 0; } @@ -135,7 +138,11 @@ static int psci_idle_cpuhp_down(unsigned int cpu) struct device *pd_dev = __this_cpu_read(psci_cpuidle_data.dev); if (pd_dev) { - pm_runtime_put_sync(pd_dev); + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) + pm_runtime_put_sync(pd_dev); + else + dev_pm_genpd_suspend(pd_dev); + /* Clear domain state to start fresh at next online. */ psci_clear_domain_state(); } @@ -196,9 +203,6 @@ static void psci_idle_init_cpuhp(void) { int err; - if (!psci_cpuidle_use_cpuhp) - return; - err = cpuhp_setup_state_nocalls(CPUHP_AP_CPU_PM_STARTING, "cpuidle/psci:online", psci_idle_cpuhp_up, @@ -259,10 +263,8 @@ static int psci_dt_cpu_init_topology(struct cpuidle_driver *drv, * s2ram and s2idle. 
*/ drv->states[state_count - 1].enter_s2idle = psci_enter_s2idle_domain_idle_state; - if (!IS_ENABLED(CONFIG_PREEMPT_RT)) { + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) drv->states[state_count - 1].enter = psci_enter_domain_idle_state; - psci_cpuidle_use_cpuhp = true; - } return 0; } @@ -339,7 +341,6 @@ static void psci_cpu_deinit_idle(int cpu) dt_idle_detach_cpu(data->dev); psci_cpuidle_use_syscore = false; - psci_cpuidle_use_cpuhp = false; } static int psci_idle_init_cpu(struct device *dev, int cpu) diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c index af37477ffd8d..be21e4e2016c 100644 --- a/drivers/crypto/chelsio/chcr_algo.c +++ b/drivers/crypto/chelsio/chcr_algo.c @@ -314,30 +314,30 @@ static int chcr_compute_partial_hash(struct shash_desc *desc, if (digest_size == SHA1_DIGEST_SIZE) { error = crypto_shash_init(desc) ?: crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?: - crypto_shash_export(desc, (void *)&sha1_st); + crypto_shash_export_core(desc, &sha1_st); memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE); } else if (digest_size == SHA224_DIGEST_SIZE) { error = crypto_shash_init(desc) ?: crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?: - crypto_shash_export(desc, (void *)&sha256_st); + crypto_shash_export_core(desc, &sha256_st); memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE); } else if (digest_size == SHA256_DIGEST_SIZE) { error = crypto_shash_init(desc) ?: crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?: - crypto_shash_export(desc, (void *)&sha256_st); + crypto_shash_export_core(desc, &sha256_st); memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE); } else if (digest_size == SHA384_DIGEST_SIZE) { error = crypto_shash_init(desc) ?: crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?: - crypto_shash_export(desc, (void *)&sha512_st); + crypto_shash_export_core(desc, &sha512_st); memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE); } else if (digest_size == SHA512_DIGEST_SIZE) { error = crypto_shash_init(desc) ?: crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?: - crypto_shash_export(desc, (void *)&sha512_st); + crypto_shash_export_core(desc, &sha512_st); memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE); } else { error = -EINVAL; diff --git a/drivers/crypto/intel/qat/qat_common/qat_algs.c b/drivers/crypto/intel/qat/qat_common/qat_algs.c index 3c4bba4a8779..c03a69851114 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_algs.c +++ b/drivers/crypto/intel/qat/qat_common/qat_algs.c @@ -5,11 +5,11 @@ #include #include #include +#include #include #include #include #include -#include #include #include #include @@ -154,19 +154,19 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash, switch (ctx->qat_hash_alg) { case ICP_QAT_HW_AUTH_ALGO_SHA1: - if (crypto_shash_export(shash, &ctx->sha1)) + if (crypto_shash_export_core(shash, &ctx->sha1)) return -EFAULT; for (i = 0; i < digest_size >> 2; i++, hash_state_out++) *hash_state_out = cpu_to_be32(ctx->sha1.state[i]); break; case ICP_QAT_HW_AUTH_ALGO_SHA256: - if (crypto_shash_export(shash, &ctx->sha256)) + if (crypto_shash_export_core(shash, &ctx->sha256)) return -EFAULT; for (i = 0; i < digest_size >> 2; i++, hash_state_out++) *hash_state_out = cpu_to_be32(ctx->sha256.state[i]); break; case ICP_QAT_HW_AUTH_ALGO_SHA512: - if (crypto_shash_export(shash, &ctx->sha512)) + if (crypto_shash_export_core(shash, &ctx->sha512)) return -EFAULT; for (i = 0; i < digest_size >> 3; i++, hash512_state_out++) *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]); 
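[Editor's illustration, not part of the patch] Both the chcr driver and qat_algs.c above do the same HMAC precompute: hash one ipad/opad block and capture the raw running state, which is why they switch from crypto_shash_export() to crypto_shash_export_core(). A hedged sketch of that pattern for SHA-256 (the helper name is made up; as I read the new API, export_core hands back only the block-level state without partial-buffer bookkeeping; error handling is trimmed):

#include <crypto/hash.h>
#include <crypto/sha2.h>
#include <linux/unaligned.h>

/* Hypothetical helper: precompute the SHA-256 state after one ipad block. */
static int example_sha256_partial(struct shash_desc *desc,
				  const u8 ipad[SHA256_BLOCK_SIZE],
				  u8 out[SHA256_DIGEST_SIZE])
{
	struct sha256_state st;
	int i, err;

	err = crypto_shash_init(desc) ?:
	      crypto_shash_update(desc, ipad, SHA256_BLOCK_SIZE) ?:
	      crypto_shash_export_core(desc, &st);
	if (err)
		return err;

	/* Emit the eight 32-bit words big-endian, mirroring the cpu_to_be32()
	 * conversion done in the qat hunks. */
	for (i = 0; i < SHA256_DIGEST_SIZE / 4; i++)
		put_unaligned_be32(st.state[i], out + 4 * i);
	return 0;
}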
@@ -190,19 +190,19 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash, switch (ctx->qat_hash_alg) { case ICP_QAT_HW_AUTH_ALGO_SHA1: - if (crypto_shash_export(shash, &ctx->sha1)) + if (crypto_shash_export_core(shash, &ctx->sha1)) return -EFAULT; for (i = 0; i < digest_size >> 2; i++, hash_state_out++) *hash_state_out = cpu_to_be32(ctx->sha1.state[i]); break; case ICP_QAT_HW_AUTH_ALGO_SHA256: - if (crypto_shash_export(shash, &ctx->sha256)) + if (crypto_shash_export_core(shash, &ctx->sha256)) return -EFAULT; for (i = 0; i < digest_size >> 2; i++, hash_state_out++) *hash_state_out = cpu_to_be32(ctx->sha256.state[i]); break; case ICP_QAT_HW_AUTH_ALGO_SHA512: - if (crypto_shash_export(shash, &ctx->sha512)) + if (crypto_shash_export_core(shash, &ctx->sha512)) return -EFAULT; for (i = 0; i < digest_size >> 3; i++, hash512_state_out++) *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]); diff --git a/drivers/dma/dw-edma/dw-edma-pcie.c b/drivers/dma/dw-edma/dw-edma-pcie.c index 49f09998e5c0..3371e0a76d3c 100644 --- a/drivers/dma/dw-edma/dw-edma-pcie.c +++ b/drivers/dma/dw-edma/dw-edma-pcie.c @@ -161,12 +161,16 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *pid) { struct dw_edma_pcie_data *pdata = (void *)pid->driver_data; - struct dw_edma_pcie_data vsec_data; + struct dw_edma_pcie_data *vsec_data __free(kfree) = NULL; struct device *dev = &pdev->dev; struct dw_edma_chip *chip; int err, nr_irqs; int i, mask; + vsec_data = kmalloc(sizeof(*vsec_data), GFP_KERNEL); + if (!vsec_data) + return -ENOMEM; + /* Enable PCI device */ err = pcim_enable_device(pdev); if (err) { @@ -174,23 +178,23 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev, return err; } - memcpy(&vsec_data, pdata, sizeof(struct dw_edma_pcie_data)); + memcpy(vsec_data, pdata, sizeof(struct dw_edma_pcie_data)); /* * Tries to find if exists a PCIe Vendor-Specific Extended Capability * for the DMA, if one exists, then reconfigures it. 
*/ - dw_edma_pcie_get_vsec_dma_data(pdev, &vsec_data); + dw_edma_pcie_get_vsec_dma_data(pdev, vsec_data); /* Mapping PCI BAR regions */ - mask = BIT(vsec_data.rg.bar); - for (i = 0; i < vsec_data.wr_ch_cnt; i++) { - mask |= BIT(vsec_data.ll_wr[i].bar); - mask |= BIT(vsec_data.dt_wr[i].bar); + mask = BIT(vsec_data->rg.bar); + for (i = 0; i < vsec_data->wr_ch_cnt; i++) { + mask |= BIT(vsec_data->ll_wr[i].bar); + mask |= BIT(vsec_data->dt_wr[i].bar); } - for (i = 0; i < vsec_data.rd_ch_cnt; i++) { - mask |= BIT(vsec_data.ll_rd[i].bar); - mask |= BIT(vsec_data.dt_rd[i].bar); + for (i = 0; i < vsec_data->rd_ch_cnt; i++) { + mask |= BIT(vsec_data->ll_rd[i].bar); + mask |= BIT(vsec_data->dt_rd[i].bar); } err = pcim_iomap_regions(pdev, mask, pci_name(pdev)); if (err) { @@ -213,7 +217,7 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev, return -ENOMEM; /* IRQs allocation */ - nr_irqs = pci_alloc_irq_vectors(pdev, 1, vsec_data.irqs, + nr_irqs = pci_alloc_irq_vectors(pdev, 1, vsec_data->irqs, PCI_IRQ_MSI | PCI_IRQ_MSIX); if (nr_irqs < 1) { pci_err(pdev, "fail to alloc IRQ vector (number of IRQs=%u)\n", @@ -224,22 +228,22 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev, /* Data structure initialization */ chip->dev = dev; - chip->mf = vsec_data.mf; + chip->mf = vsec_data->mf; chip->nr_irqs = nr_irqs; chip->ops = &dw_edma_pcie_plat_ops; - chip->ll_wr_cnt = vsec_data.wr_ch_cnt; - chip->ll_rd_cnt = vsec_data.rd_ch_cnt; + chip->ll_wr_cnt = vsec_data->wr_ch_cnt; + chip->ll_rd_cnt = vsec_data->rd_ch_cnt; - chip->reg_base = pcim_iomap_table(pdev)[vsec_data.rg.bar]; + chip->reg_base = pcim_iomap_table(pdev)[vsec_data->rg.bar]; if (!chip->reg_base) return -ENOMEM; for (i = 0; i < chip->ll_wr_cnt; i++) { struct dw_edma_region *ll_region = &chip->ll_region_wr[i]; struct dw_edma_region *dt_region = &chip->dt_region_wr[i]; - struct dw_edma_block *ll_block = &vsec_data.ll_wr[i]; - struct dw_edma_block *dt_block = &vsec_data.dt_wr[i]; + struct dw_edma_block *ll_block = &vsec_data->ll_wr[i]; + struct dw_edma_block *dt_block = &vsec_data->dt_wr[i]; ll_region->vaddr.io = pcim_iomap_table(pdev)[ll_block->bar]; if (!ll_region->vaddr.io) @@ -263,8 +267,8 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev, for (i = 0; i < chip->ll_rd_cnt; i++) { struct dw_edma_region *ll_region = &chip->ll_region_rd[i]; struct dw_edma_region *dt_region = &chip->dt_region_rd[i]; - struct dw_edma_block *ll_block = &vsec_data.ll_rd[i]; - struct dw_edma_block *dt_block = &vsec_data.dt_rd[i]; + struct dw_edma_block *ll_block = &vsec_data->ll_rd[i]; + struct dw_edma_block *dt_block = &vsec_data->dt_rd[i]; ll_region->vaddr.io = pcim_iomap_table(pdev)[ll_block->bar]; if (!ll_region->vaddr.io) @@ -298,31 +302,31 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev, pci_dbg(pdev, "Version:\tUnknown (0x%x)\n", chip->mf); pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p)\n", - vsec_data.rg.bar, vsec_data.rg.off, vsec_data.rg.sz, + vsec_data->rg.bar, vsec_data->rg.off, vsec_data->rg.sz, chip->reg_base); for (i = 0; i < chip->ll_wr_cnt; i++) { pci_dbg(pdev, "L. 
List:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n", - i, vsec_data.ll_wr[i].bar, - vsec_data.ll_wr[i].off, chip->ll_region_wr[i].sz, + i, vsec_data->ll_wr[i].bar, + vsec_data->ll_wr[i].off, chip->ll_region_wr[i].sz, chip->ll_region_wr[i].vaddr.io, &chip->ll_region_wr[i].paddr); pci_dbg(pdev, "Data:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n", - i, vsec_data.dt_wr[i].bar, - vsec_data.dt_wr[i].off, chip->dt_region_wr[i].sz, + i, vsec_data->dt_wr[i].bar, + vsec_data->dt_wr[i].off, chip->dt_region_wr[i].sz, chip->dt_region_wr[i].vaddr.io, &chip->dt_region_wr[i].paddr); } for (i = 0; i < chip->ll_rd_cnt; i++) { pci_dbg(pdev, "L. List:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n", - i, vsec_data.ll_rd[i].bar, - vsec_data.ll_rd[i].off, chip->ll_region_rd[i].sz, + i, vsec_data->ll_rd[i].bar, + vsec_data->ll_rd[i].off, chip->ll_region_rd[i].sz, chip->ll_region_rd[i].vaddr.io, &chip->ll_region_rd[i].paddr); pci_dbg(pdev, "Data:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n", - i, vsec_data.dt_rd[i].bar, - vsec_data.dt_rd[i].off, chip->dt_region_rd[i].sz, + i, vsec_data->dt_rd[i].bar, + vsec_data->dt_rd[i].off, chip->dt_region_rd[i].sz, chip->dt_region_rd[i].vaddr.io, &chip->dt_region_rd[i].paddr); } diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c index 47c8adfdc155..9f0c41ca7770 100644 --- a/drivers/dma/mediatek/mtk-cqdma.c +++ b/drivers/dma/mediatek/mtk-cqdma.c @@ -449,9 +449,9 @@ static enum dma_status mtk_cqdma_tx_status(struct dma_chan *c, return ret; spin_lock_irqsave(&cvc->pc->lock, flags); - spin_lock_irqsave(&cvc->vc.lock, flags); + spin_lock(&cvc->vc.lock); vd = mtk_cqdma_find_active_desc(c, cookie); - spin_unlock_irqrestore(&cvc->vc.lock, flags); + spin_unlock(&cvc->vc.lock); spin_unlock_irqrestore(&cvc->pc->lock, flags); if (vd) { diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c index 0d6324c4e2be..7a2488a0d6a3 100644 --- a/drivers/dma/nbpfaxi.c +++ b/drivers/dma/nbpfaxi.c @@ -1351,7 +1351,7 @@ static int nbpf_probe(struct platform_device *pdev) if (irqs == 1) { eirq = irqbuf[0]; - for (i = 0; i <= num_channels; i++) + for (i = 0; i < num_channels; i++) nbpf->chan[i].irq = irqbuf[0]; } else { eirq = platform_get_irq_byname(pdev, "error"); @@ -1361,16 +1361,15 @@ static int nbpf_probe(struct platform_device *pdev) if (irqs == num_channels + 1) { struct nbpf_channel *chan; - for (i = 0, chan = nbpf->chan; i <= num_channels; + for (i = 0, chan = nbpf->chan; i < num_channels; i++, chan++) { /* Skip the error IRQ */ if (irqbuf[i] == eirq) i++; + if (i >= ARRAY_SIZE(irqbuf)) + return -EINVAL; chan->irq = irqbuf[i]; } - - if (chan != nbpf->chan + num_channels) - return -EINVAL; } else { /* 2 IRQs and more than one channel */ if (irqbuf[0] == eirq) @@ -1378,7 +1377,7 @@ static int nbpf_probe(struct platform_device *pdev) else irq = irqbuf[0]; - for (i = 0; i <= num_channels; i++) + for (i = 0; i < num_channels; i++) nbpf->chan[i].irq = irq; } } diff --git a/drivers/gpio/gpiolib-acpi-quirks.c b/drivers/gpio/gpiolib-acpi-quirks.c index 219667315b2c..c13545dce349 100644 --- a/drivers/gpio/gpiolib-acpi-quirks.c +++ b/drivers/gpio/gpiolib-acpi-quirks.c @@ -331,6 +331,19 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = { .ignore_interrupt = "AMDI0030:00@11", }, }, + { + /* + * Wakeup only works when keyboard backlight is turned off + * https://gitlab.freedesktop.org/drm/amd/-/issues/4169 + */ + .matches = { + 
DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_FAMILY, "Acer Nitro V 15"), + }, + .driver_data = &(struct acpi_gpiolib_dmi_quirk) { + .ignore_interrupt = "AMDI0030:00@8", + }, + }, {} /* Terminating entry */ }; diff --git a/drivers/gpio/gpiolib-devres.c b/drivers/gpio/gpiolib-devres.c index 4d5f83b17624..72422c5db364 100644 --- a/drivers/gpio/gpiolib-devres.c +++ b/drivers/gpio/gpiolib-devres.c @@ -319,7 +319,7 @@ EXPORT_SYMBOL_GPL(devm_gpiod_unhinge); */ void devm_gpiod_put_array(struct device *dev, struct gpio_descs *descs) { - devm_remove_action(dev, devm_gpiod_release_array, descs); + devm_release_action(dev, devm_gpiod_release_array, descs); } EXPORT_SYMBOL_GPL(devm_gpiod_put_array); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 78f8755996f0..aa32df7e2fb2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -5193,6 +5193,8 @@ exit: dev->dev->power.disable_depth--; #endif } + + amdgpu_vram_mgr_clear_reset_blocks(adev); adev->in_suspend = false; if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 426834806fbf..6ac0ce361a2d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -427,6 +427,7 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid, { unsigned long flags; ktime_t deadline; + bool ret; if (unlikely(ring->adev->debug_disable_soft_recovery)) return false; @@ -441,12 +442,16 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid, dma_fence_set_error(fence, -ENODATA); spin_unlock_irqrestore(fence->lock, flags); - atomic_inc(&ring->adev->gpu_reset_counter); while (!dma_fence_is_signaled(fence) && ktime_to_ns(ktime_sub(deadline, ktime_get())) > 0) ring->funcs->soft_recovery(ring, vmid); - return dma_fence_is_signaled(fence); + ret = dma_fence_is_signaled(fence); + /* increment the counter only if soft reset worked */ + if (ret) + atomic_inc(&ring->adev->gpu_reset_counter); + + return ret; } /* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 208b7d1d8a27..450e4bf093b7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -154,6 +154,7 @@ int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr, uint64_t start, uint64_t size); int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr, uint64_t start); +void amdgpu_vram_mgr_clear_reset_blocks(struct amdgpu_device *adev); bool amdgpu_res_cpu_visible(struct amdgpu_device *adev, struct ttm_resource *res); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index abdc52b0895a..07c936e90d8e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -782,6 +782,23 @@ uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram_mgr *mgr) return atomic64_read(&mgr->vis_usage); } +/** + * amdgpu_vram_mgr_clear_reset_blocks - reset clear blocks + * + * @adev: amdgpu device pointer + * + * Reset the cleared drm buddy blocks. 
+ */ +void amdgpu_vram_mgr_clear_reset_blocks(struct amdgpu_device *adev) +{ + struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr; + struct drm_buddy *mm = &mgr->mm; + + mutex_lock(&mgr->lock); + drm_buddy_reset_clear(mm, false); + mutex_unlock(&mgr->lock); +} + /** * amdgpu_vram_mgr_intersects - test each drm buddy block for intersection * diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 5ee2237d8ee8..bc983ecf3d99 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -4640,6 +4640,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring) memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation)); /* reset ring buffer */ ring->wptr = 0; + atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0); amdgpu_ring_clear_ring(ring); } return 0; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c index 87058271b00c..2551823382f8 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c @@ -728,7 +728,16 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, * support programmable degamma anywhere. */ is_dcn = dm->adev->dm.dc->caps.color.dpp.dcn_arch; - drm_crtc_enable_color_mgmt(&acrtc->base, is_dcn ? MAX_COLOR_LUT_ENTRIES : 0, + /* Dont't enable DRM CRTC degamma property for DCN401 since the + * pre-blending degamma LUT doesn't apply to cursor, and therefore + * can't work similar to a post-blending degamma LUT as in other hw + * versions. + * TODO: revisit it once KMS plane color API is merged. + */ + drm_crtc_enable_color_mgmt(&acrtc->base, + (is_dcn && + dm->adev->dm.dc->ctx->dce_version != DCN_VERSION_4_01) ? + MAX_COLOR_LUT_ENTRIES : 0, true, MAX_COLOR_LUT_ENTRIES); drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c index a3b8e3d4a429..4b17d2fcd565 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c @@ -1565,7 +1565,7 @@ struct clk_mgr_internal *dcn401_clk_mgr_construct( clk_mgr->base.bw_params = kzalloc(sizeof(*clk_mgr->base.bw_params), GFP_KERNEL); if (!clk_mgr->base.bw_params) { BREAK_TO_DEBUGGER(); - kfree(clk_mgr); + kfree(clk_mgr401); return NULL; } @@ -1576,6 +1576,7 @@ struct clk_mgr_internal *dcn401_clk_mgr_construct( if (!clk_mgr->wm_range_table) { BREAK_TO_DEBUGGER(); kfree(clk_mgr->base.bw_params); + kfree(clk_mgr401); return NULL; } diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c index de9c23537465..834b42a4d31f 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c @@ -1373,7 +1373,7 @@ static int ti_sn_bridge_probe(struct auxiliary_device *adev, regmap_update_bits(pdata->regmap, SN_HPD_DISABLE_REG, HPD_DISABLE, 0); mutex_unlock(&pdata->comms_mutex); - }; + } drm_bridge_add(&pdata->bridge); diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c index dc622c78db9d..ea78c6c8ca7a 100644 --- a/drivers/gpu/drm/display/drm_dp_helper.c +++ b/drivers/gpu/drm/display/drm_dp_helper.c @@ -725,7 +725,7 @@ ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset, * monitor doesn't power down exactly after the throw away read. 
*/ if (!aux->is_remote) { - ret = drm_dp_dpcd_probe(aux, DP_LANE0_1_STATUS); + ret = drm_dp_dpcd_probe(aux, DP_TRAINING_PATTERN_SET); if (ret < 0) return ret; } diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c index 241c855f891f..66aff35f8647 100644 --- a/drivers/gpu/drm/drm_buddy.c +++ b/drivers/gpu/drm/drm_buddy.c @@ -404,6 +404,49 @@ drm_get_buddy(struct drm_buddy_block *block) } EXPORT_SYMBOL(drm_get_buddy); +/** + * drm_buddy_reset_clear - reset blocks clear state + * + * @mm: DRM buddy manager + * @is_clear: blocks clear state + * + * Reset the clear state based on @is_clear value for each block + * in the freelist. + */ +void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear) +{ + u64 root_size, size, start; + unsigned int order; + int i; + + size = mm->size; + for (i = 0; i < mm->n_roots; ++i) { + order = ilog2(size) - ilog2(mm->chunk_size); + start = drm_buddy_block_offset(mm->roots[i]); + __force_merge(mm, start, start + size, order); + + root_size = mm->chunk_size << order; + size -= root_size; + } + + for (i = 0; i <= mm->max_order; ++i) { + struct drm_buddy_block *block; + + list_for_each_entry_reverse(block, &mm->free_list[i], link) { + if (is_clear != drm_buddy_block_is_clear(block)) { + if (is_clear) { + mark_cleared(block); + mm->clear_avail += drm_buddy_block_size(mm, block); + } else { + clear_reset(block); + mm->clear_avail -= drm_buddy_block_size(mm, block); + } + } + } + } +} +EXPORT_SYMBOL(drm_buddy_reset_clear); + /** * drm_buddy_free_block - free a block * diff --git a/drivers/gpu/drm/drm_gem_dma_helper.c b/drivers/gpu/drm/drm_gem_dma_helper.c index b7f033d4352a..4f0320df858f 100644 --- a/drivers/gpu/drm/drm_gem_dma_helper.c +++ b/drivers/gpu/drm/drm_gem_dma_helper.c @@ -230,7 +230,7 @@ void drm_gem_dma_free(struct drm_gem_dma_object *dma_obj) if (drm_gem_is_imported(gem_obj)) { if (dma_obj->vaddr) - dma_buf_vunmap_unlocked(gem_obj->dma_buf, &map); + dma_buf_vunmap_unlocked(gem_obj->import_attach->dmabuf, &map); drm_prime_gem_destroy(gem_obj, dma_obj->sgt); } else if (dma_obj->vaddr) { if (dma_obj->map_noncoherent) diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c index 6f72e7a0f427..6ff22e04029e 100644 --- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c +++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c @@ -419,6 +419,7 @@ EXPORT_SYMBOL(drm_gem_fb_vunmap); static void __drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir, unsigned int num_planes) { + struct dma_buf_attachment *import_attach; struct drm_gem_object *obj; int ret; @@ -427,9 +428,10 @@ static void __drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_dat obj = drm_gem_fb_get_obj(fb, num_planes); if (!obj) continue; + import_attach = obj->import_attach; if (!drm_gem_is_imported(obj)) continue; - ret = dma_buf_end_cpu_access(obj->dma_buf, dir); + ret = dma_buf_end_cpu_access(import_attach->dmabuf, dir); if (ret) drm_err(fb->dev, "dma_buf_end_cpu_access(%u, %d) failed: %d\n", ret, num_planes, dir); @@ -452,6 +454,7 @@ static void __drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_dat */ int drm_gem_fb_begin_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir) { + struct dma_buf_attachment *import_attach; struct drm_gem_object *obj; unsigned int i; int ret; @@ -462,9 +465,10 @@ int drm_gem_fb_begin_cpu_access(struct drm_framebuffer *fb, enum dma_data_direct ret = -EINVAL; goto err___drm_gem_fb_end_cpu_access; } + import_attach = obj->import_attach; if 
(!drm_gem_is_imported(obj)) continue; - ret = dma_buf_begin_cpu_access(obj->dma_buf, dir); + ret = dma_buf_begin_cpu_access(import_attach->dmabuf, dir); if (ret) goto err___drm_gem_fb_end_cpu_access; } diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index aa43265f4f4f..a5dbee6974ab 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -349,7 +349,7 @@ int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem, int ret = 0; if (drm_gem_is_imported(obj)) { - ret = dma_buf_vmap(obj->dma_buf, map); + ret = dma_buf_vmap(obj->import_attach->dmabuf, map); } else { pgprot_t prot = PAGE_KERNEL; @@ -409,7 +409,7 @@ void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem, struct drm_gem_object *obj = &shmem->base; if (drm_gem_is_imported(obj)) { - dma_buf_vunmap(obj->dma_buf, map); + dma_buf_vunmap(obj->import_attach->dmabuf, map); } else { dma_resv_assert_held(shmem->base.resv); diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index d828502268b8..a0a5d725eab0 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -453,7 +453,13 @@ struct dma_buf *drm_gem_prime_handle_to_dmabuf(struct drm_device *dev, } mutex_lock(&dev->object_name_lock); - /* re-export the original imported/exported object */ + /* re-export the original imported object */ + if (obj->import_attach) { + dmabuf = obj->import_attach->dmabuf; + get_dma_buf(dmabuf); + goto out_have_obj; + } + if (obj->dma_buf) { get_dma_buf(obj->dma_buf); dmabuf = obj->dma_buf; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c index 917ad527c961..40a50c60dfff 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c @@ -65,7 +65,7 @@ static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj) struct iosys_map map = IOSYS_MAP_INIT_VADDR(etnaviv_obj->vaddr); if (etnaviv_obj->vaddr) - dma_buf_vunmap_unlocked(etnaviv_obj->base.dma_buf, &map); + dma_buf_vunmap_unlocked(etnaviv_obj->base.import_attach->dmabuf, &map); /* Don't drop the pages for imported dmabuf, as they are not * ours, just free the array we allocated: @@ -82,7 +82,7 @@ static void *etnaviv_gem_prime_vmap_impl(struct etnaviv_gem_object *etnaviv_obj) lockdep_assert_held(&etnaviv_obj->lock); - ret = dma_buf_vmap(etnaviv_obj->base.dma_buf, &map); + ret = dma_buf_vmap(etnaviv_obj->base.import_attach->dmabuf, &map); if (ret) return NULL; return map.vaddr; diff --git a/drivers/gpu/drm/mediatek/mtk_crtc.c b/drivers/gpu/drm/mediatek/mtk_crtc.c index 8f6fba4217ec..bc7527542fdc 100644 --- a/drivers/gpu/drm/mediatek/mtk_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_crtc.c @@ -719,6 +719,39 @@ int mtk_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane, return 0; } +void mtk_crtc_plane_disable(struct drm_crtc *crtc, struct drm_plane *plane) +{ +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc); + struct mtk_plane_state *plane_state = to_mtk_plane_state(plane->state); + int i; + + /* no need to wait for disabling the plane by CPU */ + if (!mtk_crtc->cmdq_client.chan) + return; + + if (!mtk_crtc->enabled) + return; + + /* set pending plane state to disabled */ + for (i = 0; i < mtk_crtc->layer_nr; i++) { + struct drm_plane *mtk_plane = &mtk_crtc->planes[i]; + struct mtk_plane_state *mtk_plane_state = to_mtk_plane_state(mtk_plane->state); + + if (mtk_plane->index == plane->index) { + 
memcpy(mtk_plane_state, plane_state, sizeof(*plane_state)); + break; + } + } + mtk_crtc_update_config(mtk_crtc, false); + + /* wait for planes to be disabled by CMDQ */ + wait_event_timeout(mtk_crtc->cb_blocking_queue, + mtk_crtc->cmdq_vblank_cnt == 0, + msecs_to_jiffies(500)); +#endif +} + void mtk_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane, struct drm_atomic_state *state) { @@ -930,7 +963,8 @@ static int mtk_crtc_init_comp_planes(struct drm_device *drm_dev, mtk_ddp_comp_supported_rotations(comp), mtk_ddp_comp_get_blend_modes(comp), mtk_ddp_comp_get_formats(comp), - mtk_ddp_comp_get_num_formats(comp), i); + mtk_ddp_comp_get_num_formats(comp), + mtk_ddp_comp_is_afbc_supported(comp), i); if (ret) return ret; diff --git a/drivers/gpu/drm/mediatek/mtk_crtc.h b/drivers/gpu/drm/mediatek/mtk_crtc.h index 388e900b6f4d..828f109b83e7 100644 --- a/drivers/gpu/drm/mediatek/mtk_crtc.h +++ b/drivers/gpu/drm/mediatek/mtk_crtc.h @@ -21,6 +21,7 @@ int mtk_crtc_create(struct drm_device *drm_dev, const unsigned int *path, unsigned int num_conn_routes); int mtk_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane, struct mtk_plane_state *state); +void mtk_crtc_plane_disable(struct drm_crtc *crtc, struct drm_plane *plane); void mtk_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane, struct drm_atomic_state *plane_state); struct device *mtk_crtc_dma_dev_get(struct drm_crtc *crtc); diff --git a/drivers/gpu/drm/mediatek/mtk_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_ddp_comp.c index edc6417639e6..ac6620e10262 100644 --- a/drivers/gpu/drm/mediatek/mtk_ddp_comp.c +++ b/drivers/gpu/drm/mediatek/mtk_ddp_comp.c @@ -366,6 +366,7 @@ static const struct mtk_ddp_comp_funcs ddp_ovl = { .get_blend_modes = mtk_ovl_get_blend_modes, .get_formats = mtk_ovl_get_formats, .get_num_formats = mtk_ovl_get_num_formats, + .is_afbc_supported = mtk_ovl_is_afbc_supported, }; static const struct mtk_ddp_comp_funcs ddp_postmask = { diff --git a/drivers/gpu/drm/mediatek/mtk_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_ddp_comp.h index 39720b27f4e9..7289b3dcf22f 100644 --- a/drivers/gpu/drm/mediatek/mtk_ddp_comp.h +++ b/drivers/gpu/drm/mediatek/mtk_ddp_comp.h @@ -83,6 +83,7 @@ struct mtk_ddp_comp_funcs { u32 (*get_blend_modes)(struct device *dev); const u32 *(*get_formats)(struct device *dev); size_t (*get_num_formats)(struct device *dev); + bool (*is_afbc_supported)(struct device *dev); void (*connect)(struct device *dev, struct device *mmsys_dev, unsigned int next); void (*disconnect)(struct device *dev, struct device *mmsys_dev, unsigned int next); void (*add)(struct device *dev, struct mtk_mutex *mutex); @@ -294,6 +295,14 @@ size_t mtk_ddp_comp_get_num_formats(struct mtk_ddp_comp *comp) return 0; } +static inline bool mtk_ddp_comp_is_afbc_supported(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->is_afbc_supported) + return comp->funcs->is_afbc_supported(comp->dev); + + return false; +} + static inline bool mtk_ddp_comp_add(struct mtk_ddp_comp *comp, struct mtk_mutex *mutex) { if (comp->funcs && comp->funcs->add) { diff --git a/drivers/gpu/drm/mediatek/mtk_disp_drv.h b/drivers/gpu/drm/mediatek/mtk_disp_drv.h index 04217a36939c..679d413bf10b 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_drv.h +++ b/drivers/gpu/drm/mediatek/mtk_disp_drv.h @@ -106,6 +106,7 @@ void mtk_ovl_disable_vblank(struct device *dev); u32 mtk_ovl_get_blend_modes(struct device *dev); const u32 *mtk_ovl_get_formats(struct device *dev); size_t mtk_ovl_get_num_formats(struct device *dev); +bool 
mtk_ovl_is_afbc_supported(struct device *dev); void mtk_ovl_adaptor_add_comp(struct device *dev, struct mtk_mutex *mutex); void mtk_ovl_adaptor_remove_comp(struct device *dev, struct mtk_mutex *mutex); diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c index d0581c4e3c99..e0236353d499 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c @@ -236,6 +236,13 @@ size_t mtk_ovl_get_num_formats(struct device *dev) return ovl->data->num_formats; } +bool mtk_ovl_is_afbc_supported(struct device *dev) +{ + struct mtk_disp_ovl *ovl = dev_get_drvdata(dev); + + return ovl->data->supports_afbc; +} + int mtk_ovl_clk_enable(struct device *dev) { struct mtk_disp_ovl *ovl = dev_get_drvdata(dev); diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c index 6fb85bc6487a..a2fdceadf209 100644 --- a/drivers/gpu/drm/mediatek/mtk_dpi.c +++ b/drivers/gpu/drm/mediatek/mtk_dpi.c @@ -1095,7 +1095,6 @@ static const u32 mt8183_output_fmts[] = { }; static const u32 mt8195_dpi_output_fmts[] = { - MEDIA_BUS_FMT_BGR888_1X24, MEDIA_BUS_FMT_RGB888_1X24, MEDIA_BUS_FMT_RGB888_2X12_LE, MEDIA_BUS_FMT_RGB888_2X12_BE, @@ -1103,18 +1102,19 @@ static const u32 mt8195_dpi_output_fmts[] = { MEDIA_BUS_FMT_YUYV8_1X16, MEDIA_BUS_FMT_YUYV10_1X20, MEDIA_BUS_FMT_YUYV12_1X24, + MEDIA_BUS_FMT_BGR888_1X24, MEDIA_BUS_FMT_YUV8_1X24, MEDIA_BUS_FMT_YUV10_1X30, }; static const u32 mt8195_dp_intf_output_fmts[] = { - MEDIA_BUS_FMT_BGR888_1X24, MEDIA_BUS_FMT_RGB888_1X24, MEDIA_BUS_FMT_RGB888_2X12_LE, MEDIA_BUS_FMT_RGB888_2X12_BE, MEDIA_BUS_FMT_RGB101010_1X30, MEDIA_BUS_FMT_YUYV8_1X16, MEDIA_BUS_FMT_YUYV10_1X20, + MEDIA_BUS_FMT_BGR888_1X24, MEDIA_BUS_FMT_YUV8_1X24, MEDIA_BUS_FMT_YUV10_1X30, }; diff --git a/drivers/gpu/drm/mediatek/mtk_plane.c b/drivers/gpu/drm/mediatek/mtk_plane.c index 655106bbb76d..cbc4f37da8ba 100644 --- a/drivers/gpu/drm/mediatek/mtk_plane.c +++ b/drivers/gpu/drm/mediatek/mtk_plane.c @@ -285,9 +285,14 @@ static void mtk_plane_atomic_disable(struct drm_plane *plane, struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane); struct mtk_plane_state *mtk_plane_state = to_mtk_plane_state(new_state); + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, + plane); + mtk_plane_state->pending.enable = false; wmb(); /* Make sure the above parameter is set before update */ mtk_plane_state->pending.dirty = true; + + mtk_crtc_plane_disable(old_state->crtc, plane); } static void mtk_plane_atomic_update(struct drm_plane *plane, @@ -321,7 +326,8 @@ static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = { int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane, unsigned long possible_crtcs, enum drm_plane_type type, unsigned int supported_rotations, const u32 blend_modes, - const u32 *formats, size_t num_formats, unsigned int plane_idx) + const u32 *formats, size_t num_formats, + bool supports_afbc, unsigned int plane_idx) { int err; @@ -332,7 +338,9 @@ int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane, err = drm_universal_plane_init(dev, plane, possible_crtcs, &mtk_plane_funcs, formats, - num_formats, modifiers, type, NULL); + num_formats, + supports_afbc ? 
modifiers : NULL, + type, NULL); if (err) { DRM_ERROR("failed to initialize plane\n"); return err; diff --git a/drivers/gpu/drm/mediatek/mtk_plane.h b/drivers/gpu/drm/mediatek/mtk_plane.h index 3b13b89989c7..95c5fa5295d8 100644 --- a/drivers/gpu/drm/mediatek/mtk_plane.h +++ b/drivers/gpu/drm/mediatek/mtk_plane.h @@ -49,5 +49,6 @@ to_mtk_plane_state(struct drm_plane_state *state) int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane, unsigned long possible_crtcs, enum drm_plane_type type, unsigned int supported_rotations, const u32 blend_modes, - const u32 *formats, size_t num_formats, unsigned int plane_idx); + const u32 *formats, size_t num_formats, + bool supports_afbc, unsigned int plane_idx); #endif diff --git a/drivers/gpu/drm/nouveau/nvif/chan.c b/drivers/gpu/drm/nouveau/nvif/chan.c index baa10227d51a..80c01017d642 100644 --- a/drivers/gpu/drm/nouveau/nvif/chan.c +++ b/drivers/gpu/drm/nouveau/nvif/chan.c @@ -39,6 +39,9 @@ nvif_chan_gpfifo_post(struct nvif_chan *chan) const u32 pbptr = (chan->push.cur - map) + chan->func->gpfifo.post_size; const u32 gpptr = (chan->gpfifo.cur + 1) & chan->gpfifo.max; + if (!chan->func->gpfifo.post) + return 0; + return chan->func->gpfifo.post(chan, gpptr, pbptr); } diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c index 5657106c2f7d..15e2d505550f 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.c +++ b/drivers/gpu/drm/panfrost/panfrost_job.c @@ -841,7 +841,6 @@ int panfrost_job_init(struct panfrost_device *pfdev) .num_rqs = DRM_SCHED_PRIORITY_COUNT, .credit_limit = 2, .timeout = msecs_to_jiffies(JOB_TIMEOUT_MS), - .timeout_wq = pfdev->reset.wq, .name = "pan_js", .dev = pfdev->dev, }; @@ -879,6 +878,7 @@ int panfrost_job_init(struct panfrost_device *pfdev) pfdev->reset.wq = alloc_ordered_workqueue("panfrost-reset", 0); if (!pfdev->reset.wq) return -ENOMEM; + args.timeout_wq = pfdev->reset.wq; for (j = 0; j < NUM_JOB_SLOTS; j++) { js->queue[j].fence_context = dma_fence_context_alloc(1); diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index bbd39348a7ab..7a3e510327b7 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -26,7 +26,6 @@ * Jerome Glisse */ -#include #include #include #include @@ -1635,11 +1634,9 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, pci_set_power_state(pdev, PCI_D3hot); } - if (notify_clients) { - console_lock(); - drm_client_dev_suspend(dev, true); - console_unlock(); - } + if (notify_clients) + drm_client_dev_suspend(dev, false); + return 0; } @@ -1661,17 +1658,11 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool notify_clients) if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; - if (notify_clients) { - console_lock(); - } if (resume) { pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); - if (pci_enable_device(pdev)) { - if (notify_clients) - console_unlock(); + if (pci_enable_device(pdev)) return -1; - } } /* resume AGP if in use */ radeon_agp_resume(rdev); @@ -1747,10 +1738,8 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool notify_clients) if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) radeon_pm_compute_clocks(rdev); - if (notify_clients) { - drm_client_dev_resume(dev, true); - console_unlock(); - } + if (notify_clients) + drm_client_dev_resume(dev, false); return 0; } diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index 
e671aa241720..ac678de7fe5e 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c @@ -355,17 +355,6 @@ void drm_sched_entity_destroy(struct drm_sched_entity *entity) } EXPORT_SYMBOL(drm_sched_entity_destroy); -/* drm_sched_entity_clear_dep - callback to clear the entities dependency */ -static void drm_sched_entity_clear_dep(struct dma_fence *f, - struct dma_fence_cb *cb) -{ - struct drm_sched_entity *entity = - container_of(cb, struct drm_sched_entity, cb); - - entity->dependency = NULL; - dma_fence_put(f); -} - /* * drm_sched_entity_wakeup - callback to clear the entity's dependency and * wake up the scheduler @@ -376,7 +365,8 @@ static void drm_sched_entity_wakeup(struct dma_fence *f, struct drm_sched_entity *entity = container_of(cb, struct drm_sched_entity, cb); - drm_sched_entity_clear_dep(f, cb); + entity->dependency = NULL; + dma_fence_put(f); drm_sched_wakeup(entity->rq->sched); } @@ -429,13 +419,6 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity) fence = dma_fence_get(&s_fence->scheduled); dma_fence_put(entity->dependency); entity->dependency = fence; - if (!dma_fence_add_callback(fence, &entity->cb, - drm_sched_entity_clear_dep)) - return true; - - /* Ignore it when it is already scheduled */ - dma_fence_put(fence); - return false; } if (!dma_fence_add_callback(entity->dependency, &entity->cb, diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c index 1118a0250279..ce49282198cb 100644 --- a/drivers/gpu/drm/virtio/virtgpu_prime.c +++ b/drivers/gpu/drm/virtio/virtgpu_prime.c @@ -204,15 +204,16 @@ static void virtgpu_dma_buf_free_obj(struct drm_gem_object *obj) { struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); struct virtio_gpu_device *vgdev = obj->dev->dev_private; + struct dma_buf_attachment *attach = obj->import_attach; if (drm_gem_is_imported(obj)) { - struct dma_buf *dmabuf = obj->dma_buf; + struct dma_buf *dmabuf = attach->dmabuf; dma_resv_lock(dmabuf->resv, NULL); virtgpu_dma_buf_unmap(bo); dma_resv_unlock(dmabuf->resv); - dma_buf_detach(dmabuf, obj->import_attach); + dma_buf_detach(dmabuf, attach); dma_buf_put(dmabuf); } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c index c55382167c1b..e417921af584 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c @@ -85,10 +85,10 @@ static int vmw_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map) int ret; if (drm_gem_is_imported(obj)) { - ret = dma_buf_vmap(obj->dma_buf, map); + ret = dma_buf_vmap(obj->import_attach->dmabuf, map); if (!ret) { if (drm_WARN_ON(obj->dev, map->is_iomem)) { - dma_buf_vunmap(obj->dma_buf, map); + dma_buf_vunmap(obj->import_attach->dmabuf, map); return -EIO; } } @@ -102,7 +102,7 @@ static int vmw_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map) static void vmw_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map) { if (drm_gem_is_imported(obj)) - dma_buf_vunmap(obj->dma_buf, map); + dma_buf_vunmap(obj->import_attach->dmabuf, map); else drm_gem_ttm_vunmap(obj, map); } diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c index 6c4cb9576fb6..e3517ce2e18c 100644 --- a/drivers/gpu/drm/xe/xe_gt.c +++ b/drivers/gpu/drm/xe/xe_gt.c @@ -417,6 +417,8 @@ int xe_gt_init_early(struct xe_gt *gt) if (err) return err; + xe_mocs_init_early(gt); + return 0; } @@ -630,12 +632,6 @@ int xe_gt_init(struct xe_gt *gt) if (err) return err; - err = xe_gt_pagefault_init(gt); - if (err) - 
return err; - - xe_mocs_init_early(gt); - err = xe_gt_sysfs_init(gt); if (err) return err; @@ -644,6 +640,10 @@ int xe_gt_init(struct xe_gt *gt) if (err) return err; + err = xe_gt_pagefault_init(gt); + if (err) + return err; + err = xe_gt_idle_init(>->gtidle); if (err) return err; @@ -839,6 +839,9 @@ static int gt_reset(struct xe_gt *gt) goto err_out; } + if (IS_SRIOV_PF(gt_to_xe(gt))) + xe_gt_sriov_pf_stop_prepare(gt); + xe_uc_gucrc_disable(>->uc); xe_uc_stop_prepare(>->uc); xe_gt_pagefault_reset(gt); diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c index c08efca6420e..35489fa81825 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c @@ -172,6 +172,25 @@ void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid) pf_clear_vf_scratch_regs(gt, vfid); } +static void pf_cancel_restart(struct xe_gt *gt) +{ + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + + if (cancel_work_sync(>->sriov.pf.workers.restart)) + xe_gt_sriov_dbg_verbose(gt, "pending restart canceled!\n"); +} + +/** + * xe_gt_sriov_pf_stop_prepare() - Prepare to stop SR-IOV support. + * @gt: the &xe_gt + * + * This function can only be called on the PF. + */ +void xe_gt_sriov_pf_stop_prepare(struct xe_gt *gt) +{ + pf_cancel_restart(gt); +} + static void pf_restart(struct xe_gt *gt) { struct xe_device *xe = gt_to_xe(gt); diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h index f474509411c0..e2b2ff8132dc 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h @@ -13,6 +13,7 @@ int xe_gt_sriov_pf_init_early(struct xe_gt *gt); int xe_gt_sriov_pf_init(struct xe_gt *gt); void xe_gt_sriov_pf_init_hw(struct xe_gt *gt); void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid); +void xe_gt_sriov_pf_stop_prepare(struct xe_gt *gt); void xe_gt_sriov_pf_restart(struct xe_gt *gt); #else static inline int xe_gt_sriov_pf_init_early(struct xe_gt *gt) @@ -29,6 +30,10 @@ static inline void xe_gt_sriov_pf_init_hw(struct xe_gt *gt) { } +static inline void xe_gt_sriov_pf_stop_prepare(struct xe_gt *gt) +{ +} + static inline void xe_gt_sriov_pf_restart(struct xe_gt *gt) { } diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c index 2420a548cacc..53a44702c04a 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c @@ -2364,6 +2364,21 @@ int xe_gt_sriov_pf_config_restore(struct xe_gt *gt, unsigned int vfid, return err; } +static int pf_push_self_config(struct xe_gt *gt) +{ + int err; + + err = pf_push_full_vf_config(gt, PFID); + if (err) { + xe_gt_sriov_err(gt, "Failed to push self configuration (%pe)\n", + ERR_PTR(err)); + return err; + } + + xe_gt_sriov_dbg_verbose(gt, "self configuration completed\n"); + return 0; +} + static void fini_config(void *arg) { struct xe_gt *gt = arg; @@ -2387,9 +2402,17 @@ static void fini_config(void *arg) int xe_gt_sriov_pf_config_init(struct xe_gt *gt) { struct xe_device *xe = gt_to_xe(gt); + int err; xe_gt_assert(gt, IS_SRIOV_PF(xe)); + mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); + err = pf_push_self_config(gt); + mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); + + if (err) + return err; + return devm_add_action_or_reset(xe->drm.dev, fini_config, gt); } @@ -2407,6 +2430,10 @@ void xe_gt_sriov_pf_config_restart(struct xe_gt *gt) unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt)); unsigned int fail = 0, skip = 0; + 
mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); + pf_push_self_config(gt); + mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); + for (n = 1; n <= total_vfs; n++) { if (xe_gt_sriov_pf_config_is_empty(gt, n)) skip++; diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c index 66bc02302c55..07a5161c7d5b 100644 --- a/drivers/gpu/drm/xe/xe_migrate.c +++ b/drivers/gpu/drm/xe/xe_migrate.c @@ -1817,8 +1817,8 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo, xe_bo_assert_held(bo); /* Use bounce buffer for small access and unaligned access */ - if (len & XE_CACHELINE_MASK || - ((uintptr_t)buf | offset) & XE_CACHELINE_MASK) { + if (!IS_ALIGNED(len, XE_CACHELINE_BYTES) || + !IS_ALIGNED((unsigned long)buf + offset, XE_CACHELINE_BYTES)) { int buf_offset = 0; /* @@ -1848,7 +1848,7 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo, err = xe_migrate_access_memory(m, bo, offset & ~XE_CACHELINE_MASK, (void *)ptr, - sizeof(bounce), 0); + sizeof(bounce), write); if (err) return err; } else { diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c index bc1689db4cd7..7b50c7c1ee21 100644 --- a/drivers/gpu/drm/xe/xe_ring_ops.c +++ b/drivers/gpu/drm/xe/xe_ring_ops.c @@ -110,13 +110,14 @@ static int emit_bb_start(u64 batch_addr, u32 ppgtt_flag, u32 *dw, int i) return i; } -static int emit_flush_invalidate(u32 *dw, int i) +static int emit_flush_invalidate(u32 addr, u32 val, u32 *dw, int i) { dw[i++] = MI_FLUSH_DW | MI_INVALIDATE_TLB | MI_FLUSH_DW_OP_STOREDW | - MI_FLUSH_IMM_DW | MI_FLUSH_DW_STORE_INDEX; - dw[i++] = LRC_PPHWSP_FLUSH_INVAL_SCRATCH_ADDR; - dw[i++] = 0; + MI_FLUSH_IMM_DW; + + dw[i++] = addr | MI_FLUSH_DW_USE_GTT; dw[i++] = 0; + dw[i++] = val; return i; } @@ -397,23 +398,20 @@ static void __emit_job_gen12_render_compute(struct xe_sched_job *job, static void emit_migration_job_gen12(struct xe_sched_job *job, struct xe_lrc *lrc, u32 seqno) { + u32 saddr = xe_lrc_start_seqno_ggtt_addr(lrc); u32 dw[MAX_JOB_SIZE_DW], i = 0; i = emit_copy_timestamp(lrc, dw, i); - i = emit_store_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc), - seqno, dw, i); + i = emit_store_imm_ggtt(saddr, seqno, dw, i); dw[i++] = MI_ARB_ON_OFF | MI_ARB_DISABLE; /* Enabled again below */ i = emit_bb_start(job->ptrs[0].batch_addr, BIT(8), dw, i); - if (!IS_SRIOV_VF(gt_to_xe(job->q->gt))) { - /* XXX: Do we need this? Leaving for now. */ - dw[i++] = preparser_disable(true); - i = emit_flush_invalidate(dw, i); - dw[i++] = preparser_disable(false); - } + dw[i++] = preparser_disable(true); + i = emit_flush_invalidate(saddr, seqno, dw, i); + dw[i++] = preparser_disable(false); i = emit_bb_start(job->ptrs[1].batch_addr, BIT(8), dw, i); diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig index 1cd188b73b74..57623ca7f350 100644 --- a/drivers/hv/Kconfig +++ b/drivers/hv/Kconfig @@ -9,7 +9,7 @@ config HYPERV select PARAVIRT select X86_HV_CALLBACK_VECTOR if X86 select OF_EARLY_FLATTREE if OF - select SYSFB if !HYPERV_VTL_MODE + select SYSFB if EFI && !HYPERV_VTL_MODE help Select this option to run Linux as a Hyper-V client operating system. 
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c index 35f26fa1ffe7..7c7c66e0dc3f 100644 --- a/drivers/hv/channel.c +++ b/drivers/hv/channel.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index 6e084c207414..65dd299e2944 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c index be490c598785..1fe3573ae52a 100644 --- a/drivers/hv/connection.c +++ b/drivers/hv/connection.c @@ -519,7 +519,10 @@ void vmbus_set_event(struct vmbus_channel *channel) else WARN_ON_ONCE(1); } else { - hv_do_fast_hypercall8(HVCALL_SIGNAL_EVENT, channel->sig_event); + u64 control = HVCALL_SIGNAL_EVENT; + + control |= hv_nested ? HV_HYPERCALL_NESTED : 0; + hv_do_fast_hypercall8(control, channel->sig_event); } } EXPORT_SYMBOL_GPL(vmbus_set_event); diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c index 308c8f279df8..b14c5f9e0ef2 100644 --- a/drivers/hv/hv.c +++ b/drivers/hv/hv.c @@ -85,8 +85,10 @@ int hv_post_message(union hv_connection_id connection_id, else status = HV_STATUS_INVALID_PARAMETER; } else { - status = hv_do_hypercall(HVCALL_POST_MESSAGE, - aligned_msg, NULL); + u64 control = HVCALL_POST_MESSAGE; + + control |= hv_nested ? HV_HYPERCALL_NESTED : 0; + status = hv_do_hypercall(control, aligned_msg, NULL); } local_irq_restore(flags); diff --git a/drivers/hv/hv_proc.c b/drivers/hv/hv_proc.c index 7d7ecb6f6137..fbb4eb3901bb 100644 --- a/drivers/hv/hv_proc.c +++ b/drivers/hv/hv_proc.c @@ -6,6 +6,7 @@ #include #include #include +#include #include /* diff --git a/drivers/hv/mshv_common.c b/drivers/hv/mshv_common.c index 2575e6d7a71f..6f227a8a5af7 100644 --- a/drivers/hv/mshv_common.c +++ b/drivers/hv/mshv_common.c @@ -13,6 +13,7 @@ #include #include #include +#include #include "mshv.h" diff --git a/drivers/hv/mshv_root_hv_call.c b/drivers/hv/mshv_root_hv_call.c index a222a16107f6..c9c274f29c3c 100644 --- a/drivers/hv/mshv_root_hv_call.c +++ b/drivers/hv/mshv_root_hv_call.c @@ -9,6 +9,7 @@ #include #include +#include #include #include "mshv_root.h" diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c index 3c9b02471760..23ce1fb70de1 100644 --- a/drivers/hv/ring_buffer.c +++ b/drivers/hv/ring_buffer.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include "hyperv_vmbus.h" diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index 33b524b4eb5e..2ed5a1e89d69 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -2509,7 +2509,7 @@ static int vmbus_acpi_add(struct platform_device *pdev) return 0; } #endif - +#ifndef HYPERVISOR_CALLBACK_VECTOR static int vmbus_set_irq(struct platform_device *pdev) { struct irq_data *data; @@ -2534,6 +2534,7 @@ static int vmbus_set_irq(struct platform_device *pdev) return 0; } +#endif static int vmbus_device_add(struct platform_device *pdev) { @@ -2549,11 +2550,11 @@ static int vmbus_device_add(struct platform_device *pdev) if (ret) return ret; - if (!__is_defined(HYPERVISOR_CALLBACK_VECTOR)) - ret = vmbus_set_irq(pdev); +#ifndef HYPERVISOR_CALLBACK_VECTOR + ret = vmbus_set_irq(pdev); if (ret) return ret; - +#endif for_each_of_range(&parser, &range) { struct resource *res; diff --git a/drivers/hwmon/corsair-cpro.c b/drivers/hwmon/corsair-cpro.c index e1a7f7aa7f80..b7b911f8359c 100644 --- a/drivers/hwmon/corsair-cpro.c +++ b/drivers/hwmon/corsair-cpro.c @@ -89,6 +89,7 @@ struct ccp_device { 
struct mutex mutex; /* whenever buffer is used, lock before send_usb_cmd */ u8 *cmd_buffer; u8 *buffer; + int buffer_recv_size; /* number of received bytes in buffer */ int target[6]; DECLARE_BITMAP(temp_cnct, NUM_TEMP_SENSORS); DECLARE_BITMAP(fan_cnct, NUM_FANS); @@ -146,6 +147,9 @@ static int send_usb_cmd(struct ccp_device *ccp, u8 command, u8 byte1, u8 byte2, if (!t) return -ETIMEDOUT; + if (ccp->buffer_recv_size != IN_BUFFER_SIZE) + return -EPROTO; + return ccp_get_errno(ccp); } @@ -157,6 +161,7 @@ static int ccp_raw_event(struct hid_device *hdev, struct hid_report *report, u8 spin_lock(&ccp->wait_input_report_lock); if (!completion_done(&ccp->wait_input_report)) { memcpy(ccp->buffer, data, min(IN_BUFFER_SIZE, size)); + ccp->buffer_recv_size = size; complete_all(&ccp->wait_input_report); } spin_unlock(&ccp->wait_input_report_lock); diff --git a/drivers/hwmon/ina238.c b/drivers/hwmon/ina238.c index a4a41742786b..9a5fd16a4ec2 100644 --- a/drivers/hwmon/ina238.c +++ b/drivers/hwmon/ina238.c @@ -97,7 +97,7 @@ * Power (mW) = 0.2 * register value * 20000 / rshunt / 4 * gain * (Specific for SQ52206) * Power (mW) = 0.24 * register value * 20000 / rshunt / 4 * gain - * Energy (mJ) = 16 * 0.24 * register value * 20000 / rshunt / 4 * gain + * Energy (uJ) = 16 * 0.24 * register value * 20000 / rshunt / 4 * gain * 1000 */ #define INA238_CALIBRATION_VALUE 16384 #define INA238_FIXED_SHUNT 20000 @@ -500,9 +500,9 @@ static ssize_t energy1_input_show(struct device *dev, if (ret) return ret; - /* result in mJ */ - energy = div_u64(regval * INA238_FIXED_SHUNT * data->gain * 16 * - data->config->power_calculate_factor, 4 * 100 * data->rshunt); + /* result in uJ */ + energy = div_u64(regval * INA238_FIXED_SHUNT * data->gain * 16 * 10 * + data->config->power_calculate_factor, 4 * data->rshunt); return sysfs_emit(buf, "%llu\n", energy); } diff --git a/drivers/hwmon/pmbus/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c index 2bc8cccb01fd..52d4000902d5 100644 --- a/drivers/hwmon/pmbus/ucd9000.c +++ b/drivers/hwmon/pmbus/ucd9000.c @@ -226,15 +226,15 @@ static int ucd9000_gpio_set(struct gpio_chip *gc, unsigned int offset, } if (value) { - if (ret & UCD9000_GPIO_CONFIG_STATUS) + if (ret & UCD9000_GPIO_CONFIG_OUT_VALUE) return 0; - ret |= UCD9000_GPIO_CONFIG_STATUS; + ret |= UCD9000_GPIO_CONFIG_OUT_VALUE; } else { - if (!(ret & UCD9000_GPIO_CONFIG_STATUS)) + if (!(ret & UCD9000_GPIO_CONFIG_OUT_VALUE)) return 0; - ret &= ~UCD9000_GPIO_CONFIG_STATUS; + ret &= ~UCD9000_GPIO_CONFIG_OUT_VALUE; } ret |= UCD9000_GPIO_CONFIG_ENABLE; diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index 8b01df3cc8e9..5fcc9f6c33e5 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c @@ -1472,7 +1472,9 @@ omap_i2c_probe(struct platform_device *pdev) } /* reset ASAP, clearing any IRQs */ - omap_i2c_init(omap); + r = omap_i2c_init(omap); + if (r) + goto err_mux_state_deselect; if (omap->rev < OMAP_I2C_OMAP1_REV_2) r = devm_request_irq(&pdev->dev, omap->irq, omap_i2c_omap1_isr, @@ -1515,12 +1517,13 @@ omap_i2c_probe(struct platform_device *pdev) err_unuse_clocks: omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, 0); +err_mux_state_deselect: if (omap->mux_state) mux_state_deselect(omap->mux_state); err_put_pm: - pm_runtime_dont_use_autosuspend(omap->dev); pm_runtime_put_sync(omap->dev); err_disable_pm: + pm_runtime_dont_use_autosuspend(omap->dev); pm_runtime_disable(&pdev->dev); return r; diff --git a/drivers/i2c/busses/i2c-stm32.c b/drivers/i2c/busses/i2c-stm32.c index 157c64e27d0b..f84ec056e36d 100644 
--- a/drivers/i2c/busses/i2c-stm32.c +++ b/drivers/i2c/busses/i2c-stm32.c @@ -102,7 +102,6 @@ int stm32_i2c_prep_dma_xfer(struct device *dev, struct stm32_i2c_dma *dma, void *dma_async_param) { struct dma_async_tx_descriptor *txdesc; - struct device *chan_dev; int ret; if (rd_wr) { @@ -116,11 +115,10 @@ int stm32_i2c_prep_dma_xfer(struct device *dev, struct stm32_i2c_dma *dma, } dma->dma_len = len; - chan_dev = dma->chan_using->device->dev; - dma->dma_buf = dma_map_single(chan_dev, buf, dma->dma_len, + dma->dma_buf = dma_map_single(dev, buf, dma->dma_len, dma->dma_data_dir); - if (dma_mapping_error(chan_dev, dma->dma_buf)) { + if (dma_mapping_error(dev, dma->dma_buf)) { dev_err(dev, "DMA mapping failed\n"); return -EINVAL; } @@ -150,7 +148,7 @@ int stm32_i2c_prep_dma_xfer(struct device *dev, struct stm32_i2c_dma *dma, return 0; err: - dma_unmap_single(chan_dev, dma->dma_buf, dma->dma_len, + dma_unmap_single(dev, dma->dma_buf, dma->dma_len, dma->dma_data_dir); return ret; } diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c index e4aaeb2262d0..73a7b8894c0d 100644 --- a/drivers/i2c/busses/i2c-stm32f7.c +++ b/drivers/i2c/busses/i2c-stm32f7.c @@ -739,12 +739,13 @@ static void stm32f7_i2c_disable_dma_req(struct stm32f7_i2c_dev *i2c_dev) static void stm32f7_i2c_dma_callback(void *arg) { - struct stm32f7_i2c_dev *i2c_dev = (struct stm32f7_i2c_dev *)arg; + struct stm32f7_i2c_dev *i2c_dev = arg; struct stm32_i2c_dma *dma = i2c_dev->dma; - struct device *dev = dma->chan_using->device->dev; stm32f7_i2c_disable_dma_req(i2c_dev); - dma_unmap_single(dev, dma->dma_buf, dma->dma_len, dma->dma_data_dir); + dmaengine_terminate_async(dma->chan_using); + dma_unmap_single(i2c_dev->dev, dma->dma_buf, dma->dma_len, + dma->dma_data_dir); complete(&dma->dma_complete); } @@ -1510,7 +1511,6 @@ static irqreturn_t stm32f7_i2c_handle_isr_errs(struct stm32f7_i2c_dev *i2c_dev, u16 addr = f7_msg->addr; void __iomem *base = i2c_dev->base; struct device *dev = i2c_dev->dev; - struct stm32_i2c_dma *dma = i2c_dev->dma; /* Bus error */ if (status & STM32F7_I2C_ISR_BERR) { @@ -1551,10 +1551,8 @@ static irqreturn_t stm32f7_i2c_handle_isr_errs(struct stm32f7_i2c_dev *i2c_dev, } /* Disable dma */ - if (i2c_dev->use_dma) { - stm32f7_i2c_disable_dma_req(i2c_dev); - dmaengine_terminate_async(dma->chan_using); - } + if (i2c_dev->use_dma) + stm32f7_i2c_dma_callback(i2c_dev); i2c_dev->master_mode = false; complete(&i2c_dev->complete); @@ -1600,7 +1598,6 @@ static irqreturn_t stm32f7_i2c_isr_event_thread(int irq, void *data) { struct stm32f7_i2c_dev *i2c_dev = data; struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg; - struct stm32_i2c_dma *dma = i2c_dev->dma; void __iomem *base = i2c_dev->base; u32 status, mask; int ret; @@ -1619,10 +1616,8 @@ static irqreturn_t stm32f7_i2c_isr_event_thread(int irq, void *data) dev_dbg(i2c_dev->dev, "<%s>: Receive NACK (addr %x)\n", __func__, f7_msg->addr); writel_relaxed(STM32F7_I2C_ICR_NACKCF, base + STM32F7_I2C_ICR); - if (i2c_dev->use_dma) { - stm32f7_i2c_disable_dma_req(i2c_dev); - dmaengine_terminate_async(dma->chan_using); - } + if (i2c_dev->use_dma) + stm32f7_i2c_dma_callback(i2c_dev); f7_msg->result = -ENXIO; } @@ -1640,8 +1635,7 @@ static irqreturn_t stm32f7_i2c_isr_event_thread(int irq, void *data) ret = wait_for_completion_timeout(&i2c_dev->dma->dma_complete, HZ); if (!ret) { dev_dbg(i2c_dev->dev, "<%s>: Timed out\n", __func__); - stm32f7_i2c_disable_dma_req(i2c_dev); - dmaengine_terminate_async(dma->chan_using); + stm32f7_i2c_dma_callback(i2c_dev); 
f7_msg->result = -ETIMEDOUT; } } diff --git a/drivers/iio/accel/fxls8962af-core.c b/drivers/iio/accel/fxls8962af-core.c index 12598feaa693..b10a30960e1e 100644 --- a/drivers/iio/accel/fxls8962af-core.c +++ b/drivers/iio/accel/fxls8962af-core.c @@ -877,6 +877,8 @@ static int fxls8962af_buffer_predisable(struct iio_dev *indio_dev) if (ret) return ret; + synchronize_irq(data->irq); + ret = __fxls8962af_fifo_set_mode(data, false); if (data->enable_event) diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c index 99cb661fabb2..a7961c610ed2 100644 --- a/drivers/iio/accel/st_accel_core.c +++ b/drivers/iio/accel/st_accel_core.c @@ -1353,6 +1353,7 @@ static int apply_acpi_orientation(struct iio_dev *indio_dev) union acpi_object *ont; union acpi_object *elements; acpi_status status; + struct device *parent = indio_dev->dev.parent; int ret = -EINVAL; unsigned int val; int i, j; @@ -1371,7 +1372,7 @@ static int apply_acpi_orientation(struct iio_dev *indio_dev) }; - adev = ACPI_COMPANION(indio_dev->dev.parent); + adev = ACPI_COMPANION(parent); if (!adev) return -ENXIO; @@ -1380,8 +1381,7 @@ static int apply_acpi_orientation(struct iio_dev *indio_dev) if (status == AE_NOT_FOUND) { return -ENXIO; } else if (ACPI_FAILURE(status)) { - dev_warn(&indio_dev->dev, "failed to execute _ONT: %d\n", - status); + dev_warn(parent, "failed to execute _ONT: %d\n", status); return status; } @@ -1457,12 +1457,12 @@ static int apply_acpi_orientation(struct iio_dev *indio_dev) } ret = 0; - dev_info(&indio_dev->dev, "computed mount matrix from ACPI\n"); + dev_info(parent, "computed mount matrix from ACPI\n"); out: kfree(buffer.pointer); if (ret) - dev_dbg(&indio_dev->dev, + dev_dbg(parent, "failed to apply ACPI orientation data: %d\n", ret); return ret; diff --git a/drivers/iio/adc/ad7380.c b/drivers/iio/adc/ad7380.c index d96bd12dfea6..cabf5511d116 100644 --- a/drivers/iio/adc/ad7380.c +++ b/drivers/iio/adc/ad7380.c @@ -1953,8 +1953,9 @@ static int ad7380_probe(struct spi_device *spi) if (st->chip_info->has_hardware_gain) { device_for_each_child_node_scoped(dev, node) { - unsigned int channel, gain; + unsigned int channel; int gain_idx; + u16 gain; ret = fwnode_property_read_u32(node, "reg", &channel); if (ret) @@ -1966,7 +1967,7 @@ static int ad7380_probe(struct spi_device *spi) "Invalid channel number %i\n", channel); - ret = fwnode_property_read_u32(node, "adi,gain-milli", + ret = fwnode_property_read_u16(node, "adi,gain-milli", &gain); if (ret && ret != -EINVAL) return dev_err_probe(dev, ret, diff --git a/drivers/iio/adc/ad7949.c b/drivers/iio/adc/ad7949.c index edd0c3a35ab7..202561cad401 100644 --- a/drivers/iio/adc/ad7949.c +++ b/drivers/iio/adc/ad7949.c @@ -308,7 +308,6 @@ static void ad7949_disable_reg(void *reg) static int ad7949_spi_probe(struct spi_device *spi) { - u32 spi_ctrl_mask = spi->controller->bits_per_word_mask; struct device *dev = &spi->dev; const struct ad7949_adc_spec *spec; struct ad7949_adc_chip *ad7949_adc; @@ -337,11 +336,11 @@ static int ad7949_spi_probe(struct spi_device *spi) ad7949_adc->resolution = spec->resolution; /* Set SPI bits per word */ - if (spi_ctrl_mask & SPI_BPW_MASK(ad7949_adc->resolution)) { + if (spi_is_bpw_supported(spi, ad7949_adc->resolution)) { spi->bits_per_word = ad7949_adc->resolution; - } else if (spi_ctrl_mask == SPI_BPW_MASK(16)) { + } else if (spi_is_bpw_supported(spi, 16)) { spi->bits_per_word = 16; - } else if (spi_ctrl_mask == SPI_BPW_MASK(8)) { + } else if (spi_is_bpw_supported(spi, 8)) { spi->bits_per_word = 8; } else { 
dev_err(dev, "unable to find common BPW with spi controller\n"); diff --git a/drivers/iio/adc/adi-axi-adc.c b/drivers/iio/adc/adi-axi-adc.c index 4116c44197b8..2dbaa0b5b3d6 100644 --- a/drivers/iio/adc/adi-axi-adc.c +++ b/drivers/iio/adc/adi-axi-adc.c @@ -445,7 +445,7 @@ static int axi_adc_raw_read(struct iio_backend *back, u32 *val) static int ad7606_bus_reg_read(struct iio_backend *back, u32 reg, u32 *val) { struct adi_axi_adc_state *st = iio_backend_get_priv(back); - int addr; + u32 addr, reg_val; guard(mutex)(&st->lock); @@ -455,7 +455,9 @@ static int ad7606_bus_reg_read(struct iio_backend *back, u32 reg, u32 *val) */ addr = FIELD_PREP(ADI_AXI_REG_ADDRESS_MASK, reg) | ADI_AXI_REG_READ_BIT; axi_adc_raw_write(back, addr); - axi_adc_raw_read(back, val); + axi_adc_raw_read(back, ®_val); + + *val = FIELD_GET(ADI_AXI_REG_VALUE_MASK, reg_val); /* Write 0x0 on the bus to get back to ADC mode */ axi_adc_raw_write(back, 0); diff --git a/drivers/iio/adc/axp20x_adc.c b/drivers/iio/adc/axp20x_adc.c index 71584ffd3632..1b49325ec1ce 100644 --- a/drivers/iio/adc/axp20x_adc.c +++ b/drivers/iio/adc/axp20x_adc.c @@ -187,6 +187,7 @@ static struct iio_map axp717_maps[] = { .consumer_channel = "batt_chrg_i", .adc_channel_label = "batt_chrg_i", }, + { } }; /* diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c index a7e9912fb44a..9dd547e62b6c 100644 --- a/drivers/iio/adc/max1363.c +++ b/drivers/iio/adc/max1363.c @@ -511,10 +511,10 @@ static const struct iio_event_spec max1363_events[] = { MAX1363_CHAN_U(1, _s1, 1, bits, ev_spec, num_ev_spec), \ MAX1363_CHAN_U(2, _s2, 2, bits, ev_spec, num_ev_spec), \ MAX1363_CHAN_U(3, _s3, 3, bits, ev_spec, num_ev_spec), \ - MAX1363_CHAN_B(0, 1, d0m1, 4, bits, ev_spec, num_ev_spec), \ - MAX1363_CHAN_B(2, 3, d2m3, 5, bits, ev_spec, num_ev_spec), \ - MAX1363_CHAN_B(1, 0, d1m0, 6, bits, ev_spec, num_ev_spec), \ - MAX1363_CHAN_B(3, 2, d3m2, 7, bits, ev_spec, num_ev_spec), \ + MAX1363_CHAN_B(0, 1, d0m1, 12, bits, ev_spec, num_ev_spec), \ + MAX1363_CHAN_B(2, 3, d2m3, 13, bits, ev_spec, num_ev_spec), \ + MAX1363_CHAN_B(1, 0, d1m0, 18, bits, ev_spec, num_ev_spec), \ + MAX1363_CHAN_B(3, 2, d3m2, 19, bits, ev_spec, num_ev_spec), \ IIO_CHAN_SOFT_TIMESTAMP(8) \ } @@ -532,23 +532,23 @@ static const struct iio_chan_spec max1363_channels[] = /* Applies to max1236, max1237 */ static const enum max1363_modes max1236_mode_list[] = { _s0, _s1, _s2, _s3, - s0to1, s0to2, s0to3, + s0to1, s0to2, s2to3, s0to3, d0m1, d2m3, d1m0, d3m2, d0m1to2m3, d1m0to3m2, - s2to3, }; /* Applies to max1238, max1239 */ static const enum max1363_modes max1238_mode_list[] = { _s0, _s1, _s2, _s3, _s4, _s5, _s6, _s7, _s8, _s9, _s10, _s11, s0to1, s0to2, s0to3, s0to4, s0to5, s0to6, + s6to7, s6to8, s6to9, s6to10, s6to11, s0to7, s0to8, s0to9, s0to10, s0to11, d0m1, d2m3, d4m5, d6m7, d8m9, d10m11, d1m0, d3m2, d5m4, d7m6, d9m8, d11m10, - d0m1to2m3, d0m1to4m5, d0m1to6m7, d0m1to8m9, d0m1to10m11, - d1m0to3m2, d1m0to5m4, d1m0to7m6, d1m0to9m8, d1m0to11m10, - s6to7, s6to8, s6to9, s6to10, s6to11, - d6m7to8m9, d6m7to10m11, d7m6to9m8, d7m6to11m10, + d0m1to2m3, d0m1to4m5, d0m1to6m7, d6m7to8m9, + d0m1to8m9, d6m7to10m11, d0m1to10m11, d1m0to3m2, + d1m0to5m4, d1m0to7m6, d7m6to9m8, d1m0to9m8, + d7m6to11m10, d1m0to11m10, }; #define MAX1363_12X_CHANS(bits) { \ @@ -584,16 +584,15 @@ static const struct iio_chan_spec max1238_channels[] = MAX1363_12X_CHANS(12); static const enum max1363_modes max11607_mode_list[] = { _s0, _s1, _s2, _s3, - s0to1, s0to2, s0to3, - s2to3, + s0to1, s0to2, s2to3, + s0to3, d0m1, d2m3, d1m0, d3m2, 
d0m1to2m3, d1m0to3m2, }; static const enum max1363_modes max11608_mode_list[] = { _s0, _s1, _s2, _s3, _s4, _s5, _s6, _s7, - s0to1, s0to2, s0to3, s0to4, s0to5, s0to6, s0to7, - s6to7, + s0to1, s0to2, s0to3, s0to4, s0to5, s0to6, s6to7, s0to7, d0m1, d2m3, d4m5, d6m7, d1m0, d3m2, d5m4, d7m6, d0m1to2m3, d0m1to4m5, d0m1to6m7, @@ -609,14 +608,14 @@ static const enum max1363_modes max11608_mode_list[] = { MAX1363_CHAN_U(5, _s5, 5, bits, NULL, 0), \ MAX1363_CHAN_U(6, _s6, 6, bits, NULL, 0), \ MAX1363_CHAN_U(7, _s7, 7, bits, NULL, 0), \ - MAX1363_CHAN_B(0, 1, d0m1, 8, bits, NULL, 0), \ - MAX1363_CHAN_B(2, 3, d2m3, 9, bits, NULL, 0), \ - MAX1363_CHAN_B(4, 5, d4m5, 10, bits, NULL, 0), \ - MAX1363_CHAN_B(6, 7, d6m7, 11, bits, NULL, 0), \ - MAX1363_CHAN_B(1, 0, d1m0, 12, bits, NULL, 0), \ - MAX1363_CHAN_B(3, 2, d3m2, 13, bits, NULL, 0), \ - MAX1363_CHAN_B(5, 4, d5m4, 14, bits, NULL, 0), \ - MAX1363_CHAN_B(7, 6, d7m6, 15, bits, NULL, 0), \ + MAX1363_CHAN_B(0, 1, d0m1, 12, bits, NULL, 0), \ + MAX1363_CHAN_B(2, 3, d2m3, 13, bits, NULL, 0), \ + MAX1363_CHAN_B(4, 5, d4m5, 14, bits, NULL, 0), \ + MAX1363_CHAN_B(6, 7, d6m7, 15, bits, NULL, 0), \ + MAX1363_CHAN_B(1, 0, d1m0, 18, bits, NULL, 0), \ + MAX1363_CHAN_B(3, 2, d3m2, 19, bits, NULL, 0), \ + MAX1363_CHAN_B(5, 4, d5m4, 20, bits, NULL, 0), \ + MAX1363_CHAN_B(7, 6, d7m6, 21, bits, NULL, 0), \ IIO_CHAN_SOFT_TIMESTAMP(16) \ } static const struct iio_chan_spec max11602_channels[] = MAX1363_8X_CHANS(8); diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c index bd3458965bff..21c04a98b3b6 100644 --- a/drivers/iio/adc/stm32-adc-core.c +++ b/drivers/iio/adc/stm32-adc-core.c @@ -430,10 +430,9 @@ static int stm32_adc_irq_probe(struct platform_device *pdev, return -ENOMEM; } - for (i = 0; i < priv->cfg->num_irqs; i++) { - irq_set_chained_handler(priv->irq[i], stm32_adc_irq_handler); - irq_set_handler_data(priv->irq[i], priv); - } + for (i = 0; i < priv->cfg->num_irqs; i++) + irq_set_chained_handler_and_data(priv->irq[i], + stm32_adc_irq_handler, priv); return 0; } diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c index 8ce1dccfea4f..dac593be5695 100644 --- a/drivers/iio/common/st_sensors/st_sensors_core.c +++ b/drivers/iio/common/st_sensors/st_sensors_core.c @@ -154,7 +154,7 @@ static int st_sensors_set_fullscale(struct iio_dev *indio_dev, unsigned int fs) return err; st_accel_set_fullscale_error: - dev_err(&indio_dev->dev, "failed to set new fullscale.\n"); + dev_err(indio_dev->dev.parent, "failed to set new fullscale.\n"); return err; } @@ -231,8 +231,7 @@ int st_sensors_power_enable(struct iio_dev *indio_dev) ARRAY_SIZE(regulator_names), regulator_names); if (err) - return dev_err_probe(&indio_dev->dev, err, - "unable to enable supplies\n"); + return dev_err_probe(parent, err, "unable to enable supplies\n"); return 0; } @@ -241,13 +240,14 @@ EXPORT_SYMBOL_NS(st_sensors_power_enable, "IIO_ST_SENSORS"); static int st_sensors_set_drdy_int_pin(struct iio_dev *indio_dev, struct st_sensors_platform_data *pdata) { + struct device *parent = indio_dev->dev.parent; struct st_sensor_data *sdata = iio_priv(indio_dev); /* Sensor does not support interrupts */ if (!sdata->sensor_settings->drdy_irq.int1.addr && !sdata->sensor_settings->drdy_irq.int2.addr) { if (pdata->drdy_int_pin) - dev_info(&indio_dev->dev, + dev_info(parent, "DRDY on pin INT%d specified, but sensor does not support interrupts\n", pdata->drdy_int_pin); return 0; @@ -256,29 +256,27 @@ static int 
st_sensors_set_drdy_int_pin(struct iio_dev *indio_dev, switch (pdata->drdy_int_pin) { case 1: if (!sdata->sensor_settings->drdy_irq.int1.mask) { - dev_err(&indio_dev->dev, - "DRDY on INT1 not available.\n"); + dev_err(parent, "DRDY on INT1 not available.\n"); return -EINVAL; } sdata->drdy_int_pin = 1; break; case 2: if (!sdata->sensor_settings->drdy_irq.int2.mask) { - dev_err(&indio_dev->dev, - "DRDY on INT2 not available.\n"); + dev_err(parent, "DRDY on INT2 not available.\n"); return -EINVAL; } sdata->drdy_int_pin = 2; break; default: - dev_err(&indio_dev->dev, "DRDY on pdata not valid.\n"); + dev_err(parent, "DRDY on pdata not valid.\n"); return -EINVAL; } if (pdata->open_drain) { if (!sdata->sensor_settings->drdy_irq.int1.addr_od && !sdata->sensor_settings->drdy_irq.int2.addr_od) - dev_err(&indio_dev->dev, + dev_err(parent, "open drain requested but unsupported.\n"); else sdata->int_pin_open_drain = true; @@ -336,6 +334,7 @@ EXPORT_SYMBOL_NS(st_sensors_dev_name_probe, "IIO_ST_SENSORS"); int st_sensors_init_sensor(struct iio_dev *indio_dev, struct st_sensors_platform_data *pdata) { + struct device *parent = indio_dev->dev.parent; struct st_sensor_data *sdata = iio_priv(indio_dev); struct st_sensors_platform_data *of_pdata; int err = 0; @@ -343,7 +342,7 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev, mutex_init(&sdata->odr_lock); /* If OF/DT pdata exists, it will take precedence of anything else */ - of_pdata = st_sensors_dev_probe(indio_dev->dev.parent, pdata); + of_pdata = st_sensors_dev_probe(parent, pdata); if (IS_ERR(of_pdata)) return PTR_ERR(of_pdata); if (of_pdata) @@ -370,7 +369,7 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev, if (err < 0) return err; } else - dev_info(&indio_dev->dev, "Full-scale not possible\n"); + dev_info(parent, "Full-scale not possible\n"); err = st_sensors_set_odr(indio_dev, sdata->odr); if (err < 0) @@ -405,7 +404,7 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev, mask = sdata->sensor_settings->drdy_irq.int2.mask_od; } - dev_info(&indio_dev->dev, + dev_info(parent, "set interrupt line to open drain mode on pin %d\n", sdata->drdy_int_pin); err = st_sensors_write_data_with_mask(indio_dev, addr, @@ -593,21 +592,20 @@ EXPORT_SYMBOL_NS(st_sensors_get_settings_index, "IIO_ST_SENSORS"); int st_sensors_verify_id(struct iio_dev *indio_dev) { struct st_sensor_data *sdata = iio_priv(indio_dev); + struct device *parent = indio_dev->dev.parent; int wai, err; if (sdata->sensor_settings->wai_addr) { err = regmap_read(sdata->regmap, sdata->sensor_settings->wai_addr, &wai); if (err < 0) { - dev_err(&indio_dev->dev, - "failed to read Who-Am-I register.\n"); - return err; + return dev_err_probe(parent, err, + "failed to read Who-Am-I register.\n"); } if (sdata->sensor_settings->wai != wai) { - dev_warn(&indio_dev->dev, - "%s: WhoAmI mismatch (0x%x).\n", - indio_dev->name, wai); + dev_warn(parent, "%s: WhoAmI mismatch (0x%x).\n", + indio_dev->name, wai); } } diff --git a/drivers/iio/common/st_sensors/st_sensors_trigger.c b/drivers/iio/common/st_sensors/st_sensors_trigger.c index 9d4bf822a15d..8a8ab688d798 100644 --- a/drivers/iio/common/st_sensors/st_sensors_trigger.c +++ b/drivers/iio/common/st_sensors/st_sensors_trigger.c @@ -127,7 +127,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev, sdata->trig = devm_iio_trigger_alloc(parent, "%s-trigger", indio_dev->name); if (sdata->trig == NULL) { - dev_err(&indio_dev->dev, "failed to allocate iio trigger.\n"); + dev_err(parent, "failed to allocate iio trigger.\n"); return -ENOMEM; } @@ 
-143,7 +143,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev, case IRQF_TRIGGER_FALLING: case IRQF_TRIGGER_LOW: if (!sdata->sensor_settings->drdy_irq.addr_ihl) { - dev_err(&indio_dev->dev, + dev_err(parent, "falling/low specified for IRQ but hardware supports only rising/high: will request rising/high\n"); if (irq_trig == IRQF_TRIGGER_FALLING) irq_trig = IRQF_TRIGGER_RISING; @@ -156,21 +156,19 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev, sdata->sensor_settings->drdy_irq.mask_ihl, 1); if (err < 0) return err; - dev_info(&indio_dev->dev, + dev_info(parent, "interrupts on the falling edge or active low level\n"); } break; case IRQF_TRIGGER_RISING: - dev_info(&indio_dev->dev, - "interrupts on the rising edge\n"); + dev_info(parent, "interrupts on the rising edge\n"); break; case IRQF_TRIGGER_HIGH: - dev_info(&indio_dev->dev, - "interrupts active high level\n"); + dev_info(parent, "interrupts active high level\n"); break; default: /* This is the most preferred mode, if possible */ - dev_err(&indio_dev->dev, + dev_err(parent, "unsupported IRQ trigger specified (%lx), enforce rising edge\n", irq_trig); irq_trig = IRQF_TRIGGER_RISING; } @@ -179,7 +177,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev, if (irq_trig == IRQF_TRIGGER_FALLING || irq_trig == IRQF_TRIGGER_RISING) { if (!sdata->sensor_settings->drdy_irq.stat_drdy.addr) { - dev_err(&indio_dev->dev, + dev_err(parent, "edge IRQ not supported w/o stat register.\n"); return -EOPNOTSUPP; } @@ -214,13 +212,13 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev, sdata->trig->name, sdata->trig); if (err) { - dev_err(&indio_dev->dev, "failed to request trigger IRQ.\n"); + dev_err(parent, "failed to request trigger IRQ.\n"); return err; } err = devm_iio_trigger_register(parent, sdata->trig); if (err < 0) { - dev_err(&indio_dev->dev, "failed to register iio trigger.\n"); + dev_err(parent, "failed to register iio trigger.\n"); return err; } indio_dev->trig = iio_trigger_get(sdata->trig); diff --git a/drivers/iio/dac/ad3530r.c b/drivers/iio/dac/ad3530r.c index f9752a571aa5..6134613777b8 100644 --- a/drivers/iio/dac/ad3530r.c +++ b/drivers/iio/dac/ad3530r.c @@ -166,7 +166,9 @@ static ssize_t ad3530r_set_dac_powerdown(struct iio_dev *indio_dev, AD3530R_OUTPUT_OPERATING_MODE_0 : AD3530R_OUTPUT_OPERATING_MODE_1; pdmode = powerdown ? st->chan[chan->channel].powerdown_mode : 0; - mask = AD3530R_OP_MODE_CHAN_MSK(chan->channel); + mask = chan->channel < AD3531R_MAX_CHANNELS ? 
+ AD3530R_OP_MODE_CHAN_MSK(chan->channel) : + AD3530R_OP_MODE_CHAN_MSK(chan->channel - 4); val = field_prep(mask, pdmode); ret = regmap_update_bits(st->regmap, reg, mask, val); diff --git a/drivers/iio/industrialio-backend.c b/drivers/iio/industrialio-backend.c index c1eb9ef9db08..266e1b29bf91 100644 --- a/drivers/iio/industrialio-backend.c +++ b/drivers/iio/industrialio-backend.c @@ -155,11 +155,14 @@ static ssize_t iio_backend_debugfs_write_reg(struct file *file, ssize_t rc; int ret; + if (count >= sizeof(buf)) + return -ENOSPC; + rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, userbuf, count); if (rc < 0) return rc; - buf[count] = '\0'; + buf[rc] = '\0'; ret = sscanf(buf, "%i %i", &back->cached_reg_addr, &val); diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index 178e99b111de..5ffda104d4b2 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c @@ -411,12 +411,15 @@ static ssize_t iio_debugfs_write_reg(struct file *file, char buf[80]; int ret; + if (count >= sizeof(buf)) + return -EINVAL; + ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, userbuf, count); if (ret < 0) return ret; - buf[count] = '\0'; + buf[ret] = '\0'; ret = sscanf(buf, "%i %i", ®, &val); diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index 5d9b7007a730..1d8c579b5433 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -172,12 +172,12 @@ static const struct xpad_device { { 0x046d, 0xca88, "Logitech Compact Controller for Xbox", 0, XTYPE_XBOX }, { 0x046d, 0xca8a, "Logitech Precision Vibration Feedback Wheel", 0, XTYPE_XBOX }, { 0x046d, 0xcaa3, "Logitech DriveFx Racing Wheel", 0, XTYPE_XBOX360 }, + { 0x0502, 0x1305, "Acer NGR200", 0, XTYPE_XBOX360 }, { 0x056e, 0x2004, "Elecom JC-U3613M", 0, XTYPE_XBOX360 }, { 0x05fd, 0x1007, "Mad Catz Controller (unverified)", 0, XTYPE_XBOX }, { 0x05fd, 0x107a, "InterAct 'PowerPad Pro' X-Box pad (Germany)", 0, XTYPE_XBOX }, { 0x05fe, 0x3030, "Chic Controller", 0, XTYPE_XBOX }, { 0x05fe, 0x3031, "Chic Controller", 0, XTYPE_XBOX }, - { 0x0502, 0x1305, "Acer NGR200", 0, XTYPE_XBOX }, { 0x062a, 0x0020, "Logic3 Xbox GamePad", 0, XTYPE_XBOX }, { 0x062a, 0x0033, "Competition Pro Steering Wheel", 0, XTYPE_XBOX }, { 0x06a3, 0x0200, "Saitek Racing Wheel", 0, XTYPE_XBOX }, diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c index 1a41e59c77f8..3ebf37ddfc18 100644 --- a/drivers/interconnect/core.c +++ b/drivers/interconnect/core.c @@ -20,7 +20,7 @@ #include "internal.h" -#define ICC_DYN_ID_START 10000 +#define ICC_DYN_ID_START 100000 #define CREATE_TRACE_POINTS #include "trace.h" @@ -819,6 +819,9 @@ static struct icc_node *icc_node_create_nolock(int id) { struct icc_node *node; + if (id >= ICC_DYN_ID_START) + return ERR_PTR(-EINVAL); + /* check if node already exists */ node = node_find(id); if (node) @@ -906,10 +909,35 @@ void icc_node_destroy(int id) return; kfree(node->links); + if (node->id >= ICC_DYN_ID_START) + kfree(node->name); kfree(node); } EXPORT_SYMBOL_GPL(icc_node_destroy); +/** + * icc_node_set_name() - set node name + * @node: node + * @provider: node provider + * @name: node name + * + * Return: 0 on success, or -ENOMEM on allocation failure + */ +int icc_node_set_name(struct icc_node *node, const struct icc_provider *provider, const char *name) +{ + if (node->id >= ICC_DYN_ID_START) { + node->name = kasprintf(GFP_KERNEL, "%s@%s", name, + dev_name(provider->dev)); + if (!node->name) + return -ENOMEM; + } else { + node->name = name; + } 
+ + return 0; +} +EXPORT_SYMBOL_GPL(icc_node_set_name); + /** * icc_link_nodes() - create link between two nodes * @src_node: source node @@ -1038,10 +1066,6 @@ void icc_node_add(struct icc_node *node, struct icc_provider *provider) node->avg_bw = node->init_avg; node->peak_bw = node->init_peak; - if (node->id >= ICC_DYN_ID_START) - node->name = devm_kasprintf(provider->dev, GFP_KERNEL, "%s@%s", - node->name, dev_name(provider->dev)); - if (node->avg_bw || node->peak_bw) { if (provider->pre_aggregate) provider->pre_aggregate(node); diff --git a/drivers/interconnect/icc-clk.c b/drivers/interconnect/icc-clk.c index 88f311c11020..93c030608d3e 100644 --- a/drivers/interconnect/icc-clk.c +++ b/drivers/interconnect/icc-clk.c @@ -117,6 +117,7 @@ struct icc_provider *icc_clk_register(struct device *dev, node->name = devm_kasprintf(dev, GFP_KERNEL, "%s_master", data[i].name); if (!node->name) { + icc_node_destroy(node->id); ret = -ENOMEM; goto err; } @@ -135,6 +136,7 @@ struct icc_provider *icc_clk_register(struct device *dev, node->name = devm_kasprintf(dev, GFP_KERNEL, "%s_slave", data[i].name); if (!node->name) { + icc_node_destroy(node->id); ret = -ENOMEM; goto err; } diff --git a/drivers/interconnect/qcom/icc-rpmh.c b/drivers/interconnect/qcom/icc-rpmh.c index 41bfc6e7ee1d..001404e91041 100644 --- a/drivers/interconnect/qcom/icc-rpmh.c +++ b/drivers/interconnect/qcom/icc-rpmh.c @@ -293,7 +293,12 @@ int qcom_icc_rpmh_probe(struct platform_device *pdev) goto err_remove_nodes; } - node->name = qn->name; + ret = icc_node_set_name(node, provider, qn->name); + if (ret) { + icc_node_destroy(node->id); + goto err_remove_nodes; + } + node->data = qn; icc_node_add(node, provider); diff --git a/drivers/interconnect/qcom/osm-l3.c b/drivers/interconnect/qcom/osm-l3.c index baecbf2533f7..b33f00da1880 100644 --- a/drivers/interconnect/qcom/osm-l3.c +++ b/drivers/interconnect/qcom/osm-l3.c @@ -236,7 +236,12 @@ static int qcom_osm_l3_probe(struct platform_device *pdev) goto err; } - node->name = qnodes[i]->name; + ret = icc_node_set_name(node, provider, qnodes[i]->name); + if (ret) { + icc_node_destroy(node->id); + goto err; + } + /* Cast away const and add it back in qcom_osm_l3_set() */ node->data = (void *)qnodes[i]; icc_node_add(node, provider); diff --git a/drivers/interconnect/qcom/sc7280.c b/drivers/interconnect/qcom/sc7280.c index 346f18d70e9e..905403a3a930 100644 --- a/drivers/interconnect/qcom/sc7280.c +++ b/drivers/interconnect/qcom/sc7280.c @@ -238,6 +238,7 @@ static struct qcom_icc_node xm_pcie3_1 = { .id = SC7280_MASTER_PCIE_1, .channels = 1, .buswidth = 8, + .num_links = 1, .links = { SC7280_SLAVE_ANOC_PCIE_GEM_NOC }, }; diff --git a/drivers/interconnect/samsung/exynos.c b/drivers/interconnect/samsung/exynos.c index 9e041365d909..8e8f56186a36 100644 --- a/drivers/interconnect/samsung/exynos.c +++ b/drivers/interconnect/samsung/exynos.c @@ -134,6 +134,11 @@ static int exynos_generic_icc_probe(struct platform_device *pdev) priv->node = icc_node; icc_node->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%pOFn", bus_dev->of_node); + if (!icc_node->name) { + icc_node_destroy(pdev->id); + return -ENOMEM; + } + if (of_property_read_u32(bus_dev->of_node, "samsung,data-clock-ratio", &priv->bus_clk_ratio)) priv->bus_clk_ratio = EXYNOS_ICC_DEFAULT_BUS_CLK_RATIO; diff --git a/drivers/iommu/hyperv-iommu.c b/drivers/iommu/hyperv-iommu.c index 761ab647f372..0961ac805944 100644 --- a/drivers/iommu/hyperv-iommu.c +++ b/drivers/iommu/hyperv-iommu.c @@ -193,15 +193,13 @@ struct hyperv_root_ir_data { static void 
hyperv_root_ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg) { - u64 status; - u32 vector; - struct irq_cfg *cfg; - int ioapic_id; - const struct cpumask *affinity; - int cpu; - struct hv_interrupt_entry entry; struct hyperv_root_ir_data *data = irq_data->chip_data; + struct hv_interrupt_entry entry; + const struct cpumask *affinity; struct IO_APIC_route_entry e; + struct irq_cfg *cfg; + int cpu, ioapic_id; + u32 vector; cfg = irqd_cfg(irq_data); affinity = irq_data_get_effective_affinity_mask(irq_data); @@ -214,23 +212,16 @@ hyperv_root_ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg) && data->entry.ioapic_rte.as_uint64) { entry = data->entry; - status = hv_unmap_ioapic_interrupt(ioapic_id, &entry); - - if (status != HV_STATUS_SUCCESS) - hv_status_debug(status, "failed to unmap\n"); + (void)hv_unmap_ioapic_interrupt(ioapic_id, &entry); data->entry.ioapic_rte.as_uint64 = 0; data->entry.source = 0; /* Invalid source */ } - status = hv_map_ioapic_interrupt(ioapic_id, data->is_level, cpu, - vector, &entry); - - if (status != HV_STATUS_SUCCESS) { - hv_status_err(status, "map failed\n"); + if (hv_map_ioapic_interrupt(ioapic_id, data->is_level, cpu, + vector, &entry)) return; - } data->entry = entry; @@ -322,10 +313,10 @@ static void hyperv_root_irq_remapping_free(struct irq_domain *domain, data = irq_data->chip_data; e = &data->entry; - if (e->source == HV_DEVICE_TYPE_IOAPIC - && e->ioapic_rte.as_uint64) - hv_unmap_ioapic_interrupt(data->ioapic_id, - &data->entry); + if (e->source == HV_DEVICE_TYPE_IOAPIC && + e->ioapic_rte.as_uint64) + (void)hv_unmap_ioapic_interrupt(data->ioapic_id, + &data->entry); kfree(data); } diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c index 043b9ec756ff..7f3f47db4c98 100644 --- a/drivers/memstick/core/memstick.c +++ b/drivers/memstick/core/memstick.c @@ -324,7 +324,7 @@ EXPORT_SYMBOL(memstick_init_req); static int h_memstick_read_dev_id(struct memstick_dev *card, struct memstick_request **mrq) { - struct ms_id_register id_reg; + struct ms_id_register id_reg = {}; if (!(*mrq)) { memstick_init_req(&card->current_mrq, MS_TPC_READ_REG, &id_reg, diff --git a/drivers/misc/amd-sbi/rmi-core.c b/drivers/misc/amd-sbi/rmi-core.c index b653a21a909e..3dec2fc00124 100644 --- a/drivers/misc/amd-sbi/rmi-core.c +++ b/drivers/misc/amd-sbi/rmi-core.c @@ -42,7 +42,6 @@ #define RD_MCA_CMD 0x86 /* CPUID MCAMSR mask & index */ -#define CPUID_MCA_THRD_MASK GENMASK(15, 0) #define CPUID_MCA_THRD_INDEX 32 #define CPUID_MCA_FUNC_MASK GENMASK(31, 0) #define CPUID_EXT_FUNC_INDEX 56 @@ -129,7 +128,7 @@ static int rmi_cpuid_read(struct sbrmi_data *data, goto exit_unlock; } - thread = msg->cpu_in_out << CPUID_MCA_THRD_INDEX & CPUID_MCA_THRD_MASK; + thread = msg->cpu_in_out >> CPUID_MCA_THRD_INDEX; /* Thread > 127, Thread128 CS register, 1'b1 needs to be set to 1 */ if (thread > 127) { @@ -210,7 +209,7 @@ static int rmi_mca_msr_read(struct sbrmi_data *data, goto exit_unlock; } - thread = msg->mcamsr_in_out << CPUID_MCA_THRD_INDEX & CPUID_MCA_THRD_MASK; + thread = msg->mcamsr_in_out >> CPUID_MCA_THRD_INDEX; /* Thread > 127, Thread128 CS register, 1'b1 needs to be set to 1 */ if (thread > 127) { @@ -321,6 +320,10 @@ int rmi_mailbox_xfer(struct sbrmi_data *data, ret = regmap_read(data->regmap, SBRMI_OUTBNDMSG7, &ec); if (ret || ec) goto exit_clear_alert; + + /* Clear the input value before updating the output data */ + msg->mb_in_out = 0; + /* * For a read operation, the initiator (BMC) reads the firmware * response Command Data 
Out[31:0] from SBRMI::OutBndMsg_inst[4:1] @@ -373,7 +376,8 @@ static int apml_rmi_reg_xfer(struct sbrmi_data *data, mutex_unlock(&data->lock); if (msg.rflag && !ret) - return copy_to_user(arg, &msg, sizeof(struct apml_reg_xfer_msg)); + if (copy_to_user(arg, &msg, sizeof(struct apml_reg_xfer_msg))) + return -EFAULT; return ret; } @@ -391,7 +395,9 @@ static int apml_mailbox_xfer(struct sbrmi_data *data, struct apml_mbox_msg __use if (ret && ret != -EPROTOTYPE) return ret; - return copy_to_user(arg, &msg, sizeof(struct apml_mbox_msg)); + if (copy_to_user(arg, &msg, sizeof(struct apml_mbox_msg))) + return -EFAULT; + return ret; } static int apml_cpuid_xfer(struct sbrmi_data *data, struct apml_cpuid_msg __user *arg) @@ -408,7 +414,9 @@ static int apml_cpuid_xfer(struct sbrmi_data *data, struct apml_cpuid_msg __user if (ret && ret != -EPROTOTYPE) return ret; - return copy_to_user(arg, &msg, sizeof(struct apml_cpuid_msg)); + if (copy_to_user(arg, &msg, sizeof(struct apml_cpuid_msg))) + return -EFAULT; + return ret; } static int apml_mcamsr_xfer(struct sbrmi_data *data, struct apml_mcamsr_msg __user *arg) @@ -425,7 +433,9 @@ static int apml_mcamsr_xfer(struct sbrmi_data *data, struct apml_mcamsr_msg __us if (ret && ret != -EPROTOTYPE) return ret; - return copy_to_user(arg, &msg, sizeof(struct apml_mcamsr_msg)); + if (copy_to_user(arg, &msg, sizeof(struct apml_mcamsr_msg))) + return -EFAULT; + return ret; } static long sbrmi_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c index def054ddd256..4fced9b36c80 100644 --- a/drivers/mmc/host/bcm2835.c +++ b/drivers/mmc/host/bcm2835.c @@ -503,7 +503,8 @@ void bcm2835_prepare_dma(struct bcm2835_host *host, struct mmc_data *data) DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!desc) { - dma_unmap_sg(dma_chan->device->dev, data->sg, sg_len, dir_data); + dma_unmap_sg(dma_chan->device->dev, data->sg, data->sg_len, + dir_data); return; } diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index 13a84b9309e0..e3877a1c72a9 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -913,7 +913,8 @@ static bool glk_broken_cqhci(struct sdhci_pci_slot *slot) { return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC && (dmi_match(DMI_BIOS_VENDOR, "LENOVO") || - dmi_match(DMI_SYS_VENDOR, "IRBIS")); + dmi_match(DMI_SYS_VENDOR, "IRBIS") || + dmi_match(DMI_SYS_VENDOR, "Positivo Tecnologia SA")); } static bool jsl_broken_hs400es(struct sdhci_pci_slot *slot) diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c index 73385ff4c0f3..9e94998e8df7 100644 --- a/drivers/mmc/host/sdhci_am654.c +++ b/drivers/mmc/host/sdhci_am654.c @@ -613,7 +613,8 @@ static const struct sdhci_ops sdhci_am654_ops = { static const struct sdhci_pltfm_data sdhci_am654_pdata = { .ops = &sdhci_am654_ops, .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, - .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | + SDHCI_QUIRK2_DISABLE_HW_TIMEOUT, }; static const struct sdhci_am654_driver_data sdhci_am654_sr1_drvdata = { @@ -643,7 +644,8 @@ static const struct sdhci_ops sdhci_j721e_8bit_ops = { static const struct sdhci_pltfm_data sdhci_j721e_8bit_pdata = { .ops = &sdhci_j721e_8bit_ops, .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, - .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | + SDHCI_QUIRK2_DISABLE_HW_TIMEOUT, }; static const struct sdhci_am654_driver_data 
sdhci_j721e_8bit_drvdata = { @@ -667,7 +669,8 @@ static const struct sdhci_ops sdhci_j721e_4bit_ops = { static const struct sdhci_pltfm_data sdhci_j721e_4bit_pdata = { .ops = &sdhci_j721e_4bit_ops, .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, - .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | + SDHCI_QUIRK2_DISABLE_HW_TIMEOUT, }; static const struct sdhci_am654_driver_data sdhci_j721e_4bit_drvdata = { diff --git a/drivers/mux/Kconfig b/drivers/mux/Kconfig index 80f015cf6e54..c68132e38138 100644 --- a/drivers/mux/Kconfig +++ b/drivers/mux/Kconfig @@ -48,6 +48,7 @@ config MUX_GPIO config MUX_MMIO tristate "MMIO/Regmap register bitfield-controlled Multiplexer" depends on OF + select REGMAP_MMIO help MMIO/Regmap register bitfield-controlled Multiplexer controller. diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c index ea8c807af4d8..3913971125de 100644 --- a/drivers/net/can/dev/dev.c +++ b/drivers/net/can/dev/dev.c @@ -145,13 +145,16 @@ void can_change_state(struct net_device *dev, struct can_frame *cf, EXPORT_SYMBOL_GPL(can_change_state); /* CAN device restart for bus-off recovery */ -static void can_restart(struct net_device *dev) +static int can_restart(struct net_device *dev) { struct can_priv *priv = netdev_priv(dev); struct sk_buff *skb; struct can_frame *cf; int err; + if (!priv->do_set_mode) + return -EOPNOTSUPP; + if (netif_carrier_ok(dev)) netdev_err(dev, "Attempt to restart for bus-off recovery, but carrier is OK?\n"); @@ -173,10 +176,14 @@ static void can_restart(struct net_device *dev) if (err) { netdev_err(dev, "Restart failed, error %pe\n", ERR_PTR(err)); netif_carrier_off(dev); + + return err; } else { netdev_dbg(dev, "Restarted\n"); priv->can_stats.restarts++; } + + return 0; } static void can_restart_work(struct work_struct *work) @@ -201,9 +208,8 @@ int can_restart_now(struct net_device *dev) return -EBUSY; cancel_delayed_work_sync(&priv->restart_work); - can_restart(dev); - return 0; + return can_restart(dev); } /* CAN bus-off diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c index 13826e8a707b..d9f6ab3efb97 100644 --- a/drivers/net/can/dev/netlink.c +++ b/drivers/net/can/dev/netlink.c @@ -285,6 +285,12 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[], } if (data[IFLA_CAN_RESTART_MS]) { + if (!priv->do_set_mode) { + NL_SET_ERR_MSG(extack, + "Device doesn't support restart from Bus Off"); + return -EOPNOTSUPP; + } + /* Do not allow changing restart delay while running */ if (dev->flags & IFF_UP) return -EBUSY; @@ -292,6 +298,12 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[], } if (data[IFLA_CAN_RESTART]) { + if (!priv->do_set_mode) { + NL_SET_ERR_MSG(extack, + "Device doesn't support restart from Bus Off"); + return -EOPNOTSUPP; + } + /* Do not allow a restart while not running */ if (!(dev->flags & IFF_UP)) return -EINVAL; diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c index a6ea477bce3c..b9973956c480 100644 --- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c +++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c @@ -816,6 +816,9 @@ static void bcmasp_init_tx(struct bcmasp_intf *intf) /* Tx SPB */ tx_spb_ctrl_wl(intf, ((intf->channel + 8) << TX_SPB_CTRL_XF_BID_SHIFT), TX_SPB_CTRL_XF_CTRL2); + + if (intf->parent->tx_chan_offset) + tx_pause_ctrl_wl(intf, (1 << (intf->channel + 8)), TX_PAUSE_MAP_VECTOR); tx_spb_top_wl(intf, 0x1e, TX_SPB_TOP_BLKOUT); tx_spb_dma_wq(intf, 
intf->tx_spb_dma_addr, TX_SPB_DMA_READ); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index b82f121cadad..0f4efd505332 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -4666,12 +4666,19 @@ static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv) return PTR_ERR(dpmac_dev); } - if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type) + if (IS_ERR(dpmac_dev)) return 0; + if (dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type) { + err = 0; + goto out_put_device; + } + mac = kzalloc(sizeof(struct dpaa2_mac), GFP_KERNEL); - if (!mac) - return -ENOMEM; + if (!mac) { + err = -ENOMEM; + goto out_put_device; + } mac->mc_dev = dpmac_dev; mac->mc_io = priv->mc_io; @@ -4705,6 +4712,8 @@ err_close_mac: dpaa2_mac_close(mac); err_free_mac: kfree(mac); +out_put_device: + put_device(&dpmac_dev->dev); return err; } diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c index 147a93bf9fa9..4643a3380618 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c @@ -1448,12 +1448,19 @@ static int dpaa2_switch_port_connect_mac(struct ethsw_port_priv *port_priv) if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER) return PTR_ERR(dpmac_dev); - if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type) + if (IS_ERR(dpmac_dev)) return 0; + if (dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type) { + err = 0; + goto out_put_device; + } + mac = kzalloc(sizeof(*mac), GFP_KERNEL); - if (!mac) - return -ENOMEM; + if (!mac) { + err = -ENOMEM; + goto out_put_device; + } mac->mc_dev = dpmac_dev; mac->mc_io = port_priv->ethsw_data->mc_io; @@ -1483,6 +1490,8 @@ err_close_mac: dpaa2_mac_close(mac); err_free_mac: kfree(mac); +out_put_device: + put_device(&dpmac_dev->dev); return err; } diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c index 6ea306947417..1f411d7c4373 100644 --- a/drivers/net/ethernet/google/gve/gve_main.c +++ b/drivers/net/ethernet/google/gve/gve_main.c @@ -1988,49 +1988,56 @@ static void gve_turnup_and_check_status(struct gve_priv *priv) gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status); } -static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue) +static struct gve_notify_block *gve_get_tx_notify_block(struct gve_priv *priv, + unsigned int txqueue) { - struct gve_notify_block *block; - struct gve_tx_ring *tx = NULL; - struct gve_priv *priv; - u32 last_nic_done; - u32 current_time; u32 ntfy_idx; - netdev_info(dev, "Timeout on tx queue, %d", txqueue); - priv = netdev_priv(dev); if (txqueue > priv->tx_cfg.num_queues) - goto reset; + return NULL; ntfy_idx = gve_tx_idx_to_ntfy(priv, txqueue); if (ntfy_idx >= priv->num_ntfy_blks) - goto reset; + return NULL; - block = &priv->ntfy_blocks[ntfy_idx]; - tx = block->tx; + return &priv->ntfy_blocks[ntfy_idx]; +} + +static bool gve_tx_timeout_try_q_kick(struct gve_priv *priv, + unsigned int txqueue) +{ + struct gve_notify_block *block; + u32 current_time; + + block = gve_get_tx_notify_block(priv, txqueue); + + if (!block) + return false; current_time = jiffies_to_msecs(jiffies); - if (tx->last_kick_msec + MIN_TX_TIMEOUT_GAP > current_time) - goto reset; + if (block->tx->last_kick_msec + MIN_TX_TIMEOUT_GAP > current_time) + return false; - /* Check to see if there are missed completions, which will allow us to - 
* kick the queue. - */ - last_nic_done = gve_tx_load_event_counter(priv, tx); - if (last_nic_done - tx->done) { - netdev_info(dev, "Kicking queue %d", txqueue); - iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block)); - napi_schedule(&block->napi); - tx->last_kick_msec = current_time; - goto out; - } // Else reset. + netdev_info(priv->dev, "Kicking queue %d", txqueue); + napi_schedule(&block->napi); + block->tx->last_kick_msec = current_time; + return true; +} -reset: - gve_schedule_reset(priv); +static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue) +{ + struct gve_notify_block *block; + struct gve_priv *priv; -out: - if (tx) - tx->queue_timeout++; + netdev_info(dev, "Timeout on tx queue, %d", txqueue); + priv = netdev_priv(dev); + + if (!gve_tx_timeout_try_q_kick(priv, txqueue)) + gve_schedule_reset(priv); + + block = gve_get_tx_notify_block(priv, txqueue); + if (block) + block->tx->queue_timeout++; priv->tx_timeo_cnt++; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 52f42fe1d56f..dd3ddb223a92 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -1039,6 +1040,8 @@ static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring, static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring) { u32 alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size; + struct net_device *netdev = ring_to_netdev(ring); + struct hns3_nic_priv *priv = netdev_priv(netdev); struct hns3_tx_spare *tx_spare; struct page *page; dma_addr_t dma; @@ -1080,6 +1083,7 @@ static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring) tx_spare->buf = page_address(page); tx_spare->len = PAGE_SIZE << order; ring->tx_spare = tx_spare; + ring->tx_copybreak = priv->tx_copybreak; return; dma_mapping_error: @@ -4874,6 +4878,30 @@ static void hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv) devm_kfree(&pdev->dev, priv->tqp_vector); } +static void hns3_update_tx_spare_buf_config(struct hns3_nic_priv *priv) +{ +#define HNS3_MIN_SPARE_BUF_SIZE (2 * 1024 * 1024) +#define HNS3_MAX_PACKET_SIZE (64 * 1024) + + struct iommu_domain *domain = iommu_get_domain_for_dev(priv->dev); + struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle); + struct hnae3_handle *handle = priv->ae_handle; + + if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) + return; + + if (!(domain && iommu_is_dma_domain(domain))) + return; + + priv->min_tx_copybreak = HNS3_MAX_PACKET_SIZE; + priv->min_tx_spare_buf_size = HNS3_MIN_SPARE_BUF_SIZE; + + if (priv->tx_copybreak < priv->min_tx_copybreak) + priv->tx_copybreak = priv->min_tx_copybreak; + if (handle->kinfo.tx_spare_buf_size < priv->min_tx_spare_buf_size) + handle->kinfo.tx_spare_buf_size = priv->min_tx_spare_buf_size; +} + static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv, unsigned int ring_type) { @@ -5107,6 +5135,7 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv) int i, j; int ret; + hns3_update_tx_spare_buf_config(priv); for (i = 0; i < ring_num; i++) { ret = hns3_alloc_ring_memory(&priv->ring[i]); if (ret) { @@ -5313,6 +5342,8 @@ static int hns3_client_init(struct hnae3_handle *handle) priv->ae_handle = handle; priv->tx_timeout_count = 0; priv->max_non_tso_bd_num = ae_dev->dev_specs.max_non_tso_bd_num; + priv->min_tx_copybreak = 0; + priv->min_tx_spare_buf_size = 0; set_bit(HNS3_NIC_STATE_DOWN, &priv->state); 
handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index d3bad5d1b888..933e3527ed82 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -596,6 +596,8 @@ struct hns3_nic_priv { struct hns3_enet_coalesce rx_coal; u32 tx_copybreak; u32 rx_copybreak; + u32 min_tx_copybreak; + u32 min_tx_spare_buf_size; }; union l3_hdr_info { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index d3c71bc1855d..be917e4a8372 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -9576,33 +9576,36 @@ static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport) return false; } -int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en) +static int __hclge_enable_vport_vlan_filter(struct hclge_vport *vport, + bool request_en) { - struct hclge_dev *hdev = vport->back; bool need_en; int ret; - mutex_lock(&hdev->vport_lock); - - vport->req_vlan_fltr_en = request_en; - need_en = hclge_need_enable_vport_vlan_filter(vport); - if (need_en == vport->cur_vlan_fltr_en) { - mutex_unlock(&hdev->vport_lock); + if (need_en == vport->cur_vlan_fltr_en) return 0; - } ret = hclge_set_vport_vlan_filter(vport, need_en); - if (ret) { - mutex_unlock(&hdev->vport_lock); + if (ret) return ret; - } vport->cur_vlan_fltr_en = need_en; + return 0; +} + +int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en) +{ + struct hclge_dev *hdev = vport->back; + int ret; + + mutex_lock(&hdev->vport_lock); + vport->req_vlan_fltr_en = request_en; + ret = __hclge_enable_vport_vlan_filter(vport, request_en); mutex_unlock(&hdev->vport_lock); - return 0; + return ret; } static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable) @@ -10623,16 +10626,19 @@ static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev) &vport->state)) continue; - ret = hclge_enable_vport_vlan_filter(vport, - vport->req_vlan_fltr_en); + mutex_lock(&hdev->vport_lock); + ret = __hclge_enable_vport_vlan_filter(vport, + vport->req_vlan_fltr_en); if (ret) { dev_err(&hdev->pdev->dev, "failed to sync vlan filter state for vport%u, ret = %d\n", vport->vport_id, ret); set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, &vport->state); + mutex_unlock(&hdev->vport_lock); return; } + mutex_unlock(&hdev->vport_lock); } } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c index ec581d4b696f..4bd52eab3914 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c @@ -497,14 +497,14 @@ int hclge_ptp_init(struct hclge_dev *hdev) if (ret) { dev_err(&hdev->pdev->dev, "failed to init freq, ret = %d\n", ret); - goto out; + goto out_clear_int; } ret = hclge_ptp_set_ts_mode(hdev, &hdev->ptp->ts_cfg); if (ret) { dev_err(&hdev->pdev->dev, "failed to init ts mode, ret = %d\n", ret); - goto out; + goto out_clear_int; } ktime_get_real_ts64(&ts); @@ -512,7 +512,7 @@ int hclge_ptp_init(struct hclge_dev *hdev) if (ret) { dev_err(&hdev->pdev->dev, "failed to init ts time, ret = %d\n", ret); - goto out; + goto out_clear_int; } set_bit(HCLGE_STATE_PTP_EN, &hdev->state); @@ -520,6 +520,9 @@ int hclge_ptp_init(struct hclge_dev *hdev) return 0; +out_clear_int: + 
clear_bit(HCLGE_PTP_FLAG_EN, &hdev->ptp->flags); + hclge_ptp_int_en(hdev, false); out: hclge_ptp_destroy_clock(hdev); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 33136a1e02cf..8fcf220a120d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -3094,11 +3094,7 @@ static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev) { - struct hnae3_handle *nic = &hdev->nic; - struct hnae3_knic_private_info *kinfo = &nic->kinfo; - - return min_t(u32, hdev->rss_size_max, - hdev->num_tqps / kinfo->tc_info.num_tc); + return min(hdev->rss_size_max, hdev->num_tqps); } /** diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h index 8294a7c4f122..ba331899d186 100644 --- a/drivers/net/ethernet/intel/e1000e/defines.h +++ b/drivers/net/ethernet/intel/e1000e/defines.h @@ -638,6 +638,9 @@ /* For checksumming, the sum of all words in the NVM should equal 0xBABA. */ #define NVM_SUM 0xBABA +/* Uninitialized ("empty") checksum word value */ +#define NVM_CHECKSUM_UNINITIALIZED 0xFFFF + /* PBA (printed board assembly) number words */ #define NVM_PBA_OFFSET_0 8 #define NVM_PBA_OFFSET_1 9 diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 364378133526..df4e7d781cb1 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -4274,6 +4274,8 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw) ret_val = e1000e_update_nvm_checksum(hw); if (ret_val) return ret_val; + } else if (hw->mac.type == e1000_pch_tgp) { + return 0; } } diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c index e609f4df86f4..16369e6d245a 100644 --- a/drivers/net/ethernet/intel/e1000e/nvm.c +++ b/drivers/net/ethernet/intel/e1000e/nvm.c @@ -558,6 +558,12 @@ s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw) checksum += nvm_data; } + if (hw->mac.type == e1000_pch_tgp && + nvm_data == NVM_CHECKSUM_UNINITIALIZED) { + e_dbg("Uninitialized NVM Checksum on TGP platform - ignoring\n"); + return 0; + } + if (checksum != (u16)NVM_SUM) { e_dbg("NVM Checksum Invalid\n"); return -E1000_ERR_NVM; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index b232edf68ab1..1a7e9532f8e3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -3138,10 +3138,10 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg) const u8 *addr = al->list[i].addr; /* Allow to delete VF primary MAC only if it was not set - * administratively by PF or if VF is trusted. + * administratively by PF. 
*/ if (ether_addr_equal(addr, vf->default_lan_addr.addr)) { - if (i40e_can_vf_change_mac(vf)) + if (!vf->pf_set_mac) was_unimac_deleted = true; else continue; @@ -5008,7 +5008,7 @@ int i40e_get_vf_stats(struct net_device *netdev, int vf_id, vf_stats->broadcast = stats->rx_broadcast; vf_stats->multicast = stats->rx_multicast; vf_stats->rx_dropped = stats->rx_discards + stats->rx_discards_other; - vf_stats->tx_dropped = stats->tx_discards; + vf_stats->tx_dropped = stats->tx_errors; return 0; } diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c index 59323c019544..351824dc3c62 100644 --- a/drivers/net/ethernet/intel/ice/ice_ddp.c +++ b/drivers/net/ethernet/intel/ice/ice_ddp.c @@ -2301,6 +2301,8 @@ enum ice_ddp_state ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, return ICE_DDP_PKG_ERR; buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL); + if (!buf_copy) + return ICE_DDP_PKG_ERR; state = ice_init_pkg(hw, buf_copy, len); if (!ice_is_init_pkg_successful(state)) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index b1aeea7c4a91..e395ef5f356e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -1947,8 +1947,8 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context, pages_queue, token, force_polling); - if (callback) - return err; + if (callback && !err) + return 0; if (err > 0) /* Failed in FW, command didn't execute */ err = deliv_status_to_err(err); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 0e3a977d5332..bee906661282 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -1182,19 +1182,19 @@ static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw, static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw, struct mlx5_core_dev *peer_dev) { + struct mlx5_eswitch *peer_esw = peer_dev->priv.eswitch; struct mlx5_flow_destination dest = {}; struct mlx5_flow_act flow_act = {0}; struct mlx5_flow_handle **flows; - /* total vports is the same for both e-switches */ - int nvports = esw->total_vports; struct mlx5_flow_handle *flow; + struct mlx5_vport *peer_vport; struct mlx5_flow_spec *spec; - struct mlx5_vport *vport; int err, pfindex; unsigned long i; void *misc; - if (!MLX5_VPORT_MANAGER(esw->dev) && !mlx5_core_is_ecpf_esw_manager(esw->dev)) + if (!MLX5_VPORT_MANAGER(peer_dev) && + !mlx5_core_is_ecpf_esw_manager(peer_dev)) return 0; spec = kvzalloc(sizeof(*spec), GFP_KERNEL); @@ -1203,7 +1203,7 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw, peer_miss_rules_setup(esw, peer_dev, spec, &dest); - flows = kvcalloc(nvports, sizeof(*flows), GFP_KERNEL); + flows = kvcalloc(peer_esw->total_vports, sizeof(*flows), GFP_KERNEL); if (!flows) { err = -ENOMEM; goto alloc_flows_err; @@ -1213,10 +1213,10 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw, misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); - if (mlx5_core_is_ecpf_esw_manager(esw->dev)) { - vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF); - esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch, - spec, MLX5_VPORT_PF); + if (mlx5_core_is_ecpf_esw_manager(peer_dev)) { + peer_vport = 
mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_PF); + esw_set_peer_miss_rule_source_port(esw, peer_esw, spec, + MLX5_VPORT_PF); flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw), spec, &flow_act, &dest, 1); @@ -1224,11 +1224,11 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw, err = PTR_ERR(flow); goto add_pf_flow_err; } - flows[vport->index] = flow; + flows[peer_vport->index] = flow; } - if (mlx5_ecpf_vport_exists(esw->dev)) { - vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF); + if (mlx5_ecpf_vport_exists(peer_dev)) { + peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_ECPF); MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF); flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw), spec, &flow_act, &dest, 1); @@ -1236,13 +1236,14 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw, err = PTR_ERR(flow); goto add_ecpf_flow_err; } - flows[vport->index] = flow; + flows[peer_vport->index] = flow; } - mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) { + mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport, + mlx5_core_max_vfs(peer_dev)) { esw_set_peer_miss_rule_source_port(esw, - peer_dev->priv.eswitch, - spec, vport->vport); + peer_esw, + spec, peer_vport->vport); flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw), spec, &flow_act, &dest, 1); @@ -1250,22 +1251,22 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw, err = PTR_ERR(flow); goto add_vf_flow_err; } - flows[vport->index] = flow; + flows[peer_vport->index] = flow; } - if (mlx5_core_ec_sriov_enabled(esw->dev)) { - mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) { - if (i >= mlx5_core_max_ec_vfs(peer_dev)) - break; - esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch, - spec, vport->vport); + if (mlx5_core_ec_sriov_enabled(peer_dev)) { + mlx5_esw_for_each_ec_vf_vport(peer_esw, i, peer_vport, + mlx5_core_max_ec_vfs(peer_dev)) { + esw_set_peer_miss_rule_source_port(esw, peer_esw, + spec, + peer_vport->vport); flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec, &flow_act, &dest, 1); if (IS_ERR(flow)) { err = PTR_ERR(flow); goto add_ec_vf_flow_err; } - flows[vport->index] = flow; + flows[peer_vport->index] = flow; } } @@ -1282,25 +1283,27 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw, return 0; add_ec_vf_flow_err: - mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) { - if (!flows[vport->index]) + mlx5_esw_for_each_ec_vf_vport(peer_esw, i, peer_vport, + mlx5_core_max_ec_vfs(peer_dev)) { + if (!flows[peer_vport->index]) continue; - mlx5_del_flow_rules(flows[vport->index]); + mlx5_del_flow_rules(flows[peer_vport->index]); } add_vf_flow_err: - mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) { - if (!flows[vport->index]) + mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport, + mlx5_core_max_vfs(peer_dev)) { + if (!flows[peer_vport->index]) continue; - mlx5_del_flow_rules(flows[vport->index]); + mlx5_del_flow_rules(flows[peer_vport->index]); } - if (mlx5_ecpf_vport_exists(esw->dev)) { - vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF); - mlx5_del_flow_rules(flows[vport->index]); + if (mlx5_ecpf_vport_exists(peer_dev)) { + peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_ECPF); + mlx5_del_flow_rules(flows[peer_vport->index]); } add_ecpf_flow_err: - if (mlx5_core_is_ecpf_esw_manager(esw->dev)) { - vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF); - mlx5_del_flow_rules(flows[vport->index]); + if 
(mlx5_core_is_ecpf_esw_manager(peer_dev)) { + peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_PF); + mlx5_del_flow_rules(flows[peer_vport->index]); } add_pf_flow_err: esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err); @@ -1313,37 +1316,34 @@ alloc_flows_err: static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw, struct mlx5_core_dev *peer_dev) { + struct mlx5_eswitch *peer_esw = peer_dev->priv.eswitch; u16 peer_index = mlx5_get_dev_index(peer_dev); struct mlx5_flow_handle **flows; - struct mlx5_vport *vport; + struct mlx5_vport *peer_vport; unsigned long i; flows = esw->fdb_table.offloads.peer_miss_rules[peer_index]; if (!flows) return; - if (mlx5_core_ec_sriov_enabled(esw->dev)) { - mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) { - /* The flow for a particular vport could be NULL if the other ECPF - * has fewer or no VFs enabled - */ - if (!flows[vport->index]) - continue; - mlx5_del_flow_rules(flows[vport->index]); - } + if (mlx5_core_ec_sriov_enabled(peer_dev)) { + mlx5_esw_for_each_ec_vf_vport(peer_esw, i, peer_vport, + mlx5_core_max_ec_vfs(peer_dev)) + mlx5_del_flow_rules(flows[peer_vport->index]); } - mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) - mlx5_del_flow_rules(flows[vport->index]); + mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport, + mlx5_core_max_vfs(peer_dev)) + mlx5_del_flow_rules(flows[peer_vport->index]); - if (mlx5_ecpf_vport_exists(esw->dev)) { - vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF); - mlx5_del_flow_rules(flows[vport->index]); + if (mlx5_ecpf_vport_exists(peer_dev)) { + peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_ECPF); + mlx5_del_flow_rules(flows[peer_vport->index]); } - if (mlx5_core_is_ecpf_esw_manager(esw->dev)) { - vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF); - mlx5_del_flow_rules(flows[vport->index]); + if (mlx5_core_is_ecpf_esw_manager(peer_dev)) { + peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_PF); + mlx5_del_flow_rules(flows[peer_vport->index]); } kvfree(flows); diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c index d6c0699bc8cf..43f034e180c4 100644 --- a/drivers/net/ethernet/microsoft/mana/gdma_main.c +++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index a7973651ae51..550843e2164b 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.c b/drivers/net/ethernet/ti/icssg/icssg_config.c index ddfd1c02a885..da53eb04b0a4 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_config.c +++ b/drivers/net/ethernet/ti/icssg/icssg_config.c @@ -288,8 +288,12 @@ static int prueth_fw_offload_buffer_setup(struct prueth_emac *emac) int i; addr = lower_32_bits(prueth->msmcram.pa); - if (slice) - addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE; + if (slice) { + if (prueth->pdata.banked_ms_ram) + addr += MSMC_RAM_BANK_SIZE; + else + addr += PRUETH_SW_TOTAL_BUF_SIZE_PER_SLICE; + } if (addr % SZ_64K) { dev_warn(prueth->dev, "buffer pool needs to be 64KB aligned\n"); @@ -297,43 +301,66 @@ static int prueth_fw_offload_buffer_setup(struct prueth_emac *emac) } bpool_cfg = emac->dram.va + 
BUFFER_POOL_0_ADDR_OFFSET; - /* workaround for f/w bug. bpool 0 needs to be initialized */ - for (i = 0; i < PRUETH_NUM_BUF_POOLS; i++) { + + /* Configure buffer pools for forwarding buffers + * - used by firmware to store packets to be forwarded to other port + * - 8 total pools per slice + */ + for (i = 0; i < PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE; i++) { writel(addr, &bpool_cfg[i].addr); - writel(PRUETH_EMAC_BUF_POOL_SIZE, &bpool_cfg[i].len); - addr += PRUETH_EMAC_BUF_POOL_SIZE; + writel(PRUETH_SW_FWD_BUF_POOL_SIZE, &bpool_cfg[i].len); + addr += PRUETH_SW_FWD_BUF_POOL_SIZE; } - if (!slice) - addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE; - else - addr += PRUETH_SW_NUM_BUF_POOLS_HOST * PRUETH_SW_BUF_POOL_SIZE_HOST; + /* Configure buffer pools for Local Injection buffers + * - used by firmware to store packets received from host core + * - 16 total pools per slice + */ + for (i = 0; i < PRUETH_NUM_LI_BUF_POOLS_PER_SLICE; i++) { + int cfg_idx = i + PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE; - for (i = PRUETH_NUM_BUF_POOLS; - i < 2 * PRUETH_SW_NUM_BUF_POOLS_HOST + PRUETH_NUM_BUF_POOLS; - i++) { - /* The driver only uses first 4 queues per PRU so only initialize them */ - if (i % PRUETH_SW_NUM_BUF_POOLS_HOST < PRUETH_SW_NUM_BUF_POOLS_PER_PRU) { - writel(addr, &bpool_cfg[i].addr); - writel(PRUETH_SW_BUF_POOL_SIZE_HOST, &bpool_cfg[i].len); - addr += PRUETH_SW_BUF_POOL_SIZE_HOST; + /* The driver only uses first 4 queues per PRU, + * so only initialize buffer for them + */ + if ((i % PRUETH_NUM_LI_BUF_POOLS_PER_PORT_PER_SLICE) + < PRUETH_SW_USED_LI_BUF_POOLS_PER_PORT_PER_SLICE) { + writel(addr, &bpool_cfg[cfg_idx].addr); + writel(PRUETH_SW_LI_BUF_POOL_SIZE, + &bpool_cfg[cfg_idx].len); + addr += PRUETH_SW_LI_BUF_POOL_SIZE; } else { - writel(0, &bpool_cfg[i].addr); - writel(0, &bpool_cfg[i].len); + writel(0, &bpool_cfg[cfg_idx].addr); + writel(0, &bpool_cfg[cfg_idx].len); } } - if (!slice) - addr += PRUETH_SW_NUM_BUF_POOLS_HOST * PRUETH_SW_BUF_POOL_SIZE_HOST; - else - addr += PRUETH_EMAC_RX_CTX_BUF_SIZE; + /* Express RX buffer queue + * - used by firmware to store express packets to be transmitted + * to the host core + */ + rxq_ctx = emac->dram.va + HOST_RX_Q_EXP_CONTEXT_OFFSET; + for (i = 0; i < 3; i++) + writel(addr, &rxq_ctx->start[i]); + addr += PRUETH_SW_HOST_EXP_BUF_POOL_SIZE; + writel(addr, &rxq_ctx->end); + + /* Pre-emptible RX buffer queue + * - used by firmware to store preemptible packets to be transmitted + * to the host core + */ rxq_ctx = emac->dram.va + HOST_RX_Q_PRE_CONTEXT_OFFSET; for (i = 0; i < 3; i++) writel(addr, &rxq_ctx->start[i]); - addr += PRUETH_EMAC_RX_CTX_BUF_SIZE; - writel(addr - SZ_2K, &rxq_ctx->end); + addr += PRUETH_SW_HOST_PRE_BUF_POOL_SIZE; + writel(addr, &rxq_ctx->end); + + /* Set pointer for default dropped packet write + * - used by firmware to temporarily store packet to be dropped + */ + rxq_ctx = emac->dram.va + DEFAULT_MSMC_Q_OFFSET; + writel(addr, &rxq_ctx->start[0]); return 0; } @@ -347,13 +374,13 @@ static int prueth_emac_buffer_setup(struct prueth_emac *emac) u32 addr; int i; - /* Layout to have 64KB aligned buffer pool - * |BPOOL0|BPOOL1|RX_CTX0|RX_CTX1| - */ - addr = lower_32_bits(prueth->msmcram.pa); - if (slice) - addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE; + if (slice) { + if (prueth->pdata.banked_ms_ram) + addr += MSMC_RAM_BANK_SIZE; + else + addr += PRUETH_EMAC_TOTAL_BUF_SIZE_PER_SLICE; + } if (addr % SZ_64K) { dev_warn(prueth->dev, "buffer pool needs to be 64KB aligned\n"); @@ -361,39 +388,66 @@ static int 
prueth_emac_buffer_setup(struct prueth_emac *emac) } bpool_cfg = emac->dram.va + BUFFER_POOL_0_ADDR_OFFSET; - /* workaround for f/w bug. bpool 0 needs to be initilalized */ - writel(addr, &bpool_cfg[0].addr); - writel(0, &bpool_cfg[0].len); - for (i = PRUETH_EMAC_BUF_POOL_START; - i < PRUETH_EMAC_BUF_POOL_START + PRUETH_NUM_BUF_POOLS; - i++) { - writel(addr, &bpool_cfg[i].addr); - writel(PRUETH_EMAC_BUF_POOL_SIZE, &bpool_cfg[i].len); - addr += PRUETH_EMAC_BUF_POOL_SIZE; + /* Configure buffer pools for forwarding buffers + * - in mac mode - no forwarding so initialize all pools to 0 + * - 8 total pools per slice + */ + for (i = 0; i < PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE; i++) { + writel(0, &bpool_cfg[i].addr); + writel(0, &bpool_cfg[i].len); } - if (!slice) - addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE; - else - addr += PRUETH_EMAC_RX_CTX_BUF_SIZE * 2; + /* Configure buffer pools for Local Injection buffers + * - used by firmware to store packets received from host core + * - 16 total pools per slice + */ + bpool_cfg = emac->dram.va + BUFFER_POOL_0_ADDR_OFFSET; + for (i = 0; i < PRUETH_NUM_LI_BUF_POOLS_PER_SLICE; i++) { + int cfg_idx = i + PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE; - /* Pre-emptible RX buffer queue */ - rxq_ctx = emac->dram.va + HOST_RX_Q_PRE_CONTEXT_OFFSET; - for (i = 0; i < 3; i++) - writel(addr, &rxq_ctx->start[i]); + /* In EMAC mode, only first 4 buffers are used, + * as 1 slice needs to handle only 1 port + */ + if (i < PRUETH_EMAC_USED_LI_BUF_POOLS_PER_PORT_PER_SLICE) { + writel(addr, &bpool_cfg[cfg_idx].addr); + writel(PRUETH_EMAC_LI_BUF_POOL_SIZE, + &bpool_cfg[cfg_idx].len); + addr += PRUETH_EMAC_LI_BUF_POOL_SIZE; + } else { + writel(0, &bpool_cfg[cfg_idx].addr); + writel(0, &bpool_cfg[cfg_idx].len); + } + } - addr += PRUETH_EMAC_RX_CTX_BUF_SIZE; - writel(addr, &rxq_ctx->end); - - /* Express RX buffer queue */ + /* Express RX buffer queue + * - used by firmware to store express packets to be transmitted + * to host core + */ rxq_ctx = emac->dram.va + HOST_RX_Q_EXP_CONTEXT_OFFSET; for (i = 0; i < 3; i++) writel(addr, &rxq_ctx->start[i]); - addr += PRUETH_EMAC_RX_CTX_BUF_SIZE; + addr += PRUETH_EMAC_HOST_EXP_BUF_POOL_SIZE; writel(addr, &rxq_ctx->end); + /* Pre-emptible RX buffer queue + * - used by firmware to store preemptible packets to be transmitted + * to host core + */ + rxq_ctx = emac->dram.va + HOST_RX_Q_PRE_CONTEXT_OFFSET; + for (i = 0; i < 3; i++) + writel(addr, &rxq_ctx->start[i]); + + addr += PRUETH_EMAC_HOST_PRE_BUF_POOL_SIZE; + writel(addr, &rxq_ctx->end); + + /* Set pointer for default dropped packet write + * - used by firmware to temporarily store packet to be dropped + */ + rxq_ctx = emac->dram.va + DEFAULT_MSMC_Q_OFFSET; + writel(addr, &rxq_ctx->start[0]); + return 0; } diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.h b/drivers/net/ethernet/ti/icssg/icssg_config.h index c884e9fa099e..60d69744ffae 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_config.h +++ b/drivers/net/ethernet/ti/icssg/icssg_config.h @@ -26,21 +26,71 @@ struct icssg_flow_cfg { #define PRUETH_MAX_RX_FLOWS 1 /* excluding default flow */ #define PRUETH_RX_FLOW_DATA 0 -#define PRUETH_EMAC_BUF_POOL_SIZE SZ_8K -#define PRUETH_EMAC_POOLS_PER_SLICE 24 -#define PRUETH_EMAC_BUF_POOL_START 8 -#define PRUETH_NUM_BUF_POOLS 8 -#define PRUETH_EMAC_RX_CTX_BUF_SIZE SZ_16K /* per slice */ -#define MSMC_RAM_SIZE \ - (2 * (PRUETH_EMAC_BUF_POOL_SIZE * PRUETH_NUM_BUF_POOLS + \ - PRUETH_EMAC_RX_CTX_BUF_SIZE * 2)) +/* Defines for forwarding path buffer pools: + * - used by firmware to store 
packets to be forwarded to other port + * - 8 total pools per slice + * - only used in switch mode (as no forwarding in mac mode) + */ +#define PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE 8 +#define PRUETH_SW_FWD_BUF_POOL_SIZE (SZ_8K) -#define PRUETH_SW_BUF_POOL_SIZE_HOST SZ_4K -#define PRUETH_SW_NUM_BUF_POOLS_HOST 8 -#define PRUETH_SW_NUM_BUF_POOLS_PER_PRU 4 -#define MSMC_RAM_SIZE_SWITCH_MODE \ - (MSMC_RAM_SIZE + \ - (2 * PRUETH_SW_BUF_POOL_SIZE_HOST * PRUETH_SW_NUM_BUF_POOLS_HOST)) +/* Defines for local injection path buffer pools: + * - used by firmware to store packets received from host core + * - 16 total pools per slice + * - 8 pools per port per slice and each slice handles both ports + * - only 4 out of 8 pools used per port (as only 4 real QoS levels in ICSSG) + * - switch mode: 8 total pools used + * - mac mode: 4 total pools used + */ +#define PRUETH_NUM_LI_BUF_POOLS_PER_SLICE 16 +#define PRUETH_NUM_LI_BUF_POOLS_PER_PORT_PER_SLICE 8 +#define PRUETH_SW_LI_BUF_POOL_SIZE SZ_4K +#define PRUETH_SW_USED_LI_BUF_POOLS_PER_SLICE 8 +#define PRUETH_SW_USED_LI_BUF_POOLS_PER_PORT_PER_SLICE 4 +#define PRUETH_EMAC_LI_BUF_POOL_SIZE SZ_8K +#define PRUETH_EMAC_USED_LI_BUF_POOLS_PER_SLICE 4 +#define PRUETH_EMAC_USED_LI_BUF_POOLS_PER_PORT_PER_SLICE 4 + +/* Defines for host egress path - express and preemptible buffers + * - used by firmware to store express and preemptible packets + * to be transmitted to host core + * - used by both mac/switch modes + */ +#define PRUETH_SW_HOST_EXP_BUF_POOL_SIZE SZ_16K +#define PRUETH_SW_HOST_PRE_BUF_POOL_SIZE (SZ_16K - SZ_2K) +#define PRUETH_EMAC_HOST_EXP_BUF_POOL_SIZE PRUETH_SW_HOST_EXP_BUF_POOL_SIZE +#define PRUETH_EMAC_HOST_PRE_BUF_POOL_SIZE PRUETH_SW_HOST_PRE_BUF_POOL_SIZE + +/* Buffer used by firmware to temporarily store packet to be dropped */ +#define PRUETH_SW_DROP_PKT_BUF_SIZE SZ_2K +#define PRUETH_EMAC_DROP_PKT_BUF_SIZE PRUETH_SW_DROP_PKT_BUF_SIZE + +/* Total switch mode memory usage for buffers per slice */ +#define PRUETH_SW_TOTAL_BUF_SIZE_PER_SLICE \ + (PRUETH_SW_FWD_BUF_POOL_SIZE * PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE + \ + PRUETH_SW_LI_BUF_POOL_SIZE * PRUETH_SW_USED_LI_BUF_POOLS_PER_SLICE + \ + PRUETH_SW_HOST_EXP_BUF_POOL_SIZE + \ + PRUETH_SW_HOST_PRE_BUF_POOL_SIZE + \ + PRUETH_SW_DROP_PKT_BUF_SIZE) + +/* Total switch mode memory usage for all buffers */ +#define PRUETH_SW_TOTAL_BUF_SIZE \ + (2 * PRUETH_SW_TOTAL_BUF_SIZE_PER_SLICE) + +/* Total mac mode memory usage for buffers per slice */ +#define PRUETH_EMAC_TOTAL_BUF_SIZE_PER_SLICE \ + (PRUETH_EMAC_LI_BUF_POOL_SIZE * \ + PRUETH_EMAC_USED_LI_BUF_POOLS_PER_SLICE + \ + PRUETH_EMAC_HOST_EXP_BUF_POOL_SIZE + \ + PRUETH_EMAC_HOST_PRE_BUF_POOL_SIZE + \ + PRUETH_EMAC_DROP_PKT_BUF_SIZE) + +/* Total mac mode memory usage for all buffers */ +#define PRUETH_EMAC_TOTAL_BUF_SIZE \ + (2 * PRUETH_EMAC_TOTAL_BUF_SIZE_PER_SLICE) + +/* Size of 1 bank of MSMC/OC_SRAM memory */ +#define MSMC_RAM_BANK_SIZE SZ_256K #define PRUETH_SWITCH_FDB_MASK ((SIZE_OF_FDB / NUMBER_OF_FDB_BUCKET_ENTRIES) - 1) diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c index 2aa812cbab92..2b973d6e2341 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c @@ -1814,10 +1814,15 @@ static int prueth_probe(struct platform_device *pdev) goto put_mem; } - msmc_ram_size = MSMC_RAM_SIZE; prueth->is_switchmode_supported = prueth->pdata.switch_mode; - if (prueth->is_switchmode_supported) - msmc_ram_size = MSMC_RAM_SIZE_SWITCH_MODE; + if 
(prueth->pdata.banked_ms_ram) { + /* Reserve 2 MSMC RAM banks for buffers to avoid arbitration */ + msmc_ram_size = (2 * MSMC_RAM_BANK_SIZE); + } else { + msmc_ram_size = PRUETH_EMAC_TOTAL_BUF_SIZE; + if (prueth->is_switchmode_supported) + msmc_ram_size = PRUETH_SW_TOTAL_BUF_SIZE; + } /* NOTE: FW bug needs buffer base to be 64KB aligned */ prueth->msmcram.va = @@ -1985,7 +1990,8 @@ put_iep0: free_pool: gen_pool_free(prueth->sram_pool, - (unsigned long)prueth->msmcram.va, msmc_ram_size); + (unsigned long)prueth->msmcram.va, + prueth->msmcram.size); put_mem: pruss_release_mem_region(prueth->pruss, &prueth->shram); @@ -2037,8 +2043,8 @@ static void prueth_remove(struct platform_device *pdev) icss_iep_put(prueth->iep0); gen_pool_free(prueth->sram_pool, - (unsigned long)prueth->msmcram.va, - MSMC_RAM_SIZE); + (unsigned long)prueth->msmcram.va, + prueth->msmcram.size); pruss_release_mem_region(prueth->pruss, &prueth->shram); @@ -2055,12 +2061,14 @@ static const struct prueth_pdata am654_icssg_pdata = { .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE, .quirk_10m_link_issue = 1, .switch_mode = 1, + .banked_ms_ram = 0, }; static const struct prueth_pdata am64x_icssg_pdata = { .fdqring_mode = K3_RINGACC_RING_MODE_RING, .quirk_10m_link_issue = 1, .switch_mode = 1, + .banked_ms_ram = 1, }; static const struct of_device_id prueth_dt_match[] = { diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h index 9ca2e7fdefbd..ca8a22a4a5da 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h @@ -252,11 +252,13 @@ struct prueth_emac { * @fdqring_mode: Free desc queue mode * @quirk_10m_link_issue: 10M link detect errata * @switch_mode: switch firmware support + * @banked_ms_ram: banked memory support */ struct prueth_pdata { enum k3_ring_mode fdqring_mode; u32 quirk_10m_link_issue:1; u32 switch_mode:1; + u32 banked_ms_ram:1; }; struct icssg_firmwares { diff --git a/drivers/net/ethernet/ti/icssg/icssg_switch_map.h b/drivers/net/ethernet/ti/icssg/icssg_switch_map.h index 490a9cc06fb0..7e053b8af3ec 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_switch_map.h +++ b/drivers/net/ethernet/ti/icssg/icssg_switch_map.h @@ -180,6 +180,9 @@ /* Used to notify the FW of the current link speed */ #define PORT_LINK_SPEED_OFFSET 0x00A8 +/* 2k memory pointer reserved for default writes by PRU0*/ +#define DEFAULT_MSMC_Q_OFFSET 0x00AC + /* TAS gate mask for windows list0 */ #define TAS_GATE_MASK_LIST0 0x0100 diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 7493e5aa984c..895fb163d48e 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -381,12 +381,12 @@ static void nvme_log_err_passthru(struct request *req) nr->status & NVME_SC_MASK, /* Status Code */ nr->status & NVME_STATUS_MORE ? "MORE " : "", nr->status & NVME_STATUS_DNR ? 
"DNR " : "", - nr->cmd->common.cdw10, - nr->cmd->common.cdw11, - nr->cmd->common.cdw12, - nr->cmd->common.cdw13, - nr->cmd->common.cdw14, - nr->cmd->common.cdw15); + le32_to_cpu(nr->cmd->common.cdw10), + le32_to_cpu(nr->cmd->common.cdw11), + le32_to_cpu(nr->cmd->common.cdw12), + le32_to_cpu(nr->cmd->common.cdw13), + le32_to_cpu(nr->cmd->common.cdw14), + le32_to_cpu(nr->cmd->common.cdw15)); } enum nvme_disposition { @@ -764,6 +764,10 @@ blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl, !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) && !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH)) return BLK_STS_RESOURCE; + + if (!(rq->rq_flags & RQF_DONTPREP)) + nvme_clear_nvme_request(rq); + return nvme_host_path_error(rq); } EXPORT_SYMBOL_GPL(nvme_fail_nonready_command); @@ -3537,15 +3541,6 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl) if (ret) goto out_free; } - - if (le16_to_cpu(id->awupf) != ctrl->subsys->awupf) { - dev_err_ratelimited(ctrl->device, - "inconsistent AWUPF, controller not added (%u/%u).\n", - le16_to_cpu(id->awupf), ctrl->subsys->awupf); - ret = -EINVAL; - goto out_free; - } - memcpy(ctrl->subsys->firmware_rev, id->fr, sizeof(ctrl->subsys->firmware_rev)); @@ -4077,7 +4072,7 @@ static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns) return; } } - list_add(&ns->list, &ns->ctrl->namespaces); + list_add_rcu(&ns->list, &ns->ctrl->namespaces); } static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info) diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c index 688033b88d38..470bf37e5a63 100644 --- a/drivers/nvme/target/tcp.c +++ b/drivers/nvme/target/tcp.c @@ -1928,10 +1928,10 @@ static void nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port, struct sock *sk = queue->sock->sk; /* Restore the default callbacks before starting upcall */ - read_lock_bh(&sk->sk_callback_lock); + write_lock_bh(&sk->sk_callback_lock); sk->sk_user_data = NULL; sk->sk_data_ready = port->data_ready; - read_unlock_bh(&sk->sk_callback_lock); + write_unlock_bh(&sk->sk_callback_lock); if (!nvmet_tcp_try_peek_pdu(queue)) { if (!nvmet_tcp_tls_handshake(queue)) return; diff --git a/drivers/nvmem/imx-ocotp-ele.c b/drivers/nvmem/imx-ocotp-ele.c index ca6dd71d8a2e..7807ec0e2d18 100644 --- a/drivers/nvmem/imx-ocotp-ele.c +++ b/drivers/nvmem/imx-ocotp-ele.c @@ -12,6 +12,7 @@ #include #include #include +#include /* ETH_ALEN */ enum fuse_type { FUSE_FSB = BIT(0), @@ -118,9 +119,11 @@ static int imx_ocotp_cell_pp(void *context, const char *id, int index, int i; /* Deal with some post processing of nvmem cell data */ - if (id && !strcmp(id, "mac-address")) + if (id && !strcmp(id, "mac-address")) { + bytes = min(bytes, ETH_ALEN); for (i = 0; i < bytes / 2; i++) swap(buf[i], buf[bytes - i - 1]); + } return 0; } diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c index 79dd4fda0329..7bf7656d4f96 100644 --- a/drivers/nvmem/imx-ocotp.c +++ b/drivers/nvmem/imx-ocotp.c @@ -23,6 +23,7 @@ #include #include #include +#include /* ETH_ALEN */ #define IMX_OCOTP_OFFSET_B0W0 0x400 /* Offset from base address of the * OTP Bank0 Word0 @@ -227,9 +228,11 @@ static int imx_ocotp_cell_pp(void *context, const char *id, int index, int i; /* Deal with some post processing of nvmem cell data */ - if (id && !strcmp(id, "mac-address")) + if (id && !strcmp(id, "mac-address")) { + bytes = min(bytes, ETH_ALEN); for (i = 0; i < bytes / 2; i++) swap(buf[i], buf[bytes - i - 1]); + } return 0; } diff --git a/drivers/nvmem/layouts/u-boot-env.c 
b/drivers/nvmem/layouts/u-boot-env.c index 436426d4e8f9..8571aac56295 100644 --- a/drivers/nvmem/layouts/u-boot-env.c +++ b/drivers/nvmem/layouts/u-boot-env.c @@ -92,7 +92,7 @@ int u_boot_env_parse(struct device *dev, struct nvmem_device *nvmem, size_t crc32_data_offset; size_t crc32_data_len; size_t crc32_offset; - __le32 *crc32_addr; + uint32_t *crc32_addr; size_t data_offset; size_t data_len; size_t dev_size; @@ -143,8 +143,8 @@ int u_boot_env_parse(struct device *dev, struct nvmem_device *nvmem, goto err_kfree; } - crc32_addr = (__le32 *)(buf + crc32_offset); - crc32 = le32_to_cpu(*crc32_addr); + crc32_addr = (uint32_t *)(buf + crc32_offset); + crc32 = *crc32_addr; crc32_data_len = dev_size - crc32_data_offset; data_len = dev_size - data_offset; diff --git a/drivers/pci/controller/pci-hyperv-intf.c b/drivers/pci/controller/pci-hyperv-intf.c index cc96be450360..28b3e93d31c0 100644 --- a/drivers/pci/controller/pci-hyperv-intf.c +++ b/drivers/pci/controller/pci-hyperv-intf.c @@ -14,6 +14,7 @@ #include #include #include +#include struct hyperv_pci_block_ops hvpci_block_ops; EXPORT_SYMBOL_GPL(hvpci_block_ops); diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index ebe39218479a..d2b7e8ea710b 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c @@ -599,7 +599,7 @@ static unsigned int hv_msi_get_int_vector(struct irq_data *data) #define hv_msi_prepare pci_msi_prepare /** - * hv_arch_irq_unmask() - "Unmask" the IRQ by setting its current + * hv_irq_retarget_interrupt() - "Unmask" the IRQ by setting its current * affinity. * @data: Describes the IRQ * @@ -608,7 +608,7 @@ static unsigned int hv_msi_get_int_vector(struct irq_data *data) * is built out of this PCI bus's instance GUID and the function * number of the device. */ -static void hv_arch_irq_unmask(struct irq_data *data) +static void hv_irq_retarget_interrupt(struct irq_data *data) { struct msi_desc *msi_desc = irq_data_get_msi_desc(data); struct hv_retarget_device_interrupt *params; @@ -713,6 +713,20 @@ out: dev_err(&hbus->hdev->device, "%s() failed: %#llx", __func__, res); } + +static void hv_arch_irq_unmask(struct irq_data *data) +{ + if (hv_root_partition()) + /* + * In case of the nested root partition, the nested hypervisor + * is taking care of interrupt remapping and thus the + * MAP_DEVICE_INTERRUPT hypercall is required instead of + * RETARGET_INTERRUPT. 
+ */ + (void)hv_map_msi_interrupt(data, NULL); + else + hv_irq_retarget_interrupt(data); +} #elif defined(CONFIG_ARM64) /* * SPI vectors to use for vPCI; arch SPIs range is [32, 1019], but leaving a bit @@ -4200,6 +4214,9 @@ static int __init init_hv_pci_drv(void) if (!hv_is_hyperv_initialized()) return -ENODEV; + if (hv_root_partition() && !hv_nested) + return -ENODEV; + ret = hv_pci_irqchip_init(); if (ret) return ret; diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c index 8e2daea81666..04a5a34e7a95 100644 --- a/drivers/phy/phy-core.c +++ b/drivers/phy/phy-core.c @@ -994,7 +994,8 @@ struct phy *phy_create(struct device *dev, struct device_node *node, } device_initialize(&phy->dev); - mutex_init(&phy->mutex); + lockdep_register_key(&phy->lockdep_key); + mutex_init_with_key(&phy->mutex, &phy->lockdep_key); phy->dev.class = &phy_class; phy->dev.parent = dev; @@ -1259,6 +1260,8 @@ static void phy_release(struct device *dev) dev_vdbg(dev, "releasing '%s'\n", dev_name(dev)); debugfs_remove_recursive(phy->debugfs); regulator_put(phy->pwr); + mutex_destroy(&phy->mutex); + lockdep_unregister_key(&phy->lockdep_key); ida_free(&phy_ida, phy->id); kfree(phy); } diff --git a/drivers/phy/phy-snps-eusb2.c b/drivers/phy/phy-snps-eusb2.c index b73a1d7e57b3..751b6d8ba2be 100644 --- a/drivers/phy/phy-snps-eusb2.c +++ b/drivers/phy/phy-snps-eusb2.c @@ -567,9 +567,11 @@ static int snps_eusb2_hsphy_probe(struct platform_device *pdev) } } - if (IS_ERR_OR_NULL(phy->ref_clk)) - return dev_err_probe(dev, PTR_ERR(phy->ref_clk), + if (IS_ERR_OR_NULL(phy->ref_clk)) { + ret = phy->ref_clk ? PTR_ERR(phy->ref_clk) : -ENOENT; + return dev_err_probe(dev, ret, "failed to get ref clk\n"); + } num = ARRAY_SIZE(phy->vregs); for (i = 0; i < num; i++) diff --git a/drivers/phy/tegra/xusb-tegra186.c b/drivers/phy/tegra/xusb-tegra186.c index 23a23f2d64e5..e818f6c3980e 100644 --- a/drivers/phy/tegra/xusb-tegra186.c +++ b/drivers/phy/tegra/xusb-tegra186.c @@ -648,14 +648,15 @@ static void tegra186_utmi_bias_pad_power_on(struct tegra_xusb_padctl *padctl) udelay(100); } - if (padctl->soc->trk_hw_mode) { - value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL2); - value |= USB2_TRK_HW_MODE; + value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL2); + if (padctl->soc->trk_update_on_idle) value &= ~CYA_TRK_CODE_UPDATE_ON_IDLE; - padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL2); - } else { + if (padctl->soc->trk_hw_mode) + value |= USB2_TRK_HW_MODE; + padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL2); + + if (!padctl->soc->trk_hw_mode) clk_disable_unprepare(priv->usb2_trk_clk); - } } static void tegra186_utmi_bias_pad_power_off(struct tegra_xusb_padctl *padctl) @@ -782,13 +783,15 @@ static int tegra186_xusb_padctl_vbus_override(struct tegra_xusb_padctl *padctl, } static int tegra186_xusb_padctl_id_override(struct tegra_xusb_padctl *padctl, - bool status) + struct tegra_xusb_usb2_port *port, bool status) { - u32 value; + u32 value, id_override; + int err = 0; dev_dbg(padctl->dev, "%s id override\n", status ? 
"set" : "clear"); value = padctl_readl(padctl, USB2_VBUS_ID); + id_override = value & ID_OVERRIDE(~0); if (status) { if (value & VBUS_OVERRIDE) { @@ -799,14 +802,34 @@ static int tegra186_xusb_padctl_id_override(struct tegra_xusb_padctl *padctl, value = padctl_readl(padctl, USB2_VBUS_ID); } - value &= ~ID_OVERRIDE(~0); - value |= ID_OVERRIDE_GROUNDED; - } else { - value &= ~ID_OVERRIDE(~0); - value |= ID_OVERRIDE_FLOATING; - } + if (id_override != ID_OVERRIDE_GROUNDED) { + value &= ~ID_OVERRIDE(~0); + value |= ID_OVERRIDE_GROUNDED; + padctl_writel(padctl, value, USB2_VBUS_ID); - padctl_writel(padctl, value, USB2_VBUS_ID); + err = regulator_enable(port->supply); + if (err) { + dev_err(padctl->dev, "Failed to enable regulator: %d\n", err); + return err; + } + } + } else { + if (id_override == ID_OVERRIDE_GROUNDED) { + /* + * The regulator is disabled only when the role transitions + * from USB_ROLE_HOST to USB_ROLE_NONE. + */ + err = regulator_disable(port->supply); + if (err) { + dev_err(padctl->dev, "Failed to disable regulator: %d\n", err); + return err; + } + + value &= ~ID_OVERRIDE(~0); + value |= ID_OVERRIDE_FLOATING; + padctl_writel(padctl, value, USB2_VBUS_ID); + } + } return 0; } @@ -826,27 +849,20 @@ static int tegra186_utmi_phy_set_mode(struct phy *phy, enum phy_mode mode, if (mode == PHY_MODE_USB_OTG) { if (submode == USB_ROLE_HOST) { - tegra186_xusb_padctl_id_override(padctl, true); - - err = regulator_enable(port->supply); + err = tegra186_xusb_padctl_id_override(padctl, port, true); + if (err) + goto out; } else if (submode == USB_ROLE_DEVICE) { tegra186_xusb_padctl_vbus_override(padctl, true); } else if (submode == USB_ROLE_NONE) { - /* - * When port is peripheral only or role transitions to - * USB_ROLE_NONE from USB_ROLE_DEVICE, regulator is not - * enabled. 
- */ - if (regulator_is_enabled(port->supply)) - regulator_disable(port->supply); - - tegra186_xusb_padctl_id_override(padctl, false); + err = tegra186_xusb_padctl_id_override(padctl, port, false); + if (err) + goto out; tegra186_xusb_padctl_vbus_override(padctl, false); } } - +out: mutex_unlock(&padctl->lock); - return err; } @@ -1710,7 +1726,8 @@ const struct tegra_xusb_padctl_soc tegra234_xusb_padctl_soc = { .num_supplies = ARRAY_SIZE(tegra194_xusb_padctl_supply_names), .supports_gen2 = true, .poll_trk_completed = true, - .trk_hw_mode = true, + .trk_hw_mode = false, + .trk_update_on_idle = true, .supports_lp_cfg_en = true, }; EXPORT_SYMBOL_GPL(tegra234_xusb_padctl_soc); diff --git a/drivers/phy/tegra/xusb.h b/drivers/phy/tegra/xusb.h index 6e45d194c689..d2b5f9565132 100644 --- a/drivers/phy/tegra/xusb.h +++ b/drivers/phy/tegra/xusb.h @@ -434,6 +434,7 @@ struct tegra_xusb_padctl_soc { bool need_fake_usb3_port; bool poll_trk_completed; bool trk_hw_mode; + bool trk_update_on_idle; bool supports_lp_cfg_en; }; diff --git a/drivers/platform/arm64/huawei-gaokun-ec.c b/drivers/platform/arm64/huawei-gaokun-ec.c index 7e5aa7ca2403..7170f8eb76f7 100644 --- a/drivers/platform/arm64/huawei-gaokun-ec.c +++ b/drivers/platform/arm64/huawei-gaokun-ec.c @@ -662,6 +662,7 @@ static void gaokun_aux_release(struct device *dev) { struct auxiliary_device *adev = to_auxiliary_dev(dev); + of_node_put(dev->of_node); kfree(adev); } @@ -693,6 +694,7 @@ static int gaokun_aux_init(struct device *parent, const char *name, ret = auxiliary_device_init(adev); if (ret) { + of_node_put(adev->dev.of_node); kfree(adev); return ret; } diff --git a/drivers/platform/mellanox/mlxbf-pmc.c b/drivers/platform/mellanox/mlxbf-pmc.c index a1c529f1ff1a..4776013e0764 100644 --- a/drivers/platform/mellanox/mlxbf-pmc.c +++ b/drivers/platform/mellanox/mlxbf-pmc.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #define MLXBF_PMC_WRITE_REG_32 0x82000009 @@ -1222,7 +1223,7 @@ static int mlxbf_pmc_get_event_num(const char *blk, const char *evt) return -ENODEV; } -/* Get the event number given the name */ +/* Get the event name given the number */ static char *mlxbf_pmc_get_event_name(const char *blk, u32 evt) { const struct mlxbf_pmc_events *events; @@ -1784,6 +1785,7 @@ static ssize_t mlxbf_pmc_event_store(struct device *dev, attr, struct mlxbf_pmc_attribute, dev_attr); unsigned int blk_num, cnt_num; bool is_l3 = false; + char *evt_name; int evt_num; int err; @@ -1791,14 +1793,23 @@ static ssize_t mlxbf_pmc_event_store(struct device *dev, cnt_num = attr_event->index; if (isalpha(buf[0])) { + /* Remove the trailing newline character if present */ + evt_name = kstrdup_and_replace(buf, '\n', '\0', GFP_KERNEL); + if (!evt_name) + return -ENOMEM; + evt_num = mlxbf_pmc_get_event_num(pmc->block_name[blk_num], - buf); + evt_name); + kfree(evt_name); if (evt_num < 0) return -EINVAL; } else { err = kstrtouint(buf, 0, &evt_num); if (err < 0) return err; + + if (!mlxbf_pmc_get_event_name(pmc->block_name[blk_num], evt_num)) + return -EINVAL; } if (strstr(pmc->block_name[blk_num], "l3cache")) @@ -1879,13 +1890,14 @@ static ssize_t mlxbf_pmc_enable_store(struct device *dev, { struct mlxbf_pmc_attribute *attr_enable = container_of( attr, struct mlxbf_pmc_attribute, dev_attr); - unsigned int en, blk_num; + unsigned int blk_num; u32 word; int err; + bool en; blk_num = attr_enable->nr; - err = kstrtouint(buf, 0, &en); + err = kstrtobool(buf, &en); if (err < 0) return err; @@ -1905,14 +1917,11 @@ static ssize_t mlxbf_pmc_enable_store(struct 
device *dev, MLXBF_PMC_CRSPACE_PERFMON_CTL(pmc->block[blk_num].counters), MLXBF_PMC_WRITE_REG_32, word); } else { - if (en && en != 1) - return -EINVAL; - err = mlxbf_pmc_config_l3_counters(blk_num, false, !!en); if (err) return err; - if (en == 1) { + if (en) { err = mlxbf_pmc_config_l3_counters(blk_num, true, false); if (err) return err; diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile index abbc2644ff6d..bea87a85ae75 100644 --- a/drivers/platform/x86/Makefile +++ b/drivers/platform/x86/Makefile @@ -58,6 +58,8 @@ obj-$(CONFIG_X86_PLATFORM_DRIVERS_HP) += hp/ # Hewlett Packard Enterprise obj-$(CONFIG_UV_SYSFS) += uv_sysfs.o +obj-$(CONFIG_FW_ATTR_CLASS) += firmware_attributes_class.o + # IBM Thinkpad and Lenovo obj-$(CONFIG_IBM_RTL) += ibm_rtl.o obj-$(CONFIG_IDEAPAD_LAPTOP) += ideapad-laptop.o @@ -128,7 +130,6 @@ obj-$(CONFIG_SYSTEM76_ACPI) += system76_acpi.o obj-$(CONFIG_TOPSTAR_LAPTOP) += topstar-laptop.o # Platform drivers -obj-$(CONFIG_FW_ATTR_CLASS) += firmware_attributes_class.o obj-$(CONFIG_SERIAL_MULTI_INSTANTIATE) += serial-multi-instantiate.o obj-$(CONFIG_TOUCHSCREEN_DMI) += touchscreen_dmi.o obj-$(CONFIG_WIRELESS_HOTKEY) += wireless-hotkey.o diff --git a/drivers/platform/x86/dell/alienware-wmi-wmax.c b/drivers/platform/x86/dell/alienware-wmi-wmax.c index 20ec122a9fe0..b58cf74197f0 100644 --- a/drivers/platform/x86/dell/alienware-wmi-wmax.c +++ b/drivers/platform/x86/dell/alienware-wmi-wmax.c @@ -89,6 +89,14 @@ static struct awcc_quirks generic_quirks = { static struct awcc_quirks empty_quirks; static const struct dmi_system_id awcc_dmi_table[] __initconst = { + { + .ident = "Alienware Area-51m", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Alienware"), + DMI_MATCH(DMI_PRODUCT_NAME, "Alienware Area-51m"), + }, + .driver_data = &generic_quirks, + }, { .ident = "Alienware Area-51m R2", .matches = { @@ -97,6 +105,14 @@ static const struct dmi_system_id awcc_dmi_table[] __initconst = { }, .driver_data = &generic_quirks, }, + { + .ident = "Alienware m15 R5", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Alienware"), + DMI_MATCH(DMI_PRODUCT_NAME, "Alienware m15 R5"), + }, + .driver_data = &generic_quirks, + }, { .ident = "Alienware m15 R7", .matches = { @@ -233,6 +249,7 @@ static const struct dmi_system_id awcc_dmi_table[] __initconst = { }, .driver_data = &g_series_quirks, }, + {} }; enum AWCC_GET_FAN_SENSORS_OPERATIONS { diff --git a/drivers/platform/x86/dell/dell-lis3lv02d.c b/drivers/platform/x86/dell/dell-lis3lv02d.c index 0791118dd6b7..732de5f556f8 100644 --- a/drivers/platform/x86/dell/dell-lis3lv02d.c +++ b/drivers/platform/x86/dell/dell-lis3lv02d.c @@ -49,6 +49,7 @@ static const struct dmi_system_id lis3lv02d_devices[] __initconst = { DELL_LIS3LV02D_DMI_ENTRY("Latitude E6330", 0x29), DELL_LIS3LV02D_DMI_ENTRY("Latitude E6430", 0x29), DELL_LIS3LV02D_DMI_ENTRY("Precision 3540", 0x29), + DELL_LIS3LV02D_DMI_ENTRY("Precision 3551", 0x29), DELL_LIS3LV02D_DMI_ENTRY("Precision M6800", 0x29), DELL_LIS3LV02D_DMI_ENTRY("Vostro V131", 0x1d), DELL_LIS3LV02D_DMI_ENTRY("Vostro 5568", 0x29), diff --git a/drivers/platform/x86/dell/dell-wmi-ddv.c b/drivers/platform/x86/dell/dell-wmi-ddv.c index 67f3d7158403..62e3d060f038 100644 --- a/drivers/platform/x86/dell/dell-wmi-ddv.c +++ b/drivers/platform/x86/dell/dell-wmi-ddv.c @@ -689,9 +689,13 @@ static int dell_wmi_ddv_battery_translate(struct dell_wmi_ddv_data *data, dev_dbg(&data->wdev->dev, "Translation cache miss\n"); - /* Perform a translation between a ACPI battery and a battery index */ - - ret = 
power_supply_get_property(battery, POWER_SUPPLY_PROP_SERIAL_NUMBER, &val); + /* + * Perform a translation between a ACPI battery and a battery index. + * We have to use power_supply_get_property_direct() here because this + * function will also get called from the callbacks of the power supply + * extension. + */ + ret = power_supply_get_property_direct(battery, POWER_SUPPLY_PROP_SERIAL_NUMBER, &val); if (ret < 0) return ret; diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index b5e4da6a6779..edb9d2fb02ec 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c @@ -1669,7 +1669,7 @@ static int ideapad_kbd_bl_init(struct ideapad_private *priv) priv->kbd_bl.led.name = "platform::" LED_FUNCTION_KBD_BACKLIGHT; priv->kbd_bl.led.brightness_get = ideapad_kbd_bl_led_cdev_brightness_get; priv->kbd_bl.led.brightness_set_blocking = ideapad_kbd_bl_led_cdev_brightness_set; - priv->kbd_bl.led.flags = LED_BRIGHT_HW_CHANGED; + priv->kbd_bl.led.flags = LED_BRIGHT_HW_CHANGED | LED_RETAIN_AT_SHUTDOWN; err = led_classdev_register(&priv->platform_device->dev, &priv->kbd_bl.led); if (err) @@ -1728,7 +1728,7 @@ static int ideapad_fn_lock_led_init(struct ideapad_private *priv) priv->fn_lock.led.name = "platform::" LED_FUNCTION_FNLOCK; priv->fn_lock.led.brightness_get = ideapad_fn_lock_led_cdev_get; priv->fn_lock.led.brightness_set_blocking = ideapad_fn_lock_led_cdev_set; - priv->fn_lock.led.flags = LED_BRIGHT_HW_CHANGED; + priv->fn_lock.led.flags = LED_BRIGHT_HW_CHANGED | LED_RETAIN_AT_SHUTDOWN; err = led_classdev_register(&priv->platform_device->dev, &priv->fn_lock.led); if (err) diff --git a/drivers/platform/x86/lenovo-wmi-hotkey-utilities.c b/drivers/platform/x86/lenovo-wmi-hotkey-utilities.c index 89153afd7015..7b9bad1978ff 100644 --- a/drivers/platform/x86/lenovo-wmi-hotkey-utilities.c +++ b/drivers/platform/x86/lenovo-wmi-hotkey-utilities.c @@ -122,26 +122,35 @@ static int lenovo_super_hotkey_wmi_led_init(enum mute_led_type led_type, struct return -EIO; union acpi_object *obj __free(kfree) = output.pointer; - if (obj && obj->type == ACPI_TYPE_INTEGER) - led_version = obj->integer.value; - else + if (!obj || obj->type != ACPI_TYPE_INTEGER) return -EIO; - wpriv->cdev[led_type].max_brightness = LED_ON; - wpriv->cdev[led_type].flags = LED_CORE_SUSPENDRESUME; + led_version = obj->integer.value; + + /* + * Output parameters define: 0 means mute LED is not supported, Non-zero means + * mute LED can be supported. 
+ */ + if (led_version == 0) + return 0; + switch (led_type) { case MIC_MUTE: - if (led_version != WMI_LUD_SUPPORT_MICMUTE_LED_VER) - return -EIO; + if (led_version != WMI_LUD_SUPPORT_MICMUTE_LED_VER) { + pr_warn("The MIC_MUTE LED of this device isn't supported.\n"); + return 0; + } wpriv->cdev[led_type].name = "platform::micmute"; wpriv->cdev[led_type].brightness_set_blocking = &lsh_wmi_micmute_led_set; wpriv->cdev[led_type].default_trigger = "audio-micmute"; break; case AUDIO_MUTE: - if (led_version != WMI_LUD_SUPPORT_AUDIOMUTE_LED_VER) - return -EIO; + if (led_version != WMI_LUD_SUPPORT_AUDIOMUTE_LED_VER) { + pr_warn("The AUDIO_MUTE LED of this device isn't supported.\n"); + return 0; + } wpriv->cdev[led_type].name = "platform::mute"; wpriv->cdev[led_type].brightness_set_blocking = &lsh_wmi_audiomute_led_set; @@ -152,6 +161,9 @@ static int lenovo_super_hotkey_wmi_led_init(enum mute_led_type led_type, struct return -EINVAL; } + wpriv->cdev[led_type].max_brightness = LED_ON; + wpriv->cdev[led_type].flags = LED_CORE_SUSPENDRESUME; + err = devm_led_classdev_register(dev, &wpriv->cdev[led_type]); if (err < 0) { dev_err(dev, "Could not register mute LED %d : %d\n", led_type, err); diff --git a/drivers/pmdomain/governor.c b/drivers/pmdomain/governor.c index c1e148657c87..39359811a930 100644 --- a/drivers/pmdomain/governor.c +++ b/drivers/pmdomain/governor.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -349,6 +350,8 @@ static bool cpu_power_down_ok(struct dev_pm_domain *pd) struct cpuidle_device *dev; ktime_t domain_wakeup, next_hrtimer; ktime_t now = ktime_get(); + struct device *cpu_dev; + s64 cpu_constraint, global_constraint; s64 idle_duration_ns; int cpu, i; @@ -359,6 +362,7 @@ static bool cpu_power_down_ok(struct dev_pm_domain *pd) if (!(genpd->flags & GENPD_FLAG_CPU_DOMAIN)) return true; + global_constraint = cpu_latency_qos_limit(); /* * Find the next wakeup for any of the online CPUs within the PM domain * and its subdomains. Note, we only need the genpd->cpus, as it already @@ -372,8 +376,16 @@ static bool cpu_power_down_ok(struct dev_pm_domain *pd) if (ktime_before(next_hrtimer, domain_wakeup)) domain_wakeup = next_hrtimer; } + + cpu_dev = get_cpu_device(cpu); + if (cpu_dev) { + cpu_constraint = dev_pm_qos_raw_resume_latency(cpu_dev); + if (cpu_constraint < global_constraint) + global_constraint = cpu_constraint; + } } + global_constraint *= NSEC_PER_USEC; /* The minimum idle duration is from now - until the next wakeup. 
*/ idle_duration_ns = ktime_to_ns(ktime_sub(domain_wakeup, now)); if (idle_duration_ns <= 0) @@ -389,8 +401,10 @@ static bool cpu_power_down_ok(struct dev_pm_domain *pd) */ i = genpd->state_idx; do { - if (idle_duration_ns >= (genpd->states[i].residency_ns + - genpd->states[i].power_off_latency_ns)) { + if ((idle_duration_ns >= (genpd->states[i].residency_ns + + genpd->states[i].power_off_latency_ns)) && + (global_constraint >= (genpd->states[i].power_on_latency_ns + + genpd->states[i].power_off_latency_ns))) { genpd->state_idx = i; genpd->gd->last_enter = now; genpd->gd->reflect_residency = true; diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c index 33a5bfce4604..cfb0e3e0d4aa 100644 --- a/drivers/power/supply/power_supply_core.c +++ b/drivers/power/supply/power_supply_core.c @@ -1235,9 +1235,8 @@ bool power_supply_has_property(struct power_supply *psy, return false; } -int power_supply_get_property(struct power_supply *psy, - enum power_supply_property psp, - union power_supply_propval *val) +static int __power_supply_get_property(struct power_supply *psy, enum power_supply_property psp, + union power_supply_propval *val, bool use_extensions) { struct power_supply_ext_registration *reg; @@ -1247,10 +1246,14 @@ int power_supply_get_property(struct power_supply *psy, return -ENODEV; } - scoped_guard(rwsem_read, &psy->extensions_sem) { - power_supply_for_each_extension(reg, psy) { - if (power_supply_ext_has_property(reg->ext, psp)) + if (use_extensions) { + scoped_guard(rwsem_read, &psy->extensions_sem) { + power_supply_for_each_extension(reg, psy) { + if (!power_supply_ext_has_property(reg->ext, psp)) + continue; + return reg->ext->get_property(psy, reg->ext, reg->data, psp, val); + } } } @@ -1261,20 +1264,49 @@ int power_supply_get_property(struct power_supply *psy, else return -EINVAL; } + +int power_supply_get_property(struct power_supply *psy, enum power_supply_property psp, + union power_supply_propval *val) +{ + return __power_supply_get_property(psy, psp, val, true); +} EXPORT_SYMBOL_GPL(power_supply_get_property); -int power_supply_set_property(struct power_supply *psy, - enum power_supply_property psp, - const union power_supply_propval *val) +/** + * power_supply_get_property_direct - Read a power supply property without checking for extensions + * @psy: The power supply + * @psp: The power supply property to read + * @val: The resulting value of the power supply property + * + * Read a power supply property without taking into account any power supply extensions registered + * on the given power supply. This is mostly useful for power supply extensions that want to access + * their own power supply as using power_supply_get_property() directly will result in a potential + * deadlock. + * + * Return: 0 on success or negative error code on failure. 
+ */ +int power_supply_get_property_direct(struct power_supply *psy, enum power_supply_property psp, + union power_supply_propval *val) +{ + return __power_supply_get_property(psy, psp, val, false); +} +EXPORT_SYMBOL_GPL(power_supply_get_property_direct); + + +static int __power_supply_set_property(struct power_supply *psy, enum power_supply_property psp, + const union power_supply_propval *val, bool use_extensions) { struct power_supply_ext_registration *reg; if (atomic_read(&psy->use_cnt) <= 0) return -ENODEV; - scoped_guard(rwsem_read, &psy->extensions_sem) { - power_supply_for_each_extension(reg, psy) { - if (power_supply_ext_has_property(reg->ext, psp)) { + if (use_extensions) { + scoped_guard(rwsem_read, &psy->extensions_sem) { + power_supply_for_each_extension(reg, psy) { + if (!power_supply_ext_has_property(reg->ext, psp)) + continue; + if (reg->ext->set_property) return reg->ext->set_property(psy, reg->ext, reg->data, psp, val); @@ -1289,8 +1321,34 @@ int power_supply_set_property(struct power_supply *psy, return psy->desc->set_property(psy, psp, val); } + +int power_supply_set_property(struct power_supply *psy, enum power_supply_property psp, + const union power_supply_propval *val) +{ + return __power_supply_set_property(psy, psp, val, true); +} EXPORT_SYMBOL_GPL(power_supply_set_property); +/** + * power_supply_set_property_direct - Write a power supply property without checking for extensions + * @psy: The power supply + * @psp: The power supply property to write + * @val: The value to write to the power supply property + * + * Write a power supply property without taking into account any power supply extensions registered + * on the given power supply. This is mostly useful for power supply extensions that want to access + * their own power supply as using power_supply_set_property() directly will result in a potential + * deadlock. + * + * Return: 0 on success or negative error code on failure. 
+ */ +int power_supply_set_property_direct(struct power_supply *psy, enum power_supply_property psp, + const union power_supply_propval *val) +{ + return __power_supply_set_property(psy, psp, val, false); +} +EXPORT_SYMBOL_GPL(power_supply_set_property_direct); + int power_supply_property_is_writeable(struct power_supply *psy, enum power_supply_property psp) { diff --git a/drivers/power/supply/test_power.c b/drivers/power/supply/test_power.c index 5bfdfcf6013b..2c0e9ad820c0 100644 --- a/drivers/power/supply/test_power.c +++ b/drivers/power/supply/test_power.c @@ -259,6 +259,7 @@ static const struct power_supply_config test_power_configs[] = { static int test_power_battery_extmanufacture_year = 1234; static int test_power_battery_exttemp_max = 1000; static const enum power_supply_property test_power_battery_extprops[] = { + POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW, POWER_SUPPLY_PROP_MANUFACTURE_YEAR, POWER_SUPPLY_PROP_TEMP_MAX, }; @@ -270,6 +271,9 @@ static int test_power_battery_extget_property(struct power_supply *psy, union power_supply_propval *val) { switch (psp) { + case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW: + return power_supply_get_property_direct(psy, POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG, + val); case POWER_SUPPLY_PROP_MANUFACTURE_YEAR: val->intval = test_power_battery_extmanufacture_year; break; diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c index b7f15f303ea2..967dc4f9eea8 100644 --- a/drivers/s390/net/ism_drv.c +++ b/drivers/s390/net/ism_drv.c @@ -130,6 +130,7 @@ static int ism_cmd(struct ism_dev *ism, void *cmd) struct ism_req_hdr *req = cmd; struct ism_resp_hdr *resp = cmd; + spin_lock(&ism->cmd_lock); __ism_write_cmd(ism, req + 1, sizeof(*req), req->len - sizeof(*req)); __ism_write_cmd(ism, req, 0, sizeof(*req)); @@ -143,6 +144,7 @@ static int ism_cmd(struct ism_dev *ism, void *cmd) } __ism_read_cmd(ism, resp + 1, sizeof(*resp), resp->len - sizeof(*resp)); out: + spin_unlock(&ism->cmd_lock); return resp->ret; } @@ -606,6 +608,7 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id) return -ENOMEM; spin_lock_init(&ism->lock); + spin_lock_init(&ism->cmd_lock); dev_set_drvdata(&pdev->dev, ism); ism->pdev = pdev; ism->dev.parent = &pdev->dev; diff --git a/drivers/soundwire/amd_manager.c b/drivers/soundwire/amd_manager.c index a12c68b93b1c..7a671a786197 100644 --- a/drivers/soundwire/amd_manager.c +++ b/drivers/soundwire/amd_manager.c @@ -238,7 +238,7 @@ static u64 amd_sdw_send_cmd_get_resp(struct amd_sdw_manager *amd_manager, u32 lo if (sts & AMD_SDW_IMM_RES_VALID) { dev_err(amd_manager->dev, "SDW%x manager is in bad state\n", amd_manager->instance); - writel(0x00, amd_manager->mmio + ACP_SW_IMM_CMD_STS); + writel(AMD_SDW_IMM_RES_VALID, amd_manager->mmio + ACP_SW_IMM_CMD_STS); } writel(upper_data, amd_manager->mmio + ACP_SW_IMM_CMD_UPPER_WORD); writel(lower_data, amd_manager->mmio + ACP_SW_IMM_CMD_LOWER_QWORD); @@ -1209,6 +1209,7 @@ static int __maybe_unused amd_suspend(struct device *dev) } if (amd_manager->power_mode_mask & AMD_SDW_CLK_STOP_MODE) { + cancel_work_sync(&amd_manager->amd_sdw_work); amd_sdw_wake_enable(amd_manager, false); if (amd_manager->acp_rev >= ACP70_PCI_REV_ID) { ret = amd_sdw_host_wake_enable(amd_manager, false); @@ -1219,6 +1220,7 @@ static int __maybe_unused amd_suspend(struct device *dev) if (ret) return ret; } else if (amd_manager->power_mode_mask & AMD_SDW_POWER_OFF_MODE) { + cancel_work_sync(&amd_manager->amd_sdw_work); amd_sdw_wake_enable(amd_manager, false); if (amd_manager->acp_rev >= ACP70_PCI_REV_ID) { ret = 
amd_sdw_host_wake_enable(amd_manager, false); diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c index 295a46dc2be7..0f45e3404756 100644 --- a/drivers/soundwire/qcom.c +++ b/drivers/soundwire/qcom.c @@ -156,7 +156,6 @@ struct qcom_swrm_port_config { u8 word_length; u8 blk_group_count; u8 lane_control; - u8 ch_mask; }; /* @@ -1049,13 +1048,9 @@ static int qcom_swrm_port_enable(struct sdw_bus *bus, { u32 reg = SWRM_DP_PORT_CTRL_BANK(enable_ch->port_num, bank); struct qcom_swrm_ctrl *ctrl = to_qcom_sdw(bus); - struct qcom_swrm_port_config *pcfg; u32 val; - pcfg = &ctrl->pconfig[enable_ch->port_num]; ctrl->reg_read(ctrl, reg, &val); - if (pcfg->ch_mask != SWR_INVALID_PARAM && pcfg->ch_mask != 0) - enable_ch->ch_mask = pcfg->ch_mask; if (enable_ch->enable) val |= (enable_ch->ch_mask << SWRM_DP_PORT_CTRL_EN_CHAN_SHFT); @@ -1275,26 +1270,6 @@ static void *qcom_swrm_get_sdw_stream(struct snd_soc_dai *dai, int direction) return ctrl->sruntime[dai->id]; } -static int qcom_swrm_set_channel_map(struct snd_soc_dai *dai, - unsigned int tx_num, const unsigned int *tx_slot, - unsigned int rx_num, const unsigned int *rx_slot) -{ - struct qcom_swrm_ctrl *ctrl = dev_get_drvdata(dai->dev); - int i; - - if (tx_slot) { - for (i = 0; i < tx_num; i++) - ctrl->pconfig[i].ch_mask = tx_slot[i]; - } - - if (rx_slot) { - for (i = 0; i < rx_num; i++) - ctrl->pconfig[i].ch_mask = rx_slot[i]; - } - - return 0; -} - static int qcom_swrm_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { @@ -1331,7 +1306,6 @@ static const struct snd_soc_dai_ops qcom_swrm_pdm_dai_ops = { .shutdown = qcom_swrm_shutdown, .set_stream = qcom_swrm_set_sdw_stream, .get_stream = qcom_swrm_get_sdw_stream, - .set_channel_map = qcom_swrm_set_channel_map, }; static const struct snd_soc_component_driver qcom_swrm_dai_component = { diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 1bc0fdbb1bd7..0ffa3f9f2870 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -4138,10 +4138,13 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message) xfer->tx_nbits != SPI_NBITS_OCTAL) return -EINVAL; if ((xfer->tx_nbits == SPI_NBITS_DUAL) && - !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD))) + !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL))) return -EINVAL; if ((xfer->tx_nbits == SPI_NBITS_QUAD) && - !(spi->mode & SPI_TX_QUAD)) + !(spi->mode & (SPI_TX_QUAD | SPI_TX_OCTAL))) + return -EINVAL; + if ((xfer->tx_nbits == SPI_NBITS_OCTAL) && + !(spi->mode & SPI_TX_OCTAL)) return -EINVAL; } /* Check transfer rx_nbits */ @@ -4154,10 +4157,13 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message) xfer->rx_nbits != SPI_NBITS_OCTAL) return -EINVAL; if ((xfer->rx_nbits == SPI_NBITS_DUAL) && - !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD))) + !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL))) return -EINVAL; if ((xfer->rx_nbits == SPI_NBITS_QUAD) && - !(spi->mode & SPI_RX_QUAD)) + !(spi->mode & (SPI_RX_QUAD | SPI_RX_OCTAL))) + return -EINVAL; + if ((xfer->rx_nbits == SPI_NBITS_OCTAL) && + !(spi->mode & SPI_RX_OCTAL)) return -EINVAL; } diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c index 5dbf8d53db09..721b15b7e13b 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c @@ -97,6 +97,13 @@ struct vchiq_arm_state { * tracked separately with the state. 
*/ int peer_use_count; + + /* + * Flag to indicate that the first vchiq connect has made it through. + * This means that both sides should be fully ready, and we should + * be able to suspend after this point. + */ + int first_connect; }; static int @@ -273,6 +280,29 @@ static int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state return 0; } +int +vchiq_platform_init_state(struct vchiq_state *state) +{ + struct vchiq_arm_state *platform_state; + + platform_state = devm_kzalloc(state->dev, sizeof(*platform_state), GFP_KERNEL); + if (!platform_state) + return -ENOMEM; + + rwlock_init(&platform_state->susp_res_lock); + + init_completion(&platform_state->ka_evt); + atomic_set(&platform_state->ka_use_count, 0); + atomic_set(&platform_state->ka_use_ack_count, 0); + atomic_set(&platform_state->ka_release_count, 0); + + platform_state->state = state; + + state->platform_state = (struct opaque_platform_state *)platform_state; + + return 0; +} + static struct vchiq_arm_state *vchiq_platform_get_arm_state(struct vchiq_state *state) { return (struct vchiq_arm_state *)state->platform_state; @@ -363,8 +393,7 @@ int vchiq_shutdown(struct vchiq_instance *instance) struct vchiq_state *state = instance->state; int ret = 0; - if (mutex_lock_killable(&state->mutex)) - return -EAGAIN; + mutex_lock(&state->mutex); /* Remove all services */ vchiq_shutdown_internal(state, instance); @@ -981,39 +1010,6 @@ exit: return 0; } -int -vchiq_platform_init_state(struct vchiq_state *state) -{ - struct vchiq_arm_state *platform_state; - char threadname[16]; - - platform_state = devm_kzalloc(state->dev, sizeof(*platform_state), GFP_KERNEL); - if (!platform_state) - return -ENOMEM; - - snprintf(threadname, sizeof(threadname), "vchiq-keep/%d", - state->id); - platform_state->ka_thread = kthread_create(&vchiq_keepalive_thread_func, - (void *)state, threadname); - if (IS_ERR(platform_state->ka_thread)) { - dev_err(state->dev, "couldn't create thread %s\n", threadname); - return PTR_ERR(platform_state->ka_thread); - } - - rwlock_init(&platform_state->susp_res_lock); - - init_completion(&platform_state->ka_evt); - atomic_set(&platform_state->ka_use_count, 0); - atomic_set(&platform_state->ka_use_ack_count, 0); - atomic_set(&platform_state->ka_release_count, 0); - - platform_state->state = state; - - state->platform_state = (struct opaque_platform_state *)platform_state; - - return 0; -} - int vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service, enum USE_TYPE_E use_type) @@ -1329,19 +1325,37 @@ out: return ret; } -void vchiq_platform_connected(struct vchiq_state *state) -{ - struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state); - - wake_up_process(arm_state->ka_thread); -} - void vchiq_platform_conn_state_changed(struct vchiq_state *state, enum vchiq_connstate oldstate, enum vchiq_connstate newstate) { + struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state); + char threadname[16]; + dev_dbg(state->dev, "suspend: %d: %s->%s\n", state->id, get_conn_state_name(oldstate), get_conn_state_name(newstate)); + if (state->conn_state != VCHIQ_CONNSTATE_CONNECTED) + return; + + write_lock_bh(&arm_state->susp_res_lock); + if (arm_state->first_connect) { + write_unlock_bh(&arm_state->susp_res_lock); + return; + } + + arm_state->first_connect = 1; + write_unlock_bh(&arm_state->susp_res_lock); + snprintf(threadname, sizeof(threadname), "vchiq-keep/%d", + state->id); + arm_state->ka_thread = kthread_create(&vchiq_keepalive_thread_func, + (void *)state, + threadname); + 
if (IS_ERR(arm_state->ka_thread)) { + dev_err(state->dev, "suspend: Couldn't create thread %s\n", + threadname); + } else { + wake_up_process(arm_state->ka_thread); + } } static const struct of_device_id vchiq_of_match[] = { diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c index e7b0c800a205..e2cac0898b8f 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c @@ -3343,7 +3343,6 @@ vchiq_connect_internal(struct vchiq_state *state, struct vchiq_instance *instanc return -EAGAIN; vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED); - vchiq_platform_connected(state); complete(&state->connect); } diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h index 3b5c0618e567..9b4e766990a4 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h @@ -575,8 +575,6 @@ int vchiq_send_remote_use(struct vchiq_state *state); int vchiq_send_remote_use_active(struct vchiq_state *state); -void vchiq_platform_connected(struct vchiq_state *state); - void vchiq_platform_conn_state_changed(struct vchiq_state *state, enum vchiq_connstate oldstate, enum vchiq_connstate newstate); diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c index 28febb95f8fa..b6c58a7e7b6a 100644 --- a/drivers/thunderbolt/switch.c +++ b/drivers/thunderbolt/switch.c @@ -1450,7 +1450,7 @@ int tb_dp_port_set_hops(struct tb_port *port, unsigned int video, return ret; data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK; - data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK; + data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK; data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK; data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) & @@ -3437,7 +3437,7 @@ void tb_sw_set_unplugged(struct tb_switch *sw) } } -static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags) +static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags, bool runtime) { if (flags) tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags); @@ -3445,7 +3445,7 @@ static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags) tb_sw_dbg(sw, "disabling wakeup\n"); if (tb_switch_is_usb4(sw)) - return usb4_switch_set_wake(sw, flags); + return usb4_switch_set_wake(sw, flags, runtime); return tb_lc_set_wake(sw, flags); } @@ -3521,7 +3521,7 @@ int tb_switch_resume(struct tb_switch *sw, bool runtime) tb_switch_check_wakes(sw); /* Disable wakes */ - tb_switch_set_wake(sw, 0); + tb_switch_set_wake(sw, 0, true); err = tb_switch_tmu_init(sw); if (err) @@ -3603,7 +3603,7 @@ void tb_switch_suspend(struct tb_switch *sw, bool runtime) flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE; } - tb_switch_set_wake(sw, flags); + tb_switch_set_wake(sw, flags, runtime); if (tb_switch_is_usb4(sw)) usb4_switch_set_sleep(sw); diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h index 87afd5a7c504..f503bad86413 100644 --- a/drivers/thunderbolt/tb.h +++ b/drivers/thunderbolt/tb.h @@ -1317,7 +1317,7 @@ int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid); int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf, size_t size); bool usb4_switch_lane_bonding_possible(struct tb_switch *sw); -int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags); +int usb4_switch_set_wake(struct tb_switch *sw, unsigned 
int flags, bool runtime); int usb4_switch_set_sleep(struct tb_switch *sw); int usb4_switch_nvm_sector_size(struct tb_switch *sw); int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf, diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c index fce3c0f2354a..fdae76c8f728 100644 --- a/drivers/thunderbolt/usb4.c +++ b/drivers/thunderbolt/usb4.c @@ -403,12 +403,12 @@ bool usb4_switch_lane_bonding_possible(struct tb_switch *sw) * usb4_switch_set_wake() - Enabled/disable wake * @sw: USB4 router * @flags: Wakeup flags (%0 to disable) + * @runtime: Wake is being programmed during system runtime * * Enables/disables router to wake up from sleep. */ -int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags) +int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags, bool runtime) { - struct usb4_port *usb4; struct tb_port *port; u64 route = tb_route(sw); u32 val; @@ -438,13 +438,11 @@ int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags) val |= PORT_CS_19_WOU4; } else { bool configured = val & PORT_CS_19_PC; - usb4 = port->usb4; + bool wakeup = runtime || device_may_wakeup(&port->usb4->dev); - if (((flags & TB_WAKE_ON_CONNECT) && - device_may_wakeup(&usb4->dev)) && !configured) + if ((flags & TB_WAKE_ON_CONNECT) && wakeup && !configured) val |= PORT_CS_19_WOC; - if (((flags & TB_WAKE_ON_DISCONNECT) && - device_may_wakeup(&usb4->dev)) && configured) + if ((flags & TB_WAKE_ON_DISCONNECT) && wakeup && configured) val |= PORT_CS_19_WOD; if ((flags & TB_WAKE_ON_USB4) && configured) val |= PORT_CS_19_WOU4; diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c index 508e8c6f01d4..884fefbfd5a1 100644 --- a/drivers/tty/serial/pch_uart.c +++ b/drivers/tty/serial/pch_uart.c @@ -954,7 +954,7 @@ static unsigned int dma_handle_tx(struct eg20t_port *priv) __func__); return 0; } - dma_sync_sg_for_device(port->dev, priv->sg_tx_p, nent, DMA_TO_DEVICE); + dma_sync_sg_for_device(port->dev, priv->sg_tx_p, num, DMA_TO_DEVICE); priv->desc_tx = desc; desc->callback = pch_dma_tx_complete; desc->callback_param = priv; diff --git a/drivers/tty/serial/serial_base_bus.c b/drivers/tty/serial/serial_base_bus.c index cb3b127b06b6..22749ab0428a 100644 --- a/drivers/tty/serial/serial_base_bus.c +++ b/drivers/tty/serial/serial_base_bus.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -93,6 +94,7 @@ static void serial_base_ctrl_release(struct device *dev) { struct serial_ctrl_device *ctrl_dev = to_serial_base_ctrl_device(dev); + of_node_put(dev->of_node); kfree(ctrl_dev); } @@ -140,6 +142,7 @@ static void serial_base_port_release(struct device *dev) { struct serial_port_device *port_dev = to_serial_base_port_device(dev); + of_node_put(dev->of_node); kfree(port_dev); } diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 3e1215f7a9a0..256fe8c86828 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -5751,6 +5751,7 @@ static void port_event(struct usb_hub *hub, int port1) struct usb_device *hdev = hub->hdev; u16 portstatus, portchange; int i = 0; + int err; connect_change = test_bit(port1, hub->change_bits); clear_bit(port1, hub->event_bits); @@ -5847,8 +5848,11 @@ static void port_event(struct usb_hub *hub, int port1) } else if (!udev || !(portstatus & USB_PORT_STAT_CONNECTION) || udev->state == USB_STATE_NOTATTACHED) { dev_dbg(&port_dev->dev, "do warm reset, port only\n"); - if (hub_port_reset(hub, port1, NULL, - HUB_BH_RESET_TIME, true) < 0) + err = hub_port_reset(hub, port1, 
NULL, + HUB_BH_RESET_TIME, true); + if (!udev && err == -ENOTCONN) + connect_change = 0; + else if (err < 0) hub_port_disable(hub, port1, 1); } else { dev_dbg(&port_dev->dev, "do warm reset, full device\n"); diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index d5b622f78cf3..0637bfbc054e 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -5389,20 +5389,34 @@ int dwc2_gadget_enter_hibernation(struct dwc2_hsotg *hsotg) if (gusbcfg & GUSBCFG_ULPI_UTMI_SEL) { /* ULPI interface */ gpwrdn |= GPWRDN_ULPI_LATCH_EN_DURING_HIB_ENTRY; + dwc2_writel(hsotg, gpwrdn, GPWRDN); + udelay(10); + + /* Suspend the Phy Clock */ + pcgcctl = dwc2_readl(hsotg, PCGCTL); + pcgcctl |= PCGCTL_STOPPCLK; + dwc2_writel(hsotg, pcgcctl, PCGCTL); + udelay(10); + + gpwrdn = dwc2_readl(hsotg, GPWRDN); + gpwrdn |= GPWRDN_PMUACTV; + dwc2_writel(hsotg, gpwrdn, GPWRDN); + udelay(10); + } else { + /* UTMI+ Interface */ + dwc2_writel(hsotg, gpwrdn, GPWRDN); + udelay(10); + + gpwrdn = dwc2_readl(hsotg, GPWRDN); + gpwrdn |= GPWRDN_PMUACTV; + dwc2_writel(hsotg, gpwrdn, GPWRDN); + udelay(10); + + pcgcctl = dwc2_readl(hsotg, PCGCTL); + pcgcctl |= PCGCTL_STOPPCLK; + dwc2_writel(hsotg, pcgcctl, PCGCTL); + udelay(10); } - dwc2_writel(hsotg, gpwrdn, GPWRDN); - udelay(10); - - /* Suspend the Phy Clock */ - pcgcctl = dwc2_readl(hsotg, PCGCTL); - pcgcctl |= PCGCTL_STOPPCLK; - dwc2_writel(hsotg, pcgcctl, PCGCTL); - udelay(10); - - gpwrdn = dwc2_readl(hsotg, GPWRDN); - gpwrdn |= GPWRDN_PMUACTV; - dwc2_writel(hsotg, gpwrdn, GPWRDN); - udelay(10); /* Set flag to indicate that we are in hibernation */ hsotg->hibernated = 1; diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c index 7334de85ad10..ca7e1c02773a 100644 --- a/drivers/usb/dwc3/dwc3-qcom.c +++ b/drivers/usb/dwc3/dwc3-qcom.c @@ -680,12 +680,12 @@ static int dwc3_qcom_probe(struct platform_device *pdev) ret = reset_control_deassert(qcom->resets); if (ret) { dev_err(&pdev->dev, "failed to deassert resets, err=%d\n", ret); - goto reset_assert; + return ret; } ret = clk_bulk_prepare_enable(qcom->num_clocks, qcom->clks); if (ret < 0) - goto reset_assert; + return ret; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!r) { @@ -755,8 +755,6 @@ remove_core: dwc3_core_remove(&qcom->dwc); clk_disable: clk_bulk_disable_unprepare(qcom->num_clocks, qcom->clks); -reset_assert: - reset_control_assert(qcom->resets); return ret; } @@ -771,7 +769,6 @@ static void dwc3_qcom_remove(struct platform_device *pdev) clk_bulk_disable_unprepare(qcom->num_clocks, qcom->clks); dwc3_qcom_interconnect_exit(qcom); - reset_control_assert(qcom->resets); } static int dwc3_qcom_pm_suspend(struct device *dev) diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index fba2a56dae97..f94ea196ce54 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c @@ -1065,6 +1065,8 @@ static ssize_t webusb_landingPage_store(struct config_item *item, const char *pa unsigned int bytes_to_strip = 0; int l = len; + if (!len) + return len; if (page[l - 1] == '\n') { --l; ++bytes_to_strip; @@ -1188,6 +1190,8 @@ static ssize_t os_desc_qw_sign_store(struct config_item *item, const char *page, struct gadget_info *gi = os_desc_item_to_gadget_info(item); int res, l; + if (!len) + return len; l = min_t(int, len, OS_STRING_QW_SIGN_LEN >> 1); if (page[l - 1] == '\n') --l; diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index 6869c58367f2..caf4d4cd4b75 100644 --- a/drivers/usb/musb/musb_gadget.c +++ 
b/drivers/usb/musb/musb_gadget.c @@ -1913,6 +1913,7 @@ static int musb_gadget_stop(struct usb_gadget *g) * gadget driver here and have everything work; * that currently misbehaves. */ + usb_gadget_set_state(g, USB_STATE_NOTATTACHED); /* Force check of devctl register for PM runtime */ pm_runtime_mark_last_busy(musb->controller); @@ -2019,6 +2020,7 @@ void musb_g_disconnect(struct musb *musb) case OTG_STATE_B_PERIPHERAL: case OTG_STATE_B_IDLE: musb_set_state(musb, OTG_STATE_B_IDLE); + usb_gadget_set_state(&musb->g, USB_STATE_NOTATTACHED); break; case OTG_STATE_B_SRP_INIT: break; diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 6ac7a0a5cf07..abfcfca3f971 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -803,6 +803,8 @@ static const struct usb_device_id id_table_combined[] = { .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk }, { USB_DEVICE(FTDI_VID, FTDI_NDI_AURORA_SCU_PID), .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk }, + { USB_DEVICE(FTDI_NDI_VID, FTDI_NDI_EMGUIDE_GEMINI_PID), + .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk }, { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) }, { USB_DEVICE(NOVITUS_VID, NOVITUS_BONO_E_PID) }, { USB_DEVICE(FTDI_VID, RTSYSTEMS_USB_VX8_PID) }, diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 9acb6f837327..4cc1fae8acb9 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -204,6 +204,9 @@ #define FTDI_NDI_FUTURE_3_PID 0xDA73 /* NDI future device #3 */ #define FTDI_NDI_AURORA_SCU_PID 0xDA74 /* NDI Aurora SCU */ +#define FTDI_NDI_VID 0x23F2 +#define FTDI_NDI_EMGUIDE_GEMINI_PID 0x0003 /* NDI Emguide Gemini */ + /* * ChamSys Limited (www.chamsys.co.uk) USB wing/interface product IDs */ diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 27879cc57536..147ca50c94be 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -1415,6 +1415,9 @@ static const struct usb_device_id option_ids[] = { .driver_info = NCTRL(5) }, { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d0, 0xff, 0xff, 0x40) }, { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d0, 0xff, 0xff, 0x60) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10c7, 0xff, 0xff, 0x30), /* Telit FE910C04 (ECM) */ + .driver_info = NCTRL(4) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10c7, 0xff, 0xff, 0x40) }, { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d1, 0xff, 0xff, 0x30), /* Telit FN990B (MBIM) */ .driver_info = NCTRL(6) }, { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d1, 0xff, 0xff, 0x40) }, @@ -2343,6 +2346,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = RSVD(3) }, { USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe145, 0xff), /* Foxconn T99W651 RNDIS */ .driver_info = RSVD(5) | RSVD(6) }, + { USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe167, 0xff), /* Foxconn T99W640 MBIM */ + .driver_info = RSVD(3) }, { USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 (IOT version) */ .driver_info = RSVD(4) | RSVD(5) | RSVD(6) }, { USB_DEVICE(0x1782, 0x4d10) }, /* Fibocom L610 (AT mode) */ diff --git a/fs/bcachefs/alloc_foreground.c b/fs/bcachefs/alloc_foreground.c index b375ad610acd..b58525ec7b4d 100644 --- a/fs/bcachefs/alloc_foreground.c +++ b/fs/bcachefs/alloc_foreground.c @@ -511,7 +511,8 @@ again: bch2_dev_usage_read_fast(ca, &req->usage); avail = dev_buckets_free(ca, req->usage, req->watermark); - if (req->usage.buckets[BCH_DATA_need_discard] > avail) + if 
(req->usage.buckets[BCH_DATA_need_discard] > + min(avail, ca->mi.nbuckets >> 7)) bch2_dev_do_discards(ca); if (req->usage.buckets[BCH_DATA_need_gc_gens] > avail) diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c index a4cc72986e36..590cd29f3e86 100644 --- a/fs/bcachefs/btree_io.c +++ b/fs/bcachefs/btree_io.c @@ -1295,9 +1295,6 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca, btree_bounce_free(c, btree_buf_bytes(b), used_mempool, sorted); - if (updated_range) - bch2_btree_node_drop_keys_outside_node(b); - i = &b->data->keys; for (k = i->start; k != vstruct_last(i);) { struct bkey tmp; @@ -1335,6 +1332,9 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca, btree_node_reset_sib_u64s(b); + if (updated_range) + bch2_btree_node_drop_keys_outside_node(b); + /* * XXX: * diff --git a/fs/bcachefs/dirent.c b/fs/bcachefs/dirent.c index a18d0f78704d..28875c5c86ad 100644 --- a/fs/bcachefs/dirent.c +++ b/fs/bcachefs/dirent.c @@ -13,6 +13,7 @@ #include +#ifdef CONFIG_UNICODE int bch2_casefold(struct btree_trans *trans, const struct bch_hash_info *info, const struct qstr *str, struct qstr *out_cf) { @@ -33,6 +34,7 @@ int bch2_casefold(struct btree_trans *trans, const struct bch_hash_info *info, *out_cf = (struct qstr) QSTR_INIT(buf, ret); return 0; } +#endif static unsigned bch2_dirent_name_bytes(struct bkey_s_c_dirent d) { @@ -254,6 +256,7 @@ int bch2_dirent_init_name(struct bch_fs *c, if (!bch2_fs_casefold_enabled(c)) return -EOPNOTSUPP; +#ifdef CONFIG_UNICODE memcpy(&dirent->v.d_cf_name_block.d_names[0], name->name, name->len); char *cf_out = &dirent->v.d_cf_name_block.d_names[name->len]; @@ -279,6 +282,7 @@ int bch2_dirent_init_name(struct bch_fs *c, dirent->v.d_cf_name_block.d_cf_name_len = cpu_to_le16(cf_len); EBUG_ON(bch2_dirent_get_casefold_name(dirent_i_to_s_c(dirent)).len != cf_len); +#endif } unsigned u64s = dirent_val_u64s(name->len, cf_len); diff --git a/fs/bcachefs/dirent.h b/fs/bcachefs/dirent.h index 1e17199cc5c7..0417608c18d5 100644 --- a/fs/bcachefs/dirent.h +++ b/fs/bcachefs/dirent.h @@ -23,8 +23,16 @@ struct bch_fs; struct bch_hash_info; struct bch_inode_info; +#ifdef CONFIG_UNICODE int bch2_casefold(struct btree_trans *, const struct bch_hash_info *, const struct qstr *, struct qstr *); +#else +static inline int bch2_casefold(struct btree_trans *trans, const struct bch_hash_info *info, + const struct qstr *str, struct qstr *out_cf) +{ + return -EOPNOTSUPP; +} +#endif static inline int bch2_maybe_casefold(struct btree_trans *trans, const struct bch_hash_info *info, diff --git a/fs/bcachefs/io_read.c b/fs/bcachefs/io_read.c index cd184b219a65..e0874ad9a6cf 100644 --- a/fs/bcachefs/io_read.c +++ b/fs/bcachefs/io_read.c @@ -166,6 +166,7 @@ static noinline void promote_free(struct bch_read_bio *rbio) BUG_ON(ret); async_object_list_del(c, promote, op->list_idx); + async_object_list_del(c, rbio, rbio->list_idx); bch2_data_update_exit(&op->write); @@ -456,6 +457,10 @@ static void bch2_rbio_done(struct bch_read_bio *rbio) if (rbio->start_time) bch2_time_stats_update(&rbio->c->times[BCH_TIME_data_read], rbio->start_time); +#ifdef CONFIG_BCACHEFS_ASYNC_OBJECT_LISTS + if (rbio->list_idx) + async_object_list_del(rbio->c, rbio, rbio->list_idx); +#endif bio_endio(&rbio->bio); } diff --git a/fs/bcachefs/journal_io.c b/fs/bcachefs/journal_io.c index dd3f3434c1b0..9e028dbcc3d0 100644 --- a/fs/bcachefs/journal_io.c +++ b/fs/bcachefs/journal_io.c @@ -1767,6 +1767,7 @@ static CLOSURE_CALLBACK(journal_write_done) closure_wake_up(&c->freelist_wait); 
bch2_reset_alloc_cursors(c); + do_discards = true; } j->seq_ondisk = seq; diff --git a/fs/bcachefs/movinggc.c b/fs/bcachefs/movinggc.c index 27e68d470ad0..5e6de91a8763 100644 --- a/fs/bcachefs/movinggc.c +++ b/fs/bcachefs/movinggc.c @@ -71,7 +71,7 @@ static int bch2_bucket_is_movable(struct btree_trans *trans, if (ret) return ret; - struct bch_dev *ca = bch2_dev_tryget(c, k.k->p.inode); + struct bch_dev *ca = bch2_dev_bucket_tryget(c, k.k->p); if (!ca) goto out; diff --git a/fs/cachefiles/io.c b/fs/cachefiles/io.c index c08e4a66ac07..3e0576d9db1d 100644 --- a/fs/cachefiles/io.c +++ b/fs/cachefiles/io.c @@ -347,8 +347,6 @@ int __cachefiles_write(struct cachefiles_object *object, default: ki->was_async = false; cachefiles_write_complete(&ki->iocb, ret); - if (ret > 0) - ret = 0; break; } diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c index d9bc67176128..a7ed86fa98bb 100644 --- a/fs/cachefiles/ondemand.c +++ b/fs/cachefiles/ondemand.c @@ -83,10 +83,8 @@ static ssize_t cachefiles_ondemand_fd_write_iter(struct kiocb *kiocb, trace_cachefiles_ondemand_fd_write(object, file_inode(file), pos, len); ret = __cachefiles_write(object, file, pos, iter, NULL, NULL); - if (!ret) { - ret = len; + if (ret > 0) kiocb->ki_pos += ret; - } out: fput(file); diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c index c900d98bf494..284d6dbba2ec 100644 --- a/fs/efivarfs/super.c +++ b/fs/efivarfs/super.c @@ -390,10 +390,16 @@ static int efivarfs_reconfigure(struct fs_context *fc) return 0; } +static void efivarfs_free(struct fs_context *fc) +{ + kfree(fc->s_fs_info); +} + static const struct fs_context_operations efivarfs_context_ops = { .get_tree = efivarfs_get_tree, .parse_param = efivarfs_parse_param, .reconfigure = efivarfs_reconfigure, + .free = efivarfs_free, }; static int efivarfs_check_missing(efi_char16_t *name16, efi_guid_t vendor, diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c index 3729391a18f3..fb4519158f3a 100644 --- a/fs/iomap/buffered-io.c +++ b/fs/iomap/buffered-io.c @@ -71,6 +71,9 @@ static void iomap_set_range_uptodate(struct folio *folio, size_t off, unsigned long flags; bool uptodate = true; + if (folio_test_uptodate(folio)) + return; + if (ifs) { spin_lock_irqsave(&ifs->state_lock, flags); uptodate = ifs_set_range_uptodate(folio, ifs, off, len); diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c index d5da9817df9b..33e6a620c103 100644 --- a/fs/isofs/inode.c +++ b/fs/isofs/inode.c @@ -1440,9 +1440,16 @@ static int isofs_read_inode(struct inode *inode, int relocated) inode->i_op = &page_symlink_inode_operations; inode_nohighmem(inode); inode->i_data.a_ops = &isofs_symlink_aops; - } else + } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || + S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { /* XXX - parse_rock_ridge_inode() had already set i_rdev. 
*/ init_special_inode(inode, inode->i_mode, inode->i_rdev); + } else { + printk(KERN_DEBUG "ISOFS: Invalid file type 0%04o for inode %lu.\n", + inode->i_mode, inode->i_ino); + ret = -EIO; + goto fail; + } ret = 0; out: diff --git a/fs/netfs/read_pgpriv2.c b/fs/netfs/read_pgpriv2.c index 5bbe906a551d..8097bc069c1d 100644 --- a/fs/netfs/read_pgpriv2.c +++ b/fs/netfs/read_pgpriv2.c @@ -110,6 +110,8 @@ static struct netfs_io_request *netfs_pgpriv2_begin_copy_to_cache( if (!creq->io_streams[1].avail) goto cancel_put; + __set_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &creq->flags); + trace_netfs_copy2cache(rreq, creq); trace_netfs_write(creq, netfs_write_trace_copy_to_cache); netfs_stat(&netfs_n_wh_copy_to_cache); rreq->copy_to_cache = creq; @@ -154,6 +156,9 @@ void netfs_pgpriv2_end_copy_to_cache(struct netfs_io_request *rreq) netfs_issue_write(creq, &creq->io_streams[1]); smp_wmb(); /* Write lists before ALL_QUEUED. */ set_bit(NETFS_RREQ_ALL_QUEUED, &creq->flags); + trace_netfs_rreq(rreq, netfs_rreq_trace_end_copy_to_cache); + if (list_empty_careful(&creq->io_streams[1].subrequests)) + netfs_wake_collector(creq); netfs_put_request(creq, netfs_rreq_trace_put_return); creq->copy_to_cache = NULL; diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c index c4cdaf5fa7ed..9fb73bafd41d 100644 --- a/fs/notify/dnotify/dnotify.c +++ b/fs/notify/dnotify/dnotify.c @@ -308,6 +308,10 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned int arg) goto out_err; } + error = file_f_owner_allocate(filp); + if (error) + goto out_err; + /* new fsnotify mark, we expect most fcntl calls to add a new mark */ new_dn_mark = kmem_cache_alloc(dnotify_mark_cache, GFP_KERNEL); if (!new_dn_mark) { @@ -315,10 +319,6 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned int arg) goto out_err; } - error = file_f_owner_allocate(filp); - if (error) - goto out_err; - /* set up the new_fsn_mark and new_dn_mark */ new_fsn_mark = &new_dn_mark->fsn_mark; fsnotify_init_mark(new_fsn_mark, dnotify_group); diff --git a/fs/pidfs.c b/fs/pidfs.c index 69919be1c9d8..4625e097e3a0 100644 --- a/fs/pidfs.c +++ b/fs/pidfs.c @@ -319,7 +319,7 @@ static long pidfd_info(struct file *file, unsigned int cmd, unsigned long arg) if (!c) return -ESRCH; - if (!(kinfo.mask & PIDFD_INFO_COREDUMP)) { + if ((kinfo.mask & PIDFD_INFO_COREDUMP) && !(kinfo.coredump_mask)) { task_lock(task); if (task->mm) kinfo.coredump_mask = pidfs_coredump_mask(task->mm->flags); diff --git a/fs/smb/client/dir.c b/fs/smb/client/dir.c index 1c6e5389c51f..5223edf6d11a 100644 --- a/fs/smb/client/dir.c +++ b/fs/smb/client/dir.c @@ -190,6 +190,7 @@ static int cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int disposition; struct TCP_Server_Info *server = tcon->ses->server; struct cifs_open_parms oparms; + struct cached_fid *parent_cfid = NULL; int rdwr_for_fscache = 0; __le32 lease_flags = 0; @@ -313,10 +314,10 @@ static int cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned if (!tcon->unix_ext && (mode & S_IWUGO) == 0) create_options |= CREATE_OPTION_READONLY; + retry_open: if (tcon->cfids && direntry->d_parent && server->dialect >= SMB30_PROT_ID) { - struct cached_fid *parent_cfid; - + parent_cfid = NULL; spin_lock(&tcon->cfids->cfid_list_lock); list_for_each_entry(parent_cfid, &tcon->cfids->entries, entry) { if (parent_cfid->dentry == direntry->d_parent) { @@ -327,6 +328,7 @@ retry_open: memcpy(fid->parent_lease_key, parent_cfid->fid.lease_key, SMB2_LEASE_KEY_SIZE); + parent_cfid->dirents.is_valid = false; } break; } diff 
--git a/fs/smb/client/file.c b/fs/smb/client/file.c index e9212da32f01..1421bde045c2 100644 --- a/fs/smb/client/file.c +++ b/fs/smb/client/file.c @@ -3088,7 +3088,8 @@ void cifs_oplock_break(struct work_struct *work) struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo, oplock_break); struct inode *inode = d_inode(cfile->dentry); - struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); + struct super_block *sb = inode->i_sb; + struct cifs_sb_info *cifs_sb = CIFS_SB(sb); struct cifsInodeInfo *cinode = CIFS_I(inode); struct cifs_tcon *tcon; struct TCP_Server_Info *server; @@ -3098,6 +3099,12 @@ void cifs_oplock_break(struct work_struct *work) __u64 persistent_fid, volatile_fid; __u16 net_fid; + /* + * Hold a reference to the superblock to prevent it and its inodes from + * being freed while we are accessing cinode. Otherwise, _cifsFileInfo_put() + * may release the last reference to the sb and trigger inode eviction. + */ + cifs_sb_active(sb); wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS, TASK_UNINTERRUPTIBLE); @@ -3170,6 +3177,7 @@ oplock_break_ack: cifs_put_tlink(tlink); out: cifs_done_oplock_break(cinode); + cifs_sb_deactive(sb); } static int cifs_swap_activate(struct swap_info_struct *sis, diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c index 2a3e46b8e15a..a11a2a693c51 100644 --- a/fs/smb/client/smb2inode.c +++ b/fs/smb/client/smb2inode.c @@ -1346,7 +1346,8 @@ struct inode *smb2_get_reparse_inode(struct cifs_open_info_data *data, * empty object on the server. */ if (!(le32_to_cpu(tcon->fsAttrInfo.Attributes) & FILE_SUPPORTS_REPARSE_POINTS)) - return ERR_PTR(-EOPNOTSUPP); + if (!tcon->posix_extensions) + return ERR_PTR(-EOPNOTSUPP); oparms = CIFS_OPARMS(cifs_sb, tcon, full_path, SYNCHRONIZE | DELETE | diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c index 1468c16ea9b8..938a8a7c5d21 100644 --- a/fs/smb/client/smb2ops.c +++ b/fs/smb/client/smb2ops.c @@ -4316,6 +4316,7 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst, u8 key[SMB3_ENC_DEC_KEY_SIZE]; struct aead_request *req; u8 *iv; + DECLARE_CRYPTO_WAIT(wait); unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize); void *creq; size_t sensitive_size; @@ -4366,7 +4367,11 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst, aead_request_set_crypt(req, sg, sg, crypt_len, iv); aead_request_set_ad(req, assoc_data_len); - rc = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req); + aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, + crypto_req_done, &wait); + + rc = crypto_wait_req(enc ? 
crypto_aead_encrypt(req) + : crypto_aead_decrypt(req), &wait); if (!rc && enc) memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE); @@ -5255,7 +5260,8 @@ static int smb2_make_node(unsigned int xid, struct inode *inode, if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) { rc = cifs_sfu_make_node(xid, inode, dentry, tcon, full_path, mode, dev); - } else if (le32_to_cpu(tcon->fsAttrInfo.Attributes) & FILE_SUPPORTS_REPARSE_POINTS) { + } else if ((le32_to_cpu(tcon->fsAttrInfo.Attributes) & FILE_SUPPORTS_REPARSE_POINTS) + || (tcon->posix_extensions)) { rc = smb2_mknod_reparse(xid, inode, dentry, tcon, full_path, mode, dev); } diff --git a/fs/ufs/super.c b/fs/ufs/super.c index eea718ac66b4..6e4585169f94 100644 --- a/fs/ufs/super.c +++ b/fs/ufs/super.c @@ -397,7 +397,7 @@ static int ufs_parse_param(struct fs_context *fc, struct fs_parameter *param) pr_err("ufstype can't be changed during remount\n"); return -EINVAL; } - if (!ctx->flavour) { + if (ctx->flavour) { pr_err("conflicting ufstype options\n"); return -EINVAL; } diff --git a/fs/xfs/libxfs/xfs_group.c b/fs/xfs/libxfs/xfs_group.c index e9d76bcdc820..20ad7c309489 100644 --- a/fs/xfs/libxfs/xfs_group.c +++ b/fs/xfs/libxfs/xfs_group.c @@ -163,7 +163,8 @@ xfs_group_free( xfs_defer_drain_free(&xg->xg_intents_drain); #ifdef __KERNEL__ - kfree(xg->xg_busy_extents); + if (xfs_group_has_extent_busy(xg->xg_mount, xg->xg_type)) + kfree(xg->xg_busy_extents); #endif if (uninit) @@ -189,9 +190,11 @@ xfs_group_insert( xg->xg_type = type; #ifdef __KERNEL__ - xg->xg_busy_extents = xfs_extent_busy_alloc(); - if (!xg->xg_busy_extents) - return -ENOMEM; + if (xfs_group_has_extent_busy(mp, type)) { + xg->xg_busy_extents = xfs_extent_busy_alloc(); + if (!xg->xg_busy_extents) + return -ENOMEM; + } spin_lock_init(&xg->xg_state_lock); xfs_hooks_init(&xg->xg_rmap_update_hooks); #endif @@ -210,7 +213,8 @@ xfs_group_insert( out_drain: xfs_defer_drain_free(&xg->xg_intents_drain); #ifdef __KERNEL__ - kfree(xg->xg_busy_extents); + if (xfs_group_has_extent_busy(xg->xg_mount, xg->xg_type)) + kfree(xg->xg_busy_extents); #endif return error; } diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index ba5bd6031ece..f9ef3b2a332a 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c @@ -1683,7 +1683,7 @@ xfs_free_buftarg( fs_put_dax(btp->bt_daxdev, btp->bt_mount); /* the main block device is closed by kill_block_super */ if (btp->bt_bdev != btp->bt_mount->m_super->s_bdev) - bdev_fput(btp->bt_bdev_file); + bdev_fput(btp->bt_file); kfree(btp); } @@ -1712,8 +1712,8 @@ xfs_configure_buftarg_atomic_writes( max_bytes = 0; } - btp->bt_bdev_awu_min = min_bytes; - btp->bt_bdev_awu_max = max_bytes; + btp->bt_awu_min = min_bytes; + btp->bt_awu_max = max_bytes; } /* Configure a buffer target that abstracts a block device. */ @@ -1738,14 +1738,9 @@ xfs_configure_buftarg( return -EINVAL; } - /* - * Flush the block device pagecache so our bios see anything dirtied - * before mount. 
- */ if (bdev_can_atomic_write(btp->bt_bdev)) xfs_configure_buftarg_atomic_writes(btp); - - return sync_blockdev(btp->bt_bdev); + return 0; } int @@ -1803,7 +1798,7 @@ xfs_alloc_buftarg( btp = kzalloc(sizeof(*btp), GFP_KERNEL | __GFP_NOFAIL); btp->bt_mount = mp; - btp->bt_bdev_file = bdev_file; + btp->bt_file = bdev_file; btp->bt_bdev = file_bdev(bdev_file); btp->bt_dev = btp->bt_bdev->bd_dev; btp->bt_daxdev = fs_dax_get_by_bdev(btp->bt_bdev, &btp->bt_dax_part_off, diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h index 15fc56948346..b269e115d9ac 100644 --- a/fs/xfs/xfs_buf.h +++ b/fs/xfs/xfs_buf.h @@ -94,7 +94,6 @@ void xfs_buf_cache_destroy(struct xfs_buf_cache *bch); */ struct xfs_buftarg { dev_t bt_dev; - struct file *bt_bdev_file; struct block_device *bt_bdev; struct dax_device *bt_daxdev; struct file *bt_file; @@ -112,9 +111,9 @@ struct xfs_buftarg { struct percpu_counter bt_readahead_count; struct ratelimit_state bt_ioerror_rl; - /* Atomic write unit values, bytes */ - unsigned int bt_bdev_awu_min; - unsigned int bt_bdev_awu_max; + /* Hardware atomic write unit values, bytes */ + unsigned int bt_awu_min; + unsigned int bt_awu_max; /* built-in cache, if we're not using the perag one */ struct xfs_buf_cache bt_cache[]; @@ -375,7 +374,6 @@ extern void xfs_buftarg_wait(struct xfs_buftarg *); extern void xfs_buftarg_drain(struct xfs_buftarg *); int xfs_configure_buftarg(struct xfs_buftarg *btp, unsigned int sectorsize); -#define xfs_getsize_buftarg(buftarg) block_size((buftarg)->bt_bdev) #define xfs_readonly_buftarg(buftarg) bdev_read_only((buftarg)->bt_bdev) int xfs_buf_reverify(struct xfs_buf *bp, const struct xfs_buf_ops *ops); diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c index 94d0873bcd62..603d51365645 100644 --- a/fs/xfs/xfs_discard.c +++ b/fs/xfs/xfs_discard.c @@ -103,24 +103,6 @@ xfs_discard_endio( bio_put(bio); } -static inline struct block_device * -xfs_group_bdev( - const struct xfs_group *xg) -{ - struct xfs_mount *mp = xg->xg_mount; - - switch (xg->xg_type) { - case XG_TYPE_AG: - return mp->m_ddev_targp->bt_bdev; - case XG_TYPE_RTG: - return mp->m_rtdev_targp->bt_bdev; - default: - ASSERT(0); - break; - } - return NULL; -} - /* * Walk the discard list and issue discards on all the busy extents in the * list. We plug and chain the bios so that we only need a single completion @@ -138,11 +120,14 @@ xfs_discard_extents( blk_start_plug(&plug); list_for_each_entry(busyp, &extents->extent_list, list) { - trace_xfs_discard_extent(busyp->group, busyp->bno, - busyp->length); + struct xfs_group *xg = busyp->group; + struct xfs_buftarg *btp = + xfs_group_type_buftarg(xg->xg_mount, xg->xg_type); - error = __blkdev_issue_discard(xfs_group_bdev(busyp->group), - xfs_gbno_to_daddr(busyp->group, busyp->bno), + trace_xfs_discard_extent(xg, busyp->bno, busyp->length); + + error = __blkdev_issue_discard(btp->bt_bdev, + xfs_gbno_to_daddr(xg, busyp->bno), XFS_FSB_TO_BB(mp, busyp->length), GFP_KERNEL, &bio); if (error && error != -EOPNOTSUPP) { diff --git a/fs/xfs/xfs_extent_busy.h b/fs/xfs/xfs_extent_busy.h index f069b04e8ea1..3e6e019b6146 100644 --- a/fs/xfs/xfs_extent_busy.h +++ b/fs/xfs/xfs_extent_busy.h @@ -68,4 +68,12 @@ static inline void xfs_extent_busy_sort(struct list_head *list) list_sort(NULL, list, xfs_extent_busy_ag_cmp); } +/* + * Zoned RTGs don't need to track busy extents, as the actual block freeing only + * happens by a zone reset, which forces out all transactions that touched the + * to be reset zone first. 
+ */ +#define xfs_group_has_extent_busy(mp, type) \ + ((type) == XG_TYPE_AG || !xfs_has_zoned((mp))) + #endif /* __XFS_EXTENT_BUSY_H__ */ diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 0b41b18debf3..38e365b16348 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -752,7 +752,7 @@ xfs_file_dio_write_atomic( * HW offload should be faster, so try that first if it is already * known that the write length is not too large. */ - if (ocount > xfs_inode_buftarg(ip)->bt_bdev_awu_max) + if (ocount > xfs_inode_buftarg(ip)->bt_awu_max) dops = &xfs_atomic_write_cow_iomap_ops; else dops = &xfs_direct_write_iomap_ops; diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index d7e2b902ef5c..07fbdcc4cbf5 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -358,7 +358,7 @@ static inline bool xfs_inode_has_bigrtalloc(const struct xfs_inode *ip) static inline bool xfs_inode_can_hw_atomic_write(const struct xfs_inode *ip) { - return xfs_inode_buftarg(ip)->bt_bdev_awu_max > 0; + return xfs_inode_buftarg(ip)->bt_awu_max > 0; } /* diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index ff05e6b1b0bb..ec30b78bf5c4 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c @@ -827,7 +827,7 @@ xfs_bmap_hw_atomic_write_possible( /* * The ->iomap_begin caller should ensure this, but check anyway. */ - return len <= xfs_inode_buftarg(ip)->bt_bdev_awu_max; + return len <= xfs_inode_buftarg(ip)->bt_awu_max; } static int diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c index 8cddbb7c149b..01e597290eb5 100644 --- a/fs/xfs/xfs_iops.c +++ b/fs/xfs/xfs_iops.c @@ -665,7 +665,7 @@ xfs_get_atomic_write_max_opt( * less than our out of place write limit, but we don't want to exceed * the awu_max. */ - return min(awu_max, xfs_inode_buftarg(ip)->bt_bdev_awu_max); + return min(awu_max, xfs_inode_buftarg(ip)->bt_awu_max); } static void diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index 29276fe60df9..0b690bc119d7 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -171,19 +171,16 @@ xfs_readsb( ASSERT(mp->m_ddev_targp != NULL); /* - * For the initial read, we must guess at the sector - * size based on the block device. It's enough to - * get the sb_sectsize out of the superblock and - * then reread with the proper length. - * We don't verify it yet, because it may not be complete. + * In the first pass, use the device sector size to just read enough + * of the superblock to extract the XFS sector size. + * + * The device sector size must be smaller than or equal to the XFS + * sector size and thus we can always read the superblock. Once we know + * the XFS sector size, re-read it and run the buffer verifier. */ - sector_size = xfs_getsize_buftarg(mp->m_ddev_targp); + sector_size = mp->m_ddev_targp->bt_logical_sectorsize; buf_ops = NULL; - /* - * Allocate a (locked) buffer to hold the superblock. This will be kept - * around at all times to optimize access to the superblock. - */ reread: error = xfs_buf_read_uncached(mp->m_ddev_targp, XFS_SB_DADDR, BTOBB(sector_size), &bp, buf_ops); @@ -247,6 +244,10 @@ reread: /* no need to be quiet anymore, so reset the buf ops */ bp->b_ops = &xfs_sb_buf_ops; + /* + * Keep a pointer of the sb buffer around instead of caching it in the + * buffer cache because we access it frequently. 
+ */ mp->m_sb_bp = bp; xfs_buf_unlock(bp); return 0; @@ -678,68 +679,46 @@ static inline unsigned int max_pow_of_two_factor(const unsigned int nr) } /* - * If the data device advertises atomic write support, limit the size of data - * device atomic writes to the greatest power-of-two factor of the AG size so - * that every atomic write unit aligns with the start of every AG. This is - * required so that the per-AG allocations for an atomic write will always be + * If the underlying device advertises atomic write support, limit the size of + * atomic writes to the greatest power-of-two factor of the group size so + * that every atomic write unit aligns with the start of every group. This is + * required so that the allocations for an atomic write will always be * aligned compatibly with the alignment requirements of the storage. * - * If the data device doesn't advertise atomic writes, then there are no - * alignment restrictions and the largest out-of-place write we can do - * ourselves is the number of blocks that user files can allocate from any AG. + * If the device doesn't advertise atomic writes, then there are no alignment + * restrictions and the largest out-of-place write we can do ourselves is the + * number of blocks that user files can allocate from any group. */ -static inline xfs_extlen_t xfs_calc_perag_awu_max(struct xfs_mount *mp) +static xfs_extlen_t +xfs_calc_group_awu_max( + struct xfs_mount *mp, + enum xfs_group_type type) { - if (mp->m_ddev_targp->bt_bdev_awu_min > 0) - return max_pow_of_two_factor(mp->m_sb.sb_agblocks); - return rounddown_pow_of_two(mp->m_ag_max_usable); -} + struct xfs_groups *g = &mp->m_groups[type]; + struct xfs_buftarg *btp = xfs_group_type_buftarg(mp, type); -/* - * Reflink on the realtime device requires rtgroups, and atomic writes require - * reflink. - * - * If the realtime device advertises atomic write support, limit the size of - * data device atomic writes to the greatest power-of-two factor of the rtgroup - * size so that every atomic write unit aligns with the start of every rtgroup. - * This is required so that the per-rtgroup allocations for an atomic write - * will always be aligned compatibly with the alignment requirements of the - * storage. - * - * If the rt device doesn't advertise atomic writes, then there are no - * alignment restrictions and the largest out-of-place write we can do - * ourselves is the number of blocks that user files can allocate from any - * rtgroup. - */ -static inline xfs_extlen_t xfs_calc_rtgroup_awu_max(struct xfs_mount *mp) -{ - struct xfs_groups *rgs = &mp->m_groups[XG_TYPE_RTG]; - - if (rgs->blocks == 0) + if (g->blocks == 0) return 0; - if (mp->m_rtdev_targp && mp->m_rtdev_targp->bt_bdev_awu_min > 0) - return max_pow_of_two_factor(rgs->blocks); - return rounddown_pow_of_two(rgs->blocks); + if (btp && btp->bt_awu_min > 0) + return max_pow_of_two_factor(g->blocks); + return rounddown_pow_of_two(g->blocks); } /* Compute the maximum atomic write unit size for each section. 
*/ static inline void xfs_calc_atomic_write_unit_max( - struct xfs_mount *mp) + struct xfs_mount *mp, + enum xfs_group_type type) { - struct xfs_groups *ags = &mp->m_groups[XG_TYPE_AG]; - struct xfs_groups *rgs = &mp->m_groups[XG_TYPE_RTG]; + struct xfs_groups *g = &mp->m_groups[type]; const xfs_extlen_t max_write = xfs_calc_atomic_write_max(mp); const xfs_extlen_t max_ioend = xfs_reflink_max_atomic_cow(mp); - const xfs_extlen_t max_agsize = xfs_calc_perag_awu_max(mp); - const xfs_extlen_t max_rgsize = xfs_calc_rtgroup_awu_max(mp); + const xfs_extlen_t max_gsize = xfs_calc_group_awu_max(mp, type); - ags->awu_max = min3(max_write, max_ioend, max_agsize); - rgs->awu_max = min3(max_write, max_ioend, max_rgsize); - - trace_xfs_calc_atomic_write_unit_max(mp, max_write, max_ioend, - max_agsize, max_rgsize); + g->awu_max = min3(max_write, max_ioend, max_gsize); + trace_xfs_calc_atomic_write_unit_max(mp, type, max_write, max_ioend, + max_gsize, g->awu_max); } /* @@ -757,7 +736,8 @@ xfs_set_max_atomic_write_opt( max(mp->m_groups[XG_TYPE_AG].blocks, mp->m_groups[XG_TYPE_RTG].blocks); const xfs_extlen_t max_group_write = - max(xfs_calc_perag_awu_max(mp), xfs_calc_rtgroup_awu_max(mp)); + max(xfs_calc_group_awu_max(mp, XG_TYPE_AG), + xfs_calc_group_awu_max(mp, XG_TYPE_RTG)); int error; if (new_max_bytes == 0) @@ -813,7 +793,8 @@ set_limit: return error; } - xfs_calc_atomic_write_unit_max(mp); + xfs_calc_atomic_write_unit_max(mp, XG_TYPE_AG); + xfs_calc_atomic_write_unit_max(mp, XG_TYPE_RTG); mp->m_awu_max_bytes = new_max_bytes; return 0; } diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index d85084f9f317..97de44c32272 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -802,4 +802,21 @@ static inline void xfs_mod_sb_delalloc(struct xfs_mount *mp, int64_t delta) int xfs_set_max_atomic_write_opt(struct xfs_mount *mp, unsigned long long new_max_bytes); +static inline struct xfs_buftarg * +xfs_group_type_buftarg( + struct xfs_mount *mp, + enum xfs_group_type type) +{ + switch (type) { + case XG_TYPE_AG: + return mp->m_ddev_targp; + case XG_TYPE_RTG: + return mp->m_rtdev_targp; + default: + ASSERT(0); + break; + } + return NULL; +} + #endif /* __XFS_MOUNT_H__ */ diff --git a/fs/xfs/xfs_notify_failure.c b/fs/xfs/xfs_notify_failure.c index 3545dc1d953c..42e9c72b85c0 100644 --- a/fs/xfs/xfs_notify_failure.c +++ b/fs/xfs/xfs_notify_failure.c @@ -253,8 +253,7 @@ xfs_dax_notify_dev_failure( return -EOPNOTSUPP; } - error = xfs_dax_translate_range(type == XG_TYPE_RTG ? 
- mp->m_rtdev_targp : mp->m_ddev_targp, + error = xfs_dax_translate_range(xfs_group_type_buftarg(mp, type), offset, len, &daddr, &bblen); if (error) return error; diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h index ba45d801df1c..78be223b13b2 100644 --- a/fs/xfs/xfs_trace.h +++ b/fs/xfs/xfs_trace.h @@ -171,36 +171,33 @@ DEFINE_ATTR_LIST_EVENT(xfs_attr_leaf_list); DEFINE_ATTR_LIST_EVENT(xfs_attr_node_list); TRACE_EVENT(xfs_calc_atomic_write_unit_max, - TP_PROTO(struct xfs_mount *mp, unsigned int max_write, - unsigned int max_ioend, unsigned int max_agsize, - unsigned int max_rgsize), - TP_ARGS(mp, max_write, max_ioend, max_agsize, max_rgsize), + TP_PROTO(struct xfs_mount *mp, enum xfs_group_type type, + unsigned int max_write, unsigned int max_ioend, + unsigned int max_gsize, unsigned int awu_max), + TP_ARGS(mp, type, max_write, max_ioend, max_gsize, awu_max), TP_STRUCT__entry( __field(dev_t, dev) + __field(enum xfs_group_type, type) __field(unsigned int, max_write) __field(unsigned int, max_ioend) - __field(unsigned int, max_agsize) - __field(unsigned int, max_rgsize) - __field(unsigned int, data_awu_max) - __field(unsigned int, rt_awu_max) + __field(unsigned int, max_gsize) + __field(unsigned int, awu_max) ), TP_fast_assign( __entry->dev = mp->m_super->s_dev; + __entry->type = type; __entry->max_write = max_write; __entry->max_ioend = max_ioend; - __entry->max_agsize = max_agsize; - __entry->max_rgsize = max_rgsize; - __entry->data_awu_max = mp->m_groups[XG_TYPE_AG].awu_max; - __entry->rt_awu_max = mp->m_groups[XG_TYPE_RTG].awu_max; + __entry->max_gsize = max_gsize; + __entry->awu_max = awu_max; ), - TP_printk("dev %d:%d max_write %u max_ioend %u max_agsize %u max_rgsize %u data_awu_max %u rt_awu_max %u", + TP_printk("dev %d:%d %s max_write %u max_ioend %u max_gsize %u awu_max %u", MAJOR(__entry->dev), MINOR(__entry->dev), + __print_symbolic(__entry->type, XG_TYPE_STRINGS), __entry->max_write, __entry->max_ioend, - __entry->max_agsize, - __entry->max_rgsize, - __entry->data_awu_max, - __entry->rt_awu_max) + __entry->max_gsize, + __entry->awu_max) ); TRACE_EVENT(xfs_calc_max_atomic_write_fsblocks, diff --git a/fs/xfs/xfs_xattr.c b/fs/xfs/xfs_xattr.c index 0f641a9091ec..ac5cecec9aa1 100644 --- a/fs/xfs/xfs_xattr.c +++ b/fs/xfs/xfs_xattr.c @@ -243,7 +243,7 @@ __xfs_xattr_put_listent( offset = context->buffer + context->count; memcpy(offset, prefix, prefix_len); offset += prefix_len; - strncpy(offset, (char *)name, namelen); /* real name */ + memcpy(offset, (char *)name, namelen); /* real name */ offset += namelen; *offset = '\0'; diff --git a/include/drm/drm_buddy.h b/include/drm/drm_buddy.h index 9689a7c5dd36..513837632b7d 100644 --- a/include/drm/drm_buddy.h +++ b/include/drm/drm_buddy.h @@ -160,6 +160,8 @@ int drm_buddy_block_trim(struct drm_buddy *mm, u64 new_size, struct list_head *blocks); +void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear); + void drm_buddy_free_block(struct drm_buddy *mm, struct drm_buddy_block *block); void drm_buddy_free_list(struct drm_buddy *mm, diff --git a/include/linux/interconnect-provider.h b/include/linux/interconnect-provider.h index 55cfebc658e6..8a2f652a05ff 100644 --- a/include/linux/interconnect-provider.h +++ b/include/linux/interconnect-provider.h @@ -119,6 +119,7 @@ int icc_std_aggregate(struct icc_node *node, u32 tag, u32 avg_bw, struct icc_node *icc_node_create_dyn(void); struct icc_node *icc_node_create(int id); void icc_node_destroy(int id); +int icc_node_set_name(struct icc_node *node, const struct icc_provider *provider, 
const char *name); int icc_link_nodes(struct icc_node *src_node, struct icc_node **dst_node); int icc_link_create(struct icc_node *node, const int dst_id); void icc_node_add(struct icc_node *node, struct icc_provider *provider); @@ -152,6 +153,12 @@ static inline void icc_node_destroy(int id) { } +static inline int icc_node_set_name(struct icc_node *node, const struct icc_provider *provider, + const char *name) +{ + return -EOPNOTSUPP; +} + static inline int icc_link_nodes(struct icc_node *src_node, struct icc_node **dst_node) { return -EOPNOTSUPP; diff --git a/include/linux/ism.h b/include/linux/ism.h index 5428edd90982..8358b4cd7ba6 100644 --- a/include/linux/ism.h +++ b/include/linux/ism.h @@ -28,6 +28,7 @@ struct ism_dmb { struct ism_dev { spinlock_t lock; /* protects the ism device */ + spinlock_t cmd_lock; /* serializes cmds */ struct list_head list; struct pci_dev *pdev; diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h index 437769e061b7..13add0c2c407 100644 --- a/include/linux/phy/phy.h +++ b/include/linux/phy/phy.h @@ -154,6 +154,7 @@ struct phy_attrs { * @id: id of the phy device * @ops: function pointers for performing phy operations * @mutex: mutex to protect phy_ops + * @lockdep_key: lockdep information for this mutex * @init_count: used to protect when the PHY is used by multiple consumers * @power_count: used to protect when the PHY is used by multiple consumers * @attrs: used to specify PHY specific attributes @@ -165,6 +166,7 @@ struct phy { int id; const struct phy_ops *ops; struct mutex mutex; + struct lock_class_key lockdep_key; int init_count; int power_count; struct phy_attrs attrs; diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 7803edaa8ff8..0cca01b5607b 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -888,15 +888,23 @@ static inline int power_supply_is_system_supplied(void) { return -ENOSYS; } extern int power_supply_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val); +int power_supply_get_property_direct(struct power_supply *psy, enum power_supply_property psp, + union power_supply_propval *val); #if IS_ENABLED(CONFIG_POWER_SUPPLY) extern int power_supply_set_property(struct power_supply *psy, enum power_supply_property psp, const union power_supply_propval *val); +int power_supply_set_property_direct(struct power_supply *psy, enum power_supply_property psp, + const union power_supply_propval *val); #else static inline int power_supply_set_property(struct power_supply *psy, enum power_supply_property psp, const union power_supply_propval *val) { return 0; } +static inline int power_supply_set_property_direct(struct power_supply *psy, + enum power_supply_property psp, + const union power_supply_propval *val) +{ return 0; } #endif extern void power_supply_external_power_changed(struct power_supply *psy); diff --git a/include/net/xfrm.h b/include/net/xfrm.h index a21e276dbe44..f3014e4f54fc 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -441,7 +441,6 @@ int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo); int xfrm_input_unregister_afinfo(const struct xfrm_input_afinfo *afinfo); void xfrm_flush_gc(void); -void xfrm_state_delete_tunnel(struct xfrm_state *x); struct xfrm_type { struct module *owner; @@ -474,7 +473,7 @@ struct xfrm_type_offload { int xfrm_register_type_offload(const struct xfrm_type_offload *type, unsigned short family); void xfrm_unregister_type_offload(const struct xfrm_type_offload *type, 
unsigned short family); -void xfrm_set_type_offload(struct xfrm_state *x); +void xfrm_set_type_offload(struct xfrm_state *x, bool try_load); static inline void xfrm_unset_type_offload(struct xfrm_state *x) { if (!x->type_offload) @@ -916,7 +915,7 @@ static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols) xfrm_pol_put(pols[i]); } -void __xfrm_state_destroy(struct xfrm_state *, bool); +void __xfrm_state_destroy(struct xfrm_state *); static inline void __xfrm_state_put(struct xfrm_state *x) { @@ -926,13 +925,7 @@ static inline void __xfrm_state_put(struct xfrm_state *x) static inline void xfrm_state_put(struct xfrm_state *x) { if (refcount_dec_and_test(&x->refcnt)) - __xfrm_state_destroy(x, false); -} - -static inline void xfrm_state_put_sync(struct xfrm_state *x) -{ - if (refcount_dec_and_test(&x->refcnt)) - __xfrm_state_destroy(x, true); + __xfrm_state_destroy(x); } static inline void xfrm_state_hold(struct xfrm_state *x) @@ -1770,7 +1763,7 @@ struct xfrmk_spdinfo { struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq, u32 pcpu_num); int xfrm_state_delete(struct xfrm_state *x); -int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync); +int xfrm_state_flush(struct net *net, u8 proto, bool task_valid); int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid); int xfrm_dev_policy_flush(struct net *net, struct net_device *dev, bool task_valid); diff --git a/include/trace/events/netfs.h b/include/trace/events/netfs.h index 73e96ccbe830..64a382fbc31a 100644 --- a/include/trace/events/netfs.h +++ b/include/trace/events/netfs.h @@ -55,6 +55,7 @@ EM(netfs_rreq_trace_copy, "COPY ") \ EM(netfs_rreq_trace_dirty, "DIRTY ") \ EM(netfs_rreq_trace_done, "DONE ") \ + EM(netfs_rreq_trace_end_copy_to_cache, "END-C2C") \ EM(netfs_rreq_trace_free, "FREE ") \ EM(netfs_rreq_trace_ki_complete, "KI-CMPL") \ EM(netfs_rreq_trace_recollect, "RECLLCT") \ @@ -559,6 +560,35 @@ TRACE_EVENT(netfs_write, __entry->start, __entry->start + __entry->len - 1) ); +TRACE_EVENT(netfs_copy2cache, + TP_PROTO(const struct netfs_io_request *rreq, + const struct netfs_io_request *creq), + + TP_ARGS(rreq, creq), + + TP_STRUCT__entry( + __field(unsigned int, rreq) + __field(unsigned int, creq) + __field(unsigned int, cookie) + __field(unsigned int, ino) + ), + + TP_fast_assign( + struct netfs_inode *__ctx = netfs_inode(rreq->inode); + struct fscache_cookie *__cookie = netfs_i_cookie(__ctx); + __entry->rreq = rreq->debug_id; + __entry->creq = creq->debug_id; + __entry->cookie = __cookie ? __cookie->debug_id : 0; + __entry->ino = rreq->inode->i_ino; + ), + + TP_printk("R=%08x CR=%08x c=%08x i=%x ", + __entry->rreq, + __entry->creq, + __entry->cookie, + __entry->ino) + ); + TRACE_EVENT(netfs_collect, TP_PROTO(const struct netfs_io_request *wreq), diff --git a/io_uring/net.c b/io_uring/net.c index 43a43522f406..bec8c6ed0a93 100644 --- a/io_uring/net.c +++ b/io_uring/net.c @@ -1738,9 +1738,11 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags) int ret; bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; - if (unlikely(req->flags & REQ_F_FAIL)) { - ret = -ECONNRESET; - goto out; + if (connect->in_progress) { + struct poll_table_struct pt = { ._key = EPOLLERR }; + + if (vfs_poll(req->file, &pt) & EPOLLERR) + goto get_sock_err; } file_flags = force_nonblock ? O_NONBLOCK : 0; @@ -1765,8 +1767,10 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags) * which means the previous result is good. 
For both of these, * grab the sock_error() and use that for the completion. */ - if (ret == -EBADFD || ret == -EISCONN) + if (ret == -EBADFD || ret == -EISCONN) { +get_sock_err: ret = sock_error(sock_from_file(req->file)->sk); + } } if (ret == -ERESTARTSYS) ret = -EINTR; diff --git a/io_uring/poll.c b/io_uring/poll.c index 0526062e2f81..20e9b46a4adf 100644 --- a/io_uring/poll.c +++ b/io_uring/poll.c @@ -273,8 +273,6 @@ static int io_poll_check_events(struct io_kiocb *req, io_tw_token_t tw) return IOU_POLL_REISSUE; } } - if (unlikely(req->cqe.res & EPOLLERR)) - req_set_fail(req); if (req->apoll_events & EPOLLONESHOT) return IOU_POLL_DONE; diff --git a/io_uring/zcrx.c b/io_uring/zcrx.c index 00d0064b22a5..4a7011c799f0 100644 --- a/io_uring/zcrx.c +++ b/io_uring/zcrx.c @@ -76,6 +76,8 @@ static int io_import_dmabuf(struct io_zcrx_ifq *ifq, int dmabuf_fd = area_reg->dmabuf_fd; int i, ret; + if (off) + return -EINVAL; if (WARN_ON_ONCE(!ifq->dev)) return -EFAULT; if (!IS_ENABLED(CONFIG_DMA_SHARED_BUFFER)) @@ -106,7 +108,7 @@ static int io_import_dmabuf(struct io_zcrx_ifq *ifq, for_each_sgtable_dma_sg(mem->sgt, sg, i) total_size += sg_dma_len(sg); - if (total_size < off + len) { + if (total_size != len) { ret = -EINVAL; goto err; } diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index b71e428ad936..ad6df48b540c 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -884,6 +884,13 @@ int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args, if (fmt[i] == 'p') { sizeof_cur_arg = sizeof(long); + if (fmt[i + 1] == 0 || isspace(fmt[i + 1]) || + ispunct(fmt[i + 1])) { + if (tmp_buf) + cur_arg = raw_args[num_spec]; + goto nocopy_fmt; + } + if ((fmt[i + 1] == 'k' || fmt[i + 1] == 'u') && fmt[i + 2] == 's') { fmt_ptype = fmt[i + 1]; @@ -891,11 +898,9 @@ int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args, goto fmt_str; } - if (fmt[i + 1] == 0 || isspace(fmt[i + 1]) || - ispunct(fmt[i + 1]) || fmt[i + 1] == 'K' || + if (fmt[i + 1] == 'K' || fmt[i + 1] == 'x' || fmt[i + 1] == 's' || fmt[i + 1] == 'S') { - /* just kernel pointers */ if (tmp_buf) cur_arg = raw_args[num_spec]; i++; diff --git a/kernel/bpf/sysfs_btf.c b/kernel/bpf/sysfs_btf.c index 941d0d2427e3..8e61dc555415 100644 --- a/kernel/bpf/sysfs_btf.c +++ b/kernel/bpf/sysfs_btf.c @@ -21,7 +21,7 @@ static int btf_sysfs_vmlinux_mmap(struct file *filp, struct kobject *kobj, { unsigned long pages = PAGE_ALIGN(attr->size) >> PAGE_SHIFT; size_t vm_size = vma->vm_end - vma->vm_start; - phys_addr_t addr = virt_to_phys(__start_BTF); + phys_addr_t addr = __pa_symbol(__start_BTF); unsigned long pfn = addr >> PAGE_SHIFT; if (attr->private != __start_BTF || !PAGE_ALIGNED(addr)) diff --git a/kernel/cgroup/legacy_freezer.c b/kernel/cgroup/legacy_freezer.c index 507b8f19a262..dd9417425d92 100644 --- a/kernel/cgroup/legacy_freezer.c +++ b/kernel/cgroup/legacy_freezer.c @@ -66,15 +66,9 @@ static struct freezer *parent_freezer(struct freezer *freezer) bool cgroup_freezing(struct task_struct *task) { bool ret; - unsigned int state; rcu_read_lock(); - /* Check if the cgroup is still FREEZING, but not FROZEN. The extra - * !FROZEN check is required, because the FREEZING bit is not cleared - * when the state FROZEN is reached. 
- */ - state = task_freezer(task)->state; - ret = (state & CGROUP_FREEZING) && !(state & CGROUP_FROZEN); + ret = task_freezer(task)->state & CGROUP_FREEZING; rcu_read_unlock(); return ret; diff --git a/kernel/freezer.c b/kernel/freezer.c index 8d530d0949ff..6a96149aede9 100644 --- a/kernel/freezer.c +++ b/kernel/freezer.c @@ -201,18 +201,9 @@ static int __restore_freezer_state(struct task_struct *p, void *arg) void __thaw_task(struct task_struct *p) { - unsigned long flags; - - spin_lock_irqsave(&freezer_lock, flags); - if (WARN_ON_ONCE(freezing(p))) - goto unlock; - - if (!frozen(p) || task_call_func(p, __restore_freezer_state, NULL)) - goto unlock; - - wake_up_state(p, TASK_FROZEN); -unlock: - spin_unlock_irqrestore(&freezer_lock, flags); + guard(spinlock_irqsave)(&freezer_lock); + if (frozen(p) && !task_call_func(p, __restore_freezer_state, NULL)) + wake_up_state(p, TASK_FROZEN); } /** diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index b498d867ba21..7dd5cbcb7a06 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -1272,7 +1272,8 @@ static inline struct rq *scx_locked_rq(void) #define SCX_CALL_OP(sch, mask, op, rq, args...) \ do { \ - update_locked_rq(rq); \ + if (rq) \ + update_locked_rq(rq); \ if (mask) { \ scx_kf_allow(mask); \ (sch)->ops.op(args); \ @@ -1280,14 +1281,16 @@ do { \ } else { \ (sch)->ops.op(args); \ } \ - update_locked_rq(NULL); \ + if (rq) \ + update_locked_rq(NULL); \ } while (0) #define SCX_CALL_OP_RET(sch, mask, op, rq, args...) \ ({ \ __typeof__((sch)->ops.op(args)) __ret; \ \ - update_locked_rq(rq); \ + if (rq) \ + update_locked_rq(rq); \ if (mask) { \ scx_kf_allow(mask); \ __ret = (sch)->ops.op(args); \ @@ -1295,7 +1298,8 @@ do { \ } else { \ __ret = (sch)->ops.op(args); \ } \ - update_locked_rq(NULL); \ + if (rq) \ + update_locked_rq(NULL); \ __ret; \ }) diff --git a/kernel/sched/ext_idle.c b/kernel/sched/ext_idle.c index 6d29d3cbc670..001fb88a8481 100644 --- a/kernel/sched/ext_idle.c +++ b/kernel/sched/ext_idle.c @@ -903,7 +903,7 @@ s32 select_cpu_from_kfunc(struct task_struct *p, s32 prev_cpu, u64 wake_flags, * selection optimizations and simply check whether the previously * used CPU is idle and within the allowed cpumask. */ - if (p->nr_cpus_allowed == 1) { + if (p->nr_cpus_allowed == 1 || is_migration_disabled(p)) { if (cpumask_test_cpu(prev_cpu, allowed ?: p->cpus_ptr) && scx_idle_test_and_clear_cpu(prev_cpu)) cpu = prev_cpu; diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c index c48900b856a2..52ca8e268cfc 100644 --- a/kernel/sched/loadavg.c +++ b/kernel/sched/loadavg.c @@ -80,7 +80,7 @@ long calc_load_fold_active(struct rq *this_rq, long adjust) long nr_active, delta = 0; nr_active = this_rq->nr_running - adjust; - nr_active += (int)this_rq->nr_uninterruptible; + nr_active += (long)this_rq->nr_uninterruptible; if (nr_active != this_rq->calc_load_active) { delta = nr_active - this_rq->calc_load_active; diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 475bb5998295..83e3aa917142 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1149,7 +1149,7 @@ struct rq { * one CPU and if it got migrated afterwards it may decrease * it on another CPU. 
Always updated under the runqueue lock: */ - unsigned int nr_uninterruptible; + unsigned long nr_uninterruptible; union { struct task_struct __rcu *donor; /* Scheduler context */ diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 120531268abf..d01e5c910ce1 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -3136,7 +3136,10 @@ __register_event(struct trace_event_call *call, struct module *mod) if (ret < 0) return ret; + down_write(&trace_event_sem); list_add(&call->list, &ftrace_events); + up_write(&trace_event_sem); + if (call->flags & TRACE_EVENT_FL_DYNAMIC) atomic_set(&call->refcnt, 0); else @@ -3750,6 +3753,8 @@ __trace_add_event_dirs(struct trace_array *tr) struct trace_event_call *call; int ret; + lockdep_assert_held(&trace_event_sem); + list_for_each_entry(call, &ftrace_events, list) { ret = __trace_add_new_event(call, tr); if (ret < 0) diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c index 6819b93309ce..fd259da0aa64 100644 --- a/kernel/trace/trace_osnoise.c +++ b/kernel/trace/trace_osnoise.c @@ -637,8 +637,8 @@ __timerlat_dump_stack(struct trace_buffer *buffer, struct trace_stack *fstack, u entry = ring_buffer_event_data(event); - memcpy(&entry->caller, fstack->calls, size); entry->size = fstack->nr_entries; + memcpy(&entry->caller, fstack->calls, size); trace_buffer_unlock_commit_nostack(buffer, event); } diff --git a/mm/secretmem.c b/mm/secretmem.c index 9a11a38a6770..e042a4a0bc0c 100644 --- a/mm/secretmem.c +++ b/mm/secretmem.c @@ -261,7 +261,15 @@ err_put_fd: static int secretmem_init_fs_context(struct fs_context *fc) { - return init_pseudo(fc, SECRETMEM_MAGIC) ? 0 : -ENOMEM; + struct pseudo_fs_context *ctx; + + ctx = init_pseudo(fc, SECRETMEM_MAGIC); + if (!ctx) + return -ENOMEM; + + fc->s_iflags |= SB_I_NOEXEC; + fc->s_iflags |= SB_I_NODEV; + return 0; } static struct file_system_type secretmem_fs = { @@ -279,9 +287,6 @@ static int __init secretmem_init(void) if (IS_ERR(secretmem_mnt)) return PTR_ERR(secretmem_mnt); - /* prevent secretmem mappings from ever getting PROT_EXEC */ - secretmem_mnt->mnt_flags |= MNT_NOEXEC; - return 0; } fs_initcall(secretmem_init); diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c index 9c787e2e4b17..4744e3fd4544 100644 --- a/net/appletalk/aarp.c +++ b/net/appletalk/aarp.c @@ -35,6 +35,7 @@ #include #include #include +#include int sysctl_aarp_expiry_time = AARP_EXPIRY_TIME; int sysctl_aarp_tick_time = AARP_TICK_TIME; @@ -44,6 +45,7 @@ int sysctl_aarp_resolve_time = AARP_RESOLVE_TIME; /* Lists of aarp entries */ /** * struct aarp_entry - AARP entry + * @refcnt: Reference count * @last_sent: Last time we xmitted the aarp request * @packet_queue: Queue of frames wait for resolution * @status: Used for proxy AARP @@ -55,6 +57,7 @@ int sysctl_aarp_resolve_time = AARP_RESOLVE_TIME; * @next: Next entry in chain */ struct aarp_entry { + refcount_t refcnt; /* These first two are only used for unresolved entries */ unsigned long last_sent; struct sk_buff_head packet_queue; @@ -79,6 +82,17 @@ static DEFINE_RWLOCK(aarp_lock); /* Used to walk the list and purge/kick entries. 
*/ static struct timer_list aarp_timer; +static inline void aarp_entry_get(struct aarp_entry *a) +{ + refcount_inc(&a->refcnt); +} + +static inline void aarp_entry_put(struct aarp_entry *a) +{ + if (refcount_dec_and_test(&a->refcnt)) + kfree(a); +} + /* * Delete an aarp queue * @@ -87,7 +101,7 @@ static struct timer_list aarp_timer; static void __aarp_expire(struct aarp_entry *a) { skb_queue_purge(&a->packet_queue); - kfree(a); + aarp_entry_put(a); } /* @@ -380,9 +394,11 @@ static void aarp_purge(void) static struct aarp_entry *aarp_alloc(void) { struct aarp_entry *a = kmalloc(sizeof(*a), GFP_ATOMIC); + if (!a) + return NULL; - if (a) - skb_queue_head_init(&a->packet_queue); + refcount_set(&a->refcnt, 1); + skb_queue_head_init(&a->packet_queue); return a; } @@ -477,6 +493,7 @@ int aarp_proxy_probe_network(struct atalk_iface *atif, struct atalk_addr *sa) entry->dev = atif->dev; write_lock_bh(&aarp_lock); + aarp_entry_get(entry); hash = sa->s_node % (AARP_HASH_SIZE - 1); entry->next = proxies[hash]; @@ -502,6 +519,7 @@ int aarp_proxy_probe_network(struct atalk_iface *atif, struct atalk_addr *sa) retval = 1; } + aarp_entry_put(entry); write_unlock_bh(&aarp_lock); out: return retval; diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c index 5a4fb2539b08..9a45aed508d1 100644 --- a/net/ipv4/ipcomp.c +++ b/net/ipv4/ipcomp.c @@ -54,6 +54,7 @@ static int ipcomp4_err(struct sk_buff *skb, u32 info) } /* We always hold one tunnel user reference to indicate a tunnel */ +static struct lock_class_key xfrm_state_lock_key; static struct xfrm_state *ipcomp_tunnel_create(struct xfrm_state *x) { struct net *net = xs_net(x); @@ -62,6 +63,7 @@ static struct xfrm_state *ipcomp_tunnel_create(struct xfrm_state *x) t = xfrm_state_alloc(net); if (!t) goto out; + lockdep_set_class(&t->lock, &xfrm_state_lock_key); t->id.proto = IPPROTO_IPIP; t->id.spi = x->props.saddr.a4; diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c index 0d31a8c108d4..f28cfd88eaf5 100644 --- a/net/ipv4/xfrm4_input.c +++ b/net/ipv4/xfrm4_input.c @@ -202,6 +202,9 @@ struct sk_buff *xfrm4_gro_udp_encap_rcv(struct sock *sk, struct list_head *head, if (len <= sizeof(struct ip_esp_hdr) || udpdata32[0] == 0) goto out; + /* set the transport header to ESP */ + skb_set_transport_header(skb, offset); + NAPI_GRO_CB(skb)->proto = IPPROTO_UDP; pp = call_gro_receive(ops->callbacks.gro_receive, head, skb); diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c index 72d4858dec18..8607569de34f 100644 --- a/net/ipv6/ipcomp6.c +++ b/net/ipv6/ipcomp6.c @@ -71,6 +71,7 @@ static int ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, return 0; } +static struct lock_class_key xfrm_state_lock_key; static struct xfrm_state *ipcomp6_tunnel_create(struct xfrm_state *x) { struct net *net = xs_net(x); @@ -79,6 +80,7 @@ static struct xfrm_state *ipcomp6_tunnel_create(struct xfrm_state *x) t = xfrm_state_alloc(net); if (!t) goto out; + lockdep_set_class(&t->lock, &xfrm_state_lock_key); t->id.proto = IPPROTO_IPV6; t->id.spi = xfrm6_tunnel_alloc_spi(net, (xfrm_address_t *)&x->props.saddr); diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c index 841c81abaaf4..9005fc156a20 100644 --- a/net/ipv6/xfrm6_input.c +++ b/net/ipv6/xfrm6_input.c @@ -202,6 +202,9 @@ struct sk_buff *xfrm6_gro_udp_encap_rcv(struct sock *sk, struct list_head *head, if (len <= sizeof(struct ip_esp_hdr) || udpdata32[0] == 0) goto out; + /* set the transport header to ESP */ + skb_set_transport_header(skb, offset); + NAPI_GRO_CB(skb)->proto = IPPROTO_UDP; pp = 
call_gro_receive(ops->callbacks.gro_receive, head, skb); diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c index bf140ef781c1..5120a763da0d 100644 --- a/net/ipv6/xfrm6_tunnel.c +++ b/net/ipv6/xfrm6_tunnel.c @@ -334,8 +334,8 @@ static void __net_exit xfrm6_tunnel_net_exit(struct net *net) struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net); unsigned int i; + xfrm_state_flush(net, IPSEC_PROTO_ANY, false); xfrm_flush_gc(); - xfrm_state_flush(net, 0, false, true); for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++) WARN_ON_ONCE(!hlist_empty(&xfrm6_tn->spi_byaddr[i])); diff --git a/net/key/af_key.c b/net/key/af_key.c index 1f82f69acfde..2ebde0352245 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -1766,7 +1766,7 @@ static int pfkey_flush(struct sock *sk, struct sk_buff *skb, const struct sadb_m if (proto == 0) return -EINVAL; - err = xfrm_state_flush(net, proto, true, false); + err = xfrm_state_flush(net, proto, true); err2 = unicast_flush_resp(sk, hdr); if (err || err2) { if (err == -ESRCH) /* empty table - go quietly */ diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index f0eb70353744..2255355e51d3 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c @@ -536,9 +536,6 @@ destroy_class: static void qfq_destroy_class(struct Qdisc *sch, struct qfq_class *cl) { - struct qfq_sched *q = qdisc_priv(sch); - - qfq_rm_from_agg(q, cl); gen_kill_estimator(&cl->rate_est); qdisc_put(cl->qdisc); kfree(cl); @@ -559,10 +556,11 @@ static int qfq_delete_class(struct Qdisc *sch, unsigned long arg, qdisc_purge_queue(cl->qdisc); qdisc_class_hash_remove(&q->clhash, &cl->common); - qfq_destroy_class(sch, cl); + qfq_rm_from_agg(q, cl); sch_tree_unlock(sch); + qfq_destroy_class(sch, cl); return 0; } @@ -1503,6 +1501,7 @@ static void qfq_destroy_qdisc(struct Qdisc *sch) for (i = 0; i < q->clhash.hashsize; i++) { hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i], common.hnode) { + qfq_rm_from_agg(q, cl); qfq_destroy_class(sch, cl); } } diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c index 81fd486b5e56..d2819baea414 100644 --- a/net/xfrm/xfrm_device.c +++ b/net/xfrm/xfrm_device.c @@ -305,7 +305,6 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, return -EINVAL; } - xfrm_set_type_offload(x); if (!x->type_offload) { NL_SET_ERR_MSG(extack, "Type doesn't support offload"); dev_put(dev); diff --git a/net/xfrm/xfrm_interface_core.c b/net/xfrm/xfrm_interface_core.c index cb1e12740c87..330a05286a56 100644 --- a/net/xfrm/xfrm_interface_core.c +++ b/net/xfrm/xfrm_interface_core.c @@ -875,7 +875,7 @@ static int xfrmi_changelink(struct net_device *dev, struct nlattr *tb[], return -EINVAL; } - if (p.collect_md) { + if (p.collect_md || xi->p.collect_md) { NL_SET_ERR_MSG(extack, "collect_md can't be changed"); return -EINVAL; } @@ -886,11 +886,6 @@ static int xfrmi_changelink(struct net_device *dev, struct nlattr *tb[], } else { if (xi->dev != dev) return -EEXIST; - if (xi->p.collect_md) { - NL_SET_ERR_MSG(extack, - "device can't be changed to collect_md"); - return -EINVAL; - } } return xfrmi_update(xi, &p); diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c index 907c3ccb440d..43fdc6ed8dd1 100644 --- a/net/xfrm/xfrm_ipcomp.c +++ b/net/xfrm/xfrm_ipcomp.c @@ -97,7 +97,7 @@ static int ipcomp_input_done2(struct sk_buff *skb, int err) struct ip_comp_hdr *ipch = ip_comp_hdr(skb); const int plen = skb->len; - skb_reset_transport_header(skb); + skb->transport_header = skb->network_header + sizeof(*ipch); return ipcomp_post_acomp(skb, err, 0) ?: skb->len < 
(plen + sizeof(ip_comp_hdr)) ? -EINVAL : @@ -313,7 +313,6 @@ void ipcomp_destroy(struct xfrm_state *x) struct ipcomp_data *ipcd = x->data; if (!ipcd) return; - xfrm_state_delete_tunnel(x); ipcomp_free_data(ipcd); kfree(ipcd); } diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index f0f66405b39d..77db3b5fe4ac 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -424,11 +424,10 @@ void xfrm_unregister_type_offload(const struct xfrm_type_offload *type, } EXPORT_SYMBOL(xfrm_unregister_type_offload); -void xfrm_set_type_offload(struct xfrm_state *x) +void xfrm_set_type_offload(struct xfrm_state *x, bool try_load) { const struct xfrm_type_offload *type = NULL; struct xfrm_state_afinfo *afinfo; - bool try_load = true; retry: afinfo = xfrm_state_get_afinfo(x->props.family); @@ -593,7 +592,7 @@ void xfrm_state_free(struct xfrm_state *x) } EXPORT_SYMBOL(xfrm_state_free); -static void ___xfrm_state_destroy(struct xfrm_state *x) +static void xfrm_state_gc_destroy(struct xfrm_state *x) { if (x->mode_cbs && x->mode_cbs->destroy_state) x->mode_cbs->destroy_state(x); @@ -607,6 +606,7 @@ static void ___xfrm_state_destroy(struct xfrm_state *x) kfree(x->coaddr); kfree(x->replay_esn); kfree(x->preplay_esn); + xfrm_unset_type_offload(x); if (x->type) { x->type->destructor(x); xfrm_put_type(x->type); @@ -631,7 +631,7 @@ static void xfrm_state_gc_task(struct work_struct *work) synchronize_rcu(); hlist_for_each_entry_safe(x, tmp, &gc_list, gclist) - ___xfrm_state_destroy(x); + xfrm_state_gc_destroy(x); } static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me) @@ -780,8 +780,6 @@ void xfrm_dev_state_free(struct xfrm_state *x) struct xfrm_dev_offload *xso = &x->xso; struct net_device *dev = READ_ONCE(xso->dev); - xfrm_unset_type_offload(x); - if (dev && dev->xfrmdev_ops) { spin_lock_bh(&xfrm_state_dev_gc_lock); if (!hlist_unhashed(&x->dev_gclist)) @@ -797,22 +795,18 @@ void xfrm_dev_state_free(struct xfrm_state *x) } #endif -void __xfrm_state_destroy(struct xfrm_state *x, bool sync) +void __xfrm_state_destroy(struct xfrm_state *x) { WARN_ON(x->km.state != XFRM_STATE_DEAD); - if (sync) { - synchronize_rcu(); - ___xfrm_state_destroy(x); - } else { - spin_lock_bh(&xfrm_state_gc_lock); - hlist_add_head(&x->gclist, &xfrm_state_gc_list); - spin_unlock_bh(&xfrm_state_gc_lock); - schedule_work(&xfrm_state_gc_work); - } + spin_lock_bh(&xfrm_state_gc_lock); + hlist_add_head(&x->gclist, &xfrm_state_gc_list); + spin_unlock_bh(&xfrm_state_gc_lock); + schedule_work(&xfrm_state_gc_work); } EXPORT_SYMBOL(__xfrm_state_destroy); +static void xfrm_state_delete_tunnel(struct xfrm_state *x); int __xfrm_state_delete(struct xfrm_state *x) { struct net *net = xs_net(x); @@ -840,6 +834,8 @@ int __xfrm_state_delete(struct xfrm_state *x) xfrm_dev_state_delete(x); + xfrm_state_delete_tunnel(x); + /* All xfrm_state objects are created by xfrm_state_alloc. * The xfrm_state_alloc call gives a reference, and that * is what we are dropping here. @@ -921,7 +917,7 @@ xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool } #endif -int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync) +int xfrm_state_flush(struct net *net, u8 proto, bool task_valid) { int i, err = 0, cnt = 0; @@ -943,10 +939,7 @@ restart: err = xfrm_state_delete(x); xfrm_audit_state_delete(x, err ? 
0 : 1, task_valid); - if (sync) - xfrm_state_put_sync(x); - else - xfrm_state_put(x); + xfrm_state_put(x); if (!err) cnt++; @@ -1307,14 +1300,8 @@ static void xfrm_hash_grow_check(struct net *net, int have_hash_collision) static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x, const struct flowi *fl, unsigned short family, struct xfrm_state **best, int *acq_in_progress, - int *error) + int *error, unsigned int pcpu_id) { - /* We need the cpu id just as a lookup key, - * we don't require it to be stable. - */ - unsigned int pcpu_id = get_cpu(); - put_cpu(); - /* Resolution logic: * 1. There is a valid state with matching selector. Done. * 2. Valid state with inappropriate selector. Skip. @@ -1381,14 +1368,15 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr, /* We need the cpu id just as a lookup key, * we don't require it to be stable. */ - pcpu_id = get_cpu(); - put_cpu(); + pcpu_id = raw_smp_processor_id(); to_put = NULL; sequence = read_seqcount_begin(&net->xfrm.xfrm_state_hash_generation); rcu_read_lock(); + xfrm_hash_ptrs_get(net, &state_ptrs); + hlist_for_each_entry_rcu(x, &pol->state_cache_list, state_cache) { if (x->props.family == encap_family && x->props.reqid == tmpl->reqid && @@ -1400,7 +1388,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr, tmpl->id.proto == x->id.proto && (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) xfrm_state_look_at(pol, x, fl, encap_family, - &best, &acquire_in_progress, &error); + &best, &acquire_in_progress, &error, pcpu_id); } if (best) @@ -1417,7 +1405,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr, tmpl->id.proto == x->id.proto && (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) xfrm_state_look_at(pol, x, fl, family, - &best, &acquire_in_progress, &error); + &best, &acquire_in_progress, &error, pcpu_id); } cached: @@ -1429,8 +1417,6 @@ cached: else if (acquire_in_progress) /* XXX: acquire_in_progress should not happen */ WARN_ON(1); - xfrm_hash_ptrs_get(net, &state_ptrs); - h = __xfrm_dst_hash(daddr, saddr, tmpl->reqid, encap_family, state_ptrs.hmask); hlist_for_each_entry_rcu(x, state_ptrs.bydst + h, bydst) { #ifdef CONFIG_XFRM_OFFLOAD @@ -1460,7 +1446,7 @@ cached: tmpl->id.proto == x->id.proto && (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) xfrm_state_look_at(pol, x, fl, family, - &best, &acquire_in_progress, &error); + &best, &acquire_in_progress, &error, pcpu_id); } if (best || acquire_in_progress) goto found; @@ -1495,7 +1481,7 @@ cached: tmpl->id.proto == x->id.proto && (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) xfrm_state_look_at(pol, x, fl, family, - &best, &acquire_in_progress, &error); + &best, &acquire_in_progress, &error, pcpu_id); } found: @@ -3096,20 +3082,17 @@ void xfrm_flush_gc(void) } EXPORT_SYMBOL(xfrm_flush_gc); -/* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */ -void xfrm_state_delete_tunnel(struct xfrm_state *x) +static void xfrm_state_delete_tunnel(struct xfrm_state *x) { if (x->tunnel) { struct xfrm_state *t = x->tunnel; - if (atomic_read(&t->tunnel_users) == 2) + if (atomic_dec_return(&t->tunnel_users) == 1) xfrm_state_delete(t); - atomic_dec(&t->tunnel_users); - xfrm_state_put_sync(t); + xfrm_state_put(t); x->tunnel = NULL; } } -EXPORT_SYMBOL(xfrm_state_delete_tunnel); u32 xfrm_state_mtu(struct xfrm_state *x, int mtu) { @@ -3314,8 +3297,8 @@ void xfrm_state_fini(struct net *net) unsigned int sz; flush_work(&net->xfrm.state_hash_work); + xfrm_state_flush(net, IPSEC_PROTO_ANY, false); 
 	flush_work(&xfrm_state_gc_work);
-	xfrm_state_flush(net, 0, false, true);
 
 	WARN_ON(!list_empty(&net->xfrm.state_all));
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 59f258daf830..684239018bec 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -977,6 +977,7 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
 	/* override default values from above */
 	xfrm_update_ae_params(x, attrs, 0);
+	xfrm_set_type_offload(x, attrs[XFRMA_OFFLOAD_DEV]);
 
 	/* configure the hardware if offload is requested */
 	if (attrs[XFRMA_OFFLOAD_DEV]) {
 		err = xfrm_dev_state_add(net, x,
@@ -2634,7 +2635,7 @@ static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
 	struct xfrm_usersa_flush *p = nlmsg_data(nlh);
 	int err;
 
-	err = xfrm_state_flush(net, p->proto, true, false);
+	err = xfrm_state_flush(net, p->proto, true);
 	if (err) {
 		if (err == -ESRCH) /* empty table */
 			return 0;
diff --git a/rust/Makefile b/rust/Makefile
index 27dec7904c3a..115b63b7d1e3 100644
--- a/rust/Makefile
+++ b/rust/Makefile
@@ -194,6 +194,7 @@ quiet_cmd_rustdoc_test = RUSTDOC T $<
 	RUST_MODFILE=test.rs \
 	OBJTREE=$(abspath $(objtree)) \
 	$(RUSTDOC) --test $(rust_common_flags) \
+		-Zcrate-attr='feature(used_with_arg)' \
 		@$(objtree)/include/generated/rustc_cfg \
 		$(rustc_target_flags) $(rustdoc_test_target_flags) \
 		$(rustdoc_test_quiet) \
diff --git a/rust/kernel/firmware.rs b/rust/kernel/firmware.rs
index 2494c96e105f..4fe621f35716 100644
--- a/rust/kernel/firmware.rs
+++ b/rust/kernel/firmware.rs
@@ -202,7 +202,7 @@ macro_rules! module_firmware {
        };
 
        #[link_section = ".modinfo"]
-       #[used]
+       #[used(compiler)]
        static __MODULE_FIRMWARE: [u8; $($builder)*::create(__MODULE_FIRMWARE_PREFIX)
            .build_length()] = $($builder)*::create(__MODULE_FIRMWARE_PREFIX).build();
    };
diff --git a/rust/kernel/init.rs b/rust/kernel/init.rs
index 8d228c237954..21ef202ab0db 100644
--- a/rust/kernel/init.rs
+++ b/rust/kernel/init.rs
@@ -231,14 +231,14 @@ macro_rules! try_init {
    ($(&$this:ident in)? $t:ident $(::<$($generics:ty),* $(,)?>)? {
        $($fields:tt)*
    }) => {
-       ::pin_init::try_init!($(&$this in)? $t $(::<$($generics),* $(,)?>)? {
+       ::pin_init::try_init!($(&$this in)? $t $(::<$($generics),*>)? {
            $($fields)*
        }? $crate::error::Error)
    };
    ($(&$this:ident in)? $t:ident $(::<$($generics:ty),* $(,)?>)? {
        $($fields:tt)*
    }? $err:ty) => {
-       ::pin_init::try_init!($(&$this in)? $t $(::<$($generics),* $(,)?>)? {
+       ::pin_init::try_init!($(&$this in)? $t $(::<$($generics),*>)? {
            $($fields)*
        }? $err)
    };
@@ -291,14 +291,14 @@ macro_rules! try_pin_init {
    ($(&$this:ident in)? $t:ident $(::<$($generics:ty),* $(,)?>)? {
        $($fields:tt)*
    }) => {
-       ::pin_init::try_pin_init!($(&$this in)? $t $(::<$($generics),* $(,)?>)? {
+       ::pin_init::try_pin_init!($(&$this in)? $t $(::<$($generics),*>)? {
            $($fields)*
        }? $crate::error::Error)
    };
    ($(&$this:ident in)? $t:ident $(::<$($generics:ty),* $(,)?>)? {
        $($fields:tt)*
    }? $err:ty) => {
-       ::pin_init::try_pin_init!($(&$this in)? $t $(::<$($generics),* $(,)?>)? {
+       ::pin_init::try_pin_init!($(&$this in)? $t $(::<$($generics),*>)? {
            $($fields)*
        }? $err)
    };
diff --git a/rust/kernel/kunit.rs b/rust/kernel/kunit.rs
index 4b8cdcb21e77..b9e65905e121 100644
--- a/rust/kernel/kunit.rs
+++ b/rust/kernel/kunit.rs
@@ -302,7 +302,7 @@ macro_rules! kunit_unsafe_test_suite {
            is_init: false,
        };
 
-       #[used]
+       #[used(compiler)]
        #[allow(unused_unsafe)]
        #[cfg_attr(not(target_os = "macos"), link_section = ".kunit_test_suites")]
        static mut KUNIT_TEST_SUITE_ENTRY: *const ::kernel::bindings::kunit_suite =
diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs
index 6b4774b2b1c3..e13d6ed88fa6 100644
--- a/rust/kernel/lib.rs
+++ b/rust/kernel/lib.rs
@@ -34,6 +34,9 @@
 // Expected to become stable.
 #![feature(arbitrary_self_types)]
 //
+// To be determined.
+#![feature(used_with_arg)]
+//
 // `feature(derive_coerce_pointee)` is expected to become stable. Before Rust
 // 1.84.0, it did not exist, so enable the predecessor features.
 #![cfg_attr(CONFIG_RUSTC_HAS_COERCE_POINTEE, feature(derive_coerce_pointee))]
diff --git a/rust/macros/module.rs b/rust/macros/module.rs
index 2ddd2eeb2852..75efc6eeeafc 100644
--- a/rust/macros/module.rs
+++ b/rust/macros/module.rs
@@ -57,7 +57,7 @@ impl<'a> ModInfoBuilder<'a> {
            {cfg}
            #[doc(hidden)]
            #[cfg_attr(not(target_os = \"macos\"), link_section = \".modinfo\")]
-           #[used]
+           #[used(compiler)]
            pub static __{module}_{counter}: [u8; {length}] = *{string};
        ",
            cfg = if builtin {
@@ -249,7 +249,7 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream {
            // key or a new section. For the moment, keep it simple.
            #[cfg(MODULE)]
            #[doc(hidden)]
-           #[used]
+           #[used(compiler)]
            static __IS_RUST_MODULE: () = ();
 
            static mut __MOD: ::core::mem::MaybeUninit<{type_}> =
@@ -273,7 +273,7 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream {
            #[cfg(MODULE)]
            #[doc(hidden)]
-           #[used]
+           #[used(compiler)]
            #[link_section = \".init.data\"]
            static __UNIQUE_ID___addressable_init_module: unsafe extern \"C\" fn() -> i32 = init_module;
 
@@ -293,7 +293,7 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream {
            #[cfg(MODULE)]
            #[doc(hidden)]
-           #[used]
+           #[used(compiler)]
            #[link_section = \".exit.data\"]
            static __UNIQUE_ID___addressable_cleanup_module: extern \"C\" fn() = cleanup_module;
 
@@ -303,7 +303,7 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream {
            #[cfg(not(CONFIG_HAVE_ARCH_PREL32_RELOCATIONS))]
            #[doc(hidden)]
            #[link_section = \"{initcall_section}\"]
-           #[used]
+           #[used(compiler)]
            pub static __{ident}_initcall: extern \"C\" fn() -> ::kernel::ffi::c_int = __{ident}_init;
 
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index a6461ea411f7..ba71b27aa363 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -312,10 +312,11 @@ $(obj)/%.lst: $(obj)/%.c FORCE
 # - Stable since Rust 1.82.0: `feature(asm_const)`, `feature(raw_ref_op)`.
 # - Stable since Rust 1.87.0: `feature(asm_goto)`.
 # - Expected to become stable: `feature(arbitrary_self_types)`.
+# - To be determined: `feature(used_with_arg)`.
 #
 # Please see https://github.com/Rust-for-Linux/linux/issues/2 for details on
 # the unstable features in use.
-rust_allowed_features := asm_const,asm_goto,arbitrary_self_types,lint_reasons,raw_ref_op
+rust_allowed_features := asm_const,asm_goto,arbitrary_self_types,lint_reasons,raw_ref_op,used_with_arg
 
 # `--out-dir` is required to avoid temporaries being created by `rustc` in the
 # current working directory, which may be not accessible in the out-of-tree
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index 840bb9cfe789..a66f258cafaa 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -1269,62 +1269,62 @@ static long snd_compr_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
 	stream = &data->stream;
 
 	guard(mutex)(&stream->device->lock);
-	switch (_IOC_NR(cmd)) {
-	case _IOC_NR(SNDRV_COMPRESS_IOCTL_VERSION):
+	switch (cmd) {
+	case SNDRV_COMPRESS_IOCTL_VERSION:
 		return put_user(SNDRV_COMPRESS_VERSION, (int __user *)arg) ? -EFAULT : 0;
-	case _IOC_NR(SNDRV_COMPRESS_GET_CAPS):
+	case SNDRV_COMPRESS_GET_CAPS:
 		return snd_compr_get_caps(stream, arg);
 #ifndef COMPR_CODEC_CAPS_OVERFLOW
-	case _IOC_NR(SNDRV_COMPRESS_GET_CODEC_CAPS):
+	case SNDRV_COMPRESS_GET_CODEC_CAPS:
 		return snd_compr_get_codec_caps(stream, arg);
 #endif
-	case _IOC_NR(SNDRV_COMPRESS_SET_PARAMS):
+	case SNDRV_COMPRESS_SET_PARAMS:
 		return snd_compr_set_params(stream, arg);
-	case _IOC_NR(SNDRV_COMPRESS_GET_PARAMS):
+	case SNDRV_COMPRESS_GET_PARAMS:
 		return snd_compr_get_params(stream, arg);
-	case _IOC_NR(SNDRV_COMPRESS_SET_METADATA):
+	case SNDRV_COMPRESS_SET_METADATA:
 		return snd_compr_set_metadata(stream, arg);
-	case _IOC_NR(SNDRV_COMPRESS_GET_METADATA):
+	case SNDRV_COMPRESS_GET_METADATA:
 		return snd_compr_get_metadata(stream, arg);
 	}
 
 	if (stream->direction == SND_COMPRESS_ACCEL) {
 #if IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL)
-		switch (_IOC_NR(cmd)) {
-		case _IOC_NR(SNDRV_COMPRESS_TASK_CREATE):
+		switch (cmd) {
+		case SNDRV_COMPRESS_TASK_CREATE:
 			return snd_compr_task_create(stream, arg);
-		case _IOC_NR(SNDRV_COMPRESS_TASK_FREE):
+		case SNDRV_COMPRESS_TASK_FREE:
 			return snd_compr_task_seq(stream, arg, snd_compr_task_free_one);
-		case _IOC_NR(SNDRV_COMPRESS_TASK_START):
+		case SNDRV_COMPRESS_TASK_START:
 			return snd_compr_task_start_ioctl(stream, arg);
-		case _IOC_NR(SNDRV_COMPRESS_TASK_STOP):
+		case SNDRV_COMPRESS_TASK_STOP:
 			return snd_compr_task_seq(stream, arg, snd_compr_task_stop_one);
-		case _IOC_NR(SNDRV_COMPRESS_TASK_STATUS):
+		case SNDRV_COMPRESS_TASK_STATUS:
 			return snd_compr_task_status_ioctl(stream, arg);
 		}
 #endif
 		return -ENOTTY;
 	}
 
-	switch (_IOC_NR(cmd)) {
-	case _IOC_NR(SNDRV_COMPRESS_TSTAMP):
+	switch (cmd) {
+	case SNDRV_COMPRESS_TSTAMP:
 		return snd_compr_tstamp(stream, arg);
-	case _IOC_NR(SNDRV_COMPRESS_AVAIL):
+	case SNDRV_COMPRESS_AVAIL:
 		return snd_compr_ioctl_avail(stream, arg);
-	case _IOC_NR(SNDRV_COMPRESS_PAUSE):
+	case SNDRV_COMPRESS_PAUSE:
 		return snd_compr_pause(stream);
-	case _IOC_NR(SNDRV_COMPRESS_RESUME):
+	case SNDRV_COMPRESS_RESUME:
 		return snd_compr_resume(stream);
-	case _IOC_NR(SNDRV_COMPRESS_START):
+	case SNDRV_COMPRESS_START:
 		return snd_compr_start(stream);
-	case _IOC_NR(SNDRV_COMPRESS_STOP):
+	case SNDRV_COMPRESS_STOP:
 		return snd_compr_stop(stream);
-	case _IOC_NR(SNDRV_COMPRESS_DRAIN):
+	case SNDRV_COMPRESS_DRAIN:
 		return snd_compr_drain(stream);
-	case _IOC_NR(SNDRV_COMPRESS_PARTIAL_DRAIN):
+	case SNDRV_COMPRESS_PARTIAL_DRAIN:
 		return snd_compr_partial_drain(stream);
-	case _IOC_NR(SNDRV_COMPRESS_NEXT_TRACK):
+	case SNDRV_COMPRESS_NEXT_TRACK:
 		return snd_compr_next_track(stream);
 	}
 
diff --git a/sound/pci/hda/cs35l56_hda.c b/sound/pci/hda/cs35l56_hda.c
index 3f2fd32f4ad9..886c53184fec 100644
--- a/sound/pci/hda/cs35l56_hda.c
+++ b/sound/pci/hda/cs35l56_hda.c
@@ -873,6 +873,52 @@ static int cs35l56_hda_system_resume(struct device *dev)
 	return 0;
 }
 
+static int cs35l56_hda_fixup_yoga9(struct cs35l56_hda *cs35l56, int *bus_addr)
+{
+	/* The cirrus,dev-index property has the wrong values */
+	switch (*bus_addr) {
+	case 0x30:
+		cs35l56->index = 1;
+		return 0;
+	case 0x31:
+		cs35l56->index = 0;
+		return 0;
+	default:
+		/* There is a pseudo-address for broadcast to both amps - ignore it */
+		dev_dbg(cs35l56->base.dev, "Ignoring I2C address %#x\n", *bus_addr);
+		return 0;
+	}
+}
+
+static const struct {
+	const char *sub;
+	int (*fixup_fn)(struct cs35l56_hda *cs35l56, int *bus_addr);
+} cs35l56_hda_fixups[] = {
+	{
+		.sub = "17AA390B", /* Lenovo Yoga Book 9i GenX */
+		.fixup_fn = cs35l56_hda_fixup_yoga9,
+	},
+};
+
+static int cs35l56_hda_apply_platform_fixups(struct cs35l56_hda *cs35l56, const char *sub,
+					     int *bus_addr)
+{
+	int i;
+
+	if (IS_ERR(sub))
+		return 0;
+
+	for (i = 0; i < ARRAY_SIZE(cs35l56_hda_fixups); i++) {
+		if (strcasecmp(cs35l56_hda_fixups[i].sub, sub) == 0) {
+			dev_dbg(cs35l56->base.dev, "Applying fixup for %s\n",
+				cs35l56_hda_fixups[i].sub);
+			return (cs35l56_hda_fixups[i].fixup_fn)(cs35l56, bus_addr);
+		}
+	}
+
+	return 0;
+}
+
 static int cs35l56_hda_read_acpi(struct cs35l56_hda *cs35l56, int hid, int id)
 {
 	u32 values[HDA_MAX_COMPONENTS];
@@ -897,39 +943,47 @@ static int cs35l56_hda_read_acpi(struct cs35l56_hda *cs35l56, int hid, int id)
 		ACPI_COMPANION_SET(cs35l56->base.dev, adev);
 	}
 
-	property = "cirrus,dev-index";
-	ret = device_property_count_u32(cs35l56->base.dev, property);
-	if (ret <= 0)
-		goto err;
-
-	if (ret > ARRAY_SIZE(values)) {
-		ret = -EINVAL;
-		goto err;
-	}
-	nval = ret;
-
-	ret = device_property_read_u32_array(cs35l56->base.dev, property, values, nval);
-	if (ret)
-		goto err;
-
+	/* Initialize things that could be overwritten by a fixup */
 	cs35l56->index = -1;
-	for (i = 0; i < nval; i++) {
-		if (values[i] == id) {
-			cs35l56->index = i;
-			break;
-		}
-	}
-	/*
-	 * It's not an error for the ID to be missing: for I2C there can be
-	 * an alias address that is not a real device. So reject silently.
-	 */
-	if (cs35l56->index == -1) {
-		dev_dbg(cs35l56->base.dev, "No index found in %s\n", property);
-		ret = -ENODEV;
-		goto err;
-	}
 
 	sub = acpi_get_subsystem_id(ACPI_HANDLE(cs35l56->base.dev));
+	ret = cs35l56_hda_apply_platform_fixups(cs35l56, sub, &id);
+	if (ret)
+		return ret;
+
+	if (cs35l56->index == -1) {
+		property = "cirrus,dev-index";
+		ret = device_property_count_u32(cs35l56->base.dev, property);
+		if (ret <= 0)
+			goto err;
+
+		if (ret > ARRAY_SIZE(values)) {
+			ret = -EINVAL;
+			goto err;
+		}
+		nval = ret;
+
+		ret = device_property_read_u32_array(cs35l56->base.dev, property, values, nval);
+		if (ret)
+			goto err;
+
+		for (i = 0; i < nval; i++) {
+			if (values[i] == id) {
+				cs35l56->index = i;
+				break;
+			}
+		}
+
+		/*
+		 * It's not an error for the ID to be missing: for I2C there can be
+		 * an alias address that is not a real device. So reject silently.
+		 */
+		if (cs35l56->index == -1) {
+			dev_dbg(cs35l56->base.dev, "No index found in %s\n", property);
+			ret = -ENODEV;
+			goto err;
+		}
+	}
 
 	if (IS_ERR(sub)) {
 		dev_info(cs35l56->base.dev,
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 060db37eab83..d91aac06adde 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -7497,6 +7497,9 @@ static void alc287_fixup_yoga9_14iap7_bass_spk_pin(struct hda_codec *codec,
 	};
 	struct alc_spec *spec = codec->spec;
 
+	/* Support Audio mute LED and Mic mute LED on keyboard */
+	hda_fixup_ideapad_acpi(codec, fix, action);
+
 	switch (action) {
 	case HDA_FIXUP_ACT_PRE_PROBE:
 		snd_hda_apply_pincfgs(codec, pincfgs);
@@ -10814,6 +10817,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x103c, 0x8b97, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
 	SND_PCI_QUIRK(0x103c, 0x8bb3, "HP Slim OMEN", ALC287_FIXUP_CS35L41_I2C_2),
 	SND_PCI_QUIRK(0x103c, 0x8bb4, "HP Slim OMEN", ALC287_FIXUP_CS35L41_I2C_2),
+	SND_PCI_QUIRK(0x103c, 0x8bbe, "HP Victus 16-r0xxx (MB 8BBE)", ALC245_FIXUP_HP_MUTE_LED_COEFBIT),
 	SND_PCI_QUIRK(0x103c, 0x8bc8, "HP Victus 15-fa1xxx", ALC245_FIXUP_HP_MUTE_LED_COEFBIT),
 	SND_PCI_QUIRK(0x103c, 0x8bcd, "HP Omen 16-xd0xxx", ALC245_FIXUP_HP_MUTE_LED_V1_COEFBIT),
 	SND_PCI_QUIRK(0x103c, 0x8bdd, "HP Envy 17", ALC287_FIXUP_CS35L41_I2C_2),
@@ -11006,6 +11010,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
 	SND_PCI_QUIRK(0x1043, 0x1a63, "ASUS UX3405MA", ALC245_FIXUP_CS35L41_SPI_2),
 	SND_PCI_QUIRK(0x1043, 0x1a83, "ASUS UM5302LA", ALC294_FIXUP_CS35L41_I2C_2),
+	SND_PCI_QUIRK(0x1043, 0x1a8e, "ASUS G712LWS", ALC294_FIXUP_LENOVO_MIC_LOCATION),
 	SND_PCI_QUIRK(0x1043, 0x1a8f, "ASUS UX582ZS", ALC245_FIXUP_CS35L41_SPI_2),
 	SND_PCI_QUIRK(0x1043, 0x1b11, "ASUS UX431DA", ALC294_FIXUP_ASUS_COEF_1B),
 	SND_PCI_QUIRK(0x1043, 0x1b13, "ASUS U41SV/GA403U", ALC285_FIXUP_ASUS_GA403U_HEADSET_MIC),
diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
index 97e340140d0c..f210a253da9f 100644
--- a/sound/soc/amd/yc/acp6x-mach.c
+++ b/sound/soc/amd/yc/acp6x-mach.c
@@ -416,6 +416,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "M6500RC"),
 		}
 	},
+	{
+		.driver_data = &acp6x_card,
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "M6501RM"),
+		}
+	},
 	{
 		.driver_data = &acp6x_card,
 		.matches = {
@@ -535,6 +542,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "OMEN by HP Gaming Laptop 16z-n000"),
 		}
 	},
+	{
+		.driver_data = &acp6x_card,
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Victus by HP Gaming Laptop 15-fb1xxx"),
+		}
+	},
 	{
 		.driver_data = &acp6x_card,
 		.matches = {
@@ -584,6 +598,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
 			DMI_MATCH(DMI_BOARD_NAME, "8A7F"),
 		}
 	},
+	{
+		.driver_data = &acp6x_card,
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
+			DMI_MATCH(DMI_BOARD_NAME, "8A81"),
+		}
+	},
 	{
 		.driver_data = &acp6x_card,
 		.matches = {
diff --git a/sound/soc/codecs/rt5660.c b/sound/soc/codecs/rt5660.c
index 82b92e83be4c..44c3a3b92f98 100644
--- a/sound/soc/codecs/rt5660.c
+++ b/sound/soc/codecs/rt5660.c
@@ -1315,14 +1315,17 @@ static int rt5660_i2c_probe(struct i2c_client *i2c)
 		regmap_update_bits(rt5660->regmap, RT5660_GPIO_CTRL1,
 			RT5660_GP1_PIN_MASK, RT5660_GP1_PIN_DMIC1_SCL);
 
-		if (rt5660->pdata.dmic1_data_pin == RT5660_DMIC1_DATA_GPIO2)
+		if (rt5660->pdata.dmic1_data_pin == RT5660_DMIC1_DATA_GPIO2) {
 			regmap_update_bits(rt5660->regmap, RT5660_DMIC_CTRL1,
 				RT5660_SEL_DMIC_DATA_MASK,
 				RT5660_SEL_DMIC_DATA_GPIO2);
-		else if (rt5660->pdata.dmic1_data_pin == RT5660_DMIC1_DATA_IN1P)
+			regmap_update_bits(rt5660->regmap, RT5660_GPIO_CTRL1,
+				RT5660_GP2_PIN_MASK, RT5660_GP2_PIN_DMIC1_SDA);
+		} else if (rt5660->pdata.dmic1_data_pin == RT5660_DMIC1_DATA_IN1P) {
 			regmap_update_bits(rt5660->regmap, RT5660_DMIC_CTRL1,
 				RT5660_SEL_DMIC_DATA_MASK,
 				RT5660_SEL_DMIC_DATA_IN1P);
+		}
 	}
 
 	return devm_snd_soc_register_component(&i2c->dev,
diff --git a/sound/soc/intel/avs/pcm.c b/sound/soc/intel/avs/pcm.c
index ccf90428126d..0efe490024b0 100644
--- a/sound/soc/intel/avs/pcm.c
+++ b/sound/soc/intel/avs/pcm.c
@@ -1570,11 +1570,13 @@ static void avs_component_hda_unregister_dais(struct snd_soc_component *componen
 {
 	struct snd_soc_acpi_mach *mach;
 	struct snd_soc_dai *dai, *save;
+	struct avs_mach_pdata *pdata;
 	struct hda_codec *codec;
 	char name[32];
 
 	mach = dev_get_platdata(component->card->dev);
-	codec = mach->pdata;
+	pdata = mach->pdata;
+	codec = pdata->codec;
 	snprintf(name, sizeof(name), "%s-cpu", dev_name(&codec->core.dev));
 
 	for_each_component_dais_safe(component, dai, save) {
diff --git a/sound/soc/intel/boards/Kconfig b/sound/soc/intel/boards/Kconfig
index 128b6876af83..c23fdb6aad4c 100644
--- a/sound/soc/intel/boards/Kconfig
+++ b/sound/soc/intel/boards/Kconfig
@@ -11,7 +11,7 @@ menuconfig SND_SOC_INTEL_MACH
 	  kernel: saying N will just cause the configurator to skip all
 	  the questions about Intel ASoC machine drivers.
 
-if SND_SOC_INTEL_MACH
+if SND_SOC_INTEL_MACH && (SND_SOC_SOF_INTEL_COMMON || !SND_SOC_SOF_INTEL_COMMON)
 
 config SND_SOC_INTEL_USER_FRIENDLY_LONG_NAMES
 	bool "Use more user friendly long card names"
diff --git a/sound/soc/intel/common/soc-acpi-intel-arl-match.c b/sound/soc/intel/common/soc-acpi-intel-arl-match.c
index 1ad704ca2c5f..6bf7a6250ddc 100644
--- a/sound/soc/intel/common/soc-acpi-intel-arl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-arl-match.c
@@ -238,6 +238,15 @@ static const struct snd_soc_acpi_adr_device rt722_0_single_adr[] = {
 	}
 };
 
+static const struct snd_soc_acpi_adr_device rt1316_3_single_adr[] = {
+	{
+		.adr = 0x000330025D131601ull,
+		.num_endpoints = 1,
+		.endpoints = &single_endpoint,
+		.name_prefix = "rt1316-1"
+	}
+};
+
 static const struct snd_soc_acpi_adr_device rt1320_2_single_adr[] = {
 	{
 		.adr = 0x000230025D132001ull,
@@ -368,6 +377,20 @@ static const struct snd_soc_acpi_link_adr arl_sdca_rvp[] = {
 	{}
 };
 
+static const struct snd_soc_acpi_link_adr arl_rt711_l0_rt1316_l3[] = {
+	{
+		.mask = BIT(0),
+		.num_adr = ARRAY_SIZE(rt711_sdca_0_adr),
+		.adr_d = rt711_sdca_0_adr,
+	},
+	{
+		.mask = BIT(3),
+		.num_adr = ARRAY_SIZE(rt1316_3_single_adr),
+		.adr_d = rt1316_3_single_adr,
+	},
+	{}
+};
+
 static const struct snd_soc_acpi_link_adr arl_rt722_l0_rt1320_l2[] = {
 	{
 		.mask = BIT(0),
@@ -481,6 +504,12 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_arl_sdw_machines[] = {
 		.sof_tplg_filename = "sof-arl-cs42l43-l2.tplg",
 		.get_function_tplg_files = sof_sdw_get_tplg_files,
 	},
+	{
+		.link_mask = BIT(0) | BIT(3),
+		.links = arl_rt711_l0_rt1316_l3,
+		.drv_name = "sof_sdw",
+		.sof_tplg_filename = "sof-arl-rt711-l0-rt1316-l3.tplg",
+	},
 	{
 		.link_mask = 0x1, /* link0 required */
 		.links = arl_rvp,
diff --git a/tools/hv/hv_fcopy_uio_daemon.c b/tools/hv/hv_fcopy_uio_daemon.c
index 0198321d14a2..92e8307b2a46 100644
--- a/tools/hv/hv_fcopy_uio_daemon.c
+++ b/tools/hv/hv_fcopy_uio_daemon.c
@@ -35,7 +35,10 @@
 #define WIN8_SRV_MINOR		1
 #define WIN8_SRV_VERSION	(WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR)
 
-#define FCOPY_UIO "/sys/bus/vmbus/devices/eb765408-105f-49b6-b4aa-c123b64d17d4/uio"
+#define FCOPY_DEVICE_PATH(subdir) \
+	"/sys/bus/vmbus/devices/eb765408-105f-49b6-b4aa-c123b64d17d4/" #subdir
+#define FCOPY_UIO_PATH		FCOPY_DEVICE_PATH(uio)
+#define FCOPY_CHANNELS_PATH	FCOPY_DEVICE_PATH(channels)
 
 #define FCOPY_VER_COUNT 1
 static const int fcopy_versions[] = {
@@ -47,9 +50,62 @@ static const int fw_versions[] = {
 	UTIL_FW_VERSION
 };
 
-#define HV_RING_SIZE 0x4000 /* 16KB ring buffer size */
+static uint32_t get_ring_buffer_size(void)
+{
+	char ring_path[PATH_MAX];
+	DIR *dir;
+	struct dirent *entry;
+	struct stat st;
+	uint32_t ring_size = 0;
+	int retry_count = 0;
 
-static unsigned char desc[HV_RING_SIZE];
+	/* Find the channel directory */
+	dir = opendir(FCOPY_CHANNELS_PATH);
+	if (!dir) {
+		usleep(100 * 1000); /* Avoid race with kernel, wait 100ms and retry once */
+		dir = opendir(FCOPY_CHANNELS_PATH);
+		if (!dir) {
+			syslog(LOG_ERR, "Failed to open channels directory: %s", strerror(errno));
+			return 0;
+		}
+	}
+
+retry_once:
+	while ((entry = readdir(dir)) != NULL) {
+		if (entry->d_type == DT_DIR && strcmp(entry->d_name, ".") != 0 &&
+		    strcmp(entry->d_name, "..") != 0) {
+			snprintf(ring_path, sizeof(ring_path), "%s/%s/ring",
+				 FCOPY_CHANNELS_PATH, entry->d_name);
+
+			if (stat(ring_path, &st) == 0) {
+				/*
+				 * stat returns size of Tx, Rx rings combined,
+				 * so take half of it for individual ring size.
+				 */
+				ring_size = (uint32_t)st.st_size / 2;
+				syslog(LOG_INFO, "Ring buffer size from %s: %u bytes",
+				       ring_path, ring_size);
+				break;
+			}
+		}
+	}
+
+	if (!ring_size && retry_count == 0) {
+		retry_count = 1;
+		rewinddir(dir);
+		usleep(100 * 1000); /* Wait 100ms and retry once */
+		goto retry_once;
+	}
+
+	closedir(dir);
+
+	if (!ring_size)
+		syslog(LOG_ERR, "Could not determine ring size");
+
+	return ring_size;
+}
+
+static unsigned char *desc;
 
 static int target_fd;
 static char target_fname[PATH_MAX];
@@ -62,8 +118,11 @@ static int hv_fcopy_create_file(char *file_name, char *path_name, __u32 flags)
 
 	filesize = 0;
 	p = path_name;
-	snprintf(target_fname, sizeof(target_fname), "%s/%s",
-		 path_name, file_name);
+	if (snprintf(target_fname, sizeof(target_fname), "%s/%s",
+		     path_name, file_name) >= sizeof(target_fname)) {
+		syslog(LOG_ERR, "target file name is too long: %s/%s", path_name, file_name);
+		goto done;
+	}
 
 	/*
 	 * Check to see if the path is already in place; if not,
@@ -270,7 +329,7 @@ static void wcstoutf8(char *dest, const __u16 *src, size_t dest_size)
 {
 	size_t len = 0;
 
-	while (len < dest_size) {
+	while (len < dest_size && *src) {
 		if (src[len] < 0x80)
 			dest[len++] = (char)(*src++);
 		else
@@ -282,27 +341,15 @@ static void wcstoutf8(char *dest, const __u16 *src, size_t dest_size)
 
 static int hv_fcopy_start(struct hv_start_fcopy *smsg_in)
 {
+	/*
+	 * file_name and path_name should have the same length as the
+	 * corresponding members of hv_start_fcopy.
+	 */
+	char file_name[W_MAX_PATH], path_name[W_MAX_PATH];
+
 	setlocale(LC_ALL, "en_US.utf8");
-	size_t file_size, path_size;
-	char *file_name, *path_name;
-	char *in_file_name = (char *)smsg_in->file_name;
-	char *in_path_name = (char *)smsg_in->path_name;
-
-	file_size = wcstombs(NULL, (const wchar_t *restrict)in_file_name, 0) + 1;
-	path_size = wcstombs(NULL, (const wchar_t *restrict)in_path_name, 0) + 1;
-
-	file_name = (char *)malloc(file_size * sizeof(char));
-	path_name = (char *)malloc(path_size * sizeof(char));
-
-	if (!file_name || !path_name) {
-		free(file_name);
-		free(path_name);
-		syslog(LOG_ERR, "Can't allocate memory for file name and/or path name");
-		return HV_E_FAIL;
-	}
-
-	wcstoutf8(file_name, (__u16 *)in_file_name, file_size);
-	wcstoutf8(path_name, (__u16 *)in_path_name, path_size);
+	wcstoutf8(file_name, smsg_in->file_name, W_MAX_PATH - 1);
+	wcstoutf8(path_name, smsg_in->path_name, W_MAX_PATH - 1);
 
 	return hv_fcopy_create_file(file_name, path_name, smsg_in->copy_flags);
 }
@@ -406,7 +453,7 @@ int main(int argc, char *argv[])
 	int daemonize = 1, long_index = 0, opt, ret = -EINVAL;
 	struct vmbus_br txbr, rxbr;
 	void *ring;
-	uint32_t len = HV_RING_SIZE;
+	uint32_t ring_size, len;
 	char uio_name[NAME_MAX] = {0};
 	char uio_dev_path[PATH_MAX] = {0};
 
@@ -437,7 +484,20 @@ int main(int argc, char *argv[])
 	openlog("HV_UIO_FCOPY", 0, LOG_USER);
 	syslog(LOG_INFO, "starting; pid is:%d", getpid());
 
-	fcopy_get_first_folder(FCOPY_UIO, uio_name);
+	ring_size = get_ring_buffer_size();
+	if (!ring_size) {
+		ret = -ENODEV;
+		goto exit;
+	}
+
+	desc = malloc(ring_size * sizeof(unsigned char));
+	if (!desc) {
+		syslog(LOG_ERR, "malloc failed for desc buffer");
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	fcopy_get_first_folder(FCOPY_UIO_PATH, uio_name);
 	snprintf(uio_dev_path, sizeof(uio_dev_path), "/dev/%s", uio_name);
 
 	fcopy_fd = open(uio_dev_path, O_RDWR);
@@ -445,17 +505,17 @@ int main(int argc, char *argv[])
 		syslog(LOG_ERR, "open %s failed; error: %d %s",
 		       uio_dev_path, errno, strerror(errno));
 		ret = fcopy_fd;
-		goto exit;
+		goto free_desc;
 	}
 
-	ring = vmbus_uio_map(&fcopy_fd, HV_RING_SIZE);
+	ring = vmbus_uio_map(&fcopy_fd, ring_size);
 	if (!ring) {
 		ret = errno;
 		syslog(LOG_ERR, "mmap ringbuffer failed; error: %d %s", ret, strerror(ret));
 		goto close;
 	}
-	vmbus_br_setup(&txbr, ring, HV_RING_SIZE);
-	vmbus_br_setup(&rxbr, (char *)ring + HV_RING_SIZE, HV_RING_SIZE);
+	vmbus_br_setup(&txbr, ring, ring_size);
+	vmbus_br_setup(&rxbr, (char *)ring + ring_size, ring_size);
 
 	rxbr.vbr->imask = 0;
 
@@ -472,7 +532,7 @@ int main(int argc, char *argv[])
 			goto close;
 		}
 
-		len = HV_RING_SIZE;
+		len = ring_size;
 		ret = rte_vmbus_chan_recv_raw(&rxbr, desc, &len);
 		if (unlikely(ret <= 0)) {
 			/* This indicates a failure to communicate (or worse) */
@@ -492,6 +552,8 @@ int main(int argc, char *argv[])
 	}
 close:
 	close(fcopy_fd);
+free_desc:
+	free(desc);
 exit:
 	return ret;
 }
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 52e353368f58..d41ee26b9443 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -735,7 +735,7 @@ struct bpf_object {
 
 	struct usdt_manager *usdt_man;
 
-	struct bpf_map *arena_map;
+	int arena_map_idx;
 	void *arena_data;
 	size_t arena_data_sz;
 
@@ -1517,6 +1517,7 @@ static struct bpf_object *bpf_object__new(const char *path,
 	obj->efile.obj_buf_sz = obj_buf_sz;
 	obj->efile.btf_maps_shndx = -1;
 	obj->kconfig_map_idx = -1;
+	obj->arena_map_idx = -1;
 	obj->kern_version = get_kernel_version();
 	obj->state = OBJ_OPEN;
 
@@ -2964,7 +2965,7 @@ static int init_arena_map_data(struct bpf_object *obj, struct bpf_map *map,
 	const long page_sz = sysconf(_SC_PAGE_SIZE);
 	size_t mmap_sz;
 
-	mmap_sz = bpf_map_mmap_sz(obj->arena_map);
+	mmap_sz = bpf_map_mmap_sz(map);
 	if (roundup(data_sz, page_sz) > mmap_sz) {
 		pr_warn("elf: sec '%s': declared ARENA map size (%zu) is too small to hold global __arena variables of size %zu\n",
 			sec_name, mmap_sz, data_sz);
@@ -3038,12 +3039,12 @@ static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
 		if (map->def.type != BPF_MAP_TYPE_ARENA)
 			continue;
 
-		if (obj->arena_map) {
+		if (obj->arena_map_idx >= 0) {
 			pr_warn("map '%s': only single ARENA map is supported (map '%s' is also ARENA)\n",
-				map->name, obj->arena_map->name);
+				map->name, obj->maps[obj->arena_map_idx].name);
 			return -EINVAL;
 		}
-		obj->arena_map = map;
+		obj->arena_map_idx = i;
 
 		if (obj->efile.arena_data) {
 			err = init_arena_map_data(obj, map, ARENA_SEC, obj->efile.arena_data_shndx,
@@ -3053,7 +3054,7 @@ static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
 				return err;
 		}
 	}
-	if (obj->efile.arena_data && !obj->arena_map) {
+	if (obj->efile.arena_data && obj->arena_map_idx < 0) {
 		pr_warn("elf: sec '%s': to use global __arena variables the ARENA map should be explicitly declared in SEC(\".maps\")\n",
 			ARENA_SEC);
 		return -ENOENT;
@@ -4583,8 +4584,13 @@ static int bpf_program__record_reloc(struct bpf_program *prog,
 	if (shdr_idx == obj->efile.arena_data_shndx) {
 		reloc_desc->type = RELO_DATA;
 		reloc_desc->insn_idx = insn_idx;
-		reloc_desc->map_idx = obj->arena_map - obj->maps;
+		reloc_desc->map_idx = obj->arena_map_idx;
 		reloc_desc->sym_off = sym->st_value;
+
+		map = &obj->maps[obj->arena_map_idx];
+		pr_debug("prog '%s': found arena map %d (%s, sec %d, off %zu) for insn %u\n",
+			 prog->name, obj->arena_map_idx, map->name, map->sec_idx,
+			 map->sec_offset, insn_idx);
 		return 0;
 	}
 
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index d967ac001498..67d76f3a1dce 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -224,6 +224,7 @@ static bool is_rust_noreturn(const struct symbol *func)
 	       str_ends_with(func->name, "_4core9panicking14panic_explicit")			||
 	       str_ends_with(func->name, "_4core9panicking14panic_nounwind")			||
 	       str_ends_with(func->name, "_4core9panicking18panic_bounds_check")		||
+	       str_ends_with(func->name, "_4core9panicking18panic_nounwind_fmt")		||
 	       str_ends_with(func->name, "_4core9panicking19assert_failed_inner")		||
 	       str_ends_with(func->name, "_4core9panicking30panic_null_pointer_dereference")	||
 	       str_ends_with(func->name, "_4core9panicking36panic_misaligned_pointer_dereference") ||
diff --git a/tools/testing/selftests/bpf/prog_tests/recursive_attach.c b/tools/testing/selftests/bpf/prog_tests/recursive_attach.c
index 8100509e561b..0ffa01d54ce2 100644
--- a/tools/testing/selftests/bpf/prog_tests/recursive_attach.c
+++ b/tools/testing/selftests/bpf/prog_tests/recursive_attach.c
@@ -149,3 +149,70 @@ close_prog:
 	fentry_recursive_target__destroy(target_skel);
 	fentry_recursive__destroy(tracing_skel);
 }
+
+static void *fentry_target_test_run(void *arg)
+{
+	for (;;) {
+		int prog_fd = __atomic_load_n((int *)arg, __ATOMIC_SEQ_CST);
+		LIBBPF_OPTS(bpf_test_run_opts, topts);
+		int err;
+
+		if (prog_fd == -1)
+			break;
+		err = bpf_prog_test_run_opts(prog_fd, &topts);
+		if (!ASSERT_OK(err, "fentry_target test_run"))
+			break;
+	}
+
+	return NULL;
+}
+
+void test_fentry_attach_stress(void)
+{
+	struct fentry_recursive_target *target_skel = NULL;
+	struct fentry_recursive *tracing_skel = NULL;
+	struct bpf_program *prog;
+	int err, i, tgt_prog_fd;
+	pthread_t thread;
+
+	target_skel = fentry_recursive_target__open_and_load();
+	if (!ASSERT_OK_PTR(target_skel,
+			   "fentry_recursive_target__open_and_load"))
+		goto close_prog;
+	tgt_prog_fd = bpf_program__fd(target_skel->progs.fentry_target);
+	err = pthread_create(&thread, NULL,
+			     fentry_target_test_run, &tgt_prog_fd);
+	if (!ASSERT_OK(err, "pthread_create"))
+		goto close_prog;
+
+	for (i = 0; i < 1000; i++) {
+		tracing_skel = fentry_recursive__open();
+		if (!ASSERT_OK_PTR(tracing_skel, "fentry_recursive__open"))
+			goto stop_thread;
+
+		prog = tracing_skel->progs.recursive_attach;
+		err = bpf_program__set_attach_target(prog, tgt_prog_fd,
+						     "fentry_target");
+		if (!ASSERT_OK(err, "bpf_program__set_attach_target"))
+			goto stop_thread;
+
+		err = fentry_recursive__load(tracing_skel);
+		if (!ASSERT_OK(err, "fentry_recursive__load"))
+			goto stop_thread;
+
+		err = fentry_recursive__attach(tracing_skel);
+		if (!ASSERT_OK(err, "fentry_recursive__attach"))
+			goto stop_thread;
+
+		fentry_recursive__destroy(tracing_skel);
+		tracing_skel = NULL;
+	}
+
+stop_thread:
+	__atomic_store_n(&tgt_prog_fd, -1, __ATOMIC_SEQ_CST);
+	err = pthread_join(thread, NULL);
+	ASSERT_OK(err, "pthread_join");
+close_prog:
+	fentry_recursive__destroy(tracing_skel);
+	fentry_recursive_target__destroy(target_skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/snprintf.c b/tools/testing/selftests/bpf/prog_tests/snprintf.c
index 4be6fdb78c6a..594441acb707 100644
--- a/tools/testing/selftests/bpf/prog_tests/snprintf.c
+++ b/tools/testing/selftests/bpf/prog_tests/snprintf.c
@@ -116,6 +116,8 @@ static void test_snprintf_negative(void)
 	ASSERT_ERR(load_single_snprintf("%llc"), "invalid specifier 7");
 	ASSERT_ERR(load_single_snprintf("\x80"), "non ascii character");
 	ASSERT_ERR(load_single_snprintf("\x1"), "non printable character");
+	ASSERT_ERR(load_single_snprintf("%p%"), "invalid specifier 8");
+	ASSERT_ERR(load_single_snprintf("%s%"), "invalid specifier 9");
 }
 
 void test_snprintf(void)
diff --git a/tools/testing/selftests/drivers/net/lib/py/load.py b/tools/testing/selftests/drivers/net/lib/py/load.py
index d9c10613ae67..44151b7b1a24 100644
--- a/tools/testing/selftests/drivers/net/lib/py/load.py
+++ b/tools/testing/selftests/drivers/net/lib/py/load.py
@@ -1,5 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 
+import re
 import time
 
 from lib.py import ksft_pr, cmd, ip, rand_port, wait_port_listen
@@ -10,12 +11,11 @@ class GenerateTraffic:
 
         self.env = env
 
-        if port is None:
-            port = rand_port()
-        self._iperf_server = cmd(f"iperf3 -s -1 -p {port}", background=True)
-        wait_port_listen(port)
+        self.port = rand_port() if port is None else port
+        self._iperf_server = cmd(f"iperf3 -s -1 -p {self.port}", background=True)
+        wait_port_listen(self.port)
         time.sleep(0.1)
-        self._iperf_client = cmd(f"iperf3 -c {env.addr} -P 16 -p {port} -t 86400",
+        self._iperf_client = cmd(f"iperf3 -c {env.addr} -P 16 -p {self.port} -t 86400",
                                  background=True, host=env.remote)
 
         # Wait for traffic to ramp up
@@ -56,3 +56,16 @@ class GenerateTraffic:
         ksft_pr(">> Server:")
         ksft_pr(self._iperf_server.stdout)
         ksft_pr(self._iperf_server.stderr)
+        self._wait_client_stopped()
+
+    def _wait_client_stopped(self, sleep=0.005, timeout=5):
+        end = time.monotonic() + timeout
+
+        live_port_pattern = re.compile(fr":{self.port:04X} 0[^6] ")
+
+        while time.monotonic() < end:
+            data = cmd("cat /proc/net/tcp*", host=self.env.remote).stdout
+            if not live_port_pattern.search(data):
+                return
+            time.sleep(sleep)
+        raise Exception(f"Waiting for client to stop timed out after {timeout}s")
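Note on the load.py hunk above: _wait_client_stopped() polls /proc/net/tcp* on the remote host until no socket on the iperf port remains outside TIME_WAIT. Ports in /proc/net/tcp are printed as four uppercase hex digits, and the "0[^6]" part of the pattern rejects matches whose state field is 0x06 (TIME_WAIT). A minimal standalone sketch of the same check, assuming a local /proc/net/tcp and an arbitrary example port (5201, iperf3's default) - the helper name is illustrative, not part of the patch:

    import re

    def port_still_live(port: int, proc_dump: str) -> bool:
        # Ports in /proc/net/tcp are uppercase hex; socket state 06 is TIME_WAIT,
        # so "0[^6]" only matches sockets still holding the port in another state.
        pattern = re.compile(rf":{port:04X} 0[^6] ")
        return pattern.search(proc_dump) is not None

    if __name__ == "__main__":
        with open("/proc/net/tcp") as f:
            print(port_still_live(5201, f.read()))
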
diff --git a/tools/testing/selftests/futex/include/futex2test.h b/tools/testing/selftests/futex/include/futex2test.h
index ea79662405bc..1f625b39948a 100644
--- a/tools/testing/selftests/futex/include/futex2test.h
+++ b/tools/testing/selftests/futex/include/futex2test.h
@@ -4,6 +4,7 @@
  *
  * Copyright 2021 Collabora Ltd.
  */
+#include
 #include
 
 #define u64_to_ptr(x) ((void *)(uintptr_t)(x))
@@ -65,7 +66,12 @@ struct futex32_numa {
 static inline int futex_waitv(volatile struct futex_waitv *waiters, unsigned long nr_waiters,
 			      unsigned long flags, struct timespec *timo, clockid_t clockid)
 {
-	return syscall(__NR_futex_waitv, waiters, nr_waiters, flags, timo, clockid);
+	struct __kernel_timespec ts = {
+		.tv_sec = timo->tv_sec,
+		.tv_nsec = timo->tv_nsec,
+	};
+
+	return syscall(__NR_futex_waitv, waiters, nr_waiters, flags, &ts, clockid);
 }
 
 /*
diff --git a/tools/testing/selftests/net/mptcp/Makefile b/tools/testing/selftests/net/mptcp/Makefile
index e47788bfa671..4c7e51336ab2 100644
--- a/tools/testing/selftests/net/mptcp/Makefile
+++ b/tools/testing/selftests/net/mptcp/Makefile
@@ -4,7 +4,8 @@ top_srcdir = ../../../../..
 
 CFLAGS += -Wall -Wl,--no-as-needed -O2 -g -I$(top_srcdir)/usr/include $(KHDR_INCLUDES)
 
-TEST_PROGS := mptcp_connect.sh pm_netlink.sh mptcp_join.sh diag.sh \
+TEST_PROGS := mptcp_connect.sh mptcp_connect_mmap.sh mptcp_connect_sendfile.sh \
+	      mptcp_connect_checksum.sh pm_netlink.sh mptcp_join.sh diag.sh \
 	      simult_flows.sh mptcp_sockopt.sh userspace_pm.sh
 
 TEST_GEN_FILES = mptcp_connect pm_nl_ctl mptcp_sockopt mptcp_inq mptcp_diag
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect_checksum.sh b/tools/testing/selftests/net/mptcp/mptcp_connect_checksum.sh
new file mode 100755
index 000000000000..ce93ec2f107f
--- /dev/null
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect_checksum.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+MPTCP_LIB_KSFT_TEST="$(basename "${0}" .sh)" \
+	"$(dirname "${0}")/mptcp_connect.sh" -C "${@}"
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect_mmap.sh b/tools/testing/selftests/net/mptcp/mptcp_connect_mmap.sh
new file mode 100755
index 000000000000..5dd30f9394af
--- /dev/null
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect_mmap.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+MPTCP_LIB_KSFT_TEST="$(basename "${0}" .sh)" \
+	"$(dirname "${0}")/mptcp_connect.sh" -m mmap "${@}"
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect_sendfile.sh b/tools/testing/selftests/net/mptcp/mptcp_connect_sendfile.sh
new file mode 100755
index 000000000000..1d16fb1cc9bb
--- /dev/null
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect_sendfile.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+MPTCP_LIB_KSFT_TEST="$(basename "${0}" .sh)" \
+	"$(dirname "${0}")/mptcp_connect.sh" -m sendfile "${@}"
diff --git a/tools/testing/selftests/net/netfilter/conntrack_clash.sh b/tools/testing/selftests/net/netfilter/conntrack_clash.sh
index 3712c1b9b38b..606a43a60f73 100755
--- a/tools/testing/selftests/net/netfilter/conntrack_clash.sh
+++ b/tools/testing/selftests/net/netfilter/conntrack_clash.sh
@@ -93,32 +93,28 @@ ping_test()
 run_one_clash_test()
 {
 	local ns="$1"
-	local daddr="$2"
-	local dport="$3"
+	local ctns="$2"
+	local daddr="$3"
+	local dport="$4"
 	local entries
 	local cre
 
 	if ! ip netns exec "$ns" ./udpclash $daddr $dport;then
-		echo "FAIL: did not receive expected number of replies for $daddr:$dport"
-		ret=1
-		return 1
+		echo "INFO: did not receive expected number of replies for $daddr:$dport"
+		ip netns exec "$ctns" conntrack -S
+		# don't fail: check if clash resolution triggered after all.
 	fi
 
-	entries=$(conntrack -S | wc -l)
-	cre=$(conntrack -S | grep -v "clash_resolve=0" | wc -l)
+	entries=$(ip netns exec "$ctns" conntrack -S | wc -l)
+	cre=$(ip netns exec "$ctns" conntrack -S | grep "clash_resolve=0" | wc -l)
 
-	if [ "$cre" -ne "$entries" ] ;then
+	if [ "$cre" -ne "$entries" ];then
 		clash_resolution_active=1
 		return 0
 	fi
 
-	# 1 cpu -> parallel insertion impossible
-	if [ "$entries" -eq 1 ]; then
-		return 0
-	fi
-
-	# not a failure: clash resolution logic did not trigger, but all replies
-	# were received. With right timing, xmit completed sequentially and
+	# not a failure: clash resolution logic did not trigger.
+	# With right timing, xmit completed sequentially and
 	# no parallel insertion occurs.
 	return $ksft_skip
 }
@@ -126,20 +122,23 @@ run_one_clash_test()
 run_clash_test()
 {
 	local ns="$1"
-	local daddr="$2"
-	local dport="$3"
+	local ctns="$2"
+	local daddr="$3"
+	local dport="$4"
+	local softerr=0
 
 	for i in $(seq 1 10);do
-		run_one_clash_test "$ns" "$daddr" "$dport"
+		run_one_clash_test "$ns" "$ctns" "$daddr" "$dport"
 		local rv=$?
 		if [ $rv -eq 0 ];then
 			echo "PASS: clash resolution test for $daddr:$dport on attempt $i"
 			return 0
-		elif [ $rv -eq 1 ];then
-			echo "FAIL: clash resolution test for $daddr:$dport on attempt $i"
-			return 1
+		elif [ $rv -eq $ksft_skip ]; then
+			softerr=1
 		fi
 	done
+
+	[ $softerr -eq 1 ] && echo "SKIP: clash resolution for $daddr:$dport did not trigger"
 }
 
 ip link add veth0 netns "$nsclient1" type veth peer name veth0 netns "$nsrouter"
@@ -161,11 +160,11 @@ spawn_servers "$nsclient2"
 
 # exercise clash resolution with nat:
 # nsrouter is supposed to dnat to 10.0.2.1:900{0,1,2,3}.
-run_clash_test "$nsclient1" 10.0.1.99 "$dport"
+run_clash_test "$nsclient1" "$nsrouter" 10.0.1.99 "$dport"
 
 # exercise clash resolution without nat.
 load_simple_ruleset "$nsclient2"
-run_clash_test "$nsclient2" 127.0.0.1 9001
+run_clash_test "$nsclient2" "$nsclient2" 127.0.0.1 9001
 
 if [ $clash_resolution_active -eq 0 ];then
 	[ "$ret" -eq 0 ] && ret=$ksft_skip
diff --git a/tools/testing/selftests/sched_ext/exit.c b/tools/testing/selftests/sched_ext/exit.c
index 9451782689de..ee25824b1cbe 100644
--- a/tools/testing/selftests/sched_ext/exit.c
+++ b/tools/testing/selftests/sched_ext/exit.c
@@ -22,6 +22,14 @@ static enum scx_test_status run(void *ctx)
 		struct bpf_link *link;
 		char buf[16];
 
+		/*
+		 * On single-CPU systems, ops.select_cpu() is never
+		 * invoked, so skip this test to avoid getting stuck
+		 * indefinitely.
+		 */
+		if (tc == EXIT_SELECT_CPU && libbpf_num_possible_cpus() == 1)
+			continue;
+
 		skel = exit__open();
 		SCX_ENUM_INIT(skel);
 		skel->rodata->exit_point = tc;