Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes after downstream PR (net-6.16-rc5).

No conflicts.

No adjacent changes.

Signed-off-by: Paolo Abeni <pabeni@redhat.com>
commit 6b9fd8857b
Author: Paolo Abeni <pabeni@redhat.com>
Date:   2025-07-04 08:03:18 +02:00
299 changed files with 3353 additions and 2033 deletions


@ -223,6 +223,8 @@ Dmitry Safonov <0x7f454c46@gmail.com> <d.safonov@partner.samsung.com>
Dmitry Safonov <0x7f454c46@gmail.com> <dsafonov@virtuozzo.com>
Domen Puncer <domen@coderock.org>
Douglas Gilbert <dougg@torque.net>
Drew Fustini <fustini@kernel.org> <drew@pdp7.com>
<duje@dujemihanovic.xyz> <duje.mihanovic@skole.hr>
Ed L. Cashin <ecashin@coraid.com>
Elliot Berman <quic_eberman@quicinc.com> <eberman@codeaurora.org>
Enric Balletbo i Serra <eballetbo@kernel.org> <enric.balletbo@collabora.com>
@ -830,3 +832,6 @@ Yosry Ahmed <yosry.ahmed@linux.dev> <yosryahmed@google.com>
Yusuke Goda <goda.yusuke@renesas.com>
Zack Rusin <zack.rusin@broadcom.com> <zackr@vmware.com>
Zhu Yanjun <zyjzyj2000@gmail.com> <yanjunz@nvidia.com>
Zijun Hu <zijun.hu@oss.qualcomm.com> <quic_zijuhu@quicinc.com>
Zijun Hu <zijun.hu@oss.qualcomm.com> <zijuhu@codeaurora.org>
Zijun Hu <zijun_hu@htc.com>


@ -49,6 +49,12 @@ Description:
(RO) Supported minimum scrub cycle duration in seconds
by the memory scrubber.
Device-based scrub: returns the minimum scrub cycle
supported by the memory device.
Region-based scrub: returns the max of minimum scrub cycles
supported by individual memory devices that back the region.
What: /sys/bus/edac/devices/<dev-name>/scrubX/max_cycle_duration
Date: March 2025
KernelVersion: 6.15
@ -57,6 +63,16 @@ Description:
(RO) Supported maximum scrub cycle duration in seconds
by the memory scrubber.
Device-based scrub: returns the maximum scrub cycle supported
by the memory device.
Region-based scrub: returns the min of maximum scrub cycles
supported by individual memory devices that back the region.
If the memory device does not provide maximum scrub cycle
information, return the maximum supported value of the scrub
cycle field.
What: /sys/bus/edac/devices/<dev-name>/scrubX/current_cycle_duration
Date: March 2025
KernelVersion: 6.15
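The region-level aggregation described above (and implemented by the drivers/cxl/core/edac.c change later in this merge) can be illustrated with a small stand-alone sketch; the device values and variable names below are hypothetical, not the driver's actual code:

    /* Region min = max of device minimums; region max = min of device maximums. */
    #include <stdio.h>

    int main(void)
    {
        unsigned char dev_min[] = { 12, 4, 8 };    /* per-memdev minimum cycles, seconds */
        unsigned char dev_max[] = { 64, 128, 96 }; /* per-memdev maximum cycles, seconds */
        unsigned char region_min = 0, region_max = 255;

        for (int i = 0; i < 3; i++) {
            if (dev_min[i] > region_min)
                region_min = dev_min[i];
            if (dev_max[i] < region_max)
                region_max = dev_max[i];
        }
        /* Every backing device can honor any cycle in [region_min, region_max]. */
        printf("region scrub cycle range: %u..%u seconds\n", region_min, region_max);
        return 0;
    }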


@ -118,15 +118,11 @@ $defs:
ti,lvds-vod-swing-clock-microvolt:
description: LVDS diferential output voltage <min max> for clock
lanes in microvolts.
$ref: /schemas/types.yaml#/definitions/uint32-array
minItems: 2
maxItems: 2
ti,lvds-vod-swing-data-microvolt:
description: LVDS diferential output voltage <min max> for data
lanes in microvolts.
$ref: /schemas/types.yaml#/definitions/uint32-array
minItems: 2
maxItems: 2
allOf:


@ -80,6 +80,8 @@ examples:
interrupt-parent = <&intc>;
interrupts = <296 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "macirq";
phy-handle = <&phy0>;
phy-mode = "rgmii-id";
resets = <&rst 30>;
reset-names = "stmmaceth";
snps,multicast-filter-bins = <0>;
@ -91,7 +93,6 @@ examples:
snps,mtl-rx-config = <&gmac0_mtl_rx_setup>;
snps,mtl-tx-config = <&gmac0_mtl_tx_setup>;
snps,axi-config = <&gmac0_stmmac_axi_setup>;
status = "disabled";
gmac0_mtl_rx_setup: rx-queues-config {
snps,rx-queues-to-use = <8>;


@ -45,7 +45,7 @@ allOf:
- ns16550
- ns16550a
then:
anyOf:
oneOf:
- required: [ clock-frequency ]
- required: [ clocks ]


@ -1,5 +0,0 @@
Altera JTAG UART
Required properties:
- compatible : should be "ALTR,juart-1.0" <DEPRECATED>
- compatible : should be "altr,juart-1.0"


@ -1,8 +0,0 @@
Altera UART
Required properties:
- compatible : should be "ALTR,uart-1.0" <DEPRECATED>
- compatible : should be "altr,uart-1.0"
Optional properties:
- clock-frequency : frequency of the clock input to the UART


@ -0,0 +1,19 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/serial/altr,juart-1.0.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Altera JTAG UART
maintainers:
- Dinh Nguyen <dinguyen@kernel.org>
properties:
compatible:
const: altr,juart-1.0
required:
- compatible
additionalProperties: false


@ -0,0 +1,25 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/serial/altr,uart-1.0.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Altera UART
maintainers:
- Dinh Nguyen <dinguyen@kernel.org>
allOf:
- $ref: /schemas/serial/serial.yaml#
properties:
compatible:
const: altr,uart-1.0
clock-frequency:
description: Frequency of the clock input to the UART.
required:
- compatible
unevaluatedProperties: false


@ -1,7 +1,7 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas//soc/fsl/fsl,ls1028a-reset.yaml#
$id: http://devicetree.org/schemas/soc/fsl/fsl,ls1028a-reset.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Freescale Layerscape Reset Registers Module


@ -16,11 +16,13 @@ User interface
Creating a TLS connection
-------------------------
First create a new TCP socket and set the TLS ULP.
First create a new TCP socket and once the connection is established set the
TLS ULP.
.. code-block:: c
sock = socket(AF_INET, SOCK_STREAM, 0);
connect(sock, addr, addrlen);
setsockopt(sock, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
Setting the TLS ULP allows us to set/get TLS socket options. Currently
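A hedged sketch of the full client-side sequence the corrected text implies, connect first, then attach the ULP, then program the transmit keys; the key material below is a placeholder and error handling is omitted:

    #include <string.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <linux/tls.h>

    #ifndef SOL_TLS
    #define SOL_TLS 282 /* from include/linux/socket.h, if libc headers lack it */
    #endif

    int tls_client_setup(struct sockaddr *addr, socklen_t addrlen)
    {
        int sock = socket(AF_INET, SOCK_STREAM, 0);

        connect(sock, addr, addrlen);                 /* establish first ... */
        setsockopt(sock, SOL_TCP, TCP_ULP, "tls", sizeof("tls")); /* ... then attach */

        struct tls12_crypto_info_aes_gcm_128 ci = {
            .info.version = TLS_1_2_VERSION,
            .info.cipher_type = TLS_CIPHER_AES_GCM_128,
        };
        memset(ci.key, 0, TLS_CIPHER_AES_GCM_128_KEY_SIZE); /* placeholder key */
        setsockopt(sock, SOL_TLS, TLS_TX, &ci, sizeof(ci));
        return sock;
    }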


@ -312,7 +312,7 @@ Posting as one thread is discouraged because it confuses patchwork
(as of patchwork 2.2.2).
Co-posting selftests
--------------------
~~~~~~~~~~~~~~~~~~~~
Selftests should be part of the same series as the code changes.
Specifically for fixes both code change and related test should go into


@ -15555,6 +15555,7 @@ F: drivers/net/ethernet/mellanox/mlx4/en_*
MELLANOX ETHERNET DRIVER (mlx5e)
M: Saeed Mahameed <saeedm@nvidia.com>
M: Tariq Toukan <tariqt@nvidia.com>
M: Mark Bloch <mbloch@nvidia.com>
L: netdev@vger.kernel.org
S: Maintained
W: https://www.nvidia.com/networking/
@ -15624,6 +15625,7 @@ MELLANOX MLX5 core VPI driver
M: Saeed Mahameed <saeedm@nvidia.com>
M: Leon Romanovsky <leonro@nvidia.com>
M: Tariq Toukan <tariqt@nvidia.com>
M: Mark Bloch <mbloch@nvidia.com>
L: netdev@vger.kernel.org
L: linux-rdma@vger.kernel.org
S: Maintained
@ -15681,6 +15683,8 @@ MEMBLOCK AND MEMORY MANAGEMENT INITIALIZATION
M: Mike Rapoport <rppt@kernel.org>
L: linux-mm@kvack.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/rppt/memblock.git for-next
T: git git://git.kernel.org/pub/scm/linux/kernel/git/rppt/memblock.git fixes
F: Documentation/core-api/boot-time-mm.rst
F: Documentation/core-api/kho/bindings/memblock/*
F: include/linux/memblock.h
@ -15853,6 +15857,17 @@ F: mm/numa.c
F: mm/numa_emulation.c
F: mm/numa_memblks.c
MEMORY MANAGEMENT - OOM KILLER
M: Michal Hocko <mhocko@suse.com>
R: David Rientjes <rientjes@google.com>
R: Shakeel Butt <shakeel.butt@linux.dev>
L: linux-mm@kvack.org
S: Maintained
F: include/linux/oom.h
F: include/trace/events/oom.h
F: include/uapi/linux/oom.h
F: mm/oom_kill.c
MEMORY MANAGEMENT - PAGE ALLOCATOR
M: Andrew Morton <akpm@linux-foundation.org>
M: Vlastimil Babka <vbabka@suse.cz>
@ -15867,8 +15882,17 @@ F: include/linux/compaction.h
F: include/linux/gfp.h
F: include/linux/page-isolation.h
F: mm/compaction.c
F: mm/debug_page_alloc.c
F: mm/fail_page_alloc.c
F: mm/page_alloc.c
F: mm/page_ext.c
F: mm/page_frag_cache.c
F: mm/page_isolation.c
F: mm/page_owner.c
F: mm/page_poison.c
F: mm/page_reporting.c
F: mm/show_mem.c
F: mm/shuffle.c
MEMORY MANAGEMENT - RECLAIM
M: Andrew Morton <akpm@linux-foundation.org>
@ -15928,9 +15952,9 @@ F: mm/swapfile.c
MEMORY MANAGEMENT - THP (TRANSPARENT HUGE PAGE)
M: Andrew Morton <akpm@linux-foundation.org>
M: David Hildenbrand <david@redhat.com>
M: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
R: Zi Yan <ziy@nvidia.com>
R: Baolin Wang <baolin.wang@linux.alibaba.com>
R: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
R: Liam R. Howlett <Liam.Howlett@oracle.com>
R: Nico Pache <npache@redhat.com>
R: Ryan Roberts <ryan.roberts@arm.com>
@ -21181,7 +21205,7 @@ M: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
L: netdev@vger.kernel.org
L: linux-renesas-soc@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/net/renesas,r9a09g057-gbeth.yaml
F: Documentation/devicetree/bindings/net/renesas,rzv2h-gbeth.yaml
F: drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c
RENESAS RZ/V2H(P) USB2PHY PORT RESET DRIVER
@ -21393,7 +21417,7 @@ N: spacemit
K: spacemit
RISC-V THEAD SoC SUPPORT
M: Drew Fustini <drew@pdp7.com>
M: Drew Fustini <fustini@kernel.org>
M: Guo Ren <guoren@kernel.org>
M: Fu Wei <wefu@redhat.com>
L: linux-riscv@lists.infradead.org
@ -22569,9 +22593,11 @@ S: Maintained
F: drivers/misc/sgi-xp/
SHARED MEMORY COMMUNICATIONS (SMC) SOCKETS
M: D. Wythe <alibuda@linux.alibaba.com>
M: Dust Li <dust.li@linux.alibaba.com>
M: Sidraya Jayagond <sidraya@linux.ibm.com>
M: Wenjia Zhang <wenjia@linux.ibm.com>
M: Jan Karcher <jaka@linux.ibm.com>
R: D. Wythe <alibuda@linux.alibaba.com>
R: Mahanta Jambigi <mjambigi@linux.ibm.com>
R: Tony Lu <tonylu@linux.alibaba.com>
R: Wen Gu <guwen@linux.alibaba.com>
L: linux-rdma@vger.kernel.org
@ -24082,6 +24108,7 @@ M: Bin Du <bin.du@amd.com>
L: linux-i2c@vger.kernel.org
S: Maintained
F: drivers/i2c/busses/i2c-designware-amdisp.c
F: include/linux/soc/amd/isp4_misc.h
SYNOPSYS DESIGNWARE MMC/SD/SDIO DRIVER
M: Jaehoon Chung <jh80.chung@samsung.com>


@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 16
SUBLEVEL = 0
EXTRAVERSION = -rc3
EXTRAVERSION = -rc4
NAME = Baby Opossum Posse
# *DOCUMENTATION*


@ -18,12 +18,12 @@
/*
* This gives the physical RAM offset.
*/
#ifndef __ASSEMBLY__
#ifndef __ASSEMBLER__
#ifndef PHYS_OFFSET
#define PHYS_OFFSET _UL(0)
#endif
extern unsigned long vm_map_base;
#endif /* __ASSEMBLY__ */
#endif /* __ASSEMBLER__ */
#ifndef IO_BASE
#define IO_BASE CSR_DMW0_BASE
@ -66,7 +66,7 @@ extern unsigned long vm_map_base;
#define FIXADDR_TOP ((unsigned long)(long)(int)0xfffe0000)
#endif
#ifdef __ASSEMBLY__
#ifdef __ASSEMBLER__
#define _ATYPE_
#define _ATYPE32_
#define _ATYPE64_
@ -85,7 +85,7 @@ extern unsigned long vm_map_base;
/*
* 32/64-bit LoongArch address spaces
*/
#ifdef __ASSEMBLY__
#ifdef __ASSEMBLER__
#define _ACAST32_
#define _ACAST64_
#else


@ -2,7 +2,7 @@
#ifndef _ASM_ALTERNATIVE_ASM_H
#define _ASM_ALTERNATIVE_ASM_H
#ifdef __ASSEMBLY__
#ifdef __ASSEMBLER__
#include <asm/asm.h>
@ -77,6 +77,6 @@
.previous
.endm
#endif /* __ASSEMBLY__ */
#endif /* __ASSEMBLER__ */
#endif /* _ASM_ALTERNATIVE_ASM_H */
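The LoongArch hunks in this merge (this header and the ones that follow) switch the assembly guard from the kbuild-supplied __ASSEMBLY__ macro to __ASSEMBLER__, which GCC and Clang predefine when preprocessing assembly sources. A minimal sketch of the guard pattern, with hypothetical names:

    #ifndef __ASSEMBLER__        /* predefined by the compiler for .S files */
    /* C-only side: types, prototypes, inline helpers */
    #include <linux/types.h>
    u32 example_c_only_helper(void);
    #else
    /* assembler-only side: raw macros usable from .S files */
    #define EXAMPLE_ASM_CONSTANT 0x10
    #endif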


@ -2,7 +2,7 @@
#ifndef _ASM_ALTERNATIVE_H
#define _ASM_ALTERNATIVE_H
#ifndef __ASSEMBLY__
#ifndef __ASSEMBLER__
#include <linux/types.h>
#include <linux/stddef.h>
@ -106,6 +106,6 @@ extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
#define alternative_2(oldinstr, newinstr1, feature1, newinstr2, feature2) \
(asm volatile(ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2) ::: "memory"))
#endif /* __ASSEMBLY__ */
#endif /* __ASSEMBLER__ */
#endif /* _ASM_ALTERNATIVE_H */


@ -7,7 +7,7 @@
#define EX_TYPE_UACCESS_ERR_ZERO 2
#define EX_TYPE_BPF 3
#ifdef __ASSEMBLY__
#ifdef __ASSEMBLER__
#define __ASM_EXTABLE_RAW(insn, fixup, type, data) \
.pushsection __ex_table, "a"; \
@ -22,7 +22,7 @@
__ASM_EXTABLE_RAW(\insn, \fixup, EX_TYPE_FIXUP, 0)
.endm
#else /* __ASSEMBLY__ */
#else /* __ASSEMBLER__ */
#include <linux/bits.h>
#include <linux/stringify.h>
@ -60,6 +60,6 @@
#define _ASM_EXTABLE_UACCESS_ERR(insn, fixup, err) \
_ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, zero)
#endif /* __ASSEMBLY__ */
#endif /* __ASSEMBLER__ */
#endif /* __ASM_ASM_EXTABLE_H */


@ -110,7 +110,7 @@
#define LONG_SRA srai.w
#define LONG_SRAV sra.w
#ifdef __ASSEMBLY__
#ifdef __ASSEMBLER__
#define LONG .word
#endif
#define LONGSIZE 4
@ -131,7 +131,7 @@
#define LONG_SRA srai.d
#define LONG_SRAV sra.d
#ifdef __ASSEMBLY__
#ifdef __ASSEMBLER__
#define LONG .dword
#endif
#define LONGSIZE 8
@ -158,7 +158,7 @@
#define PTR_SCALESHIFT 2
#ifdef __ASSEMBLY__
#ifdef __ASSEMBLER__
#define PTR .word
#endif
#define PTRSIZE 4
@ -181,7 +181,7 @@
#define PTR_SCALESHIFT 3
#ifdef __ASSEMBLY__
#ifdef __ASSEMBLER__
#define PTR .dword
#endif
#define PTRSIZE 8


@ -46,7 +46,7 @@
#define PRID_PRODUCT_MASK 0x0fff
#if !defined(__ASSEMBLY__)
#if !defined(__ASSEMBLER__)
enum cpu_type_enum {
CPU_UNKNOWN,
@ -55,7 +55,7 @@ enum cpu_type_enum {
CPU_LAST
};
#endif /* !__ASSEMBLY */
#endif /* !__ASSEMBLER__ */
/*
* ISA Level encodings


@ -14,7 +14,7 @@
#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */
#ifndef __ASSEMBLY__
#ifndef __ASSEMBLER__
#ifndef CONFIG_DYNAMIC_FTRACE
@ -84,7 +84,7 @@ __arch_ftrace_set_direct_caller(struct pt_regs *regs, unsigned long addr)
#endif
#endif /* __ASSEMBLY__ */
#endif /* __ASSEMBLER__ */
#endif /* CONFIG_FUNCTION_TRACER */


@ -2,7 +2,7 @@
#ifndef __ASM_GPR_NUM_H
#define __ASM_GPR_NUM_H
#ifdef __ASSEMBLY__
#ifdef __ASSEMBLER__
.equ .L__gpr_num_zero, 0
.irp num,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
@ -25,7 +25,7 @@
.equ .L__gpr_num_$s\num, 23 + \num
.endr
#else /* __ASSEMBLY__ */
#else /* __ASSEMBLER__ */
#define __DEFINE_ASM_GPR_NUMS \
" .equ .L__gpr_num_zero, 0\n" \
@ -47,6 +47,6 @@
" .equ .L__gpr_num_$s\\num, 23 + \\num\n" \
" .endr\n" \
#endif /* __ASSEMBLY__ */
#endif /* __ASSEMBLER__ */
#endif /* __ASM_GPR_NUM_H */


@ -5,7 +5,7 @@
#ifndef _ASM_IRQFLAGS_H
#define _ASM_IRQFLAGS_H
#ifndef __ASSEMBLY__
#ifndef __ASSEMBLER__
#include <linux/compiler.h>
#include <linux/stringify.h>
@ -80,6 +80,6 @@ static inline int arch_irqs_disabled(void)
return arch_irqs_disabled_flags(arch_local_save_flags());
}
#endif /* #ifndef __ASSEMBLY__ */
#endif /* #ifndef __ASSEMBLER__ */
#endif /* _ASM_IRQFLAGS_H */


@ -7,7 +7,7 @@
#ifndef __ASM_JUMP_LABEL_H
#define __ASM_JUMP_LABEL_H
#ifndef __ASSEMBLY__
#ifndef __ASSEMBLER__
#include <linux/types.h>
@ -50,5 +50,5 @@ l_yes:
return true;
}
#endif /* __ASSEMBLY__ */
#endif /* __ASSEMBLER__ */
#endif /* __ASM_JUMP_LABEL_H */


@ -2,7 +2,7 @@
#ifndef __ASM_KASAN_H
#define __ASM_KASAN_H
#ifndef __ASSEMBLY__
#ifndef __ASSEMBLER__
#include <linux/linkage.h>
#include <linux/mmzone.h>


@ -9,15 +9,15 @@
#include <linux/linkage.h>
#include <linux/types.h>
#ifndef __ASSEMBLY__
#ifndef __ASSEMBLER__
#include <larchintrin.h>
/* CPUCFG */
#define read_cpucfg(reg) __cpucfg(reg)
#endif /* !__ASSEMBLY__ */
#endif /* !__ASSEMBLER__ */
#ifdef __ASSEMBLY__
#ifdef __ASSEMBLER__
/* LoongArch Registers */
#define REG_ZERO 0x0
@ -53,7 +53,7 @@
#define REG_S7 0x1e
#define REG_S8 0x1f
#endif /* __ASSEMBLY__ */
#endif /* __ASSEMBLER__ */
/* Bit fields for CPUCFG registers */
#define LOONGARCH_CPUCFG0 0x0
@ -171,7 +171,7 @@
* SW emulation for KVM hypervirsor, see arch/loongarch/include/uapi/asm/kvm_para.h
*/
#ifndef __ASSEMBLY__
#ifndef __ASSEMBLER__
/* CSR */
#define csr_read32(reg) __csrrd_w(reg)
@ -187,7 +187,7 @@
#define iocsr_write32(val, reg) __iocsrwr_w(val, reg)
#define iocsr_write64(val, reg) __iocsrwr_d(val, reg)
#endif /* !__ASSEMBLY__ */
#endif /* !__ASSEMBLER__ */
/* CSR register number */
@ -1195,7 +1195,7 @@
#define LOONGARCH_IOCSR_EXTIOI_ROUTE_BASE 0x1c00
#define IOCSR_EXTIOI_VECTOR_NUM 256
#ifndef __ASSEMBLY__
#ifndef __ASSEMBLER__
static __always_inline u64 drdtime(void)
{
@ -1357,7 +1357,7 @@ __BUILD_CSR_OP(tlbidx)
#define clear_csr_estat(val) \
csr_xchg32(~(val), val, LOONGARCH_CSR_ESTAT)
#endif /* __ASSEMBLY__ */
#endif /* __ASSEMBLER__ */
/* Generic EntryLo bit definitions */
#define ENTRYLO_V (_ULCAST_(1) << 0)


@ -34,7 +34,7 @@
#define ORC_TYPE_REGS 3
#define ORC_TYPE_REGS_PARTIAL 4
#ifndef __ASSEMBLY__
#ifndef __ASSEMBLER__
/*
* This struct is more or less a vastly simplified version of the DWARF Call
* Frame Information standard. It contains only the necessary parts of DWARF
@ -53,6 +53,6 @@ struct orc_entry {
unsigned int type:3;
unsigned int signal:1;
};
#endif /* __ASSEMBLY__ */
#endif /* __ASSEMBLER__ */
#endif /* _ORC_TYPES_H */


@ -15,7 +15,7 @@
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
#ifndef __ASSEMBLY__
#ifndef __ASSEMBLER__
#include <linux/kernel.h>
#include <linux/pfn.h>
@ -110,6 +110,6 @@ extern int __virt_addr_valid(volatile void *kaddr);
#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>
#endif /* !__ASSEMBLY__ */
#endif /* !__ASSEMBLER__ */
#endif /* _ASM_PAGE_H */


@ -92,7 +92,7 @@
#define PAGE_KERNEL_WUC __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
_PAGE_GLOBAL | _PAGE_KERN | _CACHE_WUC)
#ifndef __ASSEMBLY__
#ifndef __ASSEMBLER__
#define _PAGE_IOREMAP pgprot_val(PAGE_KERNEL_SUC)
@ -127,6 +127,6 @@ static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
return __pgprot(prot);
}
#endif /* !__ASSEMBLY__ */
#endif /* !__ASSEMBLER__ */
#endif /* _ASM_PGTABLE_BITS_H */


@ -55,7 +55,7 @@
#define USER_PTRS_PER_PGD ((TASK_SIZE64 / PGDIR_SIZE)?(TASK_SIZE64 / PGDIR_SIZE):1)
#ifndef __ASSEMBLY__
#ifndef __ASSEMBLER__
#include <linux/mm_types.h>
#include <linux/mmzone.h>
@ -618,6 +618,6 @@ static inline long pmd_protnone(pmd_t pmd)
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
#endif /* !__ASSEMBLY__ */
#endif /* !__ASSEMBLER__ */
#endif /* _ASM_PGTABLE_H */


@ -8,7 +8,7 @@
#define Pref_Load 0
#define Pref_Store 8
#ifdef __ASSEMBLY__
#ifdef __ASSEMBLER__
.macro __pref hint addr
#ifdef CONFIG_CPU_HAS_PREFETCH


@ -39,7 +39,7 @@ int loongson_cpu_disable(void);
void loongson_cpu_die(unsigned int cpu);
#endif
static inline void plat_smp_setup(void)
static inline void __init plat_smp_setup(void)
{
loongson_smp_setup();
}


@ -10,7 +10,7 @@
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
#ifndef __ASSEMBLER__
#include <asm/processor.h>
@ -53,7 +53,7 @@ static inline struct thread_info *current_thread_info(void)
register unsigned long current_stack_pointer __asm__("$sp");
#endif /* !__ASSEMBLY__ */
#endif /* !__ASSEMBLER__ */
/* thread information allocation */
#define THREAD_SIZE SZ_16K


@ -8,7 +8,7 @@
#include <asm-generic/int-ll64.h>
#include <uapi/asm/types.h>
#ifdef __ASSEMBLY__
#ifdef __ASSEMBLER__
#define _ULCAST_
#define _U64CAST_
#else


@ -5,7 +5,7 @@
#include <linux/objtool.h>
#include <asm/orc_types.h>
#ifdef __ASSEMBLY__
#ifdef __ASSEMBLER__
.macro UNWIND_HINT_UNDEFINED
UNWIND_HINT type=UNWIND_HINT_TYPE_UNDEFINED
@ -23,7 +23,7 @@
UNWIND_HINT sp_reg=ORC_REG_SP type=UNWIND_HINT_TYPE_CALL
.endm
#else /* !__ASSEMBLY__ */
#else /* !__ASSEMBLER__ */
#define UNWIND_HINT_SAVE \
UNWIND_HINT(UNWIND_HINT_TYPE_SAVE, 0, 0, 0)
@ -31,6 +31,6 @@
#define UNWIND_HINT_RESTORE \
UNWIND_HINT(UNWIND_HINT_TYPE_RESTORE, 0, 0, 0)
#endif /* !__ASSEMBLY__ */
#endif /* !__ASSEMBLER__ */
#endif /* _ASM_LOONGARCH_UNWIND_HINTS_H */


@ -7,7 +7,7 @@
#ifndef _VDSO_ARCH_DATA_H
#define _VDSO_ARCH_DATA_H
#ifndef __ASSEMBLY__
#ifndef __ASSEMBLER__
#include <asm/asm.h>
#include <asm/vdso.h>
@ -20,6 +20,6 @@ struct vdso_arch_data {
struct vdso_pcpu_data pdata[NR_CPUS];
};
#endif /* __ASSEMBLY__ */
#endif /* __ASSEMBLER__ */
#endif


@ -5,7 +5,7 @@
#ifndef __ASM_VDSO_GETRANDOM_H
#define __ASM_VDSO_GETRANDOM_H
#ifndef __ASSEMBLY__
#ifndef __ASSEMBLER__
#include <asm/unistd.h>
#include <asm/vdso/vdso.h>
@ -28,6 +28,6 @@ static __always_inline ssize_t getrandom_syscall(void *_buffer, size_t _len, uns
return ret;
}
#endif /* !__ASSEMBLY__ */
#endif /* !__ASSEMBLER__ */
#endif /* __ASM_VDSO_GETRANDOM_H */


@ -7,7 +7,7 @@
#ifndef __ASM_VDSO_GETTIMEOFDAY_H
#define __ASM_VDSO_GETTIMEOFDAY_H
#ifndef __ASSEMBLY__
#ifndef __ASSEMBLER__
#include <asm/unistd.h>
#include <asm/vdso/vdso.h>
@ -89,6 +89,6 @@ static inline bool loongarch_vdso_hres_capable(void)
}
#define __arch_vdso_hres_capable loongarch_vdso_hres_capable
#endif /* !__ASSEMBLY__ */
#endif /* !__ASSEMBLER__ */
#endif /* __ASM_VDSO_GETTIMEOFDAY_H */


@ -5,10 +5,10 @@
#ifndef __ASM_VDSO_PROCESSOR_H
#define __ASM_VDSO_PROCESSOR_H
#ifndef __ASSEMBLY__
#ifndef __ASSEMBLER__
#define cpu_relax() barrier()
#endif /* __ASSEMBLY__ */
#endif /* __ASSEMBLER__ */
#endif /* __ASM_VDSO_PROCESSOR_H */


@ -7,7 +7,7 @@
#ifndef _ASM_VDSO_VDSO_H
#define _ASM_VDSO_VDSO_H
#ifndef __ASSEMBLY__
#ifndef __ASSEMBLER__
#include <asm/asm.h>
#include <asm/page.h>
@ -16,6 +16,6 @@
#define VVAR_SIZE (VDSO_NR_PAGES << PAGE_SHIFT)
#endif /* __ASSEMBLY__ */
#endif /* __ASSEMBLER__ */
#endif


@ -2,13 +2,13 @@
#ifndef __ASM_VDSO_VSYSCALL_H
#define __ASM_VDSO_VSYSCALL_H
#ifndef __ASSEMBLY__
#ifndef __ASSEMBLER__
#include <vdso/datapage.h>
/* The asm-generic header needs to be included after the definitions above */
#include <asm-generic/vdso/vsyscall.h>
#endif /* !__ASSEMBLY__ */
#endif /* !__ASSEMBLER__ */
#endif /* __ASM_VDSO_VSYSCALL_H */


@ -10,6 +10,7 @@
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/efi-bgrt.h>
#include <linux/export.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/memblock.h>


@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <asm/alternative.h>


@ -144,6 +144,18 @@ void __init efi_init(void)
if (efi_memmap_init_early(&data) < 0)
panic("Unable to map EFI memory map.\n");
/*
* Reserve the physical memory region occupied by the EFI
* memory map table (header + descriptors). This is crucial
* for kdump, as the kdump kernel relies on this original
* memmap passed by the bootloader. Without reservation,
* this region could be overwritten by the primary kernel.
* Also, set the EFI_PRESERVE_BS_REGIONS flag to indicate that
* critical boot services code/data regions like this are preserved.
*/
memblock_reserve((phys_addr_t)boot_memmap, sizeof(*tbl) + data.size);
set_bit(EFI_PRESERVE_BS_REGIONS, &efi.flags);
early_memunmap(tbl, sizeof(*tbl));
}


@ -6,7 +6,6 @@
#include <linux/binfmts.h>
#include <linux/elf.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <asm/cpu-features.h>


@ -4,6 +4,7 @@
*/
#include <linux/cpu.h>
#include <linux/export.h>
#include <linux/init.h>
#include <asm/fpu.h>
#include <asm/smp.h>


@ -1,5 +1,4 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/irq_work.h>


@ -102,7 +102,7 @@ static int constant_timer_next_event(unsigned long delta, struct clock_event_dev
return 0;
}
static unsigned long __init get_loops_per_jiffy(void)
static unsigned long get_loops_per_jiffy(void)
{
unsigned long lpj = (unsigned long)const_clock_freq;


@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/extable.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>


@ -3,6 +3,7 @@
* Copyright (C) 2022 Loongson Technology Corporation Limited
*/
#include <asm/unwind.h>
#include <linux/export.h>
unsigned long unwind_get_return_address(struct unwind_state *state)
{


@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/objtool.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/objtool.h>
#include <linux/sort.h>
#include <asm/exception.h>
#include <asm/orc_header.h>


@ -3,6 +3,7 @@
* Copyright (C) 2022 Loongson Technology Corporation Limited
*/
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/ftrace.h>
#include <linux/kallsyms.h>


@ -9,7 +9,8 @@
static void eiointc_set_sw_coreisr(struct loongarch_eiointc *s)
{
int ipnum, cpu, irq_index, irq_mask, irq;
int ipnum, cpu, cpuid, irq_index, irq_mask, irq;
struct kvm_vcpu *vcpu;
for (irq = 0; irq < EIOINTC_IRQS; irq++) {
ipnum = s->ipmap.reg_u8[irq / 32];
@ -20,7 +21,12 @@ static void eiointc_set_sw_coreisr(struct loongarch_eiointc *s)
irq_index = irq / 32;
irq_mask = BIT(irq & 0x1f);
cpu = s->coremap.reg_u8[irq];
cpuid = s->coremap.reg_u8[irq];
vcpu = kvm_get_vcpu_by_cpuid(s->kvm, cpuid);
if (!vcpu)
continue;
cpu = vcpu->vcpu_id;
if (!!(s->coreisr.reg_u32[cpu][irq_index] & irq_mask))
set_bit(irq, s->sw_coreisr[cpu][ipnum]);
else
@ -66,20 +72,25 @@ static void eiointc_update_irq(struct loongarch_eiointc *s, int irq, int level)
}
static inline void eiointc_update_sw_coremap(struct loongarch_eiointc *s,
int irq, void *pvalue, u32 len, bool notify)
int irq, u64 val, u32 len, bool notify)
{
int i, cpu;
u64 val = *(u64 *)pvalue;
int i, cpu, cpuid;
struct kvm_vcpu *vcpu;
for (i = 0; i < len; i++) {
cpu = val & 0xff;
cpuid = val & 0xff;
val = val >> 8;
if (!(s->status & BIT(EIOINTC_ENABLE_CPU_ENCODE))) {
cpu = ffs(cpu) - 1;
cpu = (cpu >= 4) ? 0 : cpu;
cpuid = ffs(cpuid) - 1;
cpuid = (cpuid >= 4) ? 0 : cpuid;
}
vcpu = kvm_get_vcpu_by_cpuid(s->kvm, cpuid);
if (!vcpu)
continue;
cpu = vcpu->vcpu_id;
if (s->sw_coremap[irq + i] == cpu)
continue;
@ -305,6 +316,11 @@ static int kvm_eiointc_read(struct kvm_vcpu *vcpu,
return -EINVAL;
}
if (addr & (len - 1)) {
kvm_err("%s: eiointc not aligned addr %llx len %d\n", __func__, addr, len);
return -EINVAL;
}
vcpu->kvm->stat.eiointc_read_exits++;
spin_lock_irqsave(&eiointc->lock, flags);
switch (len) {
@ -398,7 +414,7 @@ static int loongarch_eiointc_writeb(struct kvm_vcpu *vcpu,
irq = offset - EIOINTC_COREMAP_START;
index = irq;
s->coremap.reg_u8[index] = data;
eiointc_update_sw_coremap(s, irq, (void *)&data, sizeof(data), true);
eiointc_update_sw_coremap(s, irq, data, sizeof(data), true);
break;
default:
ret = -EINVAL;
@ -436,17 +452,16 @@ static int loongarch_eiointc_writew(struct kvm_vcpu *vcpu,
break;
case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
index = (offset - EIOINTC_ENABLE_START) >> 1;
old_data = s->enable.reg_u32[index];
old_data = s->enable.reg_u16[index];
s->enable.reg_u16[index] = data;
/*
* 1: enable irq.
* update irq when isr is set.
*/
data = s->enable.reg_u16[index] & ~old_data & s->isr.reg_u16[index];
index = index << 1;
for (i = 0; i < sizeof(data); i++) {
u8 mask = (data >> (i * 8)) & 0xff;
eiointc_enable_irq(vcpu, s, index + i, mask, 1);
eiointc_enable_irq(vcpu, s, index * 2 + i, mask, 1);
}
/*
* 0: disable irq.
@ -455,7 +470,7 @@ static int loongarch_eiointc_writew(struct kvm_vcpu *vcpu,
data = ~s->enable.reg_u16[index] & old_data & s->isr.reg_u16[index];
for (i = 0; i < sizeof(data); i++) {
u8 mask = (data >> (i * 8)) & 0xff;
eiointc_enable_irq(vcpu, s, index, mask, 0);
eiointc_enable_irq(vcpu, s, index * 2 + i, mask, 0);
}
break;
case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
@ -484,7 +499,7 @@ static int loongarch_eiointc_writew(struct kvm_vcpu *vcpu,
irq = offset - EIOINTC_COREMAP_START;
index = irq >> 1;
s->coremap.reg_u16[index] = data;
eiointc_update_sw_coremap(s, irq, (void *)&data, sizeof(data), true);
eiointc_update_sw_coremap(s, irq, data, sizeof(data), true);
break;
default:
ret = -EINVAL;
@ -529,10 +544,9 @@ static int loongarch_eiointc_writel(struct kvm_vcpu *vcpu,
* update irq when isr is set.
*/
data = s->enable.reg_u32[index] & ~old_data & s->isr.reg_u32[index];
index = index << 2;
for (i = 0; i < sizeof(data); i++) {
u8 mask = (data >> (i * 8)) & 0xff;
eiointc_enable_irq(vcpu, s, index + i, mask, 1);
eiointc_enable_irq(vcpu, s, index * 4 + i, mask, 1);
}
/*
* 0: disable irq.
@ -541,7 +555,7 @@ static int loongarch_eiointc_writel(struct kvm_vcpu *vcpu,
data = ~s->enable.reg_u32[index] & old_data & s->isr.reg_u32[index];
for (i = 0; i < sizeof(data); i++) {
u8 mask = (data >> (i * 8)) & 0xff;
eiointc_enable_irq(vcpu, s, index, mask, 0);
eiointc_enable_irq(vcpu, s, index * 4 + i, mask, 0);
}
break;
case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
@ -570,7 +584,7 @@ static int loongarch_eiointc_writel(struct kvm_vcpu *vcpu,
irq = offset - EIOINTC_COREMAP_START;
index = irq >> 2;
s->coremap.reg_u32[index] = data;
eiointc_update_sw_coremap(s, irq, (void *)&data, sizeof(data), true);
eiointc_update_sw_coremap(s, irq, data, sizeof(data), true);
break;
default:
ret = -EINVAL;
@ -615,10 +629,9 @@ static int loongarch_eiointc_writeq(struct kvm_vcpu *vcpu,
* update irq when isr is set.
*/
data = s->enable.reg_u64[index] & ~old_data & s->isr.reg_u64[index];
index = index << 3;
for (i = 0; i < sizeof(data); i++) {
u8 mask = (data >> (i * 8)) & 0xff;
eiointc_enable_irq(vcpu, s, index + i, mask, 1);
eiointc_enable_irq(vcpu, s, index * 8 + i, mask, 1);
}
/*
* 0: disable irq.
@ -627,7 +640,7 @@ static int loongarch_eiointc_writeq(struct kvm_vcpu *vcpu,
data = ~s->enable.reg_u64[index] & old_data & s->isr.reg_u64[index];
for (i = 0; i < sizeof(data); i++) {
u8 mask = (data >> (i * 8)) & 0xff;
eiointc_enable_irq(vcpu, s, index, mask, 0);
eiointc_enable_irq(vcpu, s, index * 8 + i, mask, 0);
}
break;
case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
@ -656,7 +669,7 @@ static int loongarch_eiointc_writeq(struct kvm_vcpu *vcpu,
irq = offset - EIOINTC_COREMAP_START;
index = irq >> 3;
s->coremap.reg_u64[index] = data;
eiointc_update_sw_coremap(s, irq, (void *)&data, sizeof(data), true);
eiointc_update_sw_coremap(s, irq, data, sizeof(data), true);
break;
default:
ret = -EINVAL;
@ -679,6 +692,11 @@ static int kvm_eiointc_write(struct kvm_vcpu *vcpu,
return -EINVAL;
}
if (addr & (len - 1)) {
kvm_err("%s: eiointc not aligned addr %llx len %d\n", __func__, addr, len);
return -EINVAL;
}
vcpu->kvm->stat.eiointc_write_exits++;
spin_lock_irqsave(&eiointc->lock, flags);
switch (len) {
@ -787,7 +805,7 @@ static int kvm_eiointc_ctrl_access(struct kvm_device *dev,
int ret = 0;
unsigned long flags;
unsigned long type = (unsigned long)attr->attr;
u32 i, start_irq;
u32 i, start_irq, val;
void __user *data;
struct loongarch_eiointc *s = dev->kvm->arch.eiointc;
@ -795,8 +813,14 @@ static int kvm_eiointc_ctrl_access(struct kvm_device *dev,
spin_lock_irqsave(&s->lock, flags);
switch (type) {
case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_NUM_CPU:
if (copy_from_user(&s->num_cpu, data, 4))
if (copy_from_user(&val, data, 4))
ret = -EFAULT;
else {
if (val >= EIOINTC_ROUTE_MAX_VCPUS)
ret = -EINVAL;
else
s->num_cpu = val;
}
break;
case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_FEATURE:
if (copy_from_user(&s->features, data, 4))
@ -809,7 +833,7 @@ static int kvm_eiointc_ctrl_access(struct kvm_device *dev,
for (i = 0; i < (EIOINTC_IRQS / 4); i++) {
start_irq = i * 4;
eiointc_update_sw_coremap(s, start_irq,
(void *)&s->coremap.reg_u32[i], sizeof(u32), false);
s->coremap.reg_u32[i], sizeof(u32), false);
}
break;
default:
@ -824,7 +848,7 @@ static int kvm_eiointc_regs_access(struct kvm_device *dev,
struct kvm_device_attr *attr,
bool is_write)
{
int addr, cpuid, offset, ret = 0;
int addr, cpu, offset, ret = 0;
unsigned long flags;
void *p = NULL;
void __user *data;
@ -832,7 +856,7 @@ static int kvm_eiointc_regs_access(struct kvm_device *dev,
s = dev->kvm->arch.eiointc;
addr = attr->attr;
cpuid = addr >> 16;
cpu = addr >> 16;
addr &= 0xffff;
data = (void __user *)attr->addr;
switch (addr) {
@ -857,8 +881,11 @@ static int kvm_eiointc_regs_access(struct kvm_device *dev,
p = &s->isr.reg_u32[offset];
break;
case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
if (cpu >= s->num_cpu)
return -EINVAL;
offset = (addr - EIOINTC_COREISR_START) / 4;
p = &s->coreisr.reg_u32[cpuid][offset];
p = &s->coreisr.reg_u32[cpu][offset];
break;
case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
offset = (addr - EIOINTC_COREMAP_START) / 4;
@ -899,9 +926,15 @@ static int kvm_eiointc_sw_status_access(struct kvm_device *dev,
data = (void __user *)attr->addr;
switch (addr) {
case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_NUM_CPU:
if (is_write)
return ret;
p = &s->num_cpu;
break;
case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_FEATURE:
if (is_write)
return ret;
p = &s->features;
break;
case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_STATE:
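The alignment guards added to kvm_eiointc_read() and kvm_eiointc_write() above rely on the usual power-of-two trick: addr & (len - 1) keeps the low bits of addr, which must all be zero for a len-aligned access. A stand-alone illustration with made-up addresses:

    #include <assert.h>

    static int misaligned(unsigned long long addr, unsigned int len)
    {
        return (addr & (len - 1)) != 0; /* len must be a power of two */
    }

    int main(void)
    {
        assert(!misaligned(0x1c00, 4)); /* 4-byte access, 4-byte aligned: accepted */
        assert(misaligned(0x1c02, 4));  /* 4-byte access at offset 2: rejected */
        assert(misaligned(0x1c04, 8));  /* 8-byte access, only 4-byte aligned: rejected */
        return 0;
    }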


@ -11,6 +11,7 @@
#include <asm/cpu-features.h>
#include <linux/crc32.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/unaligned.h>


@ -2,6 +2,7 @@
// Copyright (C) 2019-2020 Arm Ltd.
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/kasan-checks.h>
#include <linux/kernel.h>


@ -16,12 +16,12 @@ void __init early_iounmap(void __iomem *addr, unsigned long size)
}
void *early_memremap_ro(resource_size_t phys_addr, unsigned long size)
void * __init early_memremap_ro(resource_size_t phys_addr, unsigned long size)
{
return early_memremap(phys_addr, size);
}
void *early_memremap_prot(resource_size_t phys_addr, unsigned long size,
void * __init early_memremap_prot(resource_size_t phys_addr, unsigned long size,
unsigned long prot_val)
{
return early_memremap(phys_addr, size);


@ -3,7 +3,6 @@
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/types.h>


@ -1075,7 +1075,6 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
*/
#ifdef CONFIG_64BIT
#define TASK_SIZE_64 (PGDIR_SIZE * PTRS_PER_PGD / 2)
#define TASK_SIZE_MAX LONG_MAX
#ifdef CONFIG_COMPAT
#define TASK_SIZE_32 (_AC(0x80000000, UL) - PAGE_SIZE)


@ -206,7 +206,7 @@ static inline void __runtime_fixup_32(__le16 *lui_parcel, __le16 *addi_parcel, u
addi_insn_mask &= 0x07fff;
}
if (lower_immediate & 0x00000fff) {
if (lower_immediate & 0x00000fff || lui_insn == RISCV_INSN_NOP4) {
/* replace upper 12 bits of addi with lower 12 bits of val */
addi_insn &= addi_insn_mask;
addi_insn |= (lower_immediate & 0x00000fff) << 20;


@ -127,6 +127,7 @@ do { \
#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define __get_user_8(x, ptr, label) \
do { \
u32 __user *__ptr = (u32 __user *)(ptr); \
u32 __lo, __hi; \
asm_goto_output( \
@ -141,7 +142,7 @@ do { \
: : label); \
(x) = (__typeof__(x))((__typeof__((x) - (x)))( \
(((u64)__hi << 32) | __lo))); \
} while (0)
#else /* !CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
#define __get_user_8(x, ptr, label) \
do { \


@ -18,7 +18,7 @@ static __always_inline ssize_t getrandom_syscall(void *_buffer, size_t _len, uns
register unsigned int flags asm("a2") = _flags;
asm volatile ("ecall\n"
: "+r" (ret)
: "=r" (ret)
: "r" (nr), "r" (buffer), "r" (len), "r" (flags)
: "memory");


@ -205,11 +205,11 @@ static inline void __riscv_v_vstate_save(struct __riscv_v_ext_state *save_to,
THEAD_VSETVLI_T4X0E8M8D1
THEAD_VSB_V_V0T0
"add t0, t0, t4\n\t"
THEAD_VSB_V_V0T0
THEAD_VSB_V_V8T0
"add t0, t0, t4\n\t"
THEAD_VSB_V_V0T0
THEAD_VSB_V_V16T0
"add t0, t0, t4\n\t"
THEAD_VSB_V_V0T0
THEAD_VSB_V_V24T0
: : "r" (datap) : "memory", "t0", "t4");
} else {
asm volatile (
@ -241,11 +241,11 @@ static inline void __riscv_v_vstate_restore(struct __riscv_v_ext_state *restore_
THEAD_VSETVLI_T4X0E8M8D1
THEAD_VLB_V_V0T0
"add t0, t0, t4\n\t"
THEAD_VLB_V_V0T0
THEAD_VLB_V_V8T0
"add t0, t0, t4\n\t"
THEAD_VLB_V_V0T0
THEAD_VLB_V_V16T0
"add t0, t0, t4\n\t"
THEAD_VLB_V_V0T0
THEAD_VLB_V_V24T0
: : "r" (datap) : "memory", "t0", "t4");
} else {
asm volatile (


@ -50,6 +50,7 @@ atomic_t hart_lottery __section(".sdata")
#endif
;
unsigned long boot_cpu_hartid;
EXPORT_SYMBOL_GPL(boot_cpu_hartid);
/*
* Place kernel memory regions on the resource tree so that


@ -454,7 +454,7 @@ static int handle_scalar_misaligned_load(struct pt_regs *regs)
val.data_u64 = 0;
if (user_mode(regs)) {
if (copy_from_user_nofault(&val, (u8 __user *)addr, len))
if (copy_from_user(&val, (u8 __user *)addr, len))
return -1;
} else {
memcpy(&val, (u8 *)addr, len);
@ -555,7 +555,7 @@ static int handle_scalar_misaligned_store(struct pt_regs *regs)
return -EOPNOTSUPP;
if (user_mode(regs)) {
if (copy_to_user_nofault((u8 __user *)addr, &val, len))
if (copy_to_user((u8 __user *)addr, &val, len))
return -1;
} else {
memcpy((u8 *)addr, &val, len);


@ -30,7 +30,7 @@ SECTIONS
*(.data .data.* .gnu.linkonce.d.*)
*(.dynbss)
*(.bss .bss.* .gnu.linkonce.b.*)
}
} :text
.note : { *(.note.*) } :text :note


@ -8,7 +8,7 @@
#include <linux/types.h>
/* All SiFive vendor extensions supported in Linux */
const struct riscv_isa_ext_data riscv_isa_vendor_ext_sifive[] = {
static const struct riscv_isa_ext_data riscv_isa_vendor_ext_sifive[] = {
__RISCV_ISA_EXT_DATA(xsfvfnrclipxfqf, RISCV_ISA_VENDOR_EXT_XSFVFNRCLIPXFQF),
__RISCV_ISA_EXT_DATA(xsfvfwmaccqqq, RISCV_ISA_VENDOR_EXT_XSFVFWMACCQQQ),
__RISCV_ISA_EXT_DATA(xsfvqmaccdod, RISCV_ISA_VENDOR_EXT_XSFVQMACCDOD),


@ -265,7 +265,7 @@ static __always_inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *r
addr = kernel_stack_pointer(regs) + n * sizeof(long);
if (!regs_within_kernel_stack(regs, addr))
return 0;
return READ_ONCE_NOCHECK(addr);
return READ_ONCE_NOCHECK(*(unsigned long *)addr);
}
/**


@ -54,6 +54,7 @@ static inline bool ers_result_indicates_abort(pci_ers_result_t ers_res)
case PCI_ERS_RESULT_CAN_RECOVER:
case PCI_ERS_RESULT_RECOVERED:
case PCI_ERS_RESULT_NEED_RESET:
case PCI_ERS_RESULT_NONE:
return false;
default:
return true;
@ -78,10 +79,6 @@ static bool is_driver_supported(struct pci_driver *driver)
return false;
if (!driver->err_handler->error_detected)
return false;
if (!driver->err_handler->slot_reset)
return false;
if (!driver->err_handler->resume)
return false;
return true;
}
@ -106,6 +103,10 @@ static pci_ers_result_t zpci_event_do_error_state_clear(struct pci_dev *pdev,
struct zpci_dev *zdev = to_zpci(pdev);
int rc;
/* The underlying device may have been disabled by the event */
if (!zdev_enabled(zdev))
return PCI_ERS_RESULT_NEED_RESET;
pr_info("%s: Unblocking device access for examination\n", pci_name(pdev));
rc = zpci_reset_load_store_blocked(zdev);
if (rc) {
@ -114,8 +115,11 @@ static pci_ers_result_t zpci_event_do_error_state_clear(struct pci_dev *pdev,
return PCI_ERS_RESULT_NEED_RESET;
}
if (driver->err_handler->mmio_enabled) {
if (driver->err_handler->mmio_enabled)
ers_res = driver->err_handler->mmio_enabled(pdev);
else
ers_res = PCI_ERS_RESULT_NONE;
if (ers_result_indicates_abort(ers_res)) {
pr_info("%s: Automatic recovery failed after MMIO re-enable\n",
pci_name(pdev));
@ -124,7 +128,6 @@ static pci_ers_result_t zpci_event_do_error_state_clear(struct pci_dev *pdev,
pr_debug("%s: Driver needs reset to recover\n", pci_name(pdev));
return ers_res;
}
}
pr_debug("%s: Unblocking DMA\n", pci_name(pdev));
rc = zpci_clear_error_state(zdev);
@ -150,7 +153,12 @@ static pci_ers_result_t zpci_event_do_reset(struct pci_dev *pdev,
return ers_res;
}
pdev->error_state = pci_channel_io_normal;
if (driver->err_handler->slot_reset)
ers_res = driver->err_handler->slot_reset(pdev);
else
ers_res = PCI_ERS_RESULT_NONE;
if (ers_result_indicates_abort(ers_res)) {
pr_info("%s: Automatic recovery failed after slot reset\n", pci_name(pdev));
return ers_res;
@ -214,7 +222,7 @@ static pci_ers_result_t zpci_event_attempt_error_recovery(struct pci_dev *pdev)
goto out_unlock;
}
if (ers_res == PCI_ERS_RESULT_CAN_RECOVER) {
if (ers_res != PCI_ERS_RESULT_NEED_RESET) {
ers_res = zpci_event_do_error_state_clear(pdev, driver);
if (ers_result_indicates_abort(ers_res)) {
status_str = "failed (abort on MMIO enable)";
@ -225,6 +233,16 @@ static pci_ers_result_t zpci_event_attempt_error_recovery(struct pci_dev *pdev)
if (ers_res == PCI_ERS_RESULT_NEED_RESET)
ers_res = zpci_event_do_reset(pdev, driver);
/*
* ers_res can be PCI_ERS_RESULT_NONE either because the driver
* decided to return it, indicating that it abstains from voting
* on how to recover, or because it didn't implement the callback.
* Both cases assume, that if there is nothing else causing a
* disconnect, we recovered successfully.
*/
if (ers_res == PCI_ERS_RESULT_NONE)
ers_res = PCI_ERS_RESULT_RECOVERED;
if (ers_res != PCI_ERS_RESULT_RECOVERED) {
pr_err("%s: Automatic recovery failed; operator intervention is required\n",
pci_name(pdev));
@ -273,6 +291,8 @@ static void __zpci_event_error(struct zpci_ccdf_err *ccdf)
struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
struct pci_dev *pdev = NULL;
pci_ers_result_t ers_res;
u32 fh = 0;
int rc;
zpci_dbg(3, "err fid:%x, fh:%x, pec:%x\n",
ccdf->fid, ccdf->fh, ccdf->pec);
@ -281,6 +301,15 @@ static void __zpci_event_error(struct zpci_ccdf_err *ccdf)
if (zdev) {
mutex_lock(&zdev->state_lock);
rc = clp_refresh_fh(zdev->fid, &fh);
if (rc)
goto no_pdev;
if (!fh || ccdf->fh != fh) {
/* Ignore events with stale handles */
zpci_dbg(3, "err fid:%x, fh:%x (stale %x)\n",
ccdf->fid, fh, ccdf->fh);
goto no_pdev;
}
zpci_update_fh(zdev, ccdf->fh);
if (zdev->zbus->bus)
pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn);


@ -9,6 +9,14 @@
#include <asm/cpufeature.h>
#include <asm/msr.h>
/*
* Define bits that are always set to 1 in DR7, only bit 10 is
* architecturally reserved to '1'.
*
* This is also the init/reset value for DR7.
*/
#define DR7_FIXED_1 0x00000400
DECLARE_PER_CPU(unsigned long, cpu_dr7);
#ifndef CONFIG_PARAVIRT_XXL
@ -100,8 +108,8 @@ static __always_inline void native_set_debugreg(int regno, unsigned long value)
static inline void hw_breakpoint_disable(void)
{
/* Zero the control register for HW Breakpoint */
set_debugreg(0UL, 7);
/* Reset the control register for HW Breakpoint */
set_debugreg(DR7_FIXED_1, 7);
/* Zero-out the individual HW breakpoint address registers */
set_debugreg(0UL, 0);
@ -125,9 +133,12 @@ static __always_inline unsigned long local_db_save(void)
return 0;
get_debugreg(dr7, 7);
dr7 &= ~0x400; /* architecturally set bit */
/* Architecturally set bit */
dr7 &= ~DR7_FIXED_1;
if (dr7)
set_debugreg(0, 7);
set_debugreg(DR7_FIXED_1, 7);
/*
* Ensure the compiler doesn't lower the above statements into
* the critical section; disabling breakpoints late would not


@ -31,6 +31,7 @@
#include <asm/apic.h>
#include <asm/pvclock-abi.h>
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/mtrr.h>
#include <asm/msr-index.h>
@ -249,7 +250,6 @@ enum x86_intercept_stage;
#define DR7_BP_EN_MASK 0x000000ff
#define DR7_GE (1 << 9)
#define DR7_GD (1 << 13)
#define DR7_FIXED_1 0x00000400
#define DR7_VOLATILE 0xffff2bff
#define KVM_GUESTDBG_VALID_MASK \


@ -15,7 +15,26 @@
which debugging register was responsible for the trap. The other bits
are either reserved or not of interest to us. */
/* Define reserved bits in DR6 which are always set to 1 */
/*
* Define bits in DR6 which are set to 1 by default.
*
* This is also the DR6 architectural value following Power-up, Reset or INIT.
*
* Note, with the introduction of Bus Lock Detection (BLD) and Restricted
* Transactional Memory (RTM), the DR6 register has been modified:
*
* 1) BLD flag (bit 11) is no longer reserved to 1 if the CPU supports
* Bus Lock Detection. The assertion of a bus lock could clear it.
*
* 2) RTM flag (bit 16) is no longer reserved to 1 if the CPU supports
* restricted transactional memory. #DB occurred inside an RTM region
* could clear it.
*
* Apparently, DR6.BLD and DR6.RTM are active low bits.
*
* As a result, DR6_RESERVED is an incorrect name now, but it is kept for
* compatibility.
*/
#define DR6_RESERVED (0xFFFF0FF0)
#define DR_TRAP0 (0x1) /* db0 */
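The comment above says DR6.BLD and DR6.RTM are active low, which is why the handlers later in this merge XOR the raw register against DR6_RESERVED to get a positive-polarity event mask. A stand-alone illustration with a made-up raw value:

    #include <stdio.h>
    #include <stdint.h>

    #define DR6_RESERVED 0xFFFF0FF0u /* architectural reset value */
    #define DR6_BLD      (1u << 11)  /* active low: cleared by a bus-lock #DB */

    int main(void)
    {
        /* Raw DR6 after a bus-lock #DB that also hit breakpoint 0. */
        uint32_t raw = (DR6_RESERVED & ~DR6_BLD) | 0x1;
        uint32_t events = raw ^ DR6_RESERVED; /* flip active-low bits */
        printf("events = %#x\n", events);     /* 0x801: B0 and BLD asserted */
        return 0;
    }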


@ -2243,20 +2243,16 @@ EXPORT_PER_CPU_SYMBOL(__stack_chk_guard);
#endif
#endif
/*
* Clear all 6 debug registers:
*/
static void clear_all_debug_regs(void)
static void initialize_debug_regs(void)
{
int i;
for (i = 0; i < 8; i++) {
/* Ignore db4, db5 */
if ((i == 4) || (i == 5))
continue;
set_debugreg(0, i);
}
/* Control register first -- to make sure everything is disabled. */
set_debugreg(DR7_FIXED_1, 7);
set_debugreg(DR6_RESERVED, 6);
/* dr5 and dr4 don't exist */
set_debugreg(0, 3);
set_debugreg(0, 2);
set_debugreg(0, 1);
set_debugreg(0, 0);
}
#ifdef CONFIG_KGDB
@ -2417,7 +2413,7 @@ void cpu_init(void)
load_mm_ldt(&init_mm);
clear_all_debug_regs();
initialize_debug_regs();
dbg_restore_debug_regs();
doublefault_init_cpu_tss();


@ -385,7 +385,7 @@ static void kgdb_disable_hw_debug(struct pt_regs *regs)
struct perf_event *bp;
/* Disable hardware debugging while we are in kgdb: */
set_debugreg(0UL, 7);
set_debugreg(DR7_FIXED_1, 7);
for (i = 0; i < HBP_NUM; i++) {
if (!breakinfo[i].enabled)
continue;


@ -93,7 +93,7 @@ void __show_regs(struct pt_regs *regs, enum show_regs_mode mode,
/* Only print out debug registers if they are in their non-default state. */
if ((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
(d6 == DR6_RESERVED) && (d7 == 0x400))
(d6 == DR6_RESERVED) && (d7 == DR7_FIXED_1))
return;
printk("%sDR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",


@ -133,7 +133,7 @@ void __show_regs(struct pt_regs *regs, enum show_regs_mode mode,
/* Only print out debug registers if they are in their non-default state. */
if (!((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
(d6 == DR6_RESERVED) && (d7 == 0x400))) {
(d6 == DR6_RESERVED) && (d7 == DR7_FIXED_1))) {
printk("%sDR0: %016lx DR1: %016lx DR2: %016lx\n",
log_lvl, d0, d1, d2);
printk("%sDR3: %016lx DR6: %016lx DR7: %016lx\n",


@ -1022,24 +1022,32 @@ static bool is_sysenter_singlestep(struct pt_regs *regs)
#endif
}
static __always_inline unsigned long debug_read_clear_dr6(void)
static __always_inline unsigned long debug_read_reset_dr6(void)
{
unsigned long dr6;
get_debugreg(dr6, 6);
dr6 ^= DR6_RESERVED; /* Flip to positive polarity */
/*
* The Intel SDM says:
*
* Certain debug exceptions may clear bits 0-3. The remaining
* contents of the DR6 register are never cleared by the
* processor. To avoid confusion in identifying debug
* exceptions, debug handlers should clear the register before
* returning to the interrupted task.
* Certain debug exceptions may clear bits 0-3 of DR6.
*
* Keep it simple: clear DR6 immediately.
* BLD induced #DB clears DR6.BLD and any other debug
* exception doesn't modify DR6.BLD.
*
* RTM induced #DB clears DR6.RTM and any other debug
* exception sets DR6.RTM.
*
* To avoid confusion in identifying debug exceptions,
* debug handlers should set DR6.BLD and DR6.RTM, and
* clear other DR6 bits before returning.
*
* Keep it simple: write DR6 with its architectural reset
* value 0xFFFF0FF0, defined as DR6_RESERVED, immediately.
*/
get_debugreg(dr6, 6);
set_debugreg(DR6_RESERVED, 6);
dr6 ^= DR6_RESERVED; /* Flip to positive polarity */
return dr6;
}
@ -1239,13 +1247,13 @@ out:
/* IST stack entry */
DEFINE_IDTENTRY_DEBUG(exc_debug)
{
exc_debug_kernel(regs, debug_read_clear_dr6());
exc_debug_kernel(regs, debug_read_reset_dr6());
}
/* User entry, runs on regular task stack */
DEFINE_IDTENTRY_DEBUG_USER(exc_debug)
{
exc_debug_user(regs, debug_read_clear_dr6());
exc_debug_user(regs, debug_read_reset_dr6());
}
#ifdef CONFIG_X86_FRED
@ -1264,7 +1272,7 @@ DEFINE_FREDENTRY_DEBUG(exc_debug)
{
/*
* FRED #DB stores DR6 on the stack in the format which
* debug_read_clear_dr6() returns for the IDT entry points.
* debug_read_reset_dr6() returns for the IDT entry points.
*/
unsigned long dr6 = fred_event_data(regs);
@ -1279,7 +1287,7 @@ DEFINE_FREDENTRY_DEBUG(exc_debug)
/* 32 bit does not have separate entry points. */
DEFINE_IDTENTRY_RAW(exc_debug)
{
unsigned long dr6 = debug_read_clear_dr6();
unsigned long dr6 = debug_read_reset_dr6();
if (user_mode(regs))
exc_debug_user(regs, dr6);


@ -11035,7 +11035,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (unlikely(vcpu->arch.switch_db_regs &&
!(vcpu->arch.switch_db_regs & KVM_DEBUGREG_AUTO_SWITCH))) {
set_debugreg(0, 7);
set_debugreg(DR7_FIXED_1, 7);
set_debugreg(vcpu->arch.eff_db[0], 0);
set_debugreg(vcpu->arch.eff_db[1], 1);
set_debugreg(vcpu->arch.eff_db[2], 2);
@ -11044,7 +11044,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))
kvm_x86_call(set_dr6)(vcpu, vcpu->arch.dr6);
} else if (unlikely(hw_breakpoint_active())) {
set_debugreg(0, 7);
set_debugreg(DR7_FIXED_1, 7);
}
vcpu->arch.host_debugctl = get_debugctlmsr();


@ -128,23 +128,27 @@ static void part_stat_read_all(struct block_device *part,
static void bdev_count_inflight_rw(struct block_device *part,
unsigned int inflight[2], bool mq_driver)
{
int write = 0;
int read = 0;
int cpu;
if (mq_driver) {
blk_mq_in_driver_rw(part, inflight);
} else {
for_each_possible_cpu(cpu) {
inflight[READ] += part_stat_local_read_cpu(
part, in_flight[READ], cpu);
inflight[WRITE] += part_stat_local_read_cpu(
part, in_flight[WRITE], cpu);
}
return;
}
if (WARN_ON_ONCE((int)inflight[READ] < 0))
inflight[READ] = 0;
if (WARN_ON_ONCE((int)inflight[WRITE] < 0))
inflight[WRITE] = 0;
for_each_possible_cpu(cpu) {
read += part_stat_local_read_cpu(part, in_flight[READ], cpu);
write += part_stat_local_read_cpu(part, in_flight[WRITE], cpu);
}
/*
* While iterating all CPUs, some IOs may be issued from a CPU already
* traversed and complete on a CPU that has not yet been traversed,
* causing the inflight number to be negative.
*/
inflight[READ] = read > 0 ? read : 0;
inflight[WRITE] = write > 0 ? write : 0;
}
/**
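A toy illustration of the race the comment above describes, assuming three CPUs and hypothetical counter values: an IO issued on a CPU that was already sampled completes on one that was not, so the transient sum dips below zero and is clamped rather than warned about:

    #include <stdio.h>

    int main(void)
    {
        /* Per-CPU in_flight[READ] deltas sampled mid-race: the issue side
         * (+1) was read as 0 on cpu0, the completion (-1) landed on cpu2. */
        int per_cpu[3] = { 0, 0, -1 };
        int read = 0;

        for (int cpu = 0; cpu < 3; cpu++)
            read += per_cpu[cpu];

        printf("inflight[READ] = %d\n", read > 0 ? read : 0); /* 0, not -1 */
        return 0;
    }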


@ -21,10 +21,10 @@
*/
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/byteorder.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/unaligned.h>
#define WP512_DIGEST_SIZE 64
#define WP384_DIGEST_SIZE 48
@ -37,9 +37,6 @@
struct wp512_ctx {
u8 bitLength[WP512_LENGTHBYTES];
u8 buffer[WP512_BLOCK_SIZE];
int bufferBits;
int bufferPos;
u64 hash[WP512_DIGEST_SIZE/8];
};
@ -779,16 +776,16 @@ static const u64 rc[WHIRLPOOL_ROUNDS] = {
* The core Whirlpool transform.
*/
static __no_kmsan_checks void wp512_process_buffer(struct wp512_ctx *wctx) {
static __no_kmsan_checks void wp512_process_buffer(struct wp512_ctx *wctx,
const u8 *buffer) {
int i, r;
u64 K[8]; /* the round key */
u64 block[8]; /* mu(buffer) */
u64 state[8]; /* the cipher state */
u64 L[8];
const __be64 *buffer = (const __be64 *)wctx->buffer;
for (i = 0; i < 8; i++)
block[i] = be64_to_cpu(buffer[i]);
block[i] = get_unaligned_be64(buffer + i * 8);
state[0] = block[0] ^ (K[0] = wctx->hash[0]);
state[1] = block[1] ^ (K[1] = wctx->hash[1]);
@ -991,8 +988,6 @@ static int wp512_init(struct shash_desc *desc) {
int i;
memset(wctx->bitLength, 0, 32);
wctx->bufferBits = wctx->bufferPos = 0;
wctx->buffer[0] = 0;
for (i = 0; i < 8; i++) {
wctx->hash[i] = 0L;
}
@ -1000,84 +995,54 @@ static int wp512_init(struct shash_desc *desc) {
return 0;
}
static int wp512_update(struct shash_desc *desc, const u8 *source,
unsigned int len)
static void wp512_add_length(u8 *bitLength, u64 value)
{
struct wp512_ctx *wctx = shash_desc_ctx(desc);
int sourcePos = 0;
unsigned int bits_len = len * 8; // convert to number of bits
int sourceGap = (8 - ((int)bits_len & 7)) & 7;
int bufferRem = wctx->bufferBits & 7;
u32 carry;
int i;
u32 b, carry;
u8 *buffer = wctx->buffer;
u8 *bitLength = wctx->bitLength;
int bufferBits = wctx->bufferBits;
int bufferPos = wctx->bufferPos;
u64 value = bits_len;
for (i = 31, carry = 0; i >= 0 && (carry != 0 || value != 0ULL); i--) {
carry += bitLength[i] + ((u32)value & 0xff);
bitLength[i] = (u8)carry;
carry >>= 8;
value >>= 8;
}
while (bits_len > 8) {
b = ((source[sourcePos] << sourceGap) & 0xff) |
((source[sourcePos + 1] & 0xff) >> (8 - sourceGap));
buffer[bufferPos++] |= (u8)(b >> bufferRem);
bufferBits += 8 - bufferRem;
if (bufferBits == WP512_BLOCK_SIZE * 8) {
wp512_process_buffer(wctx);
bufferBits = bufferPos = 0;
}
buffer[bufferPos] = b << (8 - bufferRem);
bufferBits += bufferRem;
bits_len -= 8;
sourcePos++;
}
if (bits_len > 0) {
b = (source[sourcePos] << sourceGap) & 0xff;
buffer[bufferPos] |= b >> bufferRem;
} else {
b = 0;
}
if (bufferRem + bits_len < 8) {
bufferBits += bits_len;
} else {
bufferPos++;
bufferBits += 8 - bufferRem;
bits_len -= 8 - bufferRem;
if (bufferBits == WP512_BLOCK_SIZE * 8) {
wp512_process_buffer(wctx);
bufferBits = bufferPos = 0;
}
buffer[bufferPos] = b << (8 - bufferRem);
bufferBits += (int)bits_len;
}
wctx->bufferBits = bufferBits;
wctx->bufferPos = bufferPos;
return 0;
}
static int wp512_final(struct shash_desc *desc, u8 *out)
static int wp512_update(struct shash_desc *desc, const u8 *source,
unsigned int len)
{
struct wp512_ctx *wctx = shash_desc_ctx(desc);
unsigned int remain = len % WP512_BLOCK_SIZE;
u64 bits_len = (len - remain) * 8ull;
u8 *bitLength = wctx->bitLength;
wp512_add_length(bitLength, bits_len);
do {
wp512_process_buffer(wctx, source);
source += WP512_BLOCK_SIZE;
bits_len -= WP512_BLOCK_SIZE * 8;
} while (bits_len);
return remain;
}
static int wp512_finup(struct shash_desc *desc, const u8 *src,
unsigned int bufferPos, u8 *out)
{
struct wp512_ctx *wctx = shash_desc_ctx(desc);
int i;
u8 *buffer = wctx->buffer;
u8 *bitLength = wctx->bitLength;
int bufferBits = wctx->bufferBits;
int bufferPos = wctx->bufferPos;
__be64 *digest = (__be64 *)out;
u8 buffer[WP512_BLOCK_SIZE];
buffer[bufferPos] |= 0x80U >> (bufferBits & 7);
wp512_add_length(bitLength, bufferPos * 8);
memcpy(buffer, src, bufferPos);
buffer[bufferPos] = 0x80U;
bufferPos++;
if (bufferPos > WP512_BLOCK_SIZE - WP512_LENGTHBYTES) {
if (bufferPos < WP512_BLOCK_SIZE)
memset(&buffer[bufferPos], 0, WP512_BLOCK_SIZE - bufferPos);
wp512_process_buffer(wctx);
wp512_process_buffer(wctx, buffer);
bufferPos = 0;
}
if (bufferPos < WP512_BLOCK_SIZE - WP512_LENGTHBYTES)
@ -1086,31 +1051,32 @@ static int wp512_final(struct shash_desc *desc, u8 *out)
bufferPos = WP512_BLOCK_SIZE - WP512_LENGTHBYTES;
memcpy(&buffer[WP512_BLOCK_SIZE - WP512_LENGTHBYTES],
bitLength, WP512_LENGTHBYTES);
wp512_process_buffer(wctx);
wp512_process_buffer(wctx, buffer);
memzero_explicit(buffer, sizeof(buffer));
for (i = 0; i < WP512_DIGEST_SIZE/8; i++)
digest[i] = cpu_to_be64(wctx->hash[i]);
wctx->bufferBits = bufferBits;
wctx->bufferPos = bufferPos;
return 0;
}
static int wp384_final(struct shash_desc *desc, u8 *out)
static int wp384_finup(struct shash_desc *desc, const u8 *src,
unsigned int len, u8 *out)
{
u8 D[64];
wp512_final(desc, D);
wp512_finup(desc, src, len, D);
memcpy(out, D, WP384_DIGEST_SIZE);
memzero_explicit(D, WP512_DIGEST_SIZE);
return 0;
}
static int wp256_final(struct shash_desc *desc, u8 *out)
static int wp256_finup(struct shash_desc *desc, const u8 *src,
unsigned int len, u8 *out)
{
u8 D[64];
wp512_final(desc, D);
wp512_finup(desc, src, len, D);
memcpy(out, D, WP256_DIGEST_SIZE);
memzero_explicit(D, WP512_DIGEST_SIZE);
@ -1121,11 +1087,12 @@ static struct shash_alg wp_algs[3] = { {
.digestsize = WP512_DIGEST_SIZE,
.init = wp512_init,
.update = wp512_update,
.final = wp512_final,
.finup = wp512_finup,
.descsize = sizeof(struct wp512_ctx),
.base = {
.cra_name = "wp512",
.cra_driver_name = "wp512-generic",
.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = WP512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@ -1133,11 +1100,12 @@ static struct shash_alg wp_algs[3] = { {
.digestsize = WP384_DIGEST_SIZE,
.init = wp512_init,
.update = wp512_update,
.final = wp384_final,
.finup = wp384_finup,
.descsize = sizeof(struct wp512_ctx),
.base = {
.cra_name = "wp384",
.cra_driver_name = "wp384-generic",
.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = WP512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@ -1145,11 +1113,12 @@ static struct shash_alg wp_algs[3] = { {
.digestsize = WP256_DIGEST_SIZE,
.init = wp512_init,
.update = wp512_update,
.final = wp256_final,
.finup = wp256_finup,
.descsize = sizeof(struct wp512_ctx),
.base = {
.cra_name = "wp256",
.cra_driver_name = "wp256-generic",
.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = WP512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
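From a caller's point of view, the block-only conversion above leaves the shash interface unchanged. A hedged kernel-side usage sketch (the wrapper function name is hypothetical):

    #include <crypto/hash.h>
    #include <linux/err.h>

    static int wp512_digest_example(const u8 *data, unsigned int len, u8 out[64])
    {
        struct crypto_shash *tfm = crypto_alloc_shash("wp512", 0, 0);
        int ret;

        if (IS_ERR(tfm))
            return PTR_ERR(tfm);

        {
            SHASH_DESC_ON_STACK(desc, tfm);

            desc->tfm = tfm;
            ret = crypto_shash_digest(desc, data, len, out); /* init+update+finup */
        }

        crypto_free_shash(tfm);
        return ret;
    }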


@ -1450,7 +1450,7 @@ static bool ahci_broken_lpm(struct pci_dev *pdev)
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_VERSION, "ASUSPRO D840MB_M840SA"),
DMI_MATCH(DMI_PRODUCT_NAME, "ASUSPRO D840MB_M840SA"),
},
/* 320 is broken, there is no known good version. */
},


@ -1148,8 +1148,8 @@ exit:
blk_mq_end_request(req, res);
}
static void ublk_complete_io_cmd(struct ublk_io *io, struct request *req,
int res, unsigned issue_flags)
static struct io_uring_cmd *__ublk_prep_compl_io_cmd(struct ublk_io *io,
struct request *req)
{
/* read cmd first because req will overwrite it */
struct io_uring_cmd *cmd = io->cmd;
@ -1164,6 +1164,13 @@ static void ublk_complete_io_cmd(struct ublk_io *io, struct request *req,
io->flags &= ~UBLK_IO_FLAG_ACTIVE;
io->req = req;
return cmd;
}
static void ublk_complete_io_cmd(struct ublk_io *io, struct request *req,
int res, unsigned issue_flags)
{
struct io_uring_cmd *cmd = __ublk_prep_compl_io_cmd(io, req);
/* tell ublksrv one io request is coming */
io_uring_cmd_done(cmd, res, 0, issue_flags);
@ -1416,6 +1423,14 @@ static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_OK;
}
static inline bool ublk_belong_to_same_batch(const struct ublk_io *io,
const struct ublk_io *io2)
{
return (io_uring_cmd_ctx_handle(io->cmd) ==
io_uring_cmd_ctx_handle(io2->cmd)) &&
(io->task == io2->task);
}
static void ublk_queue_rqs(struct rq_list *rqlist)
{
struct rq_list requeue_list = { };
@ -1427,7 +1442,8 @@ static void ublk_queue_rqs(struct rq_list *rqlist)
struct ublk_queue *this_q = req->mq_hctx->driver_data;
struct ublk_io *this_io = &this_q->ios[req->tag];
if (io && io->task != this_io->task && !rq_list_empty(&submit_list))
if (io && !ublk_belong_to_same_batch(io, this_io) &&
!rq_list_empty(&submit_list))
ublk_queue_cmd_list(io, &submit_list);
io = this_io;
@ -2148,10 +2164,9 @@ static int ublk_commit_and_fetch(const struct ublk_queue *ubq,
return 0;
}
static bool ublk_get_data(const struct ublk_queue *ubq, struct ublk_io *io)
static bool ublk_get_data(const struct ublk_queue *ubq, struct ublk_io *io,
struct request *req)
{
struct request *req = io->req;
/*
* We have handled UBLK_IO_NEED_GET_DATA command,
* so clear UBLK_IO_FLAG_NEED_GET_DATA now and just
@ -2178,6 +2193,7 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
u32 cmd_op = cmd->cmd_op;
unsigned tag = ub_cmd->tag;
int ret = -EINVAL;
struct request *req;
pr_devel("%s: received: cmd op %d queue %d tag %d result %d\n",
__func__, cmd->cmd_op, ub_cmd->q_id, tag,
@ -2236,11 +2252,19 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
goto out;
break;
case UBLK_IO_NEED_GET_DATA:
io->addr = ub_cmd->addr;
if (!ublk_get_data(ubq, io))
return -EIOCBQUEUED;
/*
* ublk_get_data() may fail and fall back to requeue, so keep
* the uring_cmd active first and prepare for handling the
* requeued request
*/
req = io->req;
ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
io->flags &= ~UBLK_IO_FLAG_OWNED_BY_SRV;
if (likely(ublk_get_data(ubq, io, req))) {
__ublk_prep_compl_io_cmd(io, req);
return UBLK_IO_RES_OK;
}
break;
default:
goto out;
}
@ -2825,7 +2849,8 @@ static int ublk_ctrl_add_dev(const struct ublksrv_ctrl_cmd *header)
if (copy_from_user(&info, argp, sizeof(info)))
return -EFAULT;
if (info.queue_depth > UBLK_MAX_QUEUE_DEPTH || info.nr_hw_queues > UBLK_MAX_NR_QUEUES)
if (info.queue_depth > UBLK_MAX_QUEUE_DEPTH || !info.queue_depth ||
info.nr_hw_queues > UBLK_MAX_NR_QUEUES || !info.nr_hw_queues)
return -EINVAL;
if (capable(CAP_SYS_ADMIN))
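
The new ublk_belong_to_same_batch() helper above splits the dispatch list whenever either the io_uring context or the owning task changes. A toy flush-on-key-change walk in plain C, with invented integer ctx/task fields standing in for io_uring_cmd_ctx_handle() and io->task:

#include <stdio.h>

struct io { int ctx; int task; };	/* stand-ins for the batch key */

static int same_batch(const struct io *a, const struct io *b)
{
	return a->ctx == b->ctx && a->task == b->task;
}

static void flush(const struct io *key, int n)
{
	printf("dispatch %d request(s) for ctx=%d task=%d\n", n, key->ctx, key->task);
}

int main(void)
{
	struct io reqs[] = {
		{ 1, 10 }, { 1, 10 }, { 1, 11 }, { 2, 11 }, { 2, 11 },
	};
	const struct io *key = NULL;
	int pending = 0;

	for (size_t i = 0; i < sizeof(reqs) / sizeof(reqs[0]); i++) {
		/* Flush the batch when the (ctx, task) key changes. */
		if (key && !same_batch(key, &reqs[i]) && pending) {
			flush(key, pending);
			pending = 0;
		}
		key = &reqs[i];
		pending++;
	}
	if (pending)
		flush(key, pending);
	return 0;
}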

View file

@ -103,10 +103,10 @@ static int cxl_scrub_get_attrbs(struct cxl_patrol_scrub_context *cxl_ps_ctx,
u8 *cap, u16 *cycle, u8 *flags, u8 *min_cycle)
{
struct cxl_mailbox *cxl_mbox;
u8 min_scrub_cycle = U8_MAX;
struct cxl_region_params *p;
struct cxl_memdev *cxlmd;
struct cxl_region *cxlr;
u8 min_scrub_cycle = 0;
int i, ret;
if (!cxl_ps_ctx->cxlr) {
@ -133,8 +133,12 @@ static int cxl_scrub_get_attrbs(struct cxl_patrol_scrub_context *cxl_ps_ctx,
if (ret)
return ret;
/*
* The min_scrub_cycle of a region is the max of minimum scrub
* cycles supported by memdevs that back the region.
*/
if (min_cycle)
min_scrub_cycle = min(*min_cycle, min_scrub_cycle);
min_scrub_cycle = max(*min_cycle, min_scrub_cycle);
}
if (min_cycle)
@ -1099,8 +1103,10 @@ int cxl_store_rec_gen_media(struct cxl_memdev *cxlmd, union cxl_event *evt)
old_rec = xa_store(&array_rec->rec_gen_media,
le64_to_cpu(rec->media_hdr.phys_addr), rec,
GFP_KERNEL);
if (xa_is_err(old_rec))
if (xa_is_err(old_rec)) {
kfree(rec);
return xa_err(old_rec);
}
kfree(old_rec);
@ -1127,8 +1133,10 @@ int cxl_store_rec_dram(struct cxl_memdev *cxlmd, union cxl_event *evt)
old_rec = xa_store(&array_rec->rec_dram,
le64_to_cpu(rec->media_hdr.phys_addr), rec,
GFP_KERNEL);
if (xa_is_err(old_rec))
if (xa_is_err(old_rec)) {
kfree(rec);
return xa_err(old_rec);
}
kfree(old_rec);
@ -1315,7 +1323,7 @@ cxl_mem_get_rec_dram(struct cxl_memdev *cxlmd,
attrbs.bank = ctx->bank;
break;
case EDAC_REPAIR_RANK_SPARING:
attrbs.repair_type = CXL_BANK_SPARING;
attrbs.repair_type = CXL_RANK_SPARING;
break;
default:
return NULL;
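
The scrub fix above makes a region's minimum cycle the maximum of its memdevs' minimums, matching the sysfs ABI text, which also defines the region maximum as the minimum of the device maximums. A self-contained sketch of that aggregation over invented per-device values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Invented per-memdev (min, max) scrub cycles, in hours. */
	uint8_t min_cyc[] = { 12, 24, 16 };
	uint8_t max_cyc[] = { 48, 96, 72 };
	uint8_t region_min = 0;		/* start low, take the max of minimums */
	uint8_t region_max = UINT8_MAX;	/* start high, take the min of maximums */

	for (int i = 0; i < 3; i++) {
		if (min_cyc[i] > region_min)
			region_min = min_cyc[i];
		if (max_cyc[i] < region_max)
			region_max = max_cyc[i];
	}
	/* Every backing device can honour any value in [region_min, region_max]. */
	printf("region scrub cycle range: %u..%u\n", region_min, region_max);
	return 0;
}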

View file

@ -544,7 +544,7 @@ static bool cxlctl_validate_set_features(struct cxl_features_state *cxlfs,
u32 flags;
if (rpc_in->op_size < sizeof(uuid_t))
return ERR_PTR(-EINVAL);
return false;
feat = cxl_feature_info(cxlfs, &rpc_in->set_feat_in.uuid);
if (IS_ERR(feat))

View file

@ -31,40 +31,38 @@ static void cxl_cper_trace_uncorr_port_prot_err(struct pci_dev *pdev,
ras_cap.header_log);
}
static void cxl_cper_trace_corr_prot_err(struct pci_dev *pdev,
static void cxl_cper_trace_corr_prot_err(struct cxl_memdev *cxlmd,
struct cxl_ras_capability_regs ras_cap)
{
u32 status = ras_cap.cor_status & ~ras_cap.cor_mask;
struct cxl_dev_state *cxlds;
cxlds = pci_get_drvdata(pdev);
if (!cxlds)
return;
trace_cxl_aer_correctable_error(cxlds->cxlmd, status);
trace_cxl_aer_correctable_error(cxlmd, status);
}
static void cxl_cper_trace_uncorr_prot_err(struct pci_dev *pdev,
static void
cxl_cper_trace_uncorr_prot_err(struct cxl_memdev *cxlmd,
struct cxl_ras_capability_regs ras_cap)
{
u32 status = ras_cap.uncor_status & ~ras_cap.uncor_mask;
struct cxl_dev_state *cxlds;
u32 fe;
cxlds = pci_get_drvdata(pdev);
if (!cxlds)
return;
if (hweight32(status) > 1)
fe = BIT(FIELD_GET(CXL_RAS_CAP_CONTROL_FE_MASK,
ras_cap.cap_control));
else
fe = status;
trace_cxl_aer_uncorrectable_error(cxlds->cxlmd, status, fe,
trace_cxl_aer_uncorrectable_error(cxlmd, status, fe,
ras_cap.header_log);
}
static int match_memdev_by_parent(struct device *dev, const void *uport)
{
if (is_cxl_memdev(dev) && dev->parent == uport)
return 1;
return 0;
}
static void cxl_cper_handle_prot_err(struct cxl_cper_prot_err_work_data *data)
{
unsigned int devfn = PCI_DEVFN(data->prot_err.agent_addr.device,
@ -73,13 +71,12 @@ static void cxl_cper_handle_prot_err(struct cxl_cper_prot_err_work_data *data)
pci_get_domain_bus_and_slot(data->prot_err.agent_addr.segment,
data->prot_err.agent_addr.bus,
devfn);
struct cxl_memdev *cxlmd;
int port_type;
if (!pdev)
return;
guard(device)(&pdev->dev);
port_type = pci_pcie_type(pdev);
if (port_type == PCI_EXP_TYPE_ROOT_PORT ||
port_type == PCI_EXP_TYPE_DOWNSTREAM ||
@ -92,10 +89,20 @@ static void cxl_cper_handle_prot_err(struct cxl_cper_prot_err_work_data *data)
return;
}
guard(device)(&pdev->dev);
if (!pdev->dev.driver)
return;
struct device *mem_dev __free(put_device) = bus_find_device(
&cxl_bus_type, NULL, pdev, match_memdev_by_parent);
if (!mem_dev)
return;
cxlmd = to_cxl_memdev(mem_dev);
if (data->severity == AER_CORRECTABLE)
cxl_cper_trace_corr_prot_err(pdev, data->ras_cap);
cxl_cper_trace_corr_prot_err(cxlmd, data->ras_cap);
else
cxl_cper_trace_uncorr_prot_err(pdev, data->ras_cap);
cxl_cper_trace_uncorr_prot_err(cxlmd, data->ras_cap);
}
static void cxl_cper_prot_err_work_fn(struct work_struct *work)

View file

@ -1209,7 +1209,9 @@ static int umc_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
if (csrow_enabled(2 * dimm + 1, ctrl, pvt))
cs_mode |= CS_ODD_PRIMARY;
/* Asymmetric dual-rank DIMM support. */
if (csrow_sec_enabled(2 * dimm, ctrl, pvt))
cs_mode |= CS_EVEN_SECONDARY;
if (csrow_sec_enabled(2 * dimm + 1, ctrl, pvt))
cs_mode |= CS_ODD_SECONDARY;
@ -1230,12 +1232,13 @@ static int umc_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
return cs_mode;
}
static int __addr_mask_to_cs_size(u32 addr_mask_orig, unsigned int cs_mode,
int csrow_nr, int dimm)
static int calculate_cs_size(u32 mask, unsigned int cs_mode)
{
u32 msb, weight, num_zero_bits;
u32 addr_mask_deinterleaved;
int size = 0;
int msb, weight, num_zero_bits;
u32 deinterleaved_mask;
if (!mask)
return 0;
/*
* The number of zero bits in the mask is equal to the number of bits
@ -1248,19 +1251,30 @@ static int __addr_mask_to_cs_size(u32 addr_mask_orig, unsigned int cs_mode,
* without swapping with the most significant bit. This can be handled
* by keeping the MSB where it is and ignoring the single zero bit.
*/
msb = fls(addr_mask_orig) - 1;
weight = hweight_long(addr_mask_orig);
msb = fls(mask) - 1;
weight = hweight_long(mask);
num_zero_bits = msb - weight - !!(cs_mode & CS_3R_INTERLEAVE);
/* Take the number of zero bits off from the top of the mask. */
addr_mask_deinterleaved = GENMASK_ULL(msb - num_zero_bits, 1);
deinterleaved_mask = GENMASK(msb - num_zero_bits, 1);
edac_dbg(1, " Deinterleaved AddrMask: 0x%x\n", deinterleaved_mask);
return (deinterleaved_mask >> 2) + 1;
}
static int __addr_mask_to_cs_size(u32 addr_mask, u32 addr_mask_sec,
unsigned int cs_mode, int csrow_nr, int dimm)
{
int size;
edac_dbg(1, "CS%d DIMM%d AddrMasks:\n", csrow_nr, dimm);
edac_dbg(1, " Original AddrMask: 0x%x\n", addr_mask_orig);
edac_dbg(1, " Deinterleaved AddrMask: 0x%x\n", addr_mask_deinterleaved);
edac_dbg(1, " Primary AddrMask: 0x%x\n", addr_mask);
/* Register [31:1] = Address [39:9]. Size is in kBs here. */
size = (addr_mask_deinterleaved >> 2) + 1;
size = calculate_cs_size(addr_mask, cs_mode);
edac_dbg(1, " Secondary AddrMask: 0x%x\n", addr_mask_sec);
size += calculate_cs_size(addr_mask_sec, cs_mode);
/* Return size in MBs. */
return size >> 10;
@ -1269,8 +1283,8 @@ static int __addr_mask_to_cs_size(u32 addr_mask_orig, unsigned int cs_mode,
static int umc_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
unsigned int cs_mode, int csrow_nr)
{
u32 addr_mask = 0, addr_mask_sec = 0;
int cs_mask_nr = csrow_nr;
u32 addr_mask_orig;
int dimm, size = 0;
/* No Chip Selects are enabled. */
@ -1308,13 +1322,13 @@ static int umc_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
if (!pvt->flags.zn_regs_v2)
cs_mask_nr >>= 1;
/* Asymmetric dual-rank DIMM support. */
if ((csrow_nr & 1) && (cs_mode & CS_ODD_SECONDARY))
addr_mask_orig = pvt->csels[umc].csmasks_sec[cs_mask_nr];
else
addr_mask_orig = pvt->csels[umc].csmasks[cs_mask_nr];
if (cs_mode & (CS_EVEN_PRIMARY | CS_ODD_PRIMARY))
addr_mask = pvt->csels[umc].csmasks[cs_mask_nr];
return __addr_mask_to_cs_size(addr_mask_orig, cs_mode, csrow_nr, dimm);
if (cs_mode & (CS_EVEN_SECONDARY | CS_ODD_SECONDARY))
addr_mask_sec = pvt->csels[umc].csmasks_sec[cs_mask_nr];
return __addr_mask_to_cs_size(addr_mask, addr_mask_sec, cs_mode, csrow_nr, dimm);
}
static void umc_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
@ -3512,9 +3526,10 @@ static void gpu_get_err_info(struct mce *m, struct err_info *err)
static int gpu_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
unsigned int cs_mode, int csrow_nr)
{
u32 addr_mask_orig = pvt->csels[umc].csmasks[csrow_nr];
u32 addr_mask = pvt->csels[umc].csmasks[csrow_nr];
u32 addr_mask_sec = pvt->csels[umc].csmasks_sec[csrow_nr];
return __addr_mask_to_cs_size(addr_mask_orig, cs_mode, csrow_nr, csrow_nr >> 1);
return __addr_mask_to_cs_size(addr_mask, addr_mask_sec, cs_mode, csrow_nr, csrow_nr >> 1);
}
static void gpu_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
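
calculate_cs_size() above strips the interleave zero bits out of a chip-select address mask before turning it into a size. The same arithmetic as a standalone program, with GCC/Clang builtins standing in for the kernel's fls()/hweight_long()/GENMASK() and a purely illustrative mask value:

#include <stdio.h>
#include <stdint.h>

/* Userspace stand-ins for the kernel helpers. */
#define fls(x)		(32 - __builtin_clz(x))
#define hweight(x)	((unsigned)__builtin_popcount(x))
#define GENMASK(h, l)	(((~0u) >> (31 - (h))) & (~0u << (l)))

/* Mirror of calculate_cs_size(): returns the size in kB units. */
static unsigned cs_size_kb(uint32_t mask, int three_rank_interleave)
{
	if (!mask)
		return 0;

	int msb = fls(mask) - 1;
	unsigned weight = hweight(mask);
	/* Each interleave contributes one zero bit inside the mask. */
	unsigned num_zero_bits = msb - weight - !!three_rank_interleave;
	uint32_t deinterleaved = GENMASK(msb - num_zero_bits, 1);

	/* Register [31:1] maps address [39:9], so the units are kB. */
	return (deinterleaved >> 2) + 1;
}

int main(void)
{
	uint32_t mask = 0x7fffdffe;	/* illustrative mask with one interleave hole */

	printf("size: %u MB\n", cs_size_kb(mask, 0) >> 10);
	return 0;
}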

View file

@ -321,10 +321,12 @@ static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev,
const struct firmware *fw;
int r;
r = request_firmware(&fw, fw_name, adev->dev);
r = firmware_request_nowarn(&fw, fw_name, adev->dev);
if (r) {
dev_err(adev->dev, "can't load firmware \"%s\"\n",
fw_name);
if (amdgpu_discovery == 2)
dev_err(adev->dev, "can't load firmware \"%s\"\n", fw_name);
else
drm_info(&adev->ddev, "Optional firmware \"%s\" was not found\n", fw_name);
return r;
}
@ -459,16 +461,12 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
/* Read from file if it is the preferred option */
fw_name = amdgpu_discovery_get_fw_name(adev);
if (fw_name != NULL) {
dev_info(adev->dev, "use ip discovery information from file");
drm_dbg(&adev->ddev, "use ip discovery information from file");
r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin, fw_name);
if (r) {
dev_err(adev->dev, "failed to read ip discovery binary from file\n");
r = -EINVAL;
if (r)
goto out;
}
} else {
drm_dbg(&adev->ddev, "use ip discovery information from memory");
r = amdgpu_discovery_read_binary_from_mem(
adev, adev->mman.discovery_bin);
if (r)
@ -1338,10 +1336,8 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
int r;
r = amdgpu_discovery_init(adev);
if (r) {
DRM_ERROR("amdgpu_discovery_init failed\n");
if (r)
return r;
}
wafl_ver = 0;
adev->gfx.xcc_mask = 0;
@ -2579,8 +2575,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
break;
default:
r = amdgpu_discovery_reg_base_init(adev);
if (r)
return -EINVAL;
if (r) {
drm_err(&adev->ddev, "discovery failed: %d\n", r);
return r;
}
amdgpu_discovery_harvest_ip(adev);
amdgpu_discovery_get_gfx_info(adev);

View file

@ -2235,6 +2235,25 @@ static int gfx_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
}
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(9, 0, 1):
case IP_VERSION(9, 2, 1):
case IP_VERSION(9, 4, 0):
case IP_VERSION(9, 2, 2):
case IP_VERSION(9, 1, 0):
case IP_VERSION(9, 3, 0):
adev->gfx.cleaner_shader_ptr = gfx_9_4_2_cleaner_shader_hex;
adev->gfx.cleaner_shader_size = sizeof(gfx_9_4_2_cleaner_shader_hex);
if (adev->gfx.me_fw_version >= 167 &&
adev->gfx.pfp_fw_version >= 196 &&
adev->gfx.mec_fw_version >= 474) {
adev->gfx.enable_cleaner_shader = true;
r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
if (r) {
adev->gfx.enable_cleaner_shader = false;
dev_err(adev->dev, "Failed to initialize cleaner shader\n");
}
}
break;
case IP_VERSION(9, 4, 2):
adev->gfx.cleaner_shader_ptr = gfx_9_4_2_cleaner_shader_hex;
adev->gfx.cleaner_shader_size = sizeof(gfx_9_4_2_cleaner_shader_hex);

View file

@ -1630,11 +1630,13 @@ static int mes_v11_0_hw_init(struct amdgpu_ip_block *ip_block)
if (r)
goto failure;
if ((adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x50) {
r = mes_v11_0_set_hw_resources_1(&adev->mes);
if (r) {
DRM_ERROR("failed mes_v11_0_set_hw_resources_1, r=%d\n", r);
goto failure;
}
}
r = mes_v11_0_query_sched_status(&adev->mes);
if (r) {

View file

@ -1742,6 +1742,7 @@ static int mes_v12_0_hw_init(struct amdgpu_ip_block *ip_block)
if (r)
goto failure;
if ((adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x4b)
mes_v12_0_set_hw_resources_1(&adev->mes, AMDGPU_MES_SCHED_PIPE);
mes_v12_0_init_aggregated_doorbell(&adev->mes);

View file

@ -1374,9 +1374,22 @@ static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
else
DRM_ERROR("Failed to allocate memory for SDMA IP Dump\n");
/* add firmware version checks here */
if (0 && !adev->sdma.disable_uq)
switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
case IP_VERSION(6, 0, 0):
if ((adev->sdma.instance[0].fw_version >= 24) && !adev->sdma.disable_uq)
adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
break;
case IP_VERSION(6, 0, 2):
if ((adev->sdma.instance[0].fw_version >= 21) && !adev->sdma.disable_uq)
adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
break;
case IP_VERSION(6, 0, 3):
if ((adev->sdma.instance[0].fw_version >= 25) && !adev->sdma.disable_uq)
adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
break;
default:
break;
}
r = amdgpu_sdma_sysfs_reset_mask_init(adev);
if (r)

View file

@ -1349,9 +1349,15 @@ static int sdma_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
else
DRM_ERROR("Failed to allocate memory for SDMA IP Dump\n");
/* add firmware version checks here */
if (0 && !adev->sdma.disable_uq)
switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
case IP_VERSION(7, 0, 0):
case IP_VERSION(7, 0, 1):
if ((adev->sdma.instance[0].fw_version >= 7836028) && !adev->sdma.disable_uq)
adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
break;
default:
break;
}
return r;
}

View file

@ -4718,16 +4718,16 @@ static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
return 1;
}
/* Rescale from [min..max] to [0..AMDGPU_MAX_BL_LEVEL] */
/* Rescale from [min..max] to [0..MAX_BACKLIGHT_LEVEL] */
static inline u32 scale_input_to_fw(int min, int max, u64 input)
{
return DIV_ROUND_CLOSEST_ULL(input * AMDGPU_MAX_BL_LEVEL, max - min);
return DIV_ROUND_CLOSEST_ULL(input * MAX_BACKLIGHT_LEVEL, max - min);
}
/* Rescale from [0..AMDGPU_MAX_BL_LEVEL] to [min..max] */
/* Rescale from [0..MAX_BACKLIGHT_LEVEL] to [min..max] */
static inline u32 scale_fw_to_input(int min, int max, u64 input)
{
return min + DIV_ROUND_CLOSEST_ULL(input * (max - min), AMDGPU_MAX_BL_LEVEL);
return min + DIV_ROUND_CLOSEST_ULL(input * (max - min), MAX_BACKLIGHT_LEVEL);
}
static void convert_custom_brightness(const struct amdgpu_dm_backlight_caps *caps,
@ -4947,7 +4947,7 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
drm_dbg(drm, "Backlight caps: min: %d, max: %d, ac %d, dc %d\n", min, max,
caps->ac_level, caps->dc_level);
} else
props.brightness = props.max_brightness = AMDGPU_MAX_BL_LEVEL;
props.brightness = props.max_brightness = MAX_BACKLIGHT_LEVEL;
if (caps->data_points && !(amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE))
drm_info(drm, "Using custom brightness curve\n");
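
The two helpers above do rounded linear rescaling between the panel's [min..max] range and the firmware's [0..MAX_BACKLIGHT_LEVEL] range; as in the driver, the input to scale_input_to_fw() is assumed to be pre-offset by min. A round-trip demo, assuming 255 as the firmware ceiling:

#include <stdio.h>
#include <stdint.h>

#define MAX_BACKLIGHT_LEVEL 255u

/* Round-to-nearest unsigned division, like the kernel's DIV_ROUND_CLOSEST_ULL. */
static uint64_t div_round_closest(uint64_t n, uint64_t d)
{
	return (n + d / 2) / d;
}

/* [0..max-min] -> [0..MAX_BACKLIGHT_LEVEL] */
static uint32_t scale_input_to_fw(int min, int max, uint64_t input)
{
	return div_round_closest(input * MAX_BACKLIGHT_LEVEL, max - min);
}

/* [0..MAX_BACKLIGHT_LEVEL] -> [min..max] */
static uint32_t scale_fw_to_input(int min, int max, uint64_t input)
{
	return min + div_round_closest(input * (max - min), MAX_BACKLIGHT_LEVEL);
}

int main(void)
{
	int min = 8, max = 512;

	/* Round-trip a few panel levels through the firmware scale. */
	for (int level = min; level <= max; level += 126) {
		uint32_t fw = scale_input_to_fw(min, max, level - min);
		printf("panel %3d -> fw %3u -> panel %3u\n",
		       level, fw, scale_fw_to_input(min, max, fw));
	}
	return 0;
}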

View file

@ -1029,6 +1029,10 @@ enum dc_edid_status dm_helpers_read_local_edid(
return EDID_NO_RESPONSE;
edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw()
if (!edid ||
edid->extensions >= sizeof(sink->dc_edid.raw_edid) / EDID_LENGTH)
return EDID_BAD_INPUT;
sink->dc_edid.length = EDID_LENGTH * (edid->extensions + 1);
memmove(sink->dc_edid.raw_edid, (uint8_t *)edid, sink->dc_edid.length);
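
The dm_helpers fix above bounds-checks the EDID extension count against the fixed sink buffer before the memmove. The arithmetic in isolation, with an invented four-block buffer size standing in for sizeof(sink->dc_edid.raw_edid):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define EDID_LENGTH	128			/* one EDID block */
#define RAW_EDID_SIZE	(EDID_LENGTH * 4)	/* invented sink buffer: 4 blocks */

/* Returns the number of bytes copied, or 0 if the EDID would overflow. */
static size_t copy_edid(uint8_t *dst, const uint8_t *edid, unsigned extensions)
{
	/* Base block plus extension blocks must fit in the sink buffer. */
	if (extensions >= RAW_EDID_SIZE / EDID_LENGTH)
		return 0;

	size_t len = (size_t)EDID_LENGTH * (extensions + 1);
	memmove(dst, edid, len);
	return len;
}

int main(void)
{
	static uint8_t src[EDID_LENGTH * 8], dst[RAW_EDID_SIZE];

	printf("1 extension:  %zu bytes\n", copy_edid(dst, src, 1));	/* fits */
	printf("7 extensions: %zu bytes\n", copy_edid(dst, src, 7));	/* rejected */
	return 0;
}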

View file

@ -348,10 +348,16 @@ static void ti_sn65dsi86_enable_comms(struct ti_sn65dsi86 *pdata,
* 200 ms. We'll assume that the panel driver will have the hardcoded
* delay in its prepare and always disable HPD.
*
* If HPD somehow makes sense on some future panel we'll have to
* change this to be conditional on someone specifying that HPD should
* be used.
* For the DisplayPort bridge type we need HPD, so use the bridge type
* to conditionally disable HPD.
* NOTE: The bridge type is set in ti_sn_bridge_probe() but enable_comms()
* can be called before that, so for DisplayPort, HPD will be enabled once
* the bridge type is set. We use the bridge type instead of the "no-hpd"
* property because that property is not used consistently in devicetree
* descriptions and is therefore unreliable.
*/
if (pdata->bridge.type != DRM_MODE_CONNECTOR_DisplayPort)
regmap_update_bits(pdata->regmap, SN_HPD_DISABLE_REG, HPD_DISABLE,
HPD_DISABLE);
@ -1195,9 +1201,14 @@ static enum drm_connector_status ti_sn_bridge_detect(struct drm_bridge *bridge)
struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
int val = 0;
pm_runtime_get_sync(pdata->dev);
/*
* Runtime reference is grabbed in ti_sn_bridge_hpd_enable()
* as the chip won't report HPD just after being powered on.
* HPD_DEBOUNCED_STATE reflects the correct state only after the
* debounce time (~100-400 ms).
*/
regmap_read(pdata->regmap, SN_HPD_DISABLE_REG, &val);
pm_runtime_put_autosuspend(pdata->dev);
return val & HPD_DEBOUNCED_STATE ? connector_status_connected
: connector_status_disconnected;
@ -1220,6 +1231,26 @@ static void ti_sn65dsi86_debugfs_init(struct drm_bridge *bridge, struct dentry *
debugfs_create_file("status", 0600, debugfs, pdata, &status_fops);
}
static void ti_sn_bridge_hpd_enable(struct drm_bridge *bridge)
{
struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
/*
* The device needs to be powered on before reading the HPD state
* for reliable HPD detection in ti_sn_bridge_detect() due to
* the high debounce time.
*/
pm_runtime_get_sync(pdata->dev);
}
static void ti_sn_bridge_hpd_disable(struct drm_bridge *bridge)
{
struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
pm_runtime_put_autosuspend(pdata->dev);
}
static const struct drm_bridge_funcs ti_sn_bridge_funcs = {
.attach = ti_sn_bridge_attach,
.detach = ti_sn_bridge_detach,
@ -1234,6 +1265,8 @@ static const struct drm_bridge_funcs ti_sn_bridge_funcs = {
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.debugfs_init = ti_sn65dsi86_debugfs_init,
.hpd_enable = ti_sn_bridge_hpd_enable,
.hpd_disable = ti_sn_bridge_hpd_disable,
};
static void ti_sn_bridge_parse_lanes(struct ti_sn65dsi86 *pdata,
@ -1321,8 +1354,26 @@ static int ti_sn_bridge_probe(struct auxiliary_device *adev,
pdata->bridge.type = pdata->next_bridge->type == DRM_MODE_CONNECTOR_DisplayPort
? DRM_MODE_CONNECTOR_DisplayPort : DRM_MODE_CONNECTOR_eDP;
if (pdata->bridge.type == DRM_MODE_CONNECTOR_DisplayPort)
pdata->bridge.ops = DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_DETECT;
if (pdata->bridge.type == DRM_MODE_CONNECTOR_DisplayPort) {
pdata->bridge.ops = DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_DETECT |
DRM_BRIDGE_OP_HPD;
/*
* If comms were already enabled they would have been enabled
* with the wrong value of HPD_DISABLE. Update it now. Comms
* could be enabled if anyone is holding a pm_runtime reference
* (like if a GPIO is in use). Note that in most cases nobody
* is doing AUX channel xfers before the bridge is added so
* HPD doesn't _really_ matter then. The only exception is in
* the eDP case where the panel wants to read the EDID before
* the bridge is added. We consistently keep HPD disabled
* for eDP.
*/
mutex_lock(&pdata->comms_mutex);
if (pdata->comms_enabled)
regmap_update_bits(pdata->regmap, SN_HPD_DISABLE_REG,
HPD_DISABLE, 0);
mutex_unlock(&pdata->comms_mutex);
}
drm_bridge_add(&pdata->bridge);

View file

@ -708,11 +708,14 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
if (bridge_connector->bridge_hdmi_audio ||
bridge_connector->bridge_dp_audio) {
struct device *dev;
struct drm_bridge *bridge;
if (bridge_connector->bridge_hdmi_audio)
dev = bridge_connector->bridge_hdmi_audio->hdmi_audio_dev;
bridge = bridge_connector->bridge_hdmi_audio;
else
dev = bridge_connector->bridge_dp_audio->hdmi_audio_dev;
bridge = bridge_connector->bridge_dp_audio;
dev = bridge->hdmi_audio_dev;
ret = drm_connector_hdmi_audio_init(connector, dev,
&drm_bridge_connector_hdmi_audio_funcs,

View file

@ -725,7 +725,7 @@ ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
* monitor doesn't power down exactly after the throw away read.
*/
if (!aux->is_remote) {
ret = drm_dp_dpcd_probe(aux, DP_DPCD_REV);
ret = drm_dp_dpcd_probe(aux, DP_LANE0_1_STATUS);
if (ret < 0)
return ret;
}

View file

@ -343,17 +343,18 @@ EXPORT_SYMBOL(drm_writeback_connector_init_with_encoder);
/**
* drm_writeback_connector_cleanup - Cleanup the writeback connector
* @dev: DRM device
* @wb_connector: Pointer to the writeback connector to clean up
* @data: Pointer to the writeback connector to clean up
*
* This will decrement the reference counter of blobs and destroy properties. It
* will also clean the remaining jobs in this writeback connector. Caution: This helper will not
* clean up the attached encoder and the drm_connector.
*/
static void drm_writeback_connector_cleanup(struct drm_device *dev,
struct drm_writeback_connector *wb_connector)
void *data)
{
unsigned long flags;
struct drm_writeback_job *pos, *n;
struct drm_writeback_connector *wb_connector = data;
delete_writeback_properties(dev);
drm_property_blob_put(wb_connector->pixel_formats_blob_ptr);
@ -405,7 +406,7 @@ int drmm_writeback_connector_init(struct drm_device *dev,
if (ret)
return ret;
ret = drmm_add_action_or_reset(dev, (void *)drm_writeback_connector_cleanup,
ret = drmm_add_action_or_reset(dev, drm_writeback_connector_cleanup,
wb_connector);
if (ret)
return ret;
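
The drm_writeback change above gives the cleanup callback the void * signature the managed-action API expects instead of casting the function pointer, since calling through a mismatched function-pointer type is undefined behaviour even when it happens to work. The general pattern, with generic non-DRM names invented for the demo:

#include <stdio.h>

struct widget { const char *name; };

/* Action callbacks take void *; the fix is to match this signature
 * and cast inside the callback rather than casting the function. */
typedef void (*action_fn)(void *data);

static void widget_cleanup(void *data)
{
	struct widget *w = data;	/* cast inside the callback */

	printf("cleaning up %s\n", w->name);
}

static void run_action(action_fn fn, void *data)
{
	fn(data);
}

int main(void)
{
	struct widget w = { "wb_connector" };

	run_action(widget_cleanup, &w);
	return 0;
}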

View file

@ -103,7 +103,7 @@ static void get_ana_cp_int_prop(u64 vco_clk,
DIV_ROUND_DOWN_ULL(curve_1_interpolated, CURVE0_MULTIPLIER)));
ana_cp_int_temp =
DIV_ROUND_CLOSEST_ULL(DIV_ROUND_DOWN_ULL(adjusted_vco_clk1, curve_2_scaled1),
DIV64_U64_ROUND_CLOSEST(DIV_ROUND_DOWN_ULL(adjusted_vco_clk1, curve_2_scaled1),
CURVE2_MULTIPLIER);
*ana_cp_int = max(1, min(ana_cp_int_temp, 127));
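
The fix above switches to DIV64_U64_ROUND_CLOSEST because DIV_ROUND_CLOSEST_ULL ultimately divides through a 32-bit divisor path, so a divisor above 2^32 gets truncated. A userspace demonstration of the two behaviours, with invented values:

#include <stdio.h>
#include <stdint.h>

/* 64-bit dividend, 32-bit divisor: mirrors DIV_ROUND_CLOSEST_ULL's truncation. */
static uint64_t div_round_closest_ull(uint64_t n, uint32_t d)
{
	return (n + d / 2) / d;
}

/* Full 64-by-64 rounded division: mirrors DIV64_U64_ROUND_CLOSEST. */
static uint64_t div64_u64_round_closest(uint64_t n, uint64_t d)
{
	return (n + d / 2) / d;
}

int main(void)
{
	uint64_t n = 10000000000000ull;	/* invented VCO-scale dividend */
	uint64_t d = 5000000000ull;	/* divisor above 2^32 */

	/* Passing d through a u32 parameter silently truncates it. */
	printf("truncated divisor:  %llu\n",
	       (unsigned long long)div_round_closest_ull(n, (uint32_t)d));
	printf("full-width divisor: %llu\n",
	       (unsigned long long)div64_u64_round_closest(n, d));
	return 0;
}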

View file

@ -108,7 +108,7 @@ static unsigned int config_bit(const u64 config)
return other_bit(config);
}
static u32 config_mask(const u64 config)
static __always_inline u32 config_mask(const u64 config)
{
unsigned int bit = config_bit(config);

View file

@ -104,6 +104,8 @@ int xe_display_create(struct xe_device *xe)
spin_lock_init(&xe->display.fb_tracking.lock);
xe->display.hotplug.dp_wq = alloc_ordered_workqueue("xe-dp", 0);
if (!xe->display.hotplug.dp_wq)
return -ENOMEM;
return drmm_add_action_or_reset(&xe->drm, display_destroy, NULL);
}

Some files were not shown because too many files have changed in this diff.