Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes after downstream PR (net-6.16-rc3).
No conflicts or adjacent changes.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

commit 62deb67fc5
269 changed files with 4455 additions and 1495 deletions
9 .mailmap

@ -197,6 +197,7 @@ Daniel Borkmann <daniel@iogearbox.net> <daniel.borkmann@tik.ee.ethz.ch>
Daniel Borkmann <daniel@iogearbox.net> <dborkmann@redhat.com>
Daniel Borkmann <daniel@iogearbox.net> <dborkman@redhat.com>
Daniel Borkmann <daniel@iogearbox.net> <dxchgb@gmail.com>
Danilo Krummrich <dakr@kernel.org> <dakr@redhat.com>
David Brownell <david-b@pacbell.net>
David Collins <quic_collinsd@quicinc.com> <collinsd@codeaurora.org>
David Heidelberg <david@ixit.cz> <d.okias@gmail.com>

@ -282,6 +283,7 @@ Gustavo Padovan <gustavo@las.ic.unicamp.br>
Gustavo Padovan <padovan@profusion.mobi>
Hamza Mahfooz <hamzamahfooz@linux.microsoft.com> <hamza.mahfooz@amd.com>
Hanjun Guo <guohanjun@huawei.com> <hanjun.guo@linaro.org>
Hans de Goede <hansg@kernel.org> <hdegoede@redhat.com>
Hans Verkuil <hverkuil@xs4all.nl> <hansverk@cisco.com>
Hans Verkuil <hverkuil@xs4all.nl> <hverkuil-cisco@xs4all.nl>
Harry Yoo <harry.yoo@oracle.com> <42.hyeyoo@gmail.com>

@ -691,9 +693,10 @@ Serge Hallyn <sergeh@kernel.org> <serge.hallyn@canonical.com>
Serge Hallyn <sergeh@kernel.org> <serue@us.ibm.com>
Seth Forshee <sforshee@kernel.org> <seth.forshee@canonical.com>
Shakeel Butt <shakeel.butt@linux.dev> <shakeelb@google.com>
Shannon Nelson <shannon.nelson@amd.com> <snelson@pensando.io>
Shannon Nelson <shannon.nelson@amd.com> <shannon.nelson@intel.com>
Shannon Nelson <shannon.nelson@amd.com> <shannon.nelson@oracle.com>
Shannon Nelson <sln@onemain.com> <shannon.nelson@amd.com>
Shannon Nelson <sln@onemain.com> <snelson@pensando.io>
Shannon Nelson <sln@onemain.com> <shannon.nelson@intel.com>
Shannon Nelson <sln@onemain.com> <shannon.nelson@oracle.com>
Sharath Chandra Vurukala <quic_sharathv@quicinc.com> <sharathv@codeaurora.org>
Shiraz Hashim <shiraz.linux.kernel@gmail.com> <shiraz.hashim@st.com>
Shuah Khan <shuah@kernel.org> <shuahkhan@gmail.com>
@ -270,6 +270,8 @@ configured for Unix Extensions (and the client has not disabled
illegal Windows/NTFS/SMB characters to a remap range (this mount parameter
is the default for SMB3). This remap (``mapposix``) range is also
compatible with Mac (and "Services for Mac" on some older Windows).
When POSIX Extensions for SMB 3.1.1 are negotiated, remapping is automatically
disabled.

CIFS VFS Mount Options
======================
@ -352,6 +352,83 @@ For reaching best IO performance, ublk server should align its segment
parameter of `struct ublk_param_segment` with backend for avoiding
unnecessary IO split, which usually hurts io_uring performance.

Auto Buffer Registration
------------------------

The ``UBLK_F_AUTO_BUF_REG`` feature automatically handles buffer registration
and unregistration for I/O requests, which simplifies buffer management and
reduces overhead in the ublk server implementation.

This is another feature flag for using zero copy, and it is compatible with
``UBLK_F_SUPPORT_ZERO_COPY``.

Feature Overview
~~~~~~~~~~~~~~~~

This feature automatically registers request buffers with the io_uring context
before delivering I/O commands to the ublk server and unregisters them when
completing I/O commands. This eliminates the need for manual buffer
registration/unregistration via the ``UBLK_IO_REGISTER_IO_BUF`` and
``UBLK_IO_UNREGISTER_IO_BUF`` commands, so I/O handling in the ublk server no
longer depends on those two uring_cmd operations.

I/Os cannot be issued to io_uring concurrently if there is any dependency
among them. Removing the dependency on the buffer registration and
unregistration commands therefore not only simplifies the ublk server
implementation, but also makes concurrent I/O handling possible.

Usage Requirements
~~~~~~~~~~~~~~~~~~

1. The ublk server must create a sparse buffer table on the same ``io_ring_ctx``
   used for ``UBLK_IO_FETCH_REQ`` and ``UBLK_IO_COMMIT_AND_FETCH_REQ``. If the
   uring_cmd is issued on a different ``io_ring_ctx``, manual buffer
   unregistration is required.

2. Buffer registration data must be passed via the uring_cmd's ``sqe->addr``
   with the following structure::

    struct ublk_auto_buf_reg {
            __u16 index;      /* Buffer index for registration */
            __u8 flags;       /* Registration flags */
            __u8 reserved0;   /* Reserved for future use */
            __u32 reserved1;  /* Reserved for future use */
    };

   ``ublk_auto_buf_reg_to_sqe_addr()`` converts the above structure into the
   value to store in ``sqe->addr``.

3. All reserved fields in ``ublk_auto_buf_reg`` must be zeroed.

4. Optional flags can be passed via ``ublk_auto_buf_reg.flags`` (see the sketch
   below).
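A minimal, hedged sketch of how a ublk server might prepare the registration
data for a fetch/commit SQE. The struct and the helper name come from the text
above; the exact helper signature, the flag value used, and the surrounding
liburing-style setup are assumptions for illustration only::

    #include <liburing.h>
    #include <linux/ublk_cmd.h>   /* struct ublk_auto_buf_reg (assumed location) */

    /* Illustrative only: attach auto buffer registration data to an SQE. */
    static void ublk_prep_auto_buf_reg(struct io_uring_sqe *sqe, __u16 buf_index)
    {
            struct ublk_auto_buf_reg reg = {
                    .index = buf_index,                  /* slot in the sparse buffer table */
                    .flags = UBLK_AUTO_BUF_REG_FALLBACK, /* optional: request fallback on failure */
                    /* reserved0 / reserved1 must remain zero */
            };

            /* The kernel reads the registration data from sqe->addr. */
            sqe->addr = ublk_auto_buf_reg_to_sqe_addr(&reg);
    }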

Fallback Behavior
~~~~~~~~~~~~~~~~~

If auto buffer registration fails:

1. When ``UBLK_AUTO_BUF_REG_FALLBACK`` is enabled:

   - The uring_cmd is completed
   - ``UBLK_IO_F_NEED_REG_BUF`` is set in ``ublksrv_io_desc.op_flags``
   - The ublk server must handle the failure itself, for example by registering
     the buffer manually or by using the user-copy feature to retrieve the data
     for the ublk I/O (see the sketch below)

2. If fallback is not enabled:

   - The ublk I/O request fails silently
   - The uring_cmd won't be completed
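A short, hedged illustration of the fallback check. The field and flag names
are taken from the documentation above; the helper called on failure is
hypothetical::

    /* Illustrative only: per-I/O handling with UBLK_AUTO_BUF_REG_FALLBACK enabled. */
    static int ublk_handle_io(const struct ublksrv_io_desc *iod, __u16 buf_index)
    {
            if (iod->op_flags & UBLK_IO_F_NEED_REG_BUF) {
                    /*
                     * Auto registration failed: fall back to a manual
                     * UBLK_IO_REGISTER_IO_BUF uring_cmd, or use user copy.
                     */
                    return ublk_register_io_buf_manually(buf_index); /* hypothetical helper */
            }

            /* Buffer was registered automatically; use buf_index directly. */
            return 0;
    }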

Limitations
~~~~~~~~~~~

- Requires the same ``io_ring_ctx`` for all operations
- May require manual buffer management in fallback cases
- The io_ring_ctx buffer table has a maximum size of 16K entries, which may not
  be enough when a single io_ring_ctx handles many ublk devices, each with a
  very large queue depth

References
==========
@ -1,65 +0,0 @@
Device-tree bindings for persistent memory regions
-----------------------------------------------------

Persistent memory refers to a class of memory devices that are:

	a) Usable as main system memory (i.e. cacheable), and
	b) Retain their contents across power failure.

Given b) it is best to think of persistent memory as a kind of memory mapped
storage device. To ensure data integrity the operating system needs to manage
persistent regions separately to the normal memory pool. To aid with that this
binding provides a standardised interface for discovering where persistent
memory regions exist inside the physical address space.

Bindings for the region nodes:
-----------------------------

Required properties:
	- compatible = "pmem-region"

	- reg = <base, size>;
		The reg property should specify an address range that is
		translatable to a system physical address range. This address
		range should be mappable as normal system memory would be
		(i.e cacheable).

		If the reg property contains multiple address ranges
		each address range will be treated as though it was specified
		in a separate device node. Having multiple address ranges in a
		node implies no special relationship between the two ranges.

Optional properties:
	- Any relevant NUMA associativity properties for the target platform.

	- volatile; This property indicates that this region is actually
	  backed by non-persistent memory. This lets the OS know that it
	  may skip the cache flushes required to ensure data is made
	  persistent after a write.

	  If this property is absent then the OS must assume that the region
	  is backed by non-volatile memory.

Examples:
--------------------

	/*
	 * This node specifies one 4KB region spanning from
	 * 0x5000 to 0x5fff that is backed by non-volatile memory.
	 */
	pmem@5000 {
		compatible = "pmem-region";
		reg = <0x00005000 0x00001000>;
	};

	/*
	 * This node specifies two 4KB regions that are backed by
	 * volatile (normal) memory.
	 */
	pmem@6000 {
		compatible = "pmem-region";
		reg = < 0x00006000 0x00001000
			0x00008000 0x00001000 >;
		volatile;
	};
48 Documentation/devicetree/bindings/pmem/pmem-region.yaml (new file)

@ -0,0 +1,48 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/pmem-region.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

maintainers:
  - Oliver O'Halloran <oohall@gmail.com>

title: Persistent Memory Regions

description: |
  Persistent memory refers to a class of memory devices that are:

    a) Usable as main system memory (i.e. cacheable), and
    b) Retain their contents across power failure.

  Given b) it is best to think of persistent memory as a kind of memory mapped
  storage device. To ensure data integrity the operating system needs to manage
  persistent regions separately to the normal memory pool. To aid with that this
  binding provides a standardised interface for discovering where persistent
  memory regions exist inside the physical address space.

properties:
  compatible:
    const: pmem-region

  reg:
    maxItems: 1

  volatile:
    description:
      Indicates the region is volatile (non-persistent) and the OS can skip
      cache flushes for writes
    type: boolean

required:
  - compatible
  - reg

additionalProperties: false

examples:
  - |
    pmem@5000 {
        compatible = "pmem-region";
        reg = <0x00005000 0x00001000>;
    };
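The ``volatile`` property tells the OS it may skip the cache flushes otherwise
needed for persistence. As a rough illustration only (not the actual of_pmem
driver code), a platform driver could test it with the standard OF helper; the
device pointer and any region handling around it are assumed::

    #include <linux/of.h>
    #include <linux/device.h>

    /* Sketch: decide whether a pmem-region needs flushes to make writes persistent. */
    static bool pmem_region_is_volatile(struct device *dev)
    {
            /* "volatile" is a boolean property per the binding above. */
            return of_property_read_bool(dev->of_node, "volatile");
    }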
@ -584,7 +584,6 @@ encoded manner. The codes are the following:
    ms    may share
    gd    stack segment growns down
    pf    pure PFN range
    dw    disabled write to the mapped file
    lo    pages are locked in memory
    io    memory mapped I/O area
    sr    sequential read advise provided

@ -607,8 +606,11 @@ encoded manner. The codes are the following:
    mt    arm64 MTE allocation tags are enabled
    um    userfaultfd missing tracking
    uw    userfaultfd wr-protect tracking
    ui    userfaultfd minor fault
    ss    shadow/guarded control stack page
    sl    sealed
    lf    lock on fault pages
    dp    always lazily freeable mapping
    ==    =======================================

Note that there is no guarantee that every flag and associated mnemonic will
@ -7,6 +7,9 @@ protocol: genetlink-legacy
doc: Partial family for Ethtool Netlink.
uapi-header: linux/ethtool_netlink_generated.h

c-family-name: ethtool-genl-name
c-version-name: ethtool-genl-version

definitions:
  -
    name: udp-tunnel-type
@ -290,6 +290,7 @@ an involved disclosed party. The current ambassadors list:
  AMD        Tom Lendacky <thomas.lendacky@amd.com>
  Ampere     Darren Hart <darren@os.amperecomputing.com>
  ARM        Catalin Marinas <catalin.marinas@arm.com>
  IBM Power  Madhavan Srinivasan <maddy@linux.ibm.com>
  IBM Z      Christian Borntraeger <borntraeger@de.ibm.com>
  Intel      Tony Luck <tony.luck@intel.com>
  Qualcomm   Trilok Soni <quic_tsoni@quicinc.com>
83 MAINTAINERS

@ -207,7 +207,7 @@ X: arch/*/include/uapi/
X: include/uapi/

ABIT UGURU 1,2 HARDWARE MONITOR DRIVER
M: Hans de Goede <hdegoede@redhat.com>
M: Hans de Goede <hansg@kernel.org>
L: linux-hwmon@vger.kernel.org
S: Maintained
F: drivers/hwmon/abituguru.c

@ -371,7 +371,7 @@ S: Maintained
F: drivers/platform/x86/quickstart.c

ACPI SERIAL MULTI INSTANTIATE DRIVER
M: Hans de Goede <hdegoede@redhat.com>
M: Hans de Goede <hansg@kernel.org>
L: platform-driver-x86@vger.kernel.org
S: Maintained
F: drivers/platform/x86/serial-multi-instantiate.c

@ -1157,7 +1157,6 @@ F: arch/x86/include/asm/amd/node.h
F: arch/x86/kernel/amd_node.c

AMD PDS CORE DRIVER
M: Shannon Nelson <shannon.nelson@amd.com>
M: Brett Creeley <brett.creeley@amd.com>
L: netdev@vger.kernel.org
S: Maintained

@ -3551,7 +3550,7 @@ F: arch/arm64/boot/Makefile
F: scripts/make_fit.py

ARM64 PLATFORM DRIVERS
M: Hans de Goede <hdegoede@redhat.com>
M: Hans de Goede <hansg@kernel.org>
M: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
R: Bryan O'Donoghue <bryan.odonoghue@linaro.org>
L: platform-driver-x86@vger.kernel.org

@ -3712,7 +3711,7 @@ F: drivers/platform/x86/asus*.c
F: drivers/platform/x86/eeepc*.c

ASUS TF103C DOCK DRIVER
M: Hans de Goede <hdegoede@redhat.com>
M: Hans de Goede <hansg@kernel.org>
L: platform-driver-x86@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/pdx86/platform-drivers-x86.git

@ -5614,14 +5613,14 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/peter.chen/usb.git
F: drivers/usb/chipidea/

CHIPONE ICN8318 I2C TOUCHSCREEN DRIVER
M: Hans de Goede <hdegoede@redhat.com>
M: Hans de Goede <hansg@kernel.org>
L: linux-input@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/input/touchscreen/chipone,icn8318.yaml
F: drivers/input/touchscreen/chipone_icn8318.c

CHIPONE ICN8505 I2C TOUCHSCREEN DRIVER
M: Hans de Goede <hdegoede@redhat.com>
M: Hans de Goede <hansg@kernel.org>
L: linux-input@vger.kernel.org
S: Maintained
F: drivers/input/touchscreen/chipone_icn8505.c

@ -6255,6 +6254,7 @@ F: include/linux/cpuhotplug.h
F: include/linux/smpboot.h
F: kernel/cpu.c
F: kernel/smpboot.*
F: rust/helper/cpu.c
F: rust/kernel/cpu.rs

CPU IDLE TIME MANAGEMENT FRAMEWORK

@ -6918,7 +6918,7 @@ F: include/dt-bindings/pmu/exynos_ppmu.h
F: include/linux/devfreq-event.h

DEVICE RESOURCE MANAGEMENT HELPERS
M: Hans de Goede <hdegoede@redhat.com>
M: Hans de Goede <hansg@kernel.org>
R: Matti Vaittinen <mazziesaccount@gmail.com>
S: Maintained
F: include/linux/devm-helpers.h

@ -7517,7 +7517,7 @@ F: drivers/gpu/drm/gud/
F: include/drm/gud.h

DRM DRIVER FOR GRAIN MEDIA GM12U320 PROJECTORS
M: Hans de Goede <hdegoede@redhat.com>
M: Hans de Goede <hansg@kernel.org>
S: Maintained
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/tiny/gm12u320.c

@ -7917,7 +7917,7 @@ F: drivers/gpu/drm/ci/xfails/vkms*
F: drivers/gpu/drm/vkms/

DRM DRIVER FOR VIRTUALBOX VIRTUAL GPU
M: Hans de Goede <hdegoede@redhat.com>
M: Hans de Goede <hansg@kernel.org>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git

@ -8318,7 +8318,7 @@ F: drivers/gpu/drm/panel/
F: include/drm/drm_panel.h

DRM PRIVACY-SCREEN CLASS
M: Hans de Goede <hdegoede@redhat.com>
M: Hans de Goede <hansg@kernel.org>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git

@ -9941,7 +9941,6 @@ F: drivers/fwctl/mlx5/

FWCTL PDS DRIVER
M: Brett Creeley <brett.creeley@amd.com>
R: Shannon Nelson <shannon.nelson@amd.com>
L: linux-kernel@vger.kernel.org
S: Maintained
F: drivers/fwctl/pds/

@ -10222,7 +10221,7 @@ S: Maintained
F: Documentation/devicetree/bindings/connector/gocontroll,moduline-module-slot.yaml

GOODIX TOUCHSCREEN
M: Hans de Goede <hdegoede@redhat.com>
M: Hans de Goede <hansg@kernel.org>
L: linux-input@vger.kernel.org
S: Maintained
F: drivers/input/touchscreen/goodix*

@ -10261,7 +10260,7 @@ F: include/dt-bindings/clock/google,gs101.h
K: [gG]oogle.?[tT]ensor

GPD POCKET FAN DRIVER
M: Hans de Goede <hdegoede@redhat.com>
M: Hans de Goede <hansg@kernel.org>
L: platform-driver-x86@vger.kernel.org
S: Maintained
F: drivers/platform/x86/gpd-pocket-fan.c

@ -11422,7 +11421,7 @@ F: drivers/i2c/busses/i2c-via.c
F: drivers/i2c/busses/i2c-viapro.c

I2C/SMBUS INTEL CHT WHISKEY COVE PMIC DRIVER
M: Hans de Goede <hdegoede@redhat.com>
M: Hans de Goede <hansg@kernel.org>
L: linux-i2c@vger.kernel.org
S: Maintained
F: drivers/i2c/busses/i2c-cht-wc.c

@ -12012,13 +12011,13 @@ S: Supported
F: sound/soc/intel/

INTEL ATOMISP2 DUMMY / POWER-MANAGEMENT DRIVER
M: Hans de Goede <hdegoede@redhat.com>
M: Hans de Goede <hansg@kernel.org>
L: platform-driver-x86@vger.kernel.org
S: Maintained
F: drivers/platform/x86/intel/atomisp2/pm.c

INTEL ATOMISP2 LED DRIVER
M: Hans de Goede <hdegoede@redhat.com>
M: Hans de Goede <hansg@kernel.org>
L: platform-driver-x86@vger.kernel.org
S: Maintained
F: drivers/platform/x86/intel/atomisp2/led.c

@ -13679,7 +13678,7 @@ S: Maintained
F: drivers/platform/x86/lenovo-wmi-hotkey-utilities.c

LETSKETCH HID TABLET DRIVER
M: Hans de Goede <hdegoede@redhat.com>
M: Hans de Goede <hansg@kernel.org>
L: linux-input@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/hid/hid.git

@ -13729,7 +13728,7 @@ F: drivers/ata/sata_gemini.c
F: drivers/ata/sata_gemini.h

LIBATA SATA AHCI PLATFORM devices support
M: Hans de Goede <hdegoede@redhat.com>
M: Hans de Goede <hansg@kernel.org>
L: linux-ide@vger.kernel.org
S: Maintained
F: drivers/ata/ahci_platform.c

@ -13799,7 +13798,7 @@ M: Oliver O'Halloran <oohall@gmail.com>
L: nvdimm@lists.linux.dev
S: Supported
Q: https://patchwork.kernel.org/project/linux-nvdimm/list/
F: Documentation/devicetree/bindings/pmem/pmem-region.txt
F: Documentation/devicetree/bindings/pmem/pmem-region.yaml
F: drivers/nvdimm/of_pmem.c

LIBNVDIMM: NON-VOLATILE MEMORY DEVICE SUBSYSTEM

@ -14099,7 +14098,7 @@ F: Documentation/admin-guide/ldm.rst
F: block/partitions/ldm.*

LOGITECH HID GAMING KEYBOARDS
M: Hans de Goede <hdegoede@redhat.com>
M: Hans de Goede <hansg@kernel.org>
L: linux-input@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/hid/hid.git

@ -14781,7 +14780,7 @@ F: Documentation/devicetree/bindings/power/supply/maxim,max17040.yaml
F: drivers/power/supply/max17040_battery.c

MAXIM MAX17042 FAMILY FUEL GAUGE DRIVERS
R: Hans de Goede <hdegoede@redhat.com>
R: Hans de Goede <hansg@kernel.org>
R: Krzysztof Kozlowski <krzk@kernel.org>
R: Marek Szyprowski <m.szyprowski@samsung.com>
R: Sebastian Krzyszkowiak <sebastian.krzyszkowiak@puri.sm>

@ -15583,7 +15582,7 @@ Q: https://patchwork.kernel.org/project/netdevbpf/list/
F: drivers/net/ethernet/mellanox/mlxfw/

MELLANOX HARDWARE PLATFORM SUPPORT
M: Hans de Goede <hdegoede@redhat.com>
M: Hans de Goede <hansg@kernel.org>
M: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
M: Vadim Pasternak <vadimp@nvidia.com>
L: platform-driver-x86@vger.kernel.org

@ -15920,6 +15919,7 @@ R: Liam R. Howlett <Liam.Howlett@oracle.com>
R: Nico Pache <npache@redhat.com>
R: Ryan Roberts <ryan.roberts@arm.com>
R: Dev Jain <dev.jain@arm.com>
R: Barry Song <baohua@kernel.org>
L: linux-mm@kvack.org
S: Maintained
W: http://www.linux-mm.org

@ -16539,7 +16539,7 @@ S: Maintained
F: drivers/platform/surface/surface_gpe.c

MICROSOFT SURFACE HARDWARE PLATFORM SUPPORT
M: Hans de Goede <hdegoede@redhat.com>
M: Hans de Goede <hansg@kernel.org>
M: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
M: Maximilian Luz <luzmaximilian@gmail.com>
L: platform-driver-x86@vger.kernel.org

@ -17707,7 +17707,7 @@ F: tools/include/nolibc/
F: tools/testing/selftests/nolibc/

NOVATEK NVT-TS I2C TOUCHSCREEN DRIVER
M: Hans de Goede <hdegoede@redhat.com>
M: Hans de Goede <hansg@kernel.org>
L: linux-input@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/input/touchscreen/novatek,nvt-ts.yaml

@ -19377,7 +19377,7 @@ F: crypto/pcrypt.c
F: include/crypto/pcrypt.h

PDS DSC VIRTIO DATA PATH ACCELERATOR
R: Shannon Nelson <shannon.nelson@amd.com>
R: Brett Creeley <brett.creeley@amd.com>
F: drivers/vdpa/pds/

PECI HARDWARE MONITORING DRIVERS

@ -19399,7 +19399,6 @@ F: include/linux/peci-cpu.h
F: include/linux/peci.h

PENSANDO ETHERNET DRIVERS
M: Shannon Nelson <shannon.nelson@amd.com>
M: Brett Creeley <brett.creeley@amd.com>
L: netdev@vger.kernel.org
S: Maintained

@ -22172,7 +22171,7 @@ R: Tejun Heo <tj@kernel.org>
R: David Vernet <void@manifault.com>
R: Andrea Righi <arighi@nvidia.com>
R: Changwoo Min <changwoo@igalia.com>
L: linux-kernel@vger.kernel.org
L: sched-ext@lists.linux.dev
S: Maintained
W: https://github.com/sched-ext/scx
T: git://git.kernel.org/pub/scm/linux/kernel/git/tj/sched_ext.git

@ -22709,7 +22708,7 @@ K: fu[57]40
K: [^@]sifive

SILEAD TOUCHSCREEN DRIVER
M: Hans de Goede <hdegoede@redhat.com>
M: Hans de Goede <hansg@kernel.org>
L: linux-input@vger.kernel.org
L: platform-driver-x86@vger.kernel.org
S: Maintained

@ -22742,7 +22741,7 @@ F: Documentation/devicetree/bindings/i3c/silvaco,i3c-master.yaml
F: drivers/i3c/master/svc-i3c-master.c

SIMPLEFB FB DRIVER
M: Hans de Goede <hdegoede@redhat.com>
M: Hans de Goede <hansg@kernel.org>
L: linux-fbdev@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/display/simple-framebuffer.yaml

@ -22871,7 +22870,7 @@ F: Documentation/hwmon/emc2103.rst
F: drivers/hwmon/emc2103.c

SMSC SCH5627 HARDWARE MONITOR DRIVER
M: Hans de Goede <hdegoede@redhat.com>
M: Hans de Goede <hansg@kernel.org>
L: linux-hwmon@vger.kernel.org
S: Supported
F: Documentation/hwmon/sch5627.rst

@ -23526,7 +23525,7 @@ S: Supported
F: Documentation/process/stable-kernel-rules.rst

STAGING - ATOMISP DRIVER
M: Hans de Goede <hdegoede@redhat.com>
M: Hans de Goede <hansg@kernel.org>
M: Mauro Carvalho Chehab <mchehab@kernel.org>
R: Sakari Ailus <sakari.ailus@linux.intel.com>
L: linux-media@vger.kernel.org

@ -23822,7 +23821,7 @@ F: arch/m68k/sun3*/
F: drivers/net/ethernet/i825xx/sun3*

SUN4I LOW RES ADC ATTACHED TABLET KEYS DRIVER
M: Hans de Goede <hdegoede@redhat.com>
M: Hans de Goede <hansg@kernel.org>
L: linux-input@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/input/allwinner,sun4i-a10-lradc-keys.yaml

@ -25590,7 +25589,7 @@ F: Documentation/hid/hiddev.rst
F: drivers/hid/usbhid/

USB INTEL XHCI ROLE MUX DRIVER
M: Hans de Goede <hdegoede@redhat.com>
M: Hans de Goede <hansg@kernel.org>
L: linux-usb@vger.kernel.org
S: Maintained
F: drivers/usb/roles/intel-xhci-usb-role-switch.c

@ -25781,7 +25780,7 @@ F: Documentation/firmware-guide/acpi/intel-pmc-mux.rst
F: drivers/usb/typec/mux/intel_pmc_mux.c

USB TYPEC PI3USB30532 MUX DRIVER
M: Hans de Goede <hdegoede@redhat.com>
M: Hans de Goede <hansg@kernel.org>
L: linux-usb@vger.kernel.org
S: Maintained
F: drivers/usb/typec/mux/pi3usb30532.c

@ -25810,7 +25809,7 @@ F: drivers/usb/host/uhci*

USB VIDEO CLASS
M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
M: Hans de Goede <hdegoede@redhat.com>
M: Hans de Goede <hansg@kernel.org>
L: linux-media@vger.kernel.org
S: Maintained
W: http://www.ideasonboard.org/uvc/

@ -26341,7 +26340,7 @@ F: include/uapi/linux/virtio_snd.h
F: sound/virtio/*

VIRTUAL BOX GUEST DEVICE DRIVER
M: Hans de Goede <hdegoede@redhat.com>
M: Hans de Goede <hansg@kernel.org>
M: Arnd Bergmann <arnd@arndb.de>
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
S: Maintained

@ -26350,7 +26349,7 @@ F: include/linux/vbox_utils.h
F: include/uapi/linux/vbox*.h

VIRTUAL BOX SHARED FOLDER VFS DRIVER
M: Hans de Goede <hdegoede@redhat.com>
M: Hans de Goede <hansg@kernel.org>
L: linux-fsdevel@vger.kernel.org
S: Maintained
F: fs/vboxsf/*

@ -26605,7 +26604,7 @@ F: drivers/mmc/host/wbsd.*

WACOM PROTOCOL 4 SERIAL TABLETS
M: Julian Squires <julian@cipht.net>
M: Hans de Goede <hdegoede@redhat.com>
M: Hans de Goede <hansg@kernel.org>
L: linux-input@vger.kernel.org
S: Maintained
F: drivers/input/tablet/wacom_serial4.c

@ -26772,7 +26771,7 @@ F: include/linux/wwan.h
F: include/uapi/linux/wwan.h

X-POWERS AXP288 PMIC DRIVERS
M: Hans de Goede <hdegoede@redhat.com>
M: Hans de Goede <hansg@kernel.org>
S: Maintained
F: drivers/acpi/pmic/intel_pmic_xpower.c
N: axp288

@ -26864,14 +26863,14 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/mm
F: arch/x86/mm/

X86 PLATFORM ANDROID TABLETS DSDT FIXUP DRIVER
M: Hans de Goede <hdegoede@redhat.com>
M: Hans de Goede <hansg@kernel.org>
L: platform-driver-x86@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/pdx86/platform-drivers-x86.git
F: drivers/platform/x86/x86-android-tablets/

X86 PLATFORM DRIVERS
M: Hans de Goede <hdegoede@redhat.com>
M: Hans de Goede <hansg@kernel.org>
M: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
L: platform-driver-x86@vger.kernel.org
S: Maintained
5 Makefile

@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 16
SUBLEVEL = 0
EXTRAVERSION = -rc1
EXTRAVERSION = -rc2
NAME = Baby Opossum Posse

# *DOCUMENTATION*

@ -1832,12 +1832,9 @@ rustfmtcheck: rustfmt
# Misc
# ---------------------------------------------------------------------------

# Run misc checks when ${KBUILD_EXTRA_WARN} contains 1
PHONY += misc-check
ifneq ($(findstring 1,$(KBUILD_EXTRA_WARN)),)
misc-check:
	$(Q)$(srctree)/scripts/misc-check
endif

all: misc-check
@ -1107,14 +1107,36 @@ static inline u64 *___ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
#define ctxt_sys_reg(c,r)	(*__ctxt_sys_reg(c,r))

u64 kvm_vcpu_apply_reg_masks(const struct kvm_vcpu *, enum vcpu_sysreg, u64);
#define __vcpu_sys_reg(v,r)						\
	(*({								\
#define __vcpu_assign_sys_reg(v, r, val)				\
	do {								\
		const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt;	\
		u64 *__r = __ctxt_sys_reg(ctxt, (r));			\
		u64 __v = (val);					\
		if (vcpu_has_nv((v)) && (r) >= __SANITISED_REG_START__)	\
			*__r = kvm_vcpu_apply_reg_masks((v), (r), *__r);\
		__r;							\
	}))
			__v = kvm_vcpu_apply_reg_masks((v), (r), __v);	\
									\
		ctxt_sys_reg(ctxt, (r)) = __v;				\
	} while (0)

#define __vcpu_rmw_sys_reg(v, r, op, val)				\
	do {								\
		const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt;	\
		u64 __v = ctxt_sys_reg(ctxt, (r));			\
		__v op (val);						\
		if (vcpu_has_nv((v)) && (r) >= __SANITISED_REG_START__)	\
			__v = kvm_vcpu_apply_reg_masks((v), (r), __v);	\
									\
		ctxt_sys_reg(ctxt, (r)) = __v;				\
	} while (0)

#define __vcpu_sys_reg(v,r)						\
	({								\
		const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt;	\
		u64 __v = ctxt_sys_reg(ctxt, (r));			\
		if (vcpu_has_nv((v)) && (r) >= __SANITISED_REG_START__)	\
			__v = kvm_vcpu_apply_reg_masks((v), (r), __v);	\
		__v;							\
	})

u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);
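The new accessors replace direct writes through the old lvalue-style
``__vcpu_sys_reg()`` so that sanitised registers are masked on every update.
As a brief illustration of the conversion pattern applied throughout the hunks
below (the before/after lines are taken verbatim from this merge, not new API)::

	/* Before: plain assignment / read-modify-write through the lvalue macro. */
	__vcpu_sys_reg(vcpu, CNTV_CTL_EL0) = ctl;
	__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);

	/* After: dedicated assign and read-modify-write helpers. */
	__vcpu_assign_sys_reg(vcpu, CNTV_CTL_EL0, ctl);
	__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, |=, BIT(idx));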
@ -108,16 +108,16 @@ static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl)

	switch(arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		__vcpu_sys_reg(vcpu, CNTV_CTL_EL0) = ctl;
		__vcpu_assign_sys_reg(vcpu, CNTV_CTL_EL0, ctl);
		break;
	case TIMER_PTIMER:
		__vcpu_sys_reg(vcpu, CNTP_CTL_EL0) = ctl;
		__vcpu_assign_sys_reg(vcpu, CNTP_CTL_EL0, ctl);
		break;
	case TIMER_HVTIMER:
		__vcpu_sys_reg(vcpu, CNTHV_CTL_EL2) = ctl;
		__vcpu_assign_sys_reg(vcpu, CNTHV_CTL_EL2, ctl);
		break;
	case TIMER_HPTIMER:
		__vcpu_sys_reg(vcpu, CNTHP_CTL_EL2) = ctl;
		__vcpu_assign_sys_reg(vcpu, CNTHP_CTL_EL2, ctl);
		break;
	default:
		WARN_ON(1);

@ -130,16 +130,16 @@ static void timer_set_cval(struct arch_timer_context *ctxt, u64 cval)

	switch(arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		__vcpu_sys_reg(vcpu, CNTV_CVAL_EL0) = cval;
		__vcpu_assign_sys_reg(vcpu, CNTV_CVAL_EL0, cval);
		break;
	case TIMER_PTIMER:
		__vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = cval;
		__vcpu_assign_sys_reg(vcpu, CNTP_CVAL_EL0, cval);
		break;
	case TIMER_HVTIMER:
		__vcpu_sys_reg(vcpu, CNTHV_CVAL_EL2) = cval;
		__vcpu_assign_sys_reg(vcpu, CNTHV_CVAL_EL2, cval);
		break;
	case TIMER_HPTIMER:
		__vcpu_sys_reg(vcpu, CNTHP_CVAL_EL2) = cval;
		__vcpu_assign_sys_reg(vcpu, CNTHP_CVAL_EL2, cval);
		break;
	default:
		WARN_ON(1);

@ -1036,7 +1036,7 @@ void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
	if (vcpu_has_nv(vcpu)) {
		struct arch_timer_offset *offs = &vcpu_vtimer(vcpu)->offset;

		offs->vcpu_offset = &__vcpu_sys_reg(vcpu, CNTVOFF_EL2);
		offs->vcpu_offset = __ctxt_sys_reg(&vcpu->arch.ctxt, CNTVOFF_EL2);
		offs->vm_offset = &vcpu->kvm->arch.timer_data.poffset;
	}

@ -216,9 +216,9 @@ void kvm_debug_set_guest_ownership(struct kvm_vcpu *vcpu)
void kvm_debug_handle_oslar(struct kvm_vcpu *vcpu, u64 val)
{
	if (val & OSLAR_EL1_OSLK)
		__vcpu_sys_reg(vcpu, OSLSR_EL1) |= OSLSR_EL1_OSLK;
		__vcpu_rmw_sys_reg(vcpu, OSLSR_EL1, |=, OSLSR_EL1_OSLK);
	else
		__vcpu_sys_reg(vcpu, OSLSR_EL1) &= ~OSLSR_EL1_OSLK;
		__vcpu_rmw_sys_reg(vcpu, OSLSR_EL1, &=, ~OSLSR_EL1_OSLK);

	preempt_disable();
	kvm_arch_vcpu_put(vcpu);

@ -103,8 +103,8 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
	fp_state.sve_state = vcpu->arch.sve_state;
	fp_state.sve_vl = vcpu->arch.sve_max_vl;
	fp_state.sme_state = NULL;
	fp_state.svcr = &__vcpu_sys_reg(vcpu, SVCR);
	fp_state.fpmr = &__vcpu_sys_reg(vcpu, FPMR);
	fp_state.svcr = __ctxt_sys_reg(&vcpu->arch.ctxt, SVCR);
	fp_state.fpmr = __ctxt_sys_reg(&vcpu->arch.ctxt, FPMR);
	fp_state.fp_type = &vcpu->arch.fp_type;

	if (vcpu_has_sve(vcpu))

@ -37,7 +37,7 @@ static inline void __vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
	if (unlikely(vcpu_has_nv(vcpu)))
		vcpu_write_sys_reg(vcpu, val, reg);
	else if (!__vcpu_write_sys_reg_to_cpu(val, reg))
		__vcpu_sys_reg(vcpu, reg) = val;
		__vcpu_assign_sys_reg(vcpu, reg, val);
}

static void __vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long target_mode,

@ -51,7 +51,7 @@ static void __vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long target_mode,
	} else if (has_vhe()) {
		write_sysreg_el1(val, SYS_SPSR);
	} else {
		__vcpu_sys_reg(vcpu, SPSR_EL1) = val;
		__vcpu_assign_sys_reg(vcpu, SPSR_EL1, val);
	}
}

@ -45,7 +45,7 @@ static inline void __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu)
	if (!vcpu_el1_is_32bit(vcpu))
		return;

	__vcpu_sys_reg(vcpu, FPEXC32_EL2) = read_sysreg(fpexc32_el2);
	__vcpu_assign_sys_reg(vcpu, FPEXC32_EL2, read_sysreg(fpexc32_el2));
}

static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)

@ -456,7 +456,7 @@ static inline void fpsimd_lazy_switch_to_host(struct kvm_vcpu *vcpu)
	 */
	if (vcpu_has_sve(vcpu)) {
		zcr_el1 = read_sysreg_el1(SYS_ZCR);
		__vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu)) = zcr_el1;
		__vcpu_assign_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu), zcr_el1);

		/*
		 * The guest's state is always saved using the guest's max VL.

@ -307,11 +307,11 @@ static inline void __sysreg32_save_state(struct kvm_vcpu *vcpu)
	vcpu->arch.ctxt.spsr_irq = read_sysreg(spsr_irq);
	vcpu->arch.ctxt.spsr_fiq = read_sysreg(spsr_fiq);

	__vcpu_sys_reg(vcpu, DACR32_EL2) = read_sysreg(dacr32_el2);
	__vcpu_sys_reg(vcpu, IFSR32_EL2) = read_sysreg(ifsr32_el2);
	__vcpu_assign_sys_reg(vcpu, DACR32_EL2, read_sysreg(dacr32_el2));
	__vcpu_assign_sys_reg(vcpu, IFSR32_EL2, read_sysreg(ifsr32_el2));

	if (has_vhe() || kvm_debug_regs_in_use(vcpu))
		__vcpu_sys_reg(vcpu, DBGVCR32_EL2) = read_sysreg(dbgvcr32_el2);
		__vcpu_assign_sys_reg(vcpu, DBGVCR32_EL2, read_sysreg(dbgvcr32_el2));
}

static inline void __sysreg32_restore_state(struct kvm_vcpu *vcpu)

@ -26,7 +26,7 @@ void __kvm_hyp_host_forward_smc(struct kvm_cpu_context *host_ctxt);

static void __hyp_sve_save_guest(struct kvm_vcpu *vcpu)
{
	__vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR);
	__vcpu_assign_sys_reg(vcpu, ZCR_EL1, read_sysreg_el1(SYS_ZCR));
	/*
	 * On saving/restoring guest sve state, always use the maximum VL for
	 * the guest. The layout of the data when saving the sve state depends

@ -79,7 +79,7 @@ static void fpsimd_sve_sync(struct kvm_vcpu *vcpu)

	has_fpmr = kvm_has_fpmr(kern_hyp_va(vcpu->kvm));
	if (has_fpmr)
		__vcpu_sys_reg(vcpu, FPMR) = read_sysreg_s(SYS_FPMR);
		__vcpu_assign_sys_reg(vcpu, FPMR, read_sysreg_s(SYS_FPMR));

	if (system_supports_sve())
		__hyp_sve_restore_host();

@ -223,9 +223,9 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
	 */
	val = read_sysreg_el0(SYS_CNTP_CVAL);
	if (map.direct_ptimer == vcpu_ptimer(vcpu))
		__vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = val;
		__vcpu_assign_sys_reg(vcpu, CNTP_CVAL_EL0, val);
	if (map.direct_ptimer == vcpu_hptimer(vcpu))
		__vcpu_sys_reg(vcpu, CNTHP_CVAL_EL2) = val;
		__vcpu_assign_sys_reg(vcpu, CNTHP_CVAL_EL2, val);

	offset = read_sysreg_s(SYS_CNTPOFF_EL2);

@ -18,17 +18,17 @@
static void __sysreg_save_vel2_state(struct kvm_vcpu *vcpu)
{
	/* These registers are common with EL1 */
	__vcpu_sys_reg(vcpu, PAR_EL1) = read_sysreg(par_el1);
	__vcpu_sys_reg(vcpu, TPIDR_EL1) = read_sysreg(tpidr_el1);
	__vcpu_assign_sys_reg(vcpu, PAR_EL1, read_sysreg(par_el1));
	__vcpu_assign_sys_reg(vcpu, TPIDR_EL1, read_sysreg(tpidr_el1));

	__vcpu_sys_reg(vcpu, ESR_EL2) = read_sysreg_el1(SYS_ESR);
	__vcpu_sys_reg(vcpu, AFSR0_EL2) = read_sysreg_el1(SYS_AFSR0);
	__vcpu_sys_reg(vcpu, AFSR1_EL2) = read_sysreg_el1(SYS_AFSR1);
	__vcpu_sys_reg(vcpu, FAR_EL2) = read_sysreg_el1(SYS_FAR);
	__vcpu_sys_reg(vcpu, MAIR_EL2) = read_sysreg_el1(SYS_MAIR);
	__vcpu_sys_reg(vcpu, VBAR_EL2) = read_sysreg_el1(SYS_VBAR);
	__vcpu_sys_reg(vcpu, CONTEXTIDR_EL2) = read_sysreg_el1(SYS_CONTEXTIDR);
	__vcpu_sys_reg(vcpu, AMAIR_EL2) = read_sysreg_el1(SYS_AMAIR);
	__vcpu_assign_sys_reg(vcpu, ESR_EL2, read_sysreg_el1(SYS_ESR));
	__vcpu_assign_sys_reg(vcpu, AFSR0_EL2, read_sysreg_el1(SYS_AFSR0));
	__vcpu_assign_sys_reg(vcpu, AFSR1_EL2, read_sysreg_el1(SYS_AFSR1));
	__vcpu_assign_sys_reg(vcpu, FAR_EL2, read_sysreg_el1(SYS_FAR));
	__vcpu_assign_sys_reg(vcpu, MAIR_EL2, read_sysreg_el1(SYS_MAIR));
	__vcpu_assign_sys_reg(vcpu, VBAR_EL2, read_sysreg_el1(SYS_VBAR));
	__vcpu_assign_sys_reg(vcpu, CONTEXTIDR_EL2, read_sysreg_el1(SYS_CONTEXTIDR));
	__vcpu_assign_sys_reg(vcpu, AMAIR_EL2, read_sysreg_el1(SYS_AMAIR));

	/*
	 * In VHE mode those registers are compatible between EL1 and EL2,

@ -46,21 +46,21 @@ static void __sysreg_save_vel2_state(struct kvm_vcpu *vcpu)
		 * are always trapped, ensuring that the in-memory
		 * copy is always up-to-date. A small blessing...
		 */
		__vcpu_sys_reg(vcpu, SCTLR_EL2) = read_sysreg_el1(SYS_SCTLR);
		__vcpu_sys_reg(vcpu, TTBR0_EL2) = read_sysreg_el1(SYS_TTBR0);
		__vcpu_sys_reg(vcpu, TTBR1_EL2) = read_sysreg_el1(SYS_TTBR1);
		__vcpu_sys_reg(vcpu, TCR_EL2) = read_sysreg_el1(SYS_TCR);
		__vcpu_assign_sys_reg(vcpu, SCTLR_EL2, read_sysreg_el1(SYS_SCTLR));
		__vcpu_assign_sys_reg(vcpu, TTBR0_EL2, read_sysreg_el1(SYS_TTBR0));
		__vcpu_assign_sys_reg(vcpu, TTBR1_EL2, read_sysreg_el1(SYS_TTBR1));
		__vcpu_assign_sys_reg(vcpu, TCR_EL2, read_sysreg_el1(SYS_TCR));

		if (ctxt_has_tcrx(&vcpu->arch.ctxt)) {
			__vcpu_sys_reg(vcpu, TCR2_EL2) = read_sysreg_el1(SYS_TCR2);
			__vcpu_assign_sys_reg(vcpu, TCR2_EL2, read_sysreg_el1(SYS_TCR2));

			if (ctxt_has_s1pie(&vcpu->arch.ctxt)) {
				__vcpu_sys_reg(vcpu, PIRE0_EL2) = read_sysreg_el1(SYS_PIRE0);
				__vcpu_sys_reg(vcpu, PIR_EL2) = read_sysreg_el1(SYS_PIR);
				__vcpu_assign_sys_reg(vcpu, PIRE0_EL2, read_sysreg_el1(SYS_PIRE0));
				__vcpu_assign_sys_reg(vcpu, PIR_EL2, read_sysreg_el1(SYS_PIR));
			}

			if (ctxt_has_s1poe(&vcpu->arch.ctxt))
				__vcpu_sys_reg(vcpu, POR_EL2) = read_sysreg_el1(SYS_POR);
				__vcpu_assign_sys_reg(vcpu, POR_EL2, read_sysreg_el1(SYS_POR));
		}

		/*

@ -70,13 +70,13 @@ static void __sysreg_save_vel2_state(struct kvm_vcpu *vcpu)
		 */
		val = read_sysreg_el1(SYS_CNTKCTL);
		val &= CNTKCTL_VALID_BITS;
		__vcpu_sys_reg(vcpu, CNTHCTL_EL2) &= ~CNTKCTL_VALID_BITS;
		__vcpu_sys_reg(vcpu, CNTHCTL_EL2) |= val;
		__vcpu_rmw_sys_reg(vcpu, CNTHCTL_EL2, &=, ~CNTKCTL_VALID_BITS);
		__vcpu_rmw_sys_reg(vcpu, CNTHCTL_EL2, |=, val);
	}

	__vcpu_sys_reg(vcpu, SP_EL2) = read_sysreg(sp_el1);
	__vcpu_sys_reg(vcpu, ELR_EL2) = read_sysreg_el1(SYS_ELR);
	__vcpu_sys_reg(vcpu, SPSR_EL2) = read_sysreg_el1(SYS_SPSR);
	__vcpu_assign_sys_reg(vcpu, SP_EL2, read_sysreg(sp_el1));
	__vcpu_assign_sys_reg(vcpu, ELR_EL2, read_sysreg_el1(SYS_ELR));
	__vcpu_assign_sys_reg(vcpu, SPSR_EL2, read_sysreg_el1(SYS_SPSR));
}

static void __sysreg_restore_vel2_state(struct kvm_vcpu *vcpu)

@ -1757,7 +1757,7 @@ int kvm_init_nv_sysregs(struct kvm_vcpu *vcpu)

out:
	for (enum vcpu_sysreg sr = __SANITISED_REG_START__; sr < NR_SYS_REGS; sr++)
		(void)__vcpu_sys_reg(vcpu, sr);
		__vcpu_rmw_sys_reg(vcpu, sr, |=, 0);

	return 0;
}

@ -178,7 +178,7 @@ static void kvm_pmu_set_pmc_value(struct kvm_pmc *pmc, u64 val, bool force)
		val |= lower_32_bits(val);
	}

	__vcpu_sys_reg(vcpu, reg) = val;
	__vcpu_assign_sys_reg(vcpu, reg, val);

	/* Recreate the perf event to reflect the updated sample_period */
	kvm_pmu_create_perf_event(pmc);

@ -204,7 +204,7 @@ void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
void kvm_pmu_set_counter_value_user(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
{
	kvm_pmu_release_perf_event(kvm_vcpu_idx_to_pmc(vcpu, select_idx));
	__vcpu_sys_reg(vcpu, counter_index_to_reg(select_idx)) = val;
	__vcpu_assign_sys_reg(vcpu, counter_index_to_reg(select_idx), val);
	kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
}

@ -239,7 +239,7 @@ static void kvm_pmu_stop_counter(struct kvm_pmc *pmc)

	reg = counter_index_to_reg(pmc->idx);

	__vcpu_sys_reg(vcpu, reg) = val;
	__vcpu_assign_sys_reg(vcpu, reg, val);

	kvm_pmu_release_perf_event(pmc);
}

@ -503,14 +503,14 @@ static void kvm_pmu_counter_increment(struct kvm_vcpu *vcpu,
		reg = __vcpu_sys_reg(vcpu, counter_index_to_reg(i)) + 1;
		if (!kvm_pmc_is_64bit(pmc))
			reg = lower_32_bits(reg);
		__vcpu_sys_reg(vcpu, counter_index_to_reg(i)) = reg;
		__vcpu_assign_sys_reg(vcpu, counter_index_to_reg(i), reg);

		/* No overflow? move on */
		if (kvm_pmc_has_64bit_overflow(pmc) ? reg : lower_32_bits(reg))
			continue;

		/* Mark overflow */
		__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
		__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, |=, BIT(i));

		if (kvm_pmu_counter_can_chain(pmc))
			kvm_pmu_counter_increment(vcpu, BIT(i + 1),

@ -556,7 +556,7 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
	perf_event->attr.sample_period = period;
	perf_event->hw.sample_period = period;

	__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);
	__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, |=, BIT(idx));

	if (kvm_pmu_counter_can_chain(pmc))
		kvm_pmu_counter_increment(vcpu, BIT(idx + 1),

@ -602,7 +602,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
		kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);

	/* The reset bits don't indicate any state, and shouldn't be saved. */
	__vcpu_sys_reg(vcpu, PMCR_EL0) = val & ~(ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_P);
	__vcpu_assign_sys_reg(vcpu, PMCR_EL0, (val & ~(ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_P)));

	if (val & ARMV8_PMU_PMCR_C)
		kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);

@ -779,7 +779,7 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
	u64 reg;

	reg = counter_index_to_evtreg(pmc->idx);
	__vcpu_sys_reg(vcpu, reg) = data & kvm_pmu_evtyper_mask(vcpu->kvm);
	__vcpu_assign_sys_reg(vcpu, reg, (data & kvm_pmu_evtyper_mask(vcpu->kvm)));

	kvm_pmu_create_perf_event(pmc);
}

@ -914,9 +914,9 @@ void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu)
{
	u64 mask = kvm_pmu_implemented_counter_mask(vcpu);

	__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= mask;
	__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= mask;
	__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= mask;
	__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, &=, mask);
	__vcpu_rmw_sys_reg(vcpu, PMINTENSET_EL1, &=, mask);
	__vcpu_rmw_sys_reg(vcpu, PMCNTENSET_EL0, &=, mask);

	kvm_pmu_reprogram_counter_mask(vcpu, mask);
}

@ -1038,7 +1038,7 @@ static void kvm_arm_set_nr_counters(struct kvm *kvm, unsigned int nr)
			u64 val = __vcpu_sys_reg(vcpu, MDCR_EL2);
			val &= ~MDCR_EL2_HPMN;
			val |= FIELD_PREP(MDCR_EL2_HPMN, kvm->arch.nr_pmu_counters);
			__vcpu_sys_reg(vcpu, MDCR_EL2) = val;
			__vcpu_assign_sys_reg(vcpu, MDCR_EL2, val);
		}
	}
}

@ -228,7 +228,7 @@ void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
	 * to reverse-translate virtual EL2 system registers for a
	 * non-VHE guest hypervisor.
	 */
	__vcpu_sys_reg(vcpu, reg) = val;
	__vcpu_assign_sys_reg(vcpu, reg, val);

	switch (reg) {
	case CNTHCTL_EL2:

@ -263,7 +263,7 @@ void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
	return;

memory_write:
	__vcpu_sys_reg(vcpu, reg) = val;
	__vcpu_assign_sys_reg(vcpu, reg, val);
}

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */

@ -605,7 +605,7 @@ static int set_oslsr_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
	if ((val ^ rd->val) & ~OSLSR_EL1_OSLK)
		return -EINVAL;

	__vcpu_sys_reg(vcpu, rd->reg) = val;
	__vcpu_assign_sys_reg(vcpu, rd->reg, val);
	return 0;
}

@ -791,7 +791,7 @@ static u64 reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
		mask |= GENMASK(n - 1, 0);

	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= mask;
	__vcpu_rmw_sys_reg(vcpu, r->reg, &=, mask);

	return __vcpu_sys_reg(vcpu, r->reg);
}

@ -799,7 +799,7 @@ static u64 reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
static u64 reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= GENMASK(31, 0);
	__vcpu_rmw_sys_reg(vcpu, r->reg, &=, GENMASK(31, 0));

	return __vcpu_sys_reg(vcpu, r->reg);
}

@ -811,7 +811,7 @@ static u64 reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
		return 0;

	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= kvm_pmu_evtyper_mask(vcpu->kvm);
	__vcpu_rmw_sys_reg(vcpu, r->reg, &=, kvm_pmu_evtyper_mask(vcpu->kvm));

	return __vcpu_sys_reg(vcpu, r->reg);
}

@ -819,7 +819,7 @@ static u64 reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
static u64 reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= PMSELR_EL0_SEL_MASK;
	__vcpu_rmw_sys_reg(vcpu, r->reg, &=, PMSELR_EL0_SEL_MASK);

	return __vcpu_sys_reg(vcpu, r->reg);
}

@ -835,7 +835,7 @@ static u64 reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
	 * The value of PMCR.N field is included when the
	 * vCPU register is read via kvm_vcpu_read_pmcr().
	 */
	__vcpu_sys_reg(vcpu, r->reg) = pmcr;
	__vcpu_assign_sys_reg(vcpu, r->reg, pmcr);

	return __vcpu_sys_reg(vcpu, r->reg);
}

@ -907,7 +907,7 @@ static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
		return false;

	if (p->is_write)
		__vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
		__vcpu_assign_sys_reg(vcpu, PMSELR_EL0, p->regval);
	else
		/* return PMSELR.SEL field */
		p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)

@ -1076,7 +1076,7 @@ static int set_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 va
{
	u64 mask = kvm_pmu_accessible_counter_mask(vcpu);

	__vcpu_sys_reg(vcpu, r->reg) = val & mask;
	__vcpu_assign_sys_reg(vcpu, r->reg, val & mask);
	kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);

	return 0;

@ -1103,10 +1103,10 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
		val = p->regval & mask;
		if (r->Op2 & 0x1)
			/* accessing PMCNTENSET_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
			__vcpu_rmw_sys_reg(vcpu, PMCNTENSET_EL0, |=, val);
		else
			/* accessing PMCNTENCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
			__vcpu_rmw_sys_reg(vcpu, PMCNTENSET_EL0, &=, ~val);

		kvm_pmu_reprogram_counter_mask(vcpu, val);
	} else {

@ -1129,10 +1129,10 @@ static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,

		if (r->Op2 & 0x1)
			/* accessing PMINTENSET_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
			__vcpu_rmw_sys_reg(vcpu, PMINTENSET_EL1, |=, val);
		else
			/* accessing PMINTENCLR_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
			__vcpu_rmw_sys_reg(vcpu, PMINTENSET_EL1, &=, ~val);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
	}

@ -1151,10 +1151,10 @@ static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
	if (p->is_write) {
		if (r->CRm & 0x2)
			/* accessing PMOVSSET_EL0 */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
			__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, |=, (p->regval & mask));
		else
			/* accessing PMOVSCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
			__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, &=, ~(p->regval & mask));
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
	}

@ -1185,8 +1185,8 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
		if (!vcpu_mode_priv(vcpu))
			return undef_access(vcpu, p, r);

		__vcpu_sys_reg(vcpu, PMUSERENR_EL0) =
			p->regval & ARMV8_PMU_USERENR_MASK;
		__vcpu_assign_sys_reg(vcpu, PMUSERENR_EL0,
				      (p->regval & ARMV8_PMU_USERENR_MASK));
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
			    & ARMV8_PMU_USERENR_MASK;

@ -1237,7 +1237,7 @@ static int set_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
	if (!kvm_supports_32bit_el0())
		val |= ARMV8_PMU_PMCR_LC;

	__vcpu_sys_reg(vcpu, r->reg) = val;
	__vcpu_assign_sys_reg(vcpu, r->reg, val);
	kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);

	return 0;

@ -2213,7 +2213,7 @@ static u64 reset_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
	if (kvm_has_mte(vcpu->kvm))
		clidr |= 2ULL << CLIDR_TTYPE_SHIFT(loc);

	__vcpu_sys_reg(vcpu, r->reg) = clidr;
	__vcpu_assign_sys_reg(vcpu, r->reg, clidr);

	return __vcpu_sys_reg(vcpu, r->reg);
}

@ -2227,7 +2227,7 @@ static int set_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
	if ((val & CLIDR_EL1_RES0) || (!(ctr_el0 & CTR_EL0_IDC) && idc))
		return -EINVAL;

	__vcpu_sys_reg(vcpu, rd->reg) = val;
	__vcpu_assign_sys_reg(vcpu, rd->reg, val);

	return 0;
}

@ -2404,7 +2404,7 @@ static bool access_sp_el1(struct kvm_vcpu *vcpu,
			  const struct sys_reg_desc *r)
{
	if (p->is_write)
		__vcpu_sys_reg(vcpu, SP_EL1) = p->regval;
		__vcpu_assign_sys_reg(vcpu, SP_EL1, p->regval);
	else
		p->regval = __vcpu_sys_reg(vcpu, SP_EL1);

@ -2428,7 +2428,7 @@ static bool access_spsr(struct kvm_vcpu *vcpu,
			const struct sys_reg_desc *r)
{
	if (p->is_write)
		__vcpu_sys_reg(vcpu, SPSR_EL1) = p->regval;
		__vcpu_assign_sys_reg(vcpu, SPSR_EL1, p->regval);
	else
		p->regval = __vcpu_sys_reg(vcpu, SPSR_EL1);

@ -2440,7 +2440,7 @@ static bool access_cntkctl_el12(struct kvm_vcpu *vcpu,
				const struct sys_reg_desc *r)
{
	if (p->is_write)
		__vcpu_sys_reg(vcpu, CNTKCTL_EL1) = p->regval;
		__vcpu_assign_sys_reg(vcpu, CNTKCTL_EL1, p->regval);
	else
		p->regval = __vcpu_sys_reg(vcpu, CNTKCTL_EL1);

@ -2454,7 +2454,9 @@ static u64 reset_hcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
	if (!cpus_have_final_cap(ARM64_HAS_HCR_NV1))
		val |= HCR_E2H;

	return __vcpu_sys_reg(vcpu, r->reg) = val;
	__vcpu_assign_sys_reg(vcpu, r->reg, val);

	return __vcpu_sys_reg(vcpu, r->reg);
}

static unsigned int __el2_visibility(const struct kvm_vcpu *vcpu,

@ -2625,7 +2627,7 @@ static bool access_mdcr(struct kvm_vcpu *vcpu,
		u64_replace_bits(val, hpmn, MDCR_EL2_HPMN);
	}

	__vcpu_sys_reg(vcpu, MDCR_EL2) = val;
	__vcpu_assign_sys_reg(vcpu, MDCR_EL2, val);

	/*
	 * Request a reload of the PMU to enable/disable the counters

@ -2754,7 +2756,7 @@ static int set_imp_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,

static u64 reset_mdcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	__vcpu_sys_reg(vcpu, r->reg) = vcpu->kvm->arch.nr_pmu_counters;
	__vcpu_assign_sys_reg(vcpu, r->reg, vcpu->kvm->arch.nr_pmu_counters);
	return vcpu->kvm->arch.nr_pmu_counters;
}

@ -4790,7 +4792,7 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
			r->reset(vcpu, r);

		if (r->reg >= __SANITISED_REG_START__ && r->reg < NR_SYS_REGS)
			(void)__vcpu_sys_reg(vcpu, r->reg);
			__vcpu_rmw_sys_reg(vcpu, r->reg, |=, 0);
	}

	set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags);

@ -5012,7 +5014,7 @@ int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
	if (r->set_user) {
		ret = (r->set_user)(vcpu, r, val);
	} else {
		__vcpu_sys_reg(vcpu, r->reg) = val;
		__vcpu_assign_sys_reg(vcpu, r->reg, val);
		ret = 0;
	}

@ -137,7 +137,7 @@ static inline u64 reset_unknown(struct kvm_vcpu *vcpu,
{
	BUG_ON(!r->reg);
	BUG_ON(r->reg >= NR_SYS_REGS);
	__vcpu_sys_reg(vcpu, r->reg) = 0x1de7ec7edbadc0deULL;
	__vcpu_assign_sys_reg(vcpu, r->reg, 0x1de7ec7edbadc0deULL);
	return __vcpu_sys_reg(vcpu, r->reg);
}

@ -145,7 +145,7 @@ static inline u64 reset_val(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	BUG_ON(!r->reg);
	BUG_ON(r->reg >= NR_SYS_REGS);
	__vcpu_sys_reg(vcpu, r->reg) = r->val;
	__vcpu_assign_sys_reg(vcpu, r->reg, r->val);
	return __vcpu_sys_reg(vcpu, r->reg);
}

@ -356,12 +356,12 @@ void vgic_v3_put_nested(struct kvm_vcpu *vcpu)
	val = __vcpu_sys_reg(vcpu, ICH_HCR_EL2);
	val &= ~ICH_HCR_EL2_EOIcount_MASK;
	val |= (s_cpu_if->vgic_hcr & ICH_HCR_EL2_EOIcount_MASK);
	__vcpu_sys_reg(vcpu, ICH_HCR_EL2) = val;
	__vcpu_sys_reg(vcpu, ICH_VMCR_EL2) = s_cpu_if->vgic_vmcr;
	__vcpu_assign_sys_reg(vcpu, ICH_HCR_EL2, val);
	__vcpu_assign_sys_reg(vcpu, ICH_VMCR_EL2, s_cpu_if->vgic_vmcr);

	for (i = 0; i < 4; i++) {
		__vcpu_sys_reg(vcpu, ICH_AP0RN(i)) = s_cpu_if->vgic_ap0r[i];
		__vcpu_sys_reg(vcpu, ICH_AP1RN(i)) = s_cpu_if->vgic_ap1r[i];
		__vcpu_assign_sys_reg(vcpu, ICH_AP0RN(i), s_cpu_if->vgic_ap0r[i]);
		__vcpu_assign_sys_reg(vcpu, ICH_AP1RN(i), s_cpu_if->vgic_ap1r[i]);
	}

	for_each_set_bit(i, &shadow_if->lr_map, kvm_vgic_global_state.nr_lr) {

@ -370,7 +370,7 @@ void vgic_v3_put_nested(struct kvm_vcpu *vcpu)
		val &= ~ICH_LR_STATE;
		val |= s_cpu_if->vgic_lr[i] & ICH_LR_STATE;

		__vcpu_sys_reg(vcpu, ICH_LRN(i)) = val;
		__vcpu_assign_sys_reg(vcpu, ICH_LRN(i), val);
		s_cpu_if->vgic_lr[i] = 0;
	}
@@ -38,14 +38,14 @@ void poly1305_blocks_arch(struct poly1305_block_state *state, const u8 *src,
 			unsigned int todo = min_t(unsigned int, len, SZ_4K);
 
 			kernel_neon_begin();
-			poly1305_blocks_neon(state, src, todo, 1);
+			poly1305_blocks_neon(state, src, todo, padbit);
 			kernel_neon_end();
 
 			len -= todo;
 			src += todo;
 		} while (len);
 	} else
-		poly1305_blocks(state, src, len, 1);
+		poly1305_blocks(state, src, len, padbit);
 }
 EXPORT_SYMBOL_GPL(poly1305_blocks_arch);
 
@@ -4,7 +4,7 @@
 / {
 	#size-cells = <0x02>;
 	#address-cells = <0x02>;
-	model-name = "microwatt";
+	model = "microwatt";
 	compatible = "microwatt-soc";
 
 	aliases {
@@ -6,6 +6,7 @@
  */
 
 /dts-v1/;
+#include <dt-bindings/interrupt-controller/irq.h>
 
 / {
 	compatible = "fsl,mpc8315erdb";
@@ -358,6 +359,15 @@
 			interrupt-parent = <&ipic>;
 			fsl,mpc8313-wakeup-timer = <&gtm1>;
 		};
+
+		gpio: gpio-controller@c00 {
+			compatible = "fsl,mpc8314-gpio";
+			reg = <0xc00 0x100>;
+			interrupts = <74 IRQ_TYPE_LEVEL_LOW>;
+			interrupt-parent = <&ipic>;
+			gpio-controller;
+			#gpio-cells = <2>;
+		};
 	};
 
 	pci0: pci@e0008500 {
@@ -183,7 +183,7 @@
 /*
  * Used to name C functions called from asm
  */
-#ifdef CONFIG_PPC_KERNEL_PCREL
+#if defined(__powerpc64__) && defined(CONFIG_PPC_KERNEL_PCREL)
 #define CFUNC(name) name@notoc
 #else
 #define CFUNC(name) name
@@ -23,10 +23,10 @@
 #define TCSETSW		_IOW('t', 21, struct termios)
 #define TCSETSF		_IOW('t', 22, struct termios)
 
-#define TCGETA		_IOR('t', 23, struct termio)
-#define TCSETA		_IOW('t', 24, struct termio)
-#define TCSETAW		_IOW('t', 25, struct termio)
-#define TCSETAF		_IOW('t', 28, struct termio)
+#define TCGETA		0x40147417	/* _IOR('t', 23, struct termio) */
+#define TCSETA		0x80147418	/* _IOW('t', 24, struct termio) */
+#define TCSETAW		0x80147419	/* _IOW('t', 25, struct termio) */
+#define TCSETAF		0x8014741c	/* _IOW('t', 28, struct termio) */
 
 #define TCSBRK		_IO('t', 29)
 #define TCXONC		_IO('t', 30)
@ -1509,6 +1509,8 @@ int eeh_pe_configure(struct eeh_pe *pe)
|
|||
/* Invalid PE ? */
|
||||
if (!pe)
|
||||
return -ENODEV;
|
||||
else
|
||||
ret = eeh_ops->configure_bridge(pe);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@@ -53,7 +53,7 @@ ldflags-$(CONFIG_LD_ORPHAN_WARN) += -Wl,--orphan-handling=$(CONFIG_LD_ORPHAN_WARN_LEVEL)
 ldflags-y += $(filter-out $(CC_AUTO_VAR_INIT_ZERO_ENABLER) $(CC_FLAGS_FTRACE) -Wa$(comma)%, $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS))
 
 CC32FLAGS := -m32
-CC32FLAGSREMOVE := -mcmodel=medium -mabi=elfv1 -mabi=elfv2 -mcall-aixdesc
+CC32FLAGSREMOVE := -mcmodel=medium -mabi=elfv1 -mabi=elfv2 -mcall-aixdesc -mpcrel
 ifdef CONFIG_CC_IS_CLANG
 # This flag is supported by clang for 64-bit but not 32-bit so it will cause
 # an unused command line flag warning for this file.
@@ -89,7 +89,7 @@ config X86
 	select ARCH_HAS_DMA_OPS			if GART_IOMMU || XEN
 	select ARCH_HAS_EARLY_DEBUG		if KGDB
 	select ARCH_HAS_ELF_RANDOMIZE
-	select ARCH_HAS_EXECMEM_ROX		if X86_64
+	select ARCH_HAS_EXECMEM_ROX		if X86_64 && STRICT_MODULE_RWX
 	select ARCH_HAS_FAST_MULTIPLIER
 	select ARCH_HAS_FORTIFY_SOURCE
 	select ARCH_HAS_GCOV_PROFILE_ALL
@@ -5,12 +5,20 @@
 #include <asm-generic/module.h>
 #include <asm/orc_types.h>
 
+struct its_array {
+#ifdef CONFIG_MITIGATION_ITS
+	void **pages;
+	int num;
+#endif
+};
+
 struct mod_arch_specific {
 #ifdef CONFIG_UNWINDER_ORC
 	unsigned int num_orcs;
 	int *orc_unwind_ip;
 	struct orc_entry *orc_unwind;
 #endif
+	struct its_array its_pages;
 };
 
 #endif /* _ASM_X86_MODULE_H */
@@ -24,4 +24,26 @@ int ia32_setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs);
 int x64_setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs);
 int x32_setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs);
 
+/*
+ * To prevent immediate repeat of single step trap on return from SIGTRAP
+ * handler if the trap flag (TF) is set without an external debugger attached,
+ * clear the software event flag in the augmented SS, ensuring no single-step
+ * trap is pending upon ERETU completion.
+ *
+ * Note, this function should be called in sigreturn() before the original
+ * state is restored to make sure the TF is read from the entry frame.
+ */
+static __always_inline void prevent_single_step_upon_eretu(struct pt_regs *regs)
+{
+	/*
+	 * If the trap flag (TF) is set, i.e., the sigreturn() SYSCALL instruction
+	 * is being single-stepped, do not clear the software event flag in the
+	 * augmented SS, thus a debugger won't skip over the following instruction.
+	 */
+#ifdef CONFIG_X86_FRED
+	if (!(regs->flags & X86_EFLAGS_TF))
+		regs->fred_ss.swevent = 0;
+#endif
+}
+
 #endif /* _ASM_X86_SIGHANDLING_H */
@@ -106,7 +106,7 @@ void tdx_init(void);
 
 typedef u64 (*sc_func_t)(u64 fn, struct tdx_module_args *args);
 
-static inline u64 sc_retry(sc_func_t func, u64 fn,
+static __always_inline u64 sc_retry(sc_func_t func, u64 fn,
 			   struct tdx_module_args *args)
 {
 	int retry = RDRAND_RETRY_LOOPS;
@@ -116,6 +116,24 @@ static struct module *its_mod;
 #endif
 static void *its_page;
 static unsigned int its_offset;
+struct its_array its_pages;
+
+static void *__its_alloc(struct its_array *pages)
+{
+	void *page __free(execmem) = execmem_alloc(EXECMEM_MODULE_TEXT, PAGE_SIZE);
+	if (!page)
+		return NULL;
+
+	void *tmp = krealloc(pages->pages, (pages->num+1) * sizeof(void *),
+			     GFP_KERNEL);
+	if (!tmp)
+		return NULL;
+
+	pages->pages = tmp;
+	pages->pages[pages->num++] = page;
+
+	return no_free_ptr(page);
+}
 
 /* Initialize a thunk with the "jmp *reg; int3" instructions. */
 static void *its_init_thunk(void *thunk, int reg)
@@ -151,6 +169,21 @@ static void *its_init_thunk(void *thunk, int reg)
 	return thunk + offset;
 }
 
+static void its_pages_protect(struct its_array *pages)
+{
+	for (int i = 0; i < pages->num; i++) {
+		void *page = pages->pages[i];
+		execmem_restore_rox(page, PAGE_SIZE);
+	}
+}
+
+static void its_fini_core(void)
+{
+	if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
+		its_pages_protect(&its_pages);
+	kfree(its_pages.pages);
+}
+
 #ifdef CONFIG_MODULES
 void its_init_mod(struct module *mod)
 {
@@ -173,10 +206,8 @@ void its_fini_mod(struct module *mod)
 	its_page = NULL;
 	mutex_unlock(&text_mutex);
 
-	for (int i = 0; i < mod->its_num_pages; i++) {
-		void *page = mod->its_page_array[i];
-		execmem_restore_rox(page, PAGE_SIZE);
-	}
+	if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
+		its_pages_protect(&mod->arch.its_pages);
 }
 
 void its_free_mod(struct module *mod)
@@ -184,37 +215,33 @@ void its_free_mod(struct module *mod)
 	if (!cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS))
 		return;
 
-	for (int i = 0; i < mod->its_num_pages; i++) {
-		void *page = mod->its_page_array[i];
+	for (int i = 0; i < mod->arch.its_pages.num; i++) {
+		void *page = mod->arch.its_pages.pages[i];
 		execmem_free(page);
 	}
-	kfree(mod->its_page_array);
+	kfree(mod->arch.its_pages.pages);
 }
 #endif /* CONFIG_MODULES */
 
 static void *its_alloc(void)
 {
-	void *page __free(execmem) = execmem_alloc(EXECMEM_MODULE_TEXT, PAGE_SIZE);
+	struct its_array *pages = &its_pages;
+	void *page;
+
+#ifdef CONFIG_MODULE
+	if (its_mod)
+		pages = &its_mod->arch.its_pages;
+#endif
 
+	page = __its_alloc(pages);
 	if (!page)
 		return NULL;
 
-#ifdef CONFIG_MODULES
-	if (its_mod) {
-		void *tmp = krealloc(its_mod->its_page_array,
-				     (its_mod->its_num_pages+1) * sizeof(void *),
-				     GFP_KERNEL);
-		if (!tmp)
-			return NULL;
+	execmem_make_temp_rw(page, PAGE_SIZE);
+	if (pages == &its_pages)
+		set_memory_x((unsigned long)page, 1);
 
-		its_mod->its_page_array = tmp;
-		its_mod->its_page_array[its_mod->its_num_pages++] = page;
-
-		execmem_make_temp_rw(page, PAGE_SIZE);
-	}
-#endif /* CONFIG_MODULES */
-
-	return no_free_ptr(page);
+	return page;
 }
 
 static void *its_allocate_thunk(int reg)
@@ -268,7 +295,9 @@ u8 *its_static_thunk(int reg)
 	return thunk;
 }
 
-#endif
+#else
+static inline void its_fini_core(void) {}
+#endif /* CONFIG_MITIGATION_ITS */
 
 /*
  * Nomenclature for variable names to simplify and clarify this code and ease
@@ -2338,6 +2367,8 @@ void __init alternative_instructions(void)
 	apply_retpolines(__retpoline_sites, __retpoline_sites_end);
 	apply_returns(__return_sites, __return_sites_end);
 
+	its_fini_core();
+
 	/*
 	 * Adjust all CALL instructions to point to func()-10, including
 	 * those in .altinstr_replacement.
@@ -152,6 +152,8 @@ SYSCALL32_DEFINE0(sigreturn)
 	struct sigframe_ia32 __user *frame = (struct sigframe_ia32 __user *)(regs->sp-8);
 	sigset_t set;
 
+	prevent_single_step_upon_eretu(regs);
+
 	if (!access_ok(frame, sizeof(*frame)))
 		goto badframe;
 	if (__get_user(set.sig[0], &frame->sc.oldmask)
@@ -175,6 +177,8 @@ SYSCALL32_DEFINE0(rt_sigreturn)
 	struct rt_sigframe_ia32 __user *frame;
 	sigset_t set;
 
+	prevent_single_step_upon_eretu(regs);
+
 	frame = (struct rt_sigframe_ia32 __user *)(regs->sp - 4);
 
 	if (!access_ok(frame, sizeof(*frame)))
@@ -250,6 +250,8 @@ SYSCALL_DEFINE0(rt_sigreturn)
 	sigset_t set;
 	unsigned long uc_flags;
 
+	prevent_single_step_upon_eretu(regs);
+
 	frame = (struct rt_sigframe __user *)(regs->sp - sizeof(long));
 	if (!access_ok(frame, sizeof(*frame)))
 		goto badframe;
@@ -366,6 +368,8 @@ COMPAT_SYSCALL_DEFINE0(x32_rt_sigreturn)
 	sigset_t set;
 	unsigned long uc_flags;
 
+	prevent_single_step_upon_eretu(regs);
+
 	frame = (struct rt_sigframe_x32 __user *)(regs->sp - 8);
 
 	if (!access_ok(frame, sizeof(*frame)))
@@ -299,3 +299,27 @@ struct smp_ops smp_ops = {
 	.send_call_func_single_ipi = native_send_call_func_single_ipi,
 };
 EXPORT_SYMBOL_GPL(smp_ops);
+
+int arch_cpu_rescan_dead_smt_siblings(void)
+{
+	enum cpuhp_smt_control old = cpu_smt_control;
+	int ret;
+
+	/*
+	 * If SMT has been disabled and SMT siblings are in HLT, bring them back
+	 * online and offline them again so that they end up in MWAIT proper.
+	 *
+	 * Called with hotplug enabled.
+	 */
+	if (old != CPU_SMT_DISABLED && old != CPU_SMT_FORCE_DISABLED)
+		return 0;
+
+	ret = cpuhp_smt_enable();
+	if (ret)
+		return ret;
+
+	ret = cpuhp_smt_disable(old);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(arch_cpu_rescan_dead_smt_siblings);
@ -1244,6 +1244,10 @@ void play_dead_common(void)
|
|||
local_irq_disable();
|
||||
}
|
||||
|
||||
/*
|
||||
* We need to flush the caches before going to sleep, lest we have
|
||||
* dirty data in our caches when we come back up.
|
||||
*/
|
||||
void __noreturn mwait_play_dead(unsigned int eax_hint)
|
||||
{
|
||||
struct mwait_cpu_dead *md = this_cpu_ptr(&mwait_cpu_dead);
|
||||
|
@ -1289,50 +1293,6 @@ void __noreturn mwait_play_dead(unsigned int eax_hint)
|
|||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* We need to flush the caches before going to sleep, lest we have
|
||||
* dirty data in our caches when we come back up.
|
||||
*/
|
||||
static inline void mwait_play_dead_cpuid_hint(void)
|
||||
{
|
||||
unsigned int eax, ebx, ecx, edx;
|
||||
unsigned int highest_cstate = 0;
|
||||
unsigned int highest_subcstate = 0;
|
||||
int i;
|
||||
|
||||
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
|
||||
boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
|
||||
return;
|
||||
if (!this_cpu_has(X86_FEATURE_MWAIT))
|
||||
return;
|
||||
if (!this_cpu_has(X86_FEATURE_CLFLUSH))
|
||||
return;
|
||||
|
||||
eax = CPUID_LEAF_MWAIT;
|
||||
ecx = 0;
|
||||
native_cpuid(&eax, &ebx, &ecx, &edx);
|
||||
|
||||
/*
|
||||
* eax will be 0 if EDX enumeration is not valid.
|
||||
* Initialized below to cstate, sub_cstate value when EDX is valid.
|
||||
*/
|
||||
if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED)) {
|
||||
eax = 0;
|
||||
} else {
|
||||
edx >>= MWAIT_SUBSTATE_SIZE;
|
||||
for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
|
||||
if (edx & MWAIT_SUBSTATE_MASK) {
|
||||
highest_cstate = i;
|
||||
highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
|
||||
}
|
||||
}
|
||||
eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
|
||||
(highest_subcstate - 1);
|
||||
}
|
||||
|
||||
mwait_play_dead(eax);
|
||||
}
|
||||
|
||||
/*
|
||||
* Kick all "offline" CPUs out of mwait on kexec(). See comment in
|
||||
* mwait_play_dead().
|
||||
|
@ -1383,9 +1343,9 @@ void native_play_dead(void)
|
|||
play_dead_common();
|
||||
tboot_shutdown(TB_SHUTDOWN_WFS);
|
||||
|
||||
mwait_play_dead_cpuid_hint();
|
||||
if (cpuidle_play_dead())
|
||||
hlt_play_dead();
|
||||
/* Below returns only on error. */
|
||||
cpuidle_play_dead();
|
||||
hlt_play_dead();
|
||||
}
|
||||
|
||||
#else /* ... !CONFIG_HOTPLUG_CPU */
|
||||
|
|
|
@@ -4896,12 +4896,16 @@ long kvm_arch_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
 {
 	u64 error_code = PFERR_GUEST_FINAL_MASK;
 	u8 level = PG_LEVEL_4K;
+	u64 direct_bits;
 	u64 end;
 	int r;
 
 	if (!vcpu->kvm->arch.pre_fault_allowed)
 		return -EOPNOTSUPP;
 
+	if (kvm_is_gfn_alias(vcpu->kvm, gpa_to_gfn(range->gpa)))
+		return -EINVAL;
+
 	/*
 	 * reload is efficient when called repeatedly, so we can do it on
 	 * every iteration.
@@ -4910,15 +4914,18 @@ long kvm_arch_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
 	if (r)
 		return r;
 
+	direct_bits = 0;
 	if (kvm_arch_has_private_mem(vcpu->kvm) &&
 	    kvm_mem_is_private(vcpu->kvm, gpa_to_gfn(range->gpa)))
 		error_code |= PFERR_PRIVATE_ACCESS;
+	else
+		direct_bits = gfn_to_gpa(kvm_gfn_direct_bits(vcpu->kvm));
 
 	/*
 	 * Shadow paging uses GVA for kvm page fault, so restrict to
 	 * two-dimensional paging.
 	 */
-	r = kvm_tdp_map_page(vcpu, range->gpa, error_code, &level);
+	r = kvm_tdp_map_page(vcpu, range->gpa | direct_bits, error_code, &level);
 	if (r < 0)
 		return r;
 
@ -2871,6 +2871,33 @@ void __init sev_set_cpu_caps(void)
|
|||
}
|
||||
}
|
||||
|
||||
static bool is_sev_snp_initialized(void)
|
||||
{
|
||||
struct sev_user_data_snp_status *status;
|
||||
struct sev_data_snp_addr buf;
|
||||
bool initialized = false;
|
||||
int ret, error = 0;
|
||||
|
||||
status = snp_alloc_firmware_page(GFP_KERNEL | __GFP_ZERO);
|
||||
if (!status)
|
||||
return false;
|
||||
|
||||
buf.address = __psp_pa(status);
|
||||
ret = sev_do_cmd(SEV_CMD_SNP_PLATFORM_STATUS, &buf, &error);
|
||||
if (ret) {
|
||||
pr_err("SEV: SNP_PLATFORM_STATUS failed ret=%d, fw_error=%d (%#x)\n",
|
||||
ret, error, error);
|
||||
goto out;
|
||||
}
|
||||
|
||||
initialized = !!status->state;
|
||||
|
||||
out:
|
||||
snp_free_firmware_page(status);
|
||||
|
||||
return initialized;
|
||||
}
|
||||
|
||||
void __init sev_hardware_setup(void)
|
||||
{
|
||||
unsigned int eax, ebx, ecx, edx, sev_asid_count, sev_es_asid_count;
|
||||
|
@ -2975,6 +3002,14 @@ void __init sev_hardware_setup(void)
|
|||
sev_snp_supported = sev_snp_enabled && cc_platform_has(CC_ATTR_HOST_SEV_SNP);
|
||||
|
||||
out:
|
||||
if (sev_enabled) {
|
||||
init_args.probe = true;
|
||||
if (sev_platform_init(&init_args))
|
||||
sev_supported = sev_es_supported = sev_snp_supported = false;
|
||||
else if (sev_snp_supported)
|
||||
sev_snp_supported = is_sev_snp_initialized();
|
||||
}
|
||||
|
||||
if (boot_cpu_has(X86_FEATURE_SEV))
|
||||
pr_info("SEV %s (ASIDs %u - %u)\n",
|
||||
sev_supported ? min_sev_asid <= max_sev_asid ? "enabled" :
|
||||
|
@ -3001,15 +3036,6 @@ out:
|
|||
sev_supported_vmsa_features = 0;
|
||||
if (sev_es_debug_swap_enabled)
|
||||
sev_supported_vmsa_features |= SVM_SEV_FEAT_DEBUG_SWAP;
|
||||
|
||||
if (!sev_enabled)
|
||||
return;
|
||||
|
||||
/*
|
||||
* Do both SNP and SEV initialization at KVM module load.
|
||||
*/
|
||||
init_args.probe = true;
|
||||
sev_platform_init(&init_args);
|
||||
}
|
||||
|
||||
void sev_hardware_unsetup(void)
|
||||
|
|
|
@@ -30,7 +30,6 @@
 #include <linux/initrd.h>
 #include <linux/cpumask.h>
 #include <linux/gfp.h>
-#include <linux/execmem.h>
 
 #include <asm/asm.h>
 #include <asm/bios_ebda.h>
@@ -749,8 +748,6 @@ void mark_rodata_ro(void)
 	pr_info("Write protecting kernel text and read-only data: %luk\n",
 		size >> 10);
 
-	execmem_cache_make_ro();
-
 	kernel_set_to_readonly = 1;
 
 #ifdef CONFIG_CPA_DEBUG
@@ -34,7 +34,6 @@
 #include <linux/gfp.h>
 #include <linux/kcore.h>
 #include <linux/bootmem_info.h>
-#include <linux/execmem.h>
 
 #include <asm/processor.h>
 #include <asm/bios_ebda.h>
@@ -1392,8 +1391,6 @@ void mark_rodata_ro(void)
 		(end - start) >> 10);
 	set_memory_ro(start, (end - start) >> PAGE_SHIFT);
 
-	execmem_cache_make_ro();
-
 	kernel_set_to_readonly = 1;
 
 	/*
@@ -1257,6 +1257,9 @@ static int collapse_pmd_page(pmd_t *pmd, unsigned long addr,
 	pgprot_t pgprot;
 	int i = 0;
 
+	if (!cpu_feature_enabled(X86_FEATURE_PSE))
+		return 0;
+
 	addr &= PMD_MASK;
 	pte = pte_offset_kernel(pmd, addr);
 	first = *pte;
@@ -192,7 +192,8 @@ out:
 
 int arch_resume_nosmt(void)
 {
-	int ret = 0;
+	int ret;
+
 	/*
 	 * We reached this while coming out of hibernation. This means
 	 * that SMT siblings are sleeping in hlt, as mwait is not safe
@@ -206,18 +207,10 @@ int arch_resume_nosmt(void)
 	 * Called with hotplug disabled.
 	 */
 	cpu_hotplug_enable();
-	if (cpu_smt_control == CPU_SMT_DISABLED ||
-	    cpu_smt_control == CPU_SMT_FORCE_DISABLED) {
-		enum cpuhp_smt_control old = cpu_smt_control;
 
-		ret = cpuhp_smt_enable();
-		if (ret)
-			goto out;
-		ret = cpuhp_smt_disable(old);
-		if (ret)
-			goto out;
-	}
-out:
+	ret = arch_cpu_rescan_dead_smt_siblings();
+
 	cpu_hotplug_disable();
 
 	return ret;
 }
@@ -75,8 +75,9 @@ static inline void seamcall_err_ret(u64 fn, u64 err,
 		args->r9, args->r10, args->r11);
 }
 
-static inline int sc_retry_prerr(sc_func_t func, sc_err_func_t err_func,
-				 u64 fn, struct tdx_module_args *args)
+static __always_inline int sc_retry_prerr(sc_func_t func,
+					  sc_err_func_t err_func,
+					  u64 fn, struct tdx_module_args *args)
 {
 	u64 sret = sc_retry(func, fn, args);
 
@ -998,20 +998,20 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
|
|||
if (!plug || rq_list_empty(&plug->mq_list))
|
||||
return false;
|
||||
|
||||
rq_list_for_each(&plug->mq_list, rq) {
|
||||
if (rq->q == q) {
|
||||
if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
|
||||
BIO_MERGE_OK)
|
||||
return true;
|
||||
break;
|
||||
}
|
||||
rq = plug->mq_list.tail;
|
||||
if (rq->q == q)
|
||||
return blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
|
||||
BIO_MERGE_OK;
|
||||
else if (!plug->multiple_queues)
|
||||
return false;
|
||||
|
||||
/*
|
||||
* Only keep iterating plug list for merges if we have multiple
|
||||
* queues
|
||||
*/
|
||||
if (!plug->multiple_queues)
|
||||
break;
|
||||
rq_list_for_each(&plug->mq_list, rq) {
|
||||
if (rq->q != q)
|
||||
continue;
|
||||
if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
|
||||
BIO_MERGE_OK)
|
||||
return true;
|
||||
break;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
|
|
@@ -1225,6 +1225,7 @@ void blk_zone_write_plug_bio_endio(struct bio *bio)
 	if (bio_flagged(bio, BIO_EMULATES_ZONE_APPEND)) {
 		bio->bi_opf &= ~REQ_OP_MASK;
 		bio->bi_opf |= REQ_OP_ZONE_APPEND;
+		bio_clear_flag(bio, BIO_EMULATES_ZONE_APPEND);
 	}
 
 	/*
@@ -1306,7 +1307,6 @@ again:
 	spin_unlock_irqrestore(&zwplug->lock, flags);
 
 	bdev = bio->bi_bdev;
-	submit_bio_noacct_nocheck(bio);
 
 	/*
 	 * blk-mq devices will reuse the extra reference on the request queue
@@ -1314,8 +1314,12 @@ again:
 	 * path for BIO-based devices will not do that. So drop this extra
 	 * reference here.
 	 */
-	if (bdev_test_flag(bdev, BD_HAS_SUBMIT_BIO))
+	if (bdev_test_flag(bdev, BD_HAS_SUBMIT_BIO)) {
+		bdev->bd_disk->fops->submit_bio(bio);
 		blk_queue_exit(bdev->bd_disk->queue);
+	} else {
+		blk_mq_submit_bio(bio);
+	}
 
 put_zwplug:
 	/* Drop the reference we took in disk_zone_wplug_schedule_bio_work(). */
@@ -566,7 +566,7 @@ static int __init crypto_hkdf_module_init(void)
 
 static void __exit crypto_hkdf_module_exit(void) {}
 
-module_init(crypto_hkdf_module_init);
+late_initcall(crypto_hkdf_module_init);
 module_exit(crypto_hkdf_module_exit);
 
 MODULE_LICENSE("GPL");
@@ -126,8 +126,8 @@ struct psp_device *aie2m_psp_create(struct drm_device *ddev, struct psp_config *conf)
 	psp->ddev = ddev;
 	memcpy(psp->psp_regs, conf->psp_regs, sizeof(psp->psp_regs));
 
-	psp->fw_buf_sz = ALIGN(conf->fw_size, PSP_FW_ALIGN) + PSP_FW_ALIGN;
-	psp->fw_buffer = drmm_kmalloc(ddev, psp->fw_buf_sz, GFP_KERNEL);
+	psp->fw_buf_sz = ALIGN(conf->fw_size, PSP_FW_ALIGN);
+	psp->fw_buffer = drmm_kmalloc(ddev, psp->fw_buf_sz + PSP_FW_ALIGN, GFP_KERNEL);
 	if (!psp->fw_buffer) {
 		drm_err(ddev, "no memory for fw buffer");
 		return NULL;
@@ -33,7 +33,7 @@
 static DEFINE_MUTEX(isolated_cpus_lock);
 static DEFINE_MUTEX(round_robin_lock);
 
-static unsigned long power_saving_mwait_eax;
+static unsigned int power_saving_mwait_eax;
 
 static unsigned char tsc_detected_unstable;
 static unsigned char tsc_marked_unstable;
@@ -883,19 +883,16 @@ static int __init einj_init(void)
 	}
 
 	einj_dev = faux_device_create("acpi-einj", NULL, &einj_device_ops);
-	if (!einj_dev)
-		return -ENODEV;
 
-	einj_initialized = true;
+	if (einj_dev)
+		einj_initialized = true;
 
 	return 0;
 }
 
 static void __exit einj_exit(void)
 {
-	if (einj_initialized)
-		faux_device_destroy(einj_dev);
-
+	faux_device_destroy(einj_dev);
 }
 
 module_init(einj_init);
@@ -476,7 +476,7 @@ bool cppc_allow_fast_switch(void)
 	struct cpc_desc *cpc_ptr;
 	int cpu;
 
-	for_each_possible_cpu(cpu) {
+	for_each_present_cpu(cpu) {
 		cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
 		desired_reg = &cpc_ptr->cpc_regs[DESIRED_PERF];
 		if (!CPC_IN_SYSTEM_MEMORY(desired_reg) &&
@@ -23,8 +23,10 @@
 #include <linux/delay.h>
 #include <linux/interrupt.h>
 #include <linux/list.h>
+#include <linux/printk.h>
 #include <linux/spinlock.h>
 #include <linux/slab.h>
+#include <linux/string.h>
 #include <linux/suspend.h>
 #include <linux/acpi.h>
 #include <linux/dmi.h>
@@ -2031,6 +2033,21 @@ void __init acpi_ec_ecdt_probe(void)
 		goto out;
 	}
 
+	if (!strstarts(ecdt_ptr->id, "\\")) {
+		/*
+		 * The ECDT table on some MSI notebooks contains invalid data, together
+		 * with an empty ID string ("").
+		 *
+		 * Section 5.2.15 of the ACPI specification requires the ID string to be
+		 * a "fully qualified reference to the (...) embedded controller device",
+		 * so this string always has to start with a backslash.
+		 *
+		 * By verifying this we can avoid such faulty ECDT tables in a safe way.
+		 */
+		pr_err(FW_BUG "Ignoring ECDT due to invalid ID string \"%s\"\n", ecdt_ptr->id);
+		goto out;
+	}
+
 	ec = acpi_ec_alloc();
 	if (!ec)
 		goto out;
@@ -175,6 +175,12 @@ bool processor_physically_present(acpi_handle handle);
 static inline void acpi_early_processor_control_setup(void) {}
 #endif
 
+#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
+void acpi_idle_rescan_dead_smt_siblings(void);
+#else
+static inline void acpi_idle_rescan_dead_smt_siblings(void) {}
+#endif
+
 /* --------------------------------------------------------------------------
                                   Embedded Controller
    -------------------------------------------------------------------------- */
@@ -279,6 +279,9 @@ static int __init acpi_processor_driver_init(void)
 	 * after acpi_cppc_processor_probe() has been called for all online CPUs
 	 */
 	acpi_processor_init_invariance_cppc();
+
+	acpi_idle_rescan_dead_smt_siblings();
+
 	return 0;
 err:
 	driver_unregister(&acpi_processor_driver);
@@ -24,6 +24,8 @@
 #include <acpi/processor.h>
 #include <linux/context_tracking.h>
 
+#include "internal.h"
+
 /*
  * Include the apic definitions for x86 to have the APIC timer related defines
  * available also for UP (on SMP it gets magically included via linux/smp.h).
@@ -55,6 +57,12 @@ struct cpuidle_driver acpi_idle_driver = {
 };
 
 #ifdef CONFIG_ACPI_PROCESSOR_CSTATE
+void acpi_idle_rescan_dead_smt_siblings(void)
+{
+	if (cpuidle_get_driver() == &acpi_idle_driver)
+		arch_cpu_rescan_dead_smt_siblings();
+}
+
 static
 DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX], acpi_cstate);
 
@@ -666,6 +666,13 @@ static const struct dmi_system_id irq1_edge_low_force_override[] = {
 			DMI_MATCH(DMI_BOARD_NAME, "GMxHGxx"),
 		},
 	},
+	{
+		/* MACHENIKE L16P/L16P */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "MACHENIKE"),
+			DMI_MATCH(DMI_BOARD_NAME, "L16P"),
+		},
+	},
 	{
 		/*
 		 * TongFang GM5HG0A in case of the SKIKK Vanaheim relabel the
@ -1410,8 +1410,15 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
|
|||
|
||||
static bool ahci_broken_lpm(struct pci_dev *pdev)
|
||||
{
|
||||
/*
|
||||
* Platforms with LPM problems.
|
||||
* If driver_data is NULL, there is no existing BIOS version with
|
||||
* functioning LPM.
|
||||
* If driver_data is non-NULL, then driver_data contains the DMI BIOS
|
||||
* build date of the first BIOS version with functioning LPM (i.e. older
|
||||
* BIOS versions have broken LPM).
|
||||
*/
|
||||
static const struct dmi_system_id sysids[] = {
|
||||
/* Various Lenovo 50 series have LPM issues with older BIOSen */
|
||||
{
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
|
||||
|
@ -1438,13 +1445,30 @@ static bool ahci_broken_lpm(struct pci_dev *pdev)
|
|||
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
|
||||
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W541"),
|
||||
},
|
||||
.driver_data = "20180409", /* 2.35 */
|
||||
},
|
||||
{
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
|
||||
DMI_MATCH(DMI_PRODUCT_VERSION, "ASUSPRO D840MB_M840SA"),
|
||||
},
|
||||
/* 320 is broken, there is no known good version. */
|
||||
},
|
||||
{
|
||||
/*
|
||||
* Note date based on release notes, 2.35 has been
|
||||
* reported to be good, but I've been unable to get
|
||||
* a hold of the reporter to get the DMI BIOS date.
|
||||
* TODO: fix this.
|
||||
* AMD 500 Series Chipset SATA Controller [1022:43eb]
|
||||
* on this motherboard timeouts on ports 5 and 6 when
|
||||
* LPM is enabled, at least with WDC WD20EFAX-68FB5N0
|
||||
* hard drives. LPM with the same drive works fine on
|
||||
* all other ports on the same controller.
|
||||
*/
|
||||
.driver_data = "20180310", /* 2.35 */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_BOARD_VENDOR,
|
||||
"ASUSTeK COMPUTER INC."),
|
||||
DMI_MATCH(DMI_BOARD_NAME,
|
||||
"ROG STRIX B550-F GAMING (WI-FI)"),
|
||||
},
|
||||
/* 3621 is broken, there is no known good version. */
|
||||
},
|
||||
{ } /* terminate list */
|
||||
};
|
||||
|
@ -1455,6 +1479,9 @@ static bool ahci_broken_lpm(struct pci_dev *pdev)
|
|||
if (!dmi)
|
||||
return false;
|
||||
|
||||
if (!dmi->driver_data)
|
||||
return true;
|
||||
|
||||
dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
|
||||
snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
|
||||
|
||||
|
|
|
@ -514,15 +514,19 @@ unsigned int ata_acpi_gtm_xfermask(struct ata_device *dev,
|
|||
EXPORT_SYMBOL_GPL(ata_acpi_gtm_xfermask);
|
||||
|
||||
/**
|
||||
* ata_acpi_cbl_80wire - Check for 80 wire cable
|
||||
* ata_acpi_cbl_pata_type - Return PATA cable type
|
||||
* @ap: Port to check
|
||||
* @gtm: GTM data to use
|
||||
*
|
||||
* Return 1 if the @gtm indicates the BIOS selected an 80wire mode.
|
||||
* Return ATA_CBL_PATA* according to the transfer mode selected by BIOS
|
||||
*/
|
||||
int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm)
|
||||
int ata_acpi_cbl_pata_type(struct ata_port *ap)
|
||||
{
|
||||
struct ata_device *dev;
|
||||
int ret = ATA_CBL_PATA_UNK;
|
||||
const struct ata_acpi_gtm *gtm = ata_acpi_init_gtm(ap);
|
||||
|
||||
if (!gtm)
|
||||
return ATA_CBL_PATA40;
|
||||
|
||||
ata_for_each_dev(dev, &ap->link, ENABLED) {
|
||||
unsigned int xfer_mask, udma_mask;
|
||||
|
@ -530,13 +534,17 @@ int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm)
|
|||
xfer_mask = ata_acpi_gtm_xfermask(dev, gtm);
|
||||
ata_unpack_xfermask(xfer_mask, NULL, NULL, &udma_mask);
|
||||
|
||||
if (udma_mask & ~ATA_UDMA_MASK_40C)
|
||||
return 1;
|
||||
ret = ATA_CBL_PATA40;
|
||||
|
||||
if (udma_mask & ~ATA_UDMA_MASK_40C) {
|
||||
ret = ATA_CBL_PATA80;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ata_acpi_cbl_80wire);
|
||||
EXPORT_SYMBOL_GPL(ata_acpi_cbl_pata_type);
|
||||
|
||||
static void ata_acpi_gtf_to_tf(struct ata_device *dev,
|
||||
const struct ata_acpi_gtf *gtf,
|
||||
|
|
|
@@ -27,7 +27,7 @@
 #include <scsi/scsi_host.h>
 #include <linux/dmi.h>
 
-#ifdef CONFIG_X86_32
+#if defined(CONFIG_X86) && defined(CONFIG_X86_32)
 #include <asm/msr.h>
 static int use_msr;
 module_param_named(msr, use_msr, int, 0644);
@@ -1298,7 +1298,7 @@ static int pata_macio_pci_attach(struct pci_dev *pdev,
 	priv->dev = &pdev->dev;
 
 	/* Get MMIO regions */
-	if (pci_request_regions(pdev, "pata-macio")) {
+	if (pcim_request_all_regions(pdev, "pata-macio")) {
 		dev_err(&pdev->dev,
 			"Cannot obtain PCI resources\n");
 		return -EBUSY;
@ -201,11 +201,9 @@ static int via_cable_detect(struct ata_port *ap) {
|
|||
two drives */
|
||||
if (ata66 & (0x10100000 >> (16 * ap->port_no)))
|
||||
return ATA_CBL_PATA80;
|
||||
|
||||
/* Check with ACPI so we can spot BIOS reported SATA bridges */
|
||||
if (ata_acpi_init_gtm(ap) &&
|
||||
ata_acpi_cbl_80wire(ap, ata_acpi_init_gtm(ap)))
|
||||
return ATA_CBL_PATA80;
|
||||
return ATA_CBL_PATA40;
|
||||
return ata_acpi_cbl_pata_type(ap);
|
||||
}
|
||||
|
||||
static int via_pre_reset(struct ata_link *link, unsigned long deadline)
|
||||
|
@ -368,7 +366,8 @@ static unsigned int via_mode_filter(struct ata_device *dev, unsigned int mask)
|
|||
}
|
||||
|
||||
if (dev->class == ATA_DEV_ATAPI &&
|
||||
dmi_check_system(no_atapi_dma_dmi_table)) {
|
||||
(dmi_check_system(no_atapi_dma_dmi_table) ||
|
||||
config->id == PCI_DEVICE_ID_VIA_6415)) {
|
||||
ata_dev_warn(dev, "controller locks up on ATAPI DMA, forcing PIO\n");
|
||||
mask &= ATA_MASK_PIO;
|
||||
}
|
||||
|
|
|
@@ -288,7 +288,9 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
 	struct sk_buff *new_skb;
 	int result = 0;
 
-	if (!skb->len) return 0;
+	if (skb->len < sizeof(struct atmtcp_hdr))
+		goto done;
+
 	dev = vcc->dev_data;
 	hdr = (struct atmtcp_hdr *) skb->data;
 	if (hdr->length == ATMTCP_HDR_MAGIC) {
@@ -86,6 +86,7 @@ static struct device_driver faux_driver = {
 	.name			= "faux_driver",
 	.bus			= &faux_bus_type,
 	.probe_type		= PROBE_FORCE_SYNCHRONOUS,
+	.suppress_bind_attrs	= true,
 };
 
 static void faux_device_release(struct device *dev)
@@ -169,7 +170,7 @@ struct faux_device *faux_device_create_with_groups(const char *name,
 	 * successful is almost impossible to determine by the caller.
 	 */
 	if (!dev->driver) {
-		dev_err(dev, "probe did not succeed, tearing down the device\n");
+		dev_dbg(dev, "probe did not succeed, tearing down the device\n");
 		faux_device_destroy(faux_dev);
 		faux_dev = NULL;
 	}
@ -1248,12 +1248,6 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
|
|||
lo->lo_flags &= ~LOOP_SET_STATUS_CLEARABLE_FLAGS;
|
||||
lo->lo_flags |= (info->lo_flags & LOOP_SET_STATUS_SETTABLE_FLAGS);
|
||||
|
||||
if (size_changed) {
|
||||
loff_t new_size = get_size(lo->lo_offset, lo->lo_sizelimit,
|
||||
lo->lo_backing_file);
|
||||
loop_set_size(lo, new_size);
|
||||
}
|
||||
|
||||
/* update the direct I/O flag if lo_offset changed */
|
||||
loop_update_dio(lo);
|
||||
|
||||
|
@ -1261,6 +1255,11 @@ out_unfreeze:
|
|||
blk_mq_unfreeze_queue(lo->lo_queue, memflags);
|
||||
if (partscan)
|
||||
clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
|
||||
if (!err && size_changed) {
|
||||
loff_t new_size = get_size(lo->lo_offset, lo->lo_sizelimit,
|
||||
lo->lo_backing_file);
|
||||
loop_set_size(lo, new_size);
|
||||
}
|
||||
out_unlock:
|
||||
mutex_unlock(&lo->lo_mutex);
|
||||
if (partscan)
|
||||
|
|
|
@@ -26,9 +26,9 @@ fn find_supply_name_exact(dev: &Device, name: &str) -> Option<CString> {
 }
 
 /// Finds supply name for the CPU from DT.
-fn find_supply_names(dev: &Device, cpu: u32) -> Option<KVec<CString>> {
+fn find_supply_names(dev: &Device, cpu: cpu::CpuId) -> Option<KVec<CString>> {
     // Try "cpu0" for older DTs, fallback to "cpu".
-    let name = (cpu == 0)
+    let name = (cpu.as_u32() == 0)
         .then(|| find_supply_name_exact(dev, "cpu0"))
         .flatten()
         .or_else(|| find_supply_name_exact(dev, "cpu"))?;
@@ -1118,7 +1118,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
 		 * Catch exporters making buffers inaccessible even when
 		 * attachments preventing that exist.
 		 */
-		WARN_ON_ONCE(ret == EBUSY);
+		WARN_ON_ONCE(ret == -EBUSY);
 		if (ret)
 			return ERR_PTR(ret);
 	}
@@ -264,8 +264,7 @@ static int begin_cpu_udmabuf(struct dma_buf *buf,
 			ubuf->sg = NULL;
 		}
 	} else {
-		dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents,
-				    direction);
+		dma_sync_sgtable_for_cpu(dev, ubuf->sg, direction);
 	}
 
 	return ret;
@@ -280,7 +279,7 @@ static int end_cpu_udmabuf(struct dma_buf *buf,
 	if (!ubuf->sg)
 		return -EINVAL;
 
-	dma_sync_sg_for_device(dev, ubuf->sg->sgl, ubuf->sg->nents, direction);
+	dma_sync_sgtable_for_device(dev, ubuf->sg, direction);
 	return 0;
 }
 
@@ -109,7 +109,7 @@ static void meson_encoder_hdmi_set_vclk(struct meson_encoder_hdmi *encoder_hdmi,
 		venc_freq /= 2;
 
 	dev_dbg(priv->dev,
-		"vclk:%lluHz phy=%lluHz venc=%lluHz hdmi=%lluHz enci=%d\n",
+		"phy:%lluHz vclk=%lluHz venc=%lluHz hdmi=%lluHz enci=%d\n",
 		phy_freq, vclk_freq, venc_freq, hdmi_freq,
 		priv->venc.hdmi_use_enci);
 
@ -110,10 +110,7 @@
|
|||
#define HDMI_PLL_LOCK BIT(31)
|
||||
#define HDMI_PLL_LOCK_G12A (3 << 30)
|
||||
|
||||
#define PIXEL_FREQ_1000_1001(_freq) \
|
||||
DIV_ROUND_CLOSEST_ULL((_freq) * 1000ULL, 1001ULL)
|
||||
#define PHY_FREQ_1000_1001(_freq) \
|
||||
(PIXEL_FREQ_1000_1001(DIV_ROUND_DOWN_ULL(_freq, 10ULL)) * 10)
|
||||
#define FREQ_1000_1001(_freq) DIV_ROUND_CLOSEST_ULL((_freq) * 1000ULL, 1001ULL)
|
||||
|
||||
/* VID PLL Dividers */
|
||||
enum {
|
||||
|
@ -772,6 +769,36 @@ static void meson_hdmi_pll_generic_set(struct meson_drm *priv,
|
|||
pll_freq);
|
||||
}
|
||||
|
||||
static bool meson_vclk_freqs_are_matching_param(unsigned int idx,
|
||||
unsigned long long phy_freq,
|
||||
unsigned long long vclk_freq)
|
||||
{
|
||||
DRM_DEBUG_DRIVER("i = %d vclk_freq = %lluHz alt = %lluHz\n",
|
||||
idx, params[idx].vclk_freq,
|
||||
FREQ_1000_1001(params[idx].vclk_freq));
|
||||
DRM_DEBUG_DRIVER("i = %d phy_freq = %lluHz alt = %lluHz\n",
|
||||
idx, params[idx].phy_freq,
|
||||
FREQ_1000_1001(params[idx].phy_freq));
|
||||
|
||||
/* Match strict frequency */
|
||||
if (phy_freq == params[idx].phy_freq &&
|
||||
vclk_freq == params[idx].vclk_freq)
|
||||
return true;
|
||||
|
||||
/* Match 1000/1001 variant: vclk deviation has to be less than 1kHz
|
||||
* (drm EDID is defined in 1kHz steps, so everything smaller must be
|
||||
* rounding error) and the PHY freq deviation has to be less than
|
||||
* 10kHz (as the TMDS clock is 10 times the pixel clock, so anything
|
||||
* smaller must be rounding error as well).
|
||||
*/
|
||||
if (abs(vclk_freq - FREQ_1000_1001(params[idx].vclk_freq)) < 1000 &&
|
||||
abs(phy_freq - FREQ_1000_1001(params[idx].phy_freq)) < 10000)
|
||||
return true;
|
||||
|
||||
/* no match */
|
||||
return false;
|
||||
}
|
||||
|
||||
enum drm_mode_status
|
||||
meson_vclk_vic_supported_freq(struct meson_drm *priv,
|
||||
unsigned long long phy_freq,
|
||||
|
@ -790,19 +817,7 @@ meson_vclk_vic_supported_freq(struct meson_drm *priv,
|
|||
}
|
||||
|
||||
for (i = 0 ; params[i].pixel_freq ; ++i) {
|
||||
DRM_DEBUG_DRIVER("i = %d pixel_freq = %lluHz alt = %lluHz\n",
|
||||
i, params[i].pixel_freq,
|
||||
PIXEL_FREQ_1000_1001(params[i].pixel_freq));
|
||||
DRM_DEBUG_DRIVER("i = %d phy_freq = %lluHz alt = %lluHz\n",
|
||||
i, params[i].phy_freq,
|
||||
PHY_FREQ_1000_1001(params[i].phy_freq));
|
||||
/* Match strict frequency */
|
||||
if (phy_freq == params[i].phy_freq &&
|
||||
vclk_freq == params[i].vclk_freq)
|
||||
return MODE_OK;
|
||||
/* Match 1000/1001 variant */
|
||||
if (phy_freq == PHY_FREQ_1000_1001(params[i].phy_freq) &&
|
||||
vclk_freq == PIXEL_FREQ_1000_1001(params[i].vclk_freq))
|
||||
if (meson_vclk_freqs_are_matching_param(i, phy_freq, vclk_freq))
|
||||
return MODE_OK;
|
||||
}
|
||||
|
||||
|
@ -1075,10 +1090,8 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
|
|||
}
|
||||
|
||||
for (freq = 0 ; params[freq].pixel_freq ; ++freq) {
|
||||
if ((phy_freq == params[freq].phy_freq ||
|
||||
phy_freq == PHY_FREQ_1000_1001(params[freq].phy_freq)) &&
|
||||
(vclk_freq == params[freq].vclk_freq ||
|
||||
vclk_freq == PIXEL_FREQ_1000_1001(params[freq].vclk_freq))) {
|
||||
if (meson_vclk_freqs_are_matching_param(freq, phy_freq,
|
||||
vclk_freq)) {
|
||||
if (vclk_freq != params[freq].vclk_freq)
|
||||
vic_alternate_clock = true;
|
||||
else
|
||||
|
|
|
@@ -5,6 +5,7 @@ config DRM_ST7571_I2C
 	select DRM_GEM_SHMEM_HELPER
 	select DRM_KMS_HELPER
 	select REGMAP_I2C
+	select VIDEOMODE_HELPERS
 	help
 	  DRM driver for Sitronix ST7571 panels controlled over I2C.
 
@ -560,12 +560,6 @@ static int vc4_hdmi_connector_init(struct drm_device *dev,
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = drm_connector_hdmi_audio_init(connector, dev->dev,
|
||||
&vc4_hdmi_audio_funcs,
|
||||
8, false, -1);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
drm_connector_helper_add(connector, &vc4_hdmi_connector_helper_funcs);
|
||||
|
||||
/*
|
||||
|
@ -2291,6 +2285,12 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
|
|||
return ret;
|
||||
}
|
||||
|
||||
ret = drm_connector_hdmi_audio_init(&vc4_hdmi->connector, dev,
|
||||
&vc4_hdmi_audio_funcs, 8, false,
|
||||
-1);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
dai_link->cpus = &vc4_hdmi->audio.cpu;
|
||||
dai_link->codecs = &vc4_hdmi->audio.codec;
|
||||
dai_link->platforms = &vc4_hdmi->audio.platform;
|
||||
|
|
|
@ -941,11 +941,18 @@ static void xe_lrc_finish(struct xe_lrc *lrc)
|
|||
* store it in the PPHSWP.
|
||||
*/
|
||||
#define CONTEXT_ACTIVE 1ULL
|
||||
static void xe_lrc_setup_utilization(struct xe_lrc *lrc)
|
||||
static int xe_lrc_setup_utilization(struct xe_lrc *lrc)
|
||||
{
|
||||
u32 *cmd;
|
||||
u32 *cmd, *buf = NULL;
|
||||
|
||||
cmd = lrc->bb_per_ctx_bo->vmap.vaddr;
|
||||
if (lrc->bb_per_ctx_bo->vmap.is_iomem) {
|
||||
buf = kmalloc(lrc->bb_per_ctx_bo->size, GFP_KERNEL);
|
||||
if (!buf)
|
||||
return -ENOMEM;
|
||||
cmd = buf;
|
||||
} else {
|
||||
cmd = lrc->bb_per_ctx_bo->vmap.vaddr;
|
||||
}
|
||||
|
||||
*cmd++ = MI_STORE_REGISTER_MEM | MI_SRM_USE_GGTT | MI_SRM_ADD_CS_OFFSET;
|
||||
*cmd++ = ENGINE_ID(0).addr;
|
||||
|
@ -966,9 +973,16 @@ static void xe_lrc_setup_utilization(struct xe_lrc *lrc)
|
|||
|
||||
*cmd++ = MI_BATCH_BUFFER_END;
|
||||
|
||||
if (buf) {
|
||||
xe_map_memcpy_to(gt_to_xe(lrc->gt), &lrc->bb_per_ctx_bo->vmap, 0,
|
||||
buf, (cmd - buf) * sizeof(*cmd));
|
||||
kfree(buf);
|
||||
}
|
||||
|
||||
xe_lrc_write_ctx_reg(lrc, CTX_BB_PER_CTX_PTR,
|
||||
xe_bo_ggtt_addr(lrc->bb_per_ctx_bo) | 1);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define PVC_CTX_ASID (0x2e + 1)
|
||||
|
@ -1125,7 +1139,9 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
|
|||
map = __xe_lrc_start_seqno_map(lrc);
|
||||
xe_map_write32(lrc_to_xe(lrc), &map, lrc->fence_ctx.next_seqno - 1);
|
||||
|
||||
xe_lrc_setup_utilization(lrc);
|
||||
err = xe_lrc_setup_utilization(lrc);
|
||||
if (err)
|
||||
goto err_lrc_finish;
|
||||
|
||||
return 0;
|
||||
|
||||
|
|
|
@@ -764,7 +764,7 @@ static bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range,
 		return false;
 	}
 
-	if (range_size <= SZ_64K && !supports_4K_migration(vm->xe)) {
+	if (range_size < SZ_64K && !supports_4K_migration(vm->xe)) {
 		drm_dbg(&vm->xe->drm, "Platform doesn't support SZ_4K range migration\n");
 		return false;
 	}
@@ -423,13 +423,16 @@ static int fts_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
 		break;
 	case hwmon_pwm:
 		switch (attr) {
-		case hwmon_pwm_auto_channels_temp:
-			if (data->fan_source[channel] == FTS_FAN_SOURCE_INVALID)
+		case hwmon_pwm_auto_channels_temp: {
+			u8 fan_source = data->fan_source[channel];
+
+			if (fan_source == FTS_FAN_SOURCE_INVALID || fan_source >= BITS_PER_LONG)
 				*val = 0;
 			else
-				*val = BIT(data->fan_source[channel]);
+				*val = BIT(fan_source);
 
 			return 0;
+		}
 		default:
 			break;
 		}
@@ -1511,13 +1511,6 @@ static int ltc4282_setup(struct ltc4282_state *st, struct device *dev)
 		return ret;
 	}
 
-	if (device_property_read_bool(dev, "adi,fault-log-enable")) {
-		ret = regmap_set_bits(st->map, LTC4282_ADC_CTRL,
-				      LTC4282_FAULT_LOG_EN_MASK);
-		if (ret)
-			return ret;
-	}
-
 	if (device_property_read_bool(dev, "adi,fault-log-enable")) {
 		ret = regmap_set_bits(st->map, LTC4282_ADC_CTRL, LTC4282_FAULT_LOG_EN_MASK);
 		if (ret)
@ -459,12 +459,10 @@ static ssize_t occ_show_power_1(struct device *dev,
|
|||
return sysfs_emit(buf, "%llu\n", val);
|
||||
}
|
||||
|
||||
static u64 occ_get_powr_avg(u64 *accum, u32 *samples)
|
||||
static u64 occ_get_powr_avg(u64 accum, u32 samples)
|
||||
{
|
||||
u64 divisor = get_unaligned_be32(samples);
|
||||
|
||||
return (divisor == 0) ? 0 :
|
||||
div64_u64(get_unaligned_be64(accum) * 1000000ULL, divisor);
|
||||
return (samples == 0) ? 0 :
|
||||
mul_u64_u32_div(accum, 1000000UL, samples);
|
||||
}
|
||||
|
||||
static ssize_t occ_show_power_2(struct device *dev,
|
||||
|
@ -489,8 +487,8 @@ static ssize_t occ_show_power_2(struct device *dev,
|
|||
get_unaligned_be32(&power->sensor_id),
|
||||
power->function_id, power->apss_channel);
|
||||
case 1:
|
||||
val = occ_get_powr_avg(&power->accumulator,
|
||||
&power->update_tag);
|
||||
val = occ_get_powr_avg(get_unaligned_be64(&power->accumulator),
|
||||
get_unaligned_be32(&power->update_tag));
|
||||
break;
|
||||
case 2:
|
||||
val = (u64)get_unaligned_be32(&power->update_tag) *
|
||||
|
@ -527,8 +525,8 @@ static ssize_t occ_show_power_a0(struct device *dev,
|
|||
return sysfs_emit(buf, "%u_system\n",
|
||||
get_unaligned_be32(&power->sensor_id));
|
||||
case 1:
|
||||
val = occ_get_powr_avg(&power->system.accumulator,
|
||||
&power->system.update_tag);
|
||||
val = occ_get_powr_avg(get_unaligned_be64(&power->system.accumulator),
|
||||
get_unaligned_be32(&power->system.update_tag));
|
||||
break;
|
||||
case 2:
|
||||
val = (u64)get_unaligned_be32(&power->system.update_tag) *
|
||||
|
@ -541,8 +539,8 @@ static ssize_t occ_show_power_a0(struct device *dev,
|
|||
return sysfs_emit(buf, "%u_proc\n",
|
||||
get_unaligned_be32(&power->sensor_id));
|
||||
case 5:
|
||||
val = occ_get_powr_avg(&power->proc.accumulator,
|
||||
&power->proc.update_tag);
|
||||
val = occ_get_powr_avg(get_unaligned_be64(&power->proc.accumulator),
|
||||
get_unaligned_be32(&power->proc.update_tag));
|
||||
break;
|
||||
case 6:
|
||||
val = (u64)get_unaligned_be32(&power->proc.update_tag) *
|
||||
|
@ -555,8 +553,8 @@ static ssize_t occ_show_power_a0(struct device *dev,
|
|||
return sysfs_emit(buf, "%u_vdd\n",
|
||||
get_unaligned_be32(&power->sensor_id));
|
||||
case 9:
|
||||
val = occ_get_powr_avg(&power->vdd.accumulator,
|
||||
&power->vdd.update_tag);
|
||||
val = occ_get_powr_avg(get_unaligned_be64(&power->vdd.accumulator),
|
||||
get_unaligned_be32(&power->vdd.update_tag));
|
||||
break;
|
||||
case 10:
|
||||
val = (u64)get_unaligned_be32(&power->vdd.update_tag) *
|
||||
|
@ -569,8 +567,8 @@ static ssize_t occ_show_power_a0(struct device *dev,
|
|||
return sysfs_emit(buf, "%u_vdn\n",
|
||||
get_unaligned_be32(&power->sensor_id));
|
||||
case 13:
|
||||
val = occ_get_powr_avg(&power->vdn.accumulator,
|
||||
&power->vdn.update_tag);
|
||||
val = occ_get_powr_avg(get_unaligned_be64(&power->vdn.accumulator),
|
||||
get_unaligned_be32(&power->vdn.update_tag));
|
||||
break;
|
||||
case 14:
|
||||
val = (u64)get_unaligned_be32(&power->vdn.update_tag) *
|
||||
|
@ -747,28 +745,29 @@ static ssize_t occ_show_extended(struct device *dev,
|
|||
}
|
||||
|
||||
/*
|
||||
* Some helper macros to make it easier to define an occ_attribute. Since these
|
||||
* are dynamically allocated, we shouldn't use the existing kernel macros which
|
||||
* A helper to make it easier to define an occ_attribute. Since these
|
||||
* are dynamically allocated, we cannot use the existing kernel macros which
|
||||
* stringify the name argument.
|
||||
*/
|
||||
#define ATTR_OCC(_name, _mode, _show, _store) { \
|
||||
.attr = { \
|
||||
.name = _name, \
|
||||
.mode = VERIFY_OCTAL_PERMISSIONS(_mode), \
|
||||
}, \
|
||||
.show = _show, \
|
||||
.store = _store, \
|
||||
}
|
||||
static void occ_init_attribute(struct occ_attribute *attr, int mode,
|
||||
ssize_t (*show)(struct device *dev, struct device_attribute *attr, char *buf),
|
||||
ssize_t (*store)(struct device *dev, struct device_attribute *attr,
|
||||
const char *buf, size_t count),
|
||||
int nr, int index, const char *fmt, ...)
|
||||
{
|
||||
va_list args;
|
||||
|
||||
#define SENSOR_ATTR_OCC(_name, _mode, _show, _store, _nr, _index) { \
|
||||
.dev_attr = ATTR_OCC(_name, _mode, _show, _store), \
|
||||
.index = _index, \
|
||||
.nr = _nr, \
|
||||
}
|
||||
va_start(args, fmt);
|
||||
vsnprintf(attr->name, sizeof(attr->name), fmt, args);
|
||||
va_end(args);
|
||||
|
||||
#define OCC_INIT_ATTR(_name, _mode, _show, _store, _nr, _index) \
|
||||
((struct sensor_device_attribute_2) \
|
||||
SENSOR_ATTR_OCC(_name, _mode, _show, _store, _nr, _index))
|
||||
attr->sensor.dev_attr.attr.name = attr->name;
|
||||
attr->sensor.dev_attr.attr.mode = mode;
|
||||
attr->sensor.dev_attr.show = show;
|
||||
attr->sensor.dev_attr.store = store;
|
||||
attr->sensor.index = index;
|
||||
attr->sensor.nr = nr;
|
||||
}
|
||||
|
||||
/*
|
||||
* Allocate and instatiate sensor_device_attribute_2s. It's most efficient to
|
||||
|
@ -855,14 +854,15 @@ static int occ_setup_sensor_attrs(struct occ *occ)
|
|||
sensors->extended.num_sensors = 0;
|
||||
}
|
||||
|
||||
occ->attrs = devm_kzalloc(dev, sizeof(*occ->attrs) * num_attrs,
|
||||
occ->attrs = devm_kcalloc(dev, num_attrs, sizeof(*occ->attrs),
|
||||
GFP_KERNEL);
|
||||
if (!occ->attrs)
|
||||
return -ENOMEM;
|
||||
|
||||
/* null-terminated list */
|
||||
occ->group.attrs = devm_kzalloc(dev, sizeof(*occ->group.attrs) *
|
||||
num_attrs + 1, GFP_KERNEL);
|
||||
occ->group.attrs = devm_kcalloc(dev, num_attrs + 1,
|
||||
sizeof(*occ->group.attrs),
|
||||
GFP_KERNEL);
|
||||
if (!occ->group.attrs)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -872,43 +872,33 @@ static int occ_setup_sensor_attrs(struct occ *occ)
|
|||
s = i + 1;
|
||||
temp = ((struct temp_sensor_2 *)sensors->temp.data) + i;
|
||||
|
||||
snprintf(attr->name, sizeof(attr->name), "temp%d_label", s);
|
||||
attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_temp, NULL,
|
||||
0, i);
|
||||
occ_init_attribute(attr, 0444, show_temp, NULL,
|
||||
0, i, "temp%d_label", s);
|
||||
attr++;
|
||||
|
||||
if (sensors->temp.version == 2 &&
|
||||
temp->fru_type == OCC_FRU_TYPE_VRM) {
|
||||
snprintf(attr->name, sizeof(attr->name),
|
||||
"temp%d_alarm", s);
|
||||
occ_init_attribute(attr, 0444, show_temp, NULL,
|
||||
1, i, "temp%d_alarm", s);
|
||||
} else {
|
||||
snprintf(attr->name, sizeof(attr->name),
|
||||
"temp%d_input", s);
|
||||
occ_init_attribute(attr, 0444, show_temp, NULL,
|
||||
1, i, "temp%d_input", s);
|
||||
}
|
||||
|
||||
attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_temp, NULL,
|
||||
1, i);
|
||||
attr++;
|
||||
|
||||
if (sensors->temp.version > 1) {
|
||||
snprintf(attr->name, sizeof(attr->name),
|
||||
"temp%d_fru_type", s);
|
||||
attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
|
||||
show_temp, NULL, 2, i);
|
||||
occ_init_attribute(attr, 0444, show_temp, NULL,
|
||||
2, i, "temp%d_fru_type", s);
|
||||
attr++;
|
||||
|
||||
snprintf(attr->name, sizeof(attr->name),
|
||||
"temp%d_fault", s);
|
||||
attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
|
||||
show_temp, NULL, 3, i);
|
||||
occ_init_attribute(attr, 0444, show_temp, NULL,
|
||||
3, i, "temp%d_fault", s);
|
||||
attr++;
|
||||
|
||||
if (sensors->temp.version == 0x10) {
|
||||
snprintf(attr->name, sizeof(attr->name),
|
||||
"temp%d_max", s);
|
||||
attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
|
||||
show_temp, NULL,
|
||||
4, i);
|
||||
occ_init_attribute(attr, 0444, show_temp, NULL,
|
||||
4, i, "temp%d_max", s);
|
||||
attr++;
|
||||
}
|
||||
}
|
||||
|
@ -917,14 +907,12 @@ static int occ_setup_sensor_attrs(struct occ *occ)
|
|||
for (i = 0; i < sensors->freq.num_sensors; ++i) {
|
||||
s = i + 1;
|
||||
|
||||
snprintf(attr->name, sizeof(attr->name), "freq%d_label", s);
|
||||
attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_freq, NULL,
|
||||
0, i);
|
||||
occ_init_attribute(attr, 0444, show_freq, NULL,
|
||||
0, i, "freq%d_label", s);
|
||||
attr++;
|
||||
|
||||
snprintf(attr->name, sizeof(attr->name), "freq%d_input", s);
|
||||
attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_freq, NULL,
|
||||
1, i);
|
||||
occ_init_attribute(attr, 0444, show_freq, NULL,
|
||||
1, i, "freq%d_input", s);
|
||||
attr++;
|
||||
}
|
||||
|
||||
|
@ -940,32 +928,24 @@ static int occ_setup_sensor_attrs(struct occ *occ)
|
|||
s = (i * 4) + 1;
|
||||
|
||||
for (j = 0; j < 4; ++j) {
|
||||
snprintf(attr->name, sizeof(attr->name),
|
||||
"power%d_label", s);
|
||||
attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
|
||||
show_power, NULL,
|
||||
nr++, i);
|
||||
occ_init_attribute(attr, 0444, show_power,
|
||||
NULL, nr++, i,
|
||||
"power%d_label", s);
|
||||
attr++;
|
||||
|
||||
snprintf(attr->name, sizeof(attr->name),
|
||||
"power%d_average", s);
|
||||
attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
|
||||
show_power, NULL,
|
||||
nr++, i);
|
||||
occ_init_attribute(attr, 0444, show_power,
|
+				   NULL, nr++, i,
+				   "power%d_average", s);
 		attr++;

-		snprintf(attr->name, sizeof(attr->name),
-			 "power%d_average_interval", s);
-		attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
-					     show_power, NULL,
-					     nr++, i);
+		occ_init_attribute(attr, 0444, show_power,
+				   NULL, nr++, i,
+				   "power%d_average_interval", s);
 		attr++;

-		snprintf(attr->name, sizeof(attr->name),
-			 "power%d_input", s);
-		attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
-					     show_power, NULL,
-					     nr++, i);
+		occ_init_attribute(attr, 0444, show_power,
+				   NULL, nr++, i,
+				   "power%d_input", s);
 		attr++;

 		s++;

@@ -977,28 +957,20 @@ static int occ_setup_sensor_attrs(struct occ *occ)
 	for (i = 0; i < sensors->power.num_sensors; ++i) {
 		s = i + 1;

-		snprintf(attr->name, sizeof(attr->name),
-			 "power%d_label", s);
-		attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
-					     show_power, NULL, 0, i);
+		occ_init_attribute(attr, 0444, show_power, NULL,
+				   0, i, "power%d_label", s);
 		attr++;

-		snprintf(attr->name, sizeof(attr->name),
-			 "power%d_average", s);
-		attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
-					     show_power, NULL, 1, i);
+		occ_init_attribute(attr, 0444, show_power, NULL,
+				   1, i, "power%d_average", s);
 		attr++;

-		snprintf(attr->name, sizeof(attr->name),
-			 "power%d_average_interval", s);
-		attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
-					     show_power, NULL, 2, i);
+		occ_init_attribute(attr, 0444, show_power, NULL,
+				   2, i, "power%d_average_interval", s);
 		attr++;

-		snprintf(attr->name, sizeof(attr->name),
-			 "power%d_input", s);
-		attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
-					     show_power, NULL, 3, i);
+		occ_init_attribute(attr, 0444, show_power, NULL,
+				   3, i, "power%d_input", s);
 		attr++;
 	}

@@ -1006,56 +978,43 @@ static int occ_setup_sensor_attrs(struct occ *occ)
 	}

 	if (sensors->caps.num_sensors >= 1) {
-		snprintf(attr->name, sizeof(attr->name), "power%d_label", s);
-		attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL,
-					     0, 0);
+		occ_init_attribute(attr, 0444, show_caps, NULL,
+				   0, 0, "power%d_label", s);
 		attr++;

-		snprintf(attr->name, sizeof(attr->name), "power%d_cap", s);
-		attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL,
-					     1, 0);
+		occ_init_attribute(attr, 0444, show_caps, NULL,
+				   1, 0, "power%d_cap", s);
 		attr++;

-		snprintf(attr->name, sizeof(attr->name), "power%d_input", s);
-		attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL,
-					     2, 0);
+		occ_init_attribute(attr, 0444, show_caps, NULL,
+				   2, 0, "power%d_input", s);
 		attr++;

-		snprintf(attr->name, sizeof(attr->name),
-			 "power%d_cap_not_redundant", s);
-		attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL,
-					     3, 0);
+		occ_init_attribute(attr, 0444, show_caps, NULL,
+				   3, 0, "power%d_cap_not_redundant", s);
 		attr++;

-		snprintf(attr->name, sizeof(attr->name), "power%d_cap_max", s);
-		attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL,
-					     4, 0);
+		occ_init_attribute(attr, 0444, show_caps, NULL,
+				   4, 0, "power%d_cap_max", s);
 		attr++;

-		snprintf(attr->name, sizeof(attr->name), "power%d_cap_min", s);
-		attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL,
-					     5, 0);
+		occ_init_attribute(attr, 0444, show_caps, NULL,
+				   5, 0, "power%d_cap_min", s);
 		attr++;

-		snprintf(attr->name, sizeof(attr->name), "power%d_cap_user",
-			 s);
-		attr->sensor = OCC_INIT_ATTR(attr->name, 0644, show_caps,
-					     occ_store_caps_user, 6, 0);
+		occ_init_attribute(attr, 0644, show_caps, occ_store_caps_user,
+				   6, 0, "power%d_cap_user", s);
 		attr++;

 		if (sensors->caps.version > 1) {
-			snprintf(attr->name, sizeof(attr->name),
-				 "power%d_cap_user_source", s);
-			attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
-						     show_caps, NULL, 7, 0);
+			occ_init_attribute(attr, 0444, show_caps, NULL,
+					   7, 0, "power%d_cap_user_source", s);
			attr++;

 			if (sensors->caps.version > 2) {
-				snprintf(attr->name, sizeof(attr->name),
-					 "power%d_cap_min_soft", s);
-				attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
-							     show_caps, NULL,
-							     8, 0);
+				occ_init_attribute(attr, 0444, show_caps, NULL,
+						   8, 0,
+						   "power%d_cap_min_soft", s);
 				attr++;
 			}
 		}

@@ -1064,19 +1023,16 @@ static int occ_setup_sensor_attrs(struct occ *occ)
 	for (i = 0; i < sensors->extended.num_sensors; ++i) {
 		s = i + 1;

-		snprintf(attr->name, sizeof(attr->name), "extn%d_label", s);
-		attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
-					     occ_show_extended, NULL, 0, i);
+		occ_init_attribute(attr, 0444, occ_show_extended, NULL,
+				   0, i, "extn%d_label", s);
 		attr++;

-		snprintf(attr->name, sizeof(attr->name), "extn%d_flags", s);
-		attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
-					     occ_show_extended, NULL, 1, i);
+		occ_init_attribute(attr, 0444, occ_show_extended, NULL,
+				   1, i, "extn%d_flags", s);
 		attr++;

-		snprintf(attr->name, sizeof(attr->name), "extn%d_input", s);
-		attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
-					     occ_show_extended, NULL, 2, i);
+		occ_init_attribute(attr, 0444, occ_show_extended, NULL,
+				   2, i, "extn%d_input", s);
 		attr++;
 	}

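The hwmon/occ hunks above collapse each snprintf() plus OCC_INIT_ATTR() pair into a single occ_init_attribute() call. Judging only from the call sites in this diff, the helper takes the attribute, mode, show/store callbacks, the two sensor indices and a printf-style name. The following is a hedged sketch of what such a helper could look like, assuming the surrounding occ driver context (struct occ_attribute, OCC_INIT_ATTR); it is not the commit's actual definition:

static void occ_init_attribute(struct occ_attribute *attr, umode_t mode,
			       ssize_t (*show)(struct device *dev, struct device_attribute *attr, char *buf),
			       ssize_t (*store)(struct device *dev, struct device_attribute *attr, const char *buf, size_t count),
			       int nr, int index, const char *fmt, ...)
{
	va_list args;

	/* Build the sysfs name in place, then fill in the sensor attribute. */
	va_start(args, fmt);
	vsnprintf(attr->name, sizeof(attr->name), fmt, args);
	va_end(args);

	attr->sensor = OCC_INIT_ATTR(attr->name, mode, show, store, nr, index);
}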
@@ -152,8 +152,8 @@ static __always_inline int __intel_idle(struct cpuidle_device *dev,
 					   int index, bool irqoff)
 {
 	struct cpuidle_state *state = &drv->states[index];
-	unsigned long eax = flg2MWAIT(state->flags);
-	unsigned long ecx = 1*irqoff; /* break on interrupt flag */
+	unsigned int eax = flg2MWAIT(state->flags);
+	unsigned int ecx = 1*irqoff; /* break on interrupt flag */

 	mwait_idle_with_hints(eax, ecx);

@@ -226,9 +226,9 @@ static __cpuidle int intel_idle_xstate(struct cpuidle_device *dev,
 static __cpuidle int intel_idle_s2idle(struct cpuidle_device *dev,
 				       struct cpuidle_driver *drv, int index)
 {
-	unsigned long ecx = 1; /* break on interrupt flag */
 	struct cpuidle_state *state = &drv->states[index];
-	unsigned long eax = flg2MWAIT(state->flags);
+	unsigned int eax = flg2MWAIT(state->flags);
+	unsigned int ecx = 1; /* break on interrupt flag */

 	if (state->flags & CPUIDLE_FLAG_INIT_XSTATE)
 		fpu_idle_fpregs();

@@ -2507,6 +2507,8 @@ static int __init intel_idle_init(void)
 		pr_debug("Local APIC timer is reliable in %s\n",
 			 boot_cpu_has(X86_FEATURE_ARAT) ? "all C-states" : "C1");

+	arch_cpu_rescan_dead_smt_siblings();
+
 	return 0;

 hp_setup_fail:

@@ -2518,7 +2520,7 @@ init_driver_fail:
 	return retval;

 }
-device_initcall(intel_idle_init);
+subsys_initcall_sync(intel_idle_init);

 /*
  * We are not really modular, but we used to support that. Meaning we also

@@ -559,11 +559,11 @@ static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
 {
 	unsigned int pd_index = iova_pd_index(iova);
 	struct tegra_smmu *smmu = as->smmu;
-	struct tegra_pd *pd = as->pd;
+	u32 *pd = &as->pd->val[pd_index];
 	unsigned long offset = pd_index * sizeof(*pd);

 	/* Set the page directory entry first */
-	pd->val[pd_index] = value;
+	*pd = value;

 	/* The flush the page directory entry from caches */
 	dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset,

@@ -411,10 +411,11 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
 	priv = cdev_to_priv(mcan_class);

 	priv->power = devm_regulator_get_optional(&spi->dev, "vsup");
-	if (PTR_ERR(priv->power) == -EPROBE_DEFER) {
-		ret = -EPROBE_DEFER;
-		goto out_m_can_class_free_dev;
-	} else {
+	if (IS_ERR(priv->power)) {
+		if (PTR_ERR(priv->power) == -EPROBE_DEFER) {
+			ret = -EPROBE_DEFER;
+			goto out_m_can_class_free_dev;
+		}
 		priv->power = NULL;
 	}

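The tcan4x5x hunk restores the usual handling for an optional supply: only -EPROBE_DEFER aborts the probe, any other error simply means the regulator is absent. A minimal sketch of that pattern, assuming a driver with a struct device and a struct regulator pointer (the wrapper name is illustrative):

#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int example_get_optional_supply(struct device *dev,
				       struct regulator **power)
{
	*power = devm_regulator_get_optional(dev, "vsup");
	if (IS_ERR(*power)) {
		/* Supply described but not ready yet: retry the probe later. */
		if (PTR_ERR(*power) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		/* Any other error means "no supply": treat it as absent. */
		*power = NULL;
	}

	return 0;
}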
@@ -1065,23 +1065,18 @@ static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q)

 static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma)
 {
+	int size, index, num_desc = HW_DSCP_NUM;
 	struct airoha_eth *eth = qdma->eth;
 	int id = qdma - &eth->qdma[0];
+	u32 status, buf_size;
 	dma_addr_t dma_addr;
 	const char *name;
-	int size, index;
-	u32 status;

-	size = HW_DSCP_NUM * sizeof(struct airoha_qdma_fwd_desc);
-	if (!dmam_alloc_coherent(eth->dev, size, &dma_addr, GFP_KERNEL))
-		return -ENOMEM;
-
-	airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);
-
 	name = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d-buf", id);
 	if (!name)
 		return -ENOMEM;

+	buf_size = id ? AIROHA_MAX_PACKET_SIZE / 2 : AIROHA_MAX_PACKET_SIZE;
 	index = of_property_match_string(eth->dev->of_node,
 					 "memory-region-names", name);
 	if (index >= 0) {

@@ -1099,8 +1094,12 @@ static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma)
 		rmem = of_reserved_mem_lookup(np);
 		of_node_put(np);
 		dma_addr = rmem->base;
+		/* Compute the number of hw descriptors according to the
+		 * reserved memory size and the payload buffer size
+		 */
+		num_desc = div_u64(rmem->size, buf_size);
 	} else {
-		size = AIROHA_MAX_PACKET_SIZE * HW_DSCP_NUM;
+		size = buf_size * num_desc;
 		if (!dmam_alloc_coherent(eth->dev, size, &dma_addr,
 					 GFP_KERNEL))
 			return -ENOMEM;

@@ -1108,15 +1107,21 @@ static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma)

 	airoha_qdma_wr(qdma, REG_FWD_BUF_BASE, dma_addr);

+	size = num_desc * sizeof(struct airoha_qdma_fwd_desc);
+	if (!dmam_alloc_coherent(eth->dev, size, &dma_addr, GFP_KERNEL))
+		return -ENOMEM;
+
+	airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);
 	/* QDMA0: 2KB. QDMA1: 1KB */
 	airoha_qdma_rmw(qdma, REG_HW_FWD_DSCP_CFG,
 			HW_FWD_DSCP_PAYLOAD_SIZE_MASK,
-			FIELD_PREP(HW_FWD_DSCP_PAYLOAD_SIZE_MASK, 0));
+			FIELD_PREP(HW_FWD_DSCP_PAYLOAD_SIZE_MASK, !!id));
 	airoha_qdma_rmw(qdma, REG_FWD_DSCP_LOW_THR, FWD_DSCP_LOW_THR_MASK,
 			FIELD_PREP(FWD_DSCP_LOW_THR_MASK, 128));
 	airoha_qdma_rmw(qdma, REG_LMGR_INIT_CFG,
 			LMGR_INIT_START | LMGR_SRAM_MODE_MASK |
 			HW_FWD_DESC_NUM_MASK,
-			FIELD_PREP(HW_FWD_DESC_NUM_MASK, HW_DSCP_NUM) |
+			FIELD_PREP(HW_FWD_DESC_NUM_MASK, num_desc) |
 			LMGR_INIT_START | LMGR_SRAM_MODE_MASK);

 	return read_poll_timeout(airoha_qdma_rr, status,

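These qdma hunks stop hard-coding HW_DSCP_NUM forwarding descriptors and instead derive the count from the reserved-memory region divided by the per-queue payload buffer size (AIROHA_MAX_PACKET_SIZE for QDMA0, half of it for QDMA1). A tiny arithmetic sketch of that sizing, with illustrative names:

#include <linux/math64.h>

/* Illustrative only: how many forwarding descriptors fit in a reserved region. */
static u32 example_hfwd_num_desc(u64 rmem_size, u32 buf_size)
{
	/* e.g. a 4 MiB region with 2 KiB buffers yields 2048 descriptors */
	return div_u64(rmem_size, buf_size);
}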
@@ -819,8 +819,10 @@ airoha_ppe_foe_flow_l2_entry_update(struct airoha_ppe *ppe,
 		int idle;

 		hwe = airoha_ppe_foe_get_entry(ppe, iter->hash);
-		ib1 = READ_ONCE(hwe->ib1);
+		if (!hwe)
+			continue;

+		ib1 = READ_ONCE(hwe->ib1);
 		state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, ib1);
 		if (state != AIROHA_FOE_STATE_BIND) {
 			iter->hash = 0xffff;

@@ -10780,6 +10780,72 @@ void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
 	bp->num_rss_ctx--;
 }

+static bool bnxt_vnic_has_rx_ring(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+				  int rxr_id)
+{
+	u16 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
+	int i, vnic_rx;
+
+	/* Ntuple VNIC always has all the rx rings. Any change of ring id
+	 * must be updated because a future filter may use it.
+	 */
+	if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG)
+		return true;
+
+	for (i = 0; i < tbl_size; i++) {
+		if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG)
+			vnic_rx = ethtool_rxfh_context_indir(vnic->rss_ctx)[i];
+		else
+			vnic_rx = bp->rss_indir_tbl[i];
+
+		if (rxr_id == vnic_rx)
+			return true;
+	}
+
+	return false;
+}
+
+static int bnxt_set_vnic_mru_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+				u16 mru, int rxr_id)
+{
+	int rc;
+
+	if (!bnxt_vnic_has_rx_ring(bp, vnic, rxr_id))
+		return 0;
+
+	if (mru) {
+		rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
+		if (rc) {
+			netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
+				   vnic->vnic_id, rc);
+			return rc;
+		}
+	}
+	vnic->mru = mru;
+	bnxt_hwrm_vnic_update(bp, vnic,
+			      VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
+
+	return 0;
+}
+
+static int bnxt_set_rss_ctx_vnic_mru(struct bnxt *bp, u16 mru, int rxr_id)
+{
+	struct ethtool_rxfh_context *ctx;
+	unsigned long context;
+	int rc;
+
+	xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
+		struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
+		struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
+
+		rc = bnxt_set_vnic_mru_p5(bp, vnic, mru, rxr_id);
+		if (rc)
+			return rc;
+	}
+
+	return 0;
+}
+
 static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp)
 {
 	bool set_tpa = !!(bp->flags & BNXT_FLAG_TPA);

@@ -15907,6 +15973,7 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
 	struct bnxt_vnic_info *vnic;
 	struct bnxt_napi *bnapi;
 	int i, rc;
+	u16 mru;

 	rxr = &bp->rx_ring[idx];
 	clone = qmem;

@@ -15957,21 +16024,15 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
 	napi_enable_locked(&bnapi->napi);
 	bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);

+	mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
 	for (i = 0; i < bp->nr_vnics; i++) {
 		vnic = &bp->vnic_info[i];

-		rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
-		if (rc) {
-			netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
-				   vnic->vnic_id, rc);
+		rc = bnxt_set_vnic_mru_p5(bp, vnic, mru, idx);
+		if (rc)
 			return rc;
-		}
-		vnic->mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
-		bnxt_hwrm_vnic_update(bp, vnic,
-				      VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
 	}

-	return 0;
+	return bnxt_set_rss_ctx_vnic_mru(bp, mru, idx);

 err_reset:
 	netdev_err(bp->dev, "Unexpected HWRM error during queue start rc: %d\n",

@@ -15993,10 +16054,10 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)

 	for (i = 0; i < bp->nr_vnics; i++) {
 		vnic = &bp->vnic_info[i];
-		vnic->mru = 0;
-		bnxt_hwrm_vnic_update(bp, vnic,
-				      VNIC_UPDATE_REQ_ENABLES_MRU_VALID);

+		bnxt_set_vnic_mru_p5(bp, vnic, 0, idx);
 	}
+	bnxt_set_rss_ctx_vnic_mru(bp, 0, idx);
 	/* Make sure NAPI sees that the VNIC is disabled */
 	synchronize_net();
 	rxr = &bp->rx_ring[idx];

@@ -231,10 +231,9 @@ void bnxt_ulp_stop(struct bnxt *bp)
 		return;

 	mutex_lock(&edev->en_dev_lock);
-	if (!bnxt_ulp_registered(edev)) {
-		mutex_unlock(&edev->en_dev_lock);
-		return;
-	}
+	if (!bnxt_ulp_registered(edev) ||
+	    (edev->flags & BNXT_EN_FLAG_ULP_STOPPED))
+		goto ulp_stop_exit;

 	edev->flags |= BNXT_EN_FLAG_ULP_STOPPED;
 	if (aux_priv) {

@@ -250,6 +249,7 @@ void bnxt_ulp_stop(struct bnxt *bp)
 			adrv->suspend(adev, pm);
 		}
 	}
+ulp_stop_exit:
 	mutex_unlock(&edev->en_dev_lock);
 }

@@ -258,19 +258,13 @@ void bnxt_ulp_start(struct bnxt *bp, int err)
 	struct bnxt_aux_priv *aux_priv = bp->aux_priv;
 	struct bnxt_en_dev *edev = bp->edev;

-	if (!edev)
-		return;
-
-	edev->flags &= ~BNXT_EN_FLAG_ULP_STOPPED;
-
-	if (err)
+	if (!edev || err)
 		return;

 	mutex_lock(&edev->en_dev_lock);
-	if (!bnxt_ulp_registered(edev)) {
-		mutex_unlock(&edev->en_dev_lock);
-		return;
-	}
+	if (!bnxt_ulp_registered(edev) ||
+	    !(edev->flags & BNXT_EN_FLAG_ULP_STOPPED))
+		goto ulp_start_exit;

 	if (edev->ulp_tbl->msix_requested)
 		bnxt_fill_msix_vecs(bp, edev->msix_entries);

@@ -287,6 +281,8 @@ void bnxt_ulp_start(struct bnxt *bp, int err)
 			adrv->resume(adev);
 		}
 	}
+ulp_start_exit:
+	edev->flags &= ~BNXT_EN_FLAG_ULP_STOPPED;
 	mutex_unlock(&edev->en_dev_lock);
 }

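The bnxt_ulp hunks make stop/start idempotent: both paths now take the lock first, key on BNXT_EN_FLAG_ULP_STOPPED, and funnel every exit through a single labelled unlock. A generic sketch of that shape under invented names (not the driver's own structures):

#include <linux/mutex.h>

struct example_ulp {
	struct mutex lock;
	bool registered;
	bool stopped;
};

/* Stopping twice is a no-op; the flag is only ever set under the lock. */
static void example_ulp_stop(struct example_ulp *ulp)
{
	mutex_lock(&ulp->lock);
	if (!ulp->registered || ulp->stopped)
		goto out;

	ulp->stopped = true;
	/* ... quiesce the auxiliary device here ... */
out:
	mutex_unlock(&ulp->lock);
}

/* Start only balances a prior stop, and always clears the flag on exit. */
static void example_ulp_start(struct example_ulp *ulp)
{
	mutex_lock(&ulp->lock);
	if (!ulp->registered || !ulp->stopped)
		goto out;

	/* ... resume the auxiliary device here ... */
out:
	ulp->stopped = false;
	mutex_unlock(&ulp->lock);
}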
@@ -31,6 +31,7 @@ config FTGMAC100
 	depends on ARM || COMPILE_TEST
+	depends on !64BIT || BROKEN
 	select PHYLIB
 	select FIXED_PHY
 	select MDIO_ASPEED if MACH_ASPEED_G6
 	select CRC32
 	help

@@ -3534,9 +3534,6 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
 	case e1000_pch_cnp:
 	case e1000_pch_tgp:
 	case e1000_pch_adp:
-	case e1000_pch_mtp:
-	case e1000_pch_lnp:
-	case e1000_pch_ptp:
 	case e1000_pch_nvp:
 		if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
 			/* Stable 24MHz frequency */

@@ -3552,6 +3549,17 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
 			adapter->cc.shift = shift;
 		}
 		break;
+	case e1000_pch_mtp:
+	case e1000_pch_lnp:
+	case e1000_pch_ptp:
+		/* System firmware can misreport this value, so set it to a
+		 * stable 38400KHz frequency.
+		 */
+		incperiod = INCPERIOD_38400KHZ;
+		incvalue = INCVALUE_38400KHZ;
+		shift = INCVALUE_SHIFT_38400KHZ;
+		adapter->cc.shift = shift;
+		break;
 	case e1000_82574:
 	case e1000_82583:
 		/* Stable 25MHz frequency */

@@ -295,15 +295,17 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
 	case e1000_pch_cnp:
 	case e1000_pch_tgp:
 	case e1000_pch_adp:
-	case e1000_pch_mtp:
-	case e1000_pch_lnp:
-	case e1000_pch_ptp:
 	case e1000_pch_nvp:
 		if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)
 			adapter->ptp_clock_info.max_adj = MAX_PPB_24MHZ;
 		else
 			adapter->ptp_clock_info.max_adj = MAX_PPB_38400KHZ;
 		break;
+	case e1000_pch_mtp:
+	case e1000_pch_lnp:
+	case e1000_pch_ptp:
+		adapter->ptp_clock_info.max_adj = MAX_PPB_38400KHZ;
+		break;
 	case e1000_82574:
 	case e1000_82583:
 		adapter->ptp_clock_info.max_adj = MAX_PPB_25MHZ;

@@ -377,6 +377,50 @@ ice_arfs_is_perfect_flow_set(struct ice_hw *hw, __be16 l3_proto, u8 l4_proto)
 	return false;
 }

+/**
+ * ice_arfs_cmp - Check if aRFS filter matches this flow.
+ * @fltr_info: filter info of the saved ARFS entry.
+ * @fk: flow dissector keys.
+ * @n_proto: One of htons(ETH_P_IP) or htons(ETH_P_IPV6).
+ * @ip_proto: One of IPPROTO_TCP or IPPROTO_UDP.
+ *
+ * Since this function assumes limited values for n_proto and ip_proto, it
+ * is meant to be called only from ice_rx_flow_steer().
+ *
+ * Return:
+ * * true - fltr_info refers to the same flow as fk.
+ * * false - fltr_info and fk refer to different flows.
+ */
+static bool
+ice_arfs_cmp(const struct ice_fdir_fltr *fltr_info, const struct flow_keys *fk,
+	     __be16 n_proto, u8 ip_proto)
+{
+	/* Determine if the filter is for IPv4 or IPv6 based on flow_type,
+	 * which is one of ICE_FLTR_PTYPE_NONF_IPV{4,6}_{TCP,UDP}.
+	 */
+	bool is_v4 = fltr_info->flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
+		     fltr_info->flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP;
+
+	/* Following checks are arranged in the quickest and most discriminative
+	 * fields first for early failure.
+	 */
+	if (is_v4)
+		return n_proto == htons(ETH_P_IP) &&
+			fltr_info->ip.v4.src_port == fk->ports.src &&
+			fltr_info->ip.v4.dst_port == fk->ports.dst &&
+			fltr_info->ip.v4.src_ip == fk->addrs.v4addrs.src &&
+			fltr_info->ip.v4.dst_ip == fk->addrs.v4addrs.dst &&
+			fltr_info->ip.v4.proto == ip_proto;
+
+	return fltr_info->ip.v6.src_port == fk->ports.src &&
+		fltr_info->ip.v6.dst_port == fk->ports.dst &&
+		fltr_info->ip.v6.proto == ip_proto &&
+		!memcmp(&fltr_info->ip.v6.src_ip, &fk->addrs.v6addrs.src,
+			sizeof(struct in6_addr)) &&
+		!memcmp(&fltr_info->ip.v6.dst_ip, &fk->addrs.v6addrs.dst,
+			sizeof(struct in6_addr));
+}
+
 /**
  * ice_rx_flow_steer - steer the Rx flow to where application is being run
  * @netdev: ptr to the netdev being adjusted

@@ -448,6 +492,10 @@ ice_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb,
 			continue;

 		fltr_info = &arfs_entry->fltr_info;
+
+		if (!ice_arfs_cmp(fltr_info, &fk, n_proto, ip_proto))
+			continue;
+
 		ret = fltr_info->fltr_id;

 		if (fltr_info->q_index == rxq_idx ||

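ice_arfs_cmp() above compares a stored flow-director filter against dissected flow keys. A hedged sketch of how a caller could obtain those keys from an skb and drive the comparison; the wrapper name is invented, only skb_flow_dissect_flow_keys() and the fields used above are taken as given:

#include <linux/in.h>
#include <linux/skbuff.h>
#include <net/flow_dissector.h>

static bool example_skb_matches_filter(const struct sk_buff *skb,
				       const struct ice_fdir_fltr *fltr_info)
{
	struct flow_keys fk;

	if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
		return false;

	/* Only TCP/UDP flows are comparable with the filter layout above. */
	if (fk.basic.ip_proto != IPPROTO_TCP && fk.basic.ip_proto != IPPROTO_UDP)
		return false;

	return ice_arfs_cmp(fltr_info, &fk, fk.basic.n_proto, fk.basic.ip_proto);
}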
@@ -508,10 +508,14 @@ err_create_repr:
  */
 int ice_eswitch_attach_vf(struct ice_pf *pf, struct ice_vf *vf)
 {
-	struct ice_repr *repr = ice_repr_create_vf(vf);
 	struct devlink *devlink = priv_to_devlink(pf);
+	struct ice_repr *repr;
 	int err;

+	if (!ice_is_eswitch_mode_switchdev(pf))
+		return 0;
+
+	repr = ice_repr_create_vf(vf);
 	if (IS_ERR(repr))
 		return PTR_ERR(repr);

|
|||
req->chan_cnt = IEEE_8021QAZ_MAX_TCS;
|
||||
req->bpid_per_chan = 1;
|
||||
} else {
|
||||
req->chan_cnt = 1;
|
||||
req->chan_cnt = pfvf->hw.rx_chan_cnt;
|
||||
req->bpid_per_chan = 0;
|
||||
}
|
||||
|
||||
|
@ -1848,7 +1848,7 @@ int otx2_nix_cpt_config_bp(struct otx2_nic *pfvf, bool enable)
|
|||
req->chan_cnt = IEEE_8021QAZ_MAX_TCS;
|
||||
req->bpid_per_chan = 1;
|
||||
} else {
|
||||
req->chan_cnt = 1;
|
||||
req->chan_cnt = pfvf->hw.rx_chan_cnt;
|
||||
req->bpid_per_chan = 0;
|
||||
}
|
||||
|
||||
|
|
|
@@ -447,8 +447,10 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
 	priv->llu_plu_irq = platform_get_irq(pdev, MLXBF_GIGE_LLU_PLU_INTR_IDX);

 	phy_irq = acpi_dev_gpio_irq_get_by(ACPI_COMPANION(&pdev->dev), "phy", 0);
-	if (phy_irq < 0) {
-		dev_err(&pdev->dev, "Error getting PHY irq. Use polling instead");
+	if (phy_irq == -EPROBE_DEFER) {
+		err = -EPROBE_DEFER;
+		goto out;
+	} else if (phy_irq < 0) {
 		phy_irq = PHY_POLL;
 	}

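The mlxbf_gige hunk distinguishes a not-yet-ready GPIO provider from a genuinely missing PHY interrupt: the former defers the probe, the latter falls back to polling. A small sketch of that decision, reusing the two calls from the hunk inside an otherwise invented wrapper:

#include <linux/acpi.h>
#include <linux/phy.h>

static int example_get_phy_irq(struct device *dev)
{
	int phy_irq;

	phy_irq = acpi_dev_gpio_irq_get_by(ACPI_COMPANION(dev), "phy", 0);
	if (phy_irq == -EPROBE_DEFER)
		return -EPROBE_DEFER;	/* GPIO/IRQ provider not ready yet */
	if (phy_irq < 0)
		phy_irq = PHY_POLL;	/* no interrupt wired up: poll the PHY */

	return phy_irq;
}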
@@ -127,11 +127,8 @@ static int fbnic_mbx_map_msg(struct fbnic_dev *fbd, int mbx_idx,
 		return -EBUSY;

 	addr = dma_map_single(fbd->dev, msg, PAGE_SIZE, direction);
-	if (dma_mapping_error(fbd->dev, addr)) {
-		free_page((unsigned long)msg);
+
+	if (dma_mapping_error(fbd->dev, addr))
 		return -ENOSPC;
-	}

 	mbx->buf_info[tail].msg = msg;
 	mbx->buf_info[tail].addr = addr;

@@ -18,9 +18,9 @@
  */
 #define LAN743X_PTP_N_EVENT_CHAN	2
 #define LAN743X_PTP_N_PEROUT		LAN743X_PTP_N_EVENT_CHAN
-#define LAN743X_PTP_N_EXTTS		4
-#define LAN743X_PTP_N_PPS		0
 #define PCI11X1X_PTP_IO_MAX_CHANNELS	8
+#define LAN743X_PTP_N_EXTTS		PCI11X1X_PTP_IO_MAX_CHANNELS
+#define LAN743X_PTP_N_PPS		0
 #define PTP_CMD_CTL_TIMEOUT_CNT		50

 struct lan743x_adapter;

|
|||
unsigned long start_time;
|
||||
unsigned long max_wait;
|
||||
unsigned long duration;
|
||||
int done = 0;
|
||||
bool fw_up;
|
||||
int opcode;
|
||||
bool done;
|
||||
int err;
|
||||
|
||||
/* Wait for dev cmd to complete, retrying if we get EAGAIN,
|
||||
|
@ -526,6 +526,7 @@ static int __ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds,
|
|||
*/
|
||||
max_wait = jiffies + (max_seconds * HZ);
|
||||
try_again:
|
||||
done = false;
|
||||
opcode = idev->opcode;
|
||||
start_time = jiffies;
|
||||
for (fw_up = ionic_is_fw_running(idev);
|
||||
|
|
|
@ -98,20 +98,11 @@ void prueth_xmit_free(struct prueth_tx_chn *tx_chn,
|
|||
{
|
||||
struct cppi5_host_desc_t *first_desc, *next_desc;
|
||||
dma_addr_t buf_dma, next_desc_dma;
|
||||
struct prueth_swdata *swdata;
|
||||
struct page *page;
|
||||
u32 buf_dma_len;
|
||||
|
||||
first_desc = desc;
|
||||
next_desc = first_desc;
|
||||
|
||||
swdata = cppi5_hdesc_get_swdata(desc);
|
||||
if (swdata->type == PRUETH_SWDATA_PAGE) {
|
||||
page = swdata->data.page;
|
||||
page_pool_recycle_direct(page->pp, swdata->data.page);
|
||||
goto free_desc;
|
||||
}
|
||||
|
||||
cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
|
||||
k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
|
||||
|
||||
|
@ -135,7 +126,6 @@ void prueth_xmit_free(struct prueth_tx_chn *tx_chn,
|
|||
k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
|
||||
}
|
||||
|
||||
free_desc:
|
||||
k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(prueth_xmit_free);
|
||||
|
@ -612,13 +602,8 @@ u32 emac_xmit_xdp_frame(struct prueth_emac *emac,
|
|||
k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
|
||||
cppi5_hdesc_attach_buf(first_desc, buf_dma, xdpf->len, buf_dma, xdpf->len);
|
||||
swdata = cppi5_hdesc_get_swdata(first_desc);
|
||||
if (page) {
|
||||
swdata->type = PRUETH_SWDATA_PAGE;
|
||||
swdata->data.page = page;
|
||||
} else {
|
||||
swdata->type = PRUETH_SWDATA_XDPF;
|
||||
swdata->data.xdpf = xdpf;
|
||||
}
|
||||
swdata->type = PRUETH_SWDATA_XDPF;
|
||||
swdata->data.xdpf = xdpf;
|
||||
|
||||
/* Report BQL before sending the packet */
|
||||
netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
|
||||
|
|
|
@ -1216,6 +1216,7 @@ void ath12k_fw_stats_init(struct ath12k *ar)
|
|||
INIT_LIST_HEAD(&ar->fw_stats.pdevs);
|
||||
INIT_LIST_HEAD(&ar->fw_stats.bcn);
|
||||
init_completion(&ar->fw_stats_complete);
|
||||
init_completion(&ar->fw_stats_done);
|
||||
}
|
||||
|
||||
void ath12k_fw_stats_free(struct ath12k_fw_stats *stats)
|
||||
|
@ -1228,8 +1229,9 @@ void ath12k_fw_stats_free(struct ath12k_fw_stats *stats)
|
|||
void ath12k_fw_stats_reset(struct ath12k *ar)
|
||||
{
|
||||
spin_lock_bh(&ar->data_lock);
|
||||
ar->fw_stats.fw_stats_done = false;
|
||||
ath12k_fw_stats_free(&ar->fw_stats);
|
||||
ar->fw_stats.num_vdev_recvd = 0;
|
||||
ar->fw_stats.num_bcn_recvd = 0;
|
||||
spin_unlock_bh(&ar->data_lock);
|
||||
}
|
||||
|
||||
|
|
|
@ -601,6 +601,12 @@ struct ath12k_sta {
|
|||
#define ATH12K_NUM_CHANS 101
|
||||
#define ATH12K_MAX_5GHZ_CHAN 173
|
||||
|
||||
static inline bool ath12k_is_2ghz_channel_freq(u32 freq)
|
||||
{
|
||||
return freq >= ATH12K_MIN_2GHZ_FREQ &&
|
||||
freq <= ATH12K_MAX_2GHZ_FREQ;
|
||||
}
|
||||
|
||||
enum ath12k_hw_state {
|
||||
ATH12K_HW_STATE_OFF,
|
||||
ATH12K_HW_STATE_ON,
|
||||
|
@ -626,7 +632,8 @@ struct ath12k_fw_stats {
|
|||
struct list_head pdevs;
|
||||
struct list_head vdevs;
|
||||
struct list_head bcn;
|
||||
bool fw_stats_done;
|
||||
u32 num_vdev_recvd;
|
||||
u32 num_bcn_recvd;
|
||||
};
|
||||
|
||||
struct ath12k_dbg_htt_stats {
|
||||
|
@ -806,6 +813,7 @@ struct ath12k {
|
|||
bool regdom_set_by_user;
|
||||
|
||||
struct completion fw_stats_complete;
|
||||
struct completion fw_stats_done;
|
||||
|
||||
struct completion mlo_setup_done;
|
||||
u32 mlo_setup_status;
|
||||
|
|
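The ath12k hunks drop the polled fw_stats_done flag in favour of a dedicated struct completion plus per-type receive counters. A generic sketch of that producer/consumer pattern, with invented names rather than the driver's own:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

struct example_fw_stats {
	struct completion done;	/* signalled when the final segment arrives */
	u32 num_recvd;		/* segments received so far */
};

static void example_fw_stats_init(struct example_fw_stats *st)
{
	init_completion(&st->done);
	st->num_recvd = 0;
}

/* Event path: count a segment and wake the waiter on the last one. */
static void example_fw_stats_rx(struct example_fw_stats *st, bool is_last)
{
	st->num_recvd++;
	if (is_last)
		complete(&st->done);
}

/* Request path: block (with a timeout) until the event path signals done. */
static int example_fw_stats_wait(struct example_fw_stats *st, unsigned int ms)
{
	if (!wait_for_completion_timeout(&st->done, msecs_to_jiffies(ms)))
		return -ETIMEDOUT;

	return 0;
}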
Some files were not shown because too many files have changed in this diff.