Merge tag 'drm-misc-next-2025-04-29' of https://gitlab.freedesktop.org/drm/misc/kernel into drm-next
drm-misc-next for v6.16-rc1:

UAPI Changes:
- panthor now fails in the mmap_offset call for a BO created with DRM_PANTHOR_BO_NO_MMAP.
- Add DRM_PANTHOR_BO_SET_LABEL ioctl and label panthor kernel BOs.

Cross-subsystem Changes:
- Add kmap_local_page_try_from_panic for drm/panic.
- Add DT bindings for panels.
- Update DT bindings for imagination.
- Extend %p4cc in lib/vsprintf.c to support fourcc printing.

Core Changes:
- Remove the disgusting turds.
- Register definition updates for DP.
- DisplayID timing blocks refactor.
- Remove now unused mipi_dsi_dsc_write_seq.
- Convert panel drivers to not return error in prepare/enable and unprepare/disable calls.

Driver Changes:
- Assorted small fixes and features for rockchip, panthor, accel/ivpu, accel/amdxdna, hisilicon/hibmc, i915/backlight, sysfb, accel/qaic, udl, etnaviv, virtio, xlnx, panel/boe-bf060y8m-aj0, bridge/synopsis, panthor, panel/samsung/sofef00m, lontium/lt9611uxc, nouveau, panel/himax-hx8279, panfrost, st7571-i2c.
- Improve hibmc interrupt handling and add HPD support.
- Add NLT NL13676BC25-03F, Tianma TM070JDHG34-00, Himax HX8279/HX8279-D DDIC, Visionox G2647FB105, and Sitronix ST7571 LCD Controller panels.
- Add zpos, alpha and blend to renesas.
- Convert drivers to use drm_gem_is_imported, replacing gem->import_attach.
- Support TI AM68 GPU in imagination.
- Support panic handler in virtio.
- Add support to get the panel from DP AUX bus in rockchip and add RK3588 support.
- Make sofef00 only support the sofef00 panel, not another unrelated one.
- Add debugfs BO dumping support to panthor, and print associated labels.
- Implement heartbeat based hangcheck in ivpu.
- Mass convert drivers to devm_drm_bridge_alloc api.

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://lore.kernel.org/r/e2a958d9-e506-4962-8bae-0dbf2ecc000f@linux.intel.com
Commit 135130db6e
157 changed files with 6560 additions and 1427 deletions
@ -648,6 +648,38 @@ Examples::

        %p4cc   Y10 little-endian (0x20303159)
        %p4cc   NV12 big-endian (0xb231564e)

Generic FourCC code
-------------------

::

        %p4c[hnlb]      gP00 (0x67503030)

Print a generic FourCC code, as both ASCII characters and its numerical
value as hexadecimal.

The generic FourCC code is always printed in the big-endian format,
the most significant byte first. This is the opposite of V4L/DRM FourCCs.

The additional ``h``, ``n``, ``l``, and ``b`` specifiers define what
endianness is used to load the stored bytes. The data might be interpreted
using the host byte order, network byte order, little-endian, or big-endian.

Passed by reference.

Examples for a little-endian machine, given &(u32)0x67503030::

        %p4ch   gP00 (0x67503030)
        %p4cn   00Pg (0x30305067)
        %p4cl   gP00 (0x67503030)
        %p4cb   00Pg (0x30305067)

Examples for a big-endian machine, given &(u32)0x67503030::

        %p4ch   gP00 (0x67503030)
        %p4cn   00Pg (0x30305067)
        %p4cl   00Pg (0x30305067)
        %p4cb   gP00 (0x67503030)

Rust
----
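As a rough illustration of the documentation above (a hypothetical snippet, not part of this series' diff), a kernel caller could print both the existing V4L/DRM form and the new generic form, reusing the documented example values::

        #include <linux/printk.h>
        #include <linux/types.h>

        /* Hypothetical demo; values taken from the examples documented above. */
        static void fourcc_print_demo(void)
        {
                u32 drm_code = 0x20303159;      /* "Y10 " V4L/DRM fourcc */
                u32 generic  = 0x67503030;      /* 'gP00' stored as a host-order integer */

                /* prints: Y10 little-endian (0x20303159) */
                pr_info("drm fourcc: %p4cc\n", &drm_code);

                /* prints: gP00 (0x67503030) on a little-endian host */
                pr_info("generic:    %p4ch\n", &generic);
        }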
@ -0,0 +1,75 @@
|
|||
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
|
||||
%YAML 1.2
|
||||
---
|
||||
$id: http://devicetree.org/schemas/display/panel/himax,hx8279.yaml#
|
||||
$schema: http://devicetree.org/meta-schemas/core.yaml#
|
||||
|
||||
title: Himax HX8279/HX8279-D based MIPI-DSI panels
|
||||
|
||||
maintainers:
|
||||
- AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
|
||||
|
||||
description:
|
||||
The Himax HX8279 is a 1803 channel outputs source driver with MIPI
|
||||
TCON, which generates the horizontal and vertical control timing to
|
||||
the source and gate drivers.
|
||||
This DriverIC is most suitable for 1200x1920, 1080x1920, 1200x1600,
|
||||
and 600x1024 panels and outputs full RGB888 over two or four lanes,
|
||||
single or dual, MIPI-DSI video interface.
|
||||
|
||||
allOf:
|
||||
- $ref: panel-common-dual.yaml#
|
||||
|
||||
properties:
|
||||
compatible:
|
||||
items:
|
||||
- enum:
|
||||
- aoly,sl101pm1794fog-v15
|
||||
- startek,kd070fhfid078
|
||||
- const: himax,hx8279
|
||||
|
||||
reg:
|
||||
maxItems: 1
|
||||
|
||||
iovcc-supply:
|
||||
description: I/O voltage supply
|
||||
|
||||
vdd-supply:
|
||||
description: Panel power supply
|
||||
|
||||
required:
|
||||
- compatible
|
||||
- reg
|
||||
- backlight
|
||||
- reset-gpios
|
||||
- iovcc-supply
|
||||
- vdd-supply
|
||||
|
||||
unevaluatedProperties: false
|
||||
|
||||
examples:
|
||||
- |
|
||||
#include <dt-bindings/gpio/gpio.h>
|
||||
|
||||
dsi {
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
|
||||
panel@0 {
|
||||
compatible = "startek,kd070fhfid078", "himax,hx8279";
|
||||
reg = <0>;
|
||||
backlight = <&backlight>;
|
||||
enable-gpios = <&pio 25 GPIO_ACTIVE_HIGH>;
|
||||
reset-gpios = <&pio 45 GPIO_ACTIVE_HIGH>;
|
||||
iovcc-supply = <&vreg_lcm_vio>;
|
||||
vdd-supply = <&vreg_lcm_vdd>;
|
||||
|
||||
port {
|
||||
panel_in: endpoint {
|
||||
remote-endpoint = <&dsi_out>;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
...
|
|
@ -226,6 +226,8 @@ properties:
|
|||
- netron-dy,e231732
|
||||
# Newhaven Display International 480 x 272 TFT LCD panel
|
||||
- newhaven,nhd-4.3-480272ef-atxl
|
||||
# NLT Technologies, Ltd. 15.6" WXGA (1366×768) LVDS TFT LCD panel
|
||||
- nlt,nl13676bc25-03f
|
||||
# New Vision Display 7.0" 800 RGB x 480 TFT LCD panel
|
||||
- nvd,9128
|
||||
# OKAYA Electric America, Inc. RS800480T-7X0GP 7" WVGA LCD panel
|
||||
|
@ -286,6 +288,8 @@ properties:
|
|||
- startek,kd070wvfpa
|
||||
# Team Source Display Technology TST043015CMHX 4.3" WQVGA TFT LCD panel
|
||||
- team-source-display,tst043015cmhx
|
||||
# Tianma Micro-electronics P0700WXF1MBAA 7.0" WXGA (1280x800) LVDS TFT LCD panel
|
||||
- tianma,p0700wxf1mbaa
|
||||
# Tianma Micro-electronics TM070JDHG30 7.0" WXGA TFT LCD panel
|
||||
- tianma,tm070jdhg30
|
||||
# Tianma Micro-electronics TM070JDHG34-00 7.0" WXGA (1280x800) LVDS TFT LCD panel
|
||||
|
|
|
@ -0,0 +1,79 @@
|
|||
# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
|
||||
%YAML 1.2
|
||||
---
|
||||
$id: http://devicetree.org/schemas/display/panel/visionox,g2647fb105.yaml#
|
||||
$schema: http://devicetree.org/meta-schemas/core.yaml#
|
||||
|
||||
title: Visionox G2647FB105 6.47" 1080x2340 MIPI-DSI Panel
|
||||
|
||||
maintainers:
|
||||
- Alexander Baransky <sanyapilot496@gmail.com>
|
||||
|
||||
description:
|
||||
The Visionox G2647FB105 is a 6.47 inch 1080x2340 MIPI-DSI CMD mode OLED panel.
|
||||
|
||||
allOf:
|
||||
- $ref: panel-common.yaml#
|
||||
|
||||
properties:
|
||||
compatible:
|
||||
const: visionox,g2647fb105
|
||||
|
||||
reg:
|
||||
maxItems: 1
|
||||
|
||||
vdd3p3-supply:
|
||||
description: 3.3V source voltage rail
|
||||
|
||||
vddio-supply:
|
||||
description: I/O source voltage rail
|
||||
|
||||
vsn-supply:
|
||||
description: Negative source voltage rail
|
||||
|
||||
vsp-supply:
|
||||
description: Positive source voltage rail
|
||||
|
||||
reset-gpios: true
|
||||
port: true
|
||||
|
||||
required:
|
||||
- compatible
|
||||
- reg
|
||||
- vdd3p3-supply
|
||||
- vddio-supply
|
||||
- vsn-supply
|
||||
- vsp-supply
|
||||
- reset-gpios
|
||||
- port
|
||||
|
||||
additionalProperties: false
|
||||
|
||||
examples:
|
||||
- |
|
||||
#include <dt-bindings/gpio/gpio.h>
|
||||
|
||||
dsi {
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
|
||||
panel@0 {
|
||||
compatible = "visionox,g2647fb105";
|
||||
reg = <0>;
|
||||
|
||||
vdd3p3-supply = <&vreg_l7c_3p0>;
|
||||
vddio-supply = <&vreg_l13a_1p8>;
|
||||
vsn-supply = <&vreg_ibb>;
|
||||
vsp-supply = <&vreg_lab>;
|
||||
|
||||
reset-gpios = <&pm6150l_gpios 9 GPIO_ACTIVE_LOW>;
|
||||
|
||||
port {
|
||||
panel_in: endpoint {
|
||||
remote-endpoint = <&mdss_dsi0_out>;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
...
|
|
@ -15,6 +15,7 @@ properties:
|
|||
enum:
|
||||
- rockchip,rk3288-dp
|
||||
- rockchip,rk3399-edp
|
||||
- rockchip,rk3588-edp
|
||||
|
||||
clocks:
|
||||
minItems: 2
|
||||
|
@ -31,16 +32,23 @@ properties:
|
|||
maxItems: 1
|
||||
|
||||
resets:
|
||||
maxItems: 1
|
||||
minItems: 1
|
||||
maxItems: 2
|
||||
|
||||
reset-names:
|
||||
const: dp
|
||||
minItems: 1
|
||||
items:
|
||||
- const: dp
|
||||
- const: apb
|
||||
|
||||
rockchip,grf:
|
||||
$ref: /schemas/types.yaml#/definitions/phandle
|
||||
description:
|
||||
This SoC makes use of GRF regs.
|
||||
|
||||
aux-bus:
|
||||
$ref: /schemas/display/dp-aux-bus.yaml#
|
||||
|
||||
required:
|
||||
- compatible
|
||||
- clocks
|
||||
|
@ -52,6 +60,19 @@ required:
|
|||
allOf:
|
||||
- $ref: /schemas/display/bridge/analogix,dp.yaml#
|
||||
|
||||
- if:
|
||||
properties:
|
||||
compatible:
|
||||
contains:
|
||||
enum:
|
||||
- rockchip,rk3588-edp
|
||||
then:
|
||||
properties:
|
||||
resets:
|
||||
minItems: 2
|
||||
reset-names:
|
||||
minItems: 2
|
||||
|
||||
unevaluatedProperties: false
|
||||
|
||||
examples:
|
||||
|
|
|
@ -73,12 +73,6 @@ properties:
|
|||
port:
|
||||
$ref: /schemas/graph.yaml#/properties/port
|
||||
|
||||
assigned-clocks:
|
||||
maxItems: 2
|
||||
|
||||
assigned-clock-rates:
|
||||
maxItems: 2
|
||||
|
||||
iommus:
|
||||
maxItems: 1
|
||||
|
||||
|
|
|
@ -0,0 +1,73 @@
|
|||
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
|
||||
%YAML 1.2
|
||||
---
|
||||
$id: http://devicetree.org/schemas/display/sitronix,st7571.yaml#
|
||||
$schema: http://devicetree.org/meta-schemas/core.yaml#
|
||||
|
||||
title: Sitronix ST7571 Display Controller
|
||||
|
||||
maintainers:
|
||||
- Marcus Folkesson <marcus.folkesson@gmail.com>
|
||||
|
||||
description:
|
||||
Sitronix ST7571 is a driver and controller for 4-level gray
|
||||
scale and monochrome dot matrix LCD panels.
|
||||
|
||||
allOf:
|
||||
- $ref: panel/panel-common.yaml#
|
||||
|
||||
properties:
|
||||
compatible:
|
||||
const: sitronix,st7571
|
||||
|
||||
reg:
|
||||
maxItems: 1
|
||||
|
||||
sitronix,grayscale:
|
||||
type: boolean
|
||||
description:
|
||||
Display supports 4-level grayscale.
|
||||
|
||||
reset-gpios: true
|
||||
width-mm: true
|
||||
height-mm: true
|
||||
panel-timing: true
|
||||
|
||||
required:
|
||||
- compatible
|
||||
- reg
|
||||
- reset-gpios
|
||||
- width-mm
|
||||
- height-mm
|
||||
- panel-timing
|
||||
|
||||
additionalProperties: false
|
||||
|
||||
examples:
|
||||
- |
|
||||
#include <dt-bindings/gpio/gpio.h>
|
||||
|
||||
i2c {
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
|
||||
display@3f {
|
||||
compatible = "sitronix,st7571";
|
||||
reg = <0x3f>;
|
||||
reset-gpios = <&gpio0 3 GPIO_ACTIVE_LOW>;
|
||||
width-mm = <37>;
|
||||
height-mm = <27>;
|
||||
|
||||
panel-timing {
|
||||
hactive = <128>;
|
||||
vactive = <96>;
|
||||
hback-porch = <0>;
|
||||
vback-porch = <0>;
|
||||
clock-frequency = <0>;
|
||||
hfront-porch = <0>;
|
||||
hsync-len = <0>;
|
||||
vfront-porch = <0>;
|
||||
vsync-len = <0>;
|
||||
};
|
||||
};
|
||||
};
|
|
@ -12,10 +12,28 @@ maintainers:
|
|||
|
||||
properties:
|
||||
compatible:
|
||||
items:
|
||||
- enum:
|
||||
- ti,am62-gpu
|
||||
- const: img,img-axe # IMG AXE GPU model/revision is fully discoverable
|
||||
oneOf:
|
||||
- items:
|
||||
- enum:
|
||||
- ti,am62-gpu
|
||||
- const: img,img-axe-1-16m
|
||||
# This deprecated element must be kept around to allow old kernels to
|
||||
# work with newer dts.
|
||||
- const: img,img-axe
|
||||
- const: img,img-rogue
|
||||
- items:
|
||||
- enum:
|
||||
- ti,j721s2-gpu
|
||||
- const: img,img-bxs-4-64
|
||||
- const: img,img-rogue
|
||||
|
||||
# This legacy combination of compatible strings was introduced early on
|
||||
# before the more specific GPU identifiers were used.
|
||||
- items:
|
||||
- enum:
|
||||
- ti,am62-gpu
|
||||
- const: img,img-axe
|
||||
deprecated: true
|
||||
|
||||
reg:
|
||||
maxItems: 1
|
||||
|
@ -35,6 +53,18 @@ properties:
|
|||
maxItems: 1
|
||||
|
||||
power-domains:
|
||||
minItems: 1
|
||||
maxItems: 2
|
||||
|
||||
power-domain-names:
|
||||
items:
|
||||
- const: a
|
||||
- const: b
|
||||
minItems: 1
|
||||
|
||||
dma-coherent: true
|
||||
|
||||
resets:
|
||||
maxItems: 1
|
||||
|
||||
required:
|
||||
|
@ -47,11 +77,49 @@ required:
|
|||
additionalProperties: false
|
||||
|
||||
allOf:
|
||||
# Constraints added alongside the new compatible strings that would otherwise
|
||||
# create an ABI break.
|
||||
- if:
|
||||
properties:
|
||||
compatible:
|
||||
contains:
|
||||
const: ti,am62-gpu
|
||||
const: img,img-rogue
|
||||
then:
|
||||
required:
|
||||
- power-domains
|
||||
- power-domain-names
|
||||
|
||||
- if:
|
||||
properties:
|
||||
compatible:
|
||||
contains:
|
||||
const: img,img-axe-1-16m
|
||||
then:
|
||||
properties:
|
||||
power-domains:
|
||||
maxItems: 1
|
||||
power-domain-names:
|
||||
maxItems: 1
|
||||
|
||||
- if:
|
||||
properties:
|
||||
compatible:
|
||||
contains:
|
||||
const: img,img-bxs-4-64
|
||||
then:
|
||||
properties:
|
||||
power-domains:
|
||||
minItems: 2
|
||||
power-domain-names:
|
||||
minItems: 2
|
||||
|
||||
- if:
|
||||
properties:
|
||||
compatible:
|
||||
contains:
|
||||
enum:
|
||||
- ti,am62-gpu
|
||||
- ti,j721s2-gpu
|
||||
then:
|
||||
properties:
|
||||
clocks:
|
||||
|
@ -64,10 +132,12 @@ examples:
|
|||
#include <dt-bindings/soc/ti,sci_pm_domain.h>
|
||||
|
||||
gpu@fd00000 {
|
||||
compatible = "ti,am62-gpu", "img,img-axe";
|
||||
compatible = "ti,am62-gpu", "img,img-axe-1-16m", "img,img-axe",
|
||||
"img,img-rogue";
|
||||
reg = <0x0fd00000 0x20000>;
|
||||
clocks = <&k3_clks 187 0>;
|
||||
clock-names = "core";
|
||||
interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
|
||||
power-domains = <&k3_pds 187 TI_SCI_PD_EXCLUSIVE>;
|
||||
power-domain-names = "a";
|
||||
};
|
||||
|
|
|
@ -129,6 +129,8 @@ patternProperties:
|
|||
description: Andes Technology Corporation
|
||||
"^anvo,.*":
|
||||
description: Anvo-Systems Dresden GmbH
|
||||
"^aoly,.*":
|
||||
description: Shenzhen Aoly Technology Co., Ltd.
|
||||
"^aosong,.*":
|
||||
description: Guangzhou Aosong Electronic Co., Ltd.
|
||||
"^apm,.*":
|
||||
|
|
|
@ -7682,6 +7682,12 @@ T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
|
|||
F: Documentation/devicetree/bindings/display/sitronix,st7586.txt
|
||||
F: drivers/gpu/drm/tiny/st7586.c
|
||||
|
||||
DRM DRIVER FOR SITRONIX ST7571 PANELS
|
||||
M: Marcus Folkesson <marcus.folkesson@gmail.com>
|
||||
S: Maintained
|
||||
F: Documentation/devicetree/bindings/display/sitronix,st7571.yaml
|
||||
F: drivers/gpu/drm/tiny/st7571-i2c.c
|
||||
|
||||
DRM DRIVER FOR SITRONIX ST7701 PANELS
|
||||
M: Jagan Teki <jagan@amarulasolutions.com>
|
||||
S: Maintained
|
||||
|
|
|
@ -525,7 +525,7 @@ aie2_cmdlist_fill_one_slot_cf(void *cmd_buf, u32 offset,
|
|||
if (!payload)
|
||||
return -EINVAL;
|
||||
|
||||
if (!slot_cf_has_space(offset, payload_len))
|
||||
if (!slot_has_space(*buf, offset, payload_len))
|
||||
return -ENOSPC;
|
||||
|
||||
buf->cu_idx = cu_idx;
|
||||
|
@ -558,7 +558,7 @@ aie2_cmdlist_fill_one_slot_dpu(void *cmd_buf, u32 offset,
|
|||
if (payload_len < sizeof(*sn) || arg_sz > MAX_DPU_ARGS_SIZE)
|
||||
return -EINVAL;
|
||||
|
||||
if (!slot_dpu_has_space(offset, arg_sz))
|
||||
if (!slot_has_space(*buf, offset, arg_sz))
|
||||
return -ENOSPC;
|
||||
|
||||
buf->inst_buf_addr = sn->buffer;
|
||||
|
@ -569,7 +569,7 @@ aie2_cmdlist_fill_one_slot_dpu(void *cmd_buf, u32 offset,
|
|||
memcpy(buf->args, sn->prop_args, arg_sz);
|
||||
|
||||
/* Accurate buf size to hint firmware to do necessary copy */
|
||||
*size += sizeof(*buf) + arg_sz;
|
||||
*size = sizeof(*buf) + arg_sz;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -319,18 +319,16 @@ struct async_event_msg_resp {
|
|||
} __packed;
|
||||
|
||||
#define MAX_CHAIN_CMDBUF_SIZE SZ_4K
|
||||
#define slot_cf_has_space(offset, payload_size) \
|
||||
(MAX_CHAIN_CMDBUF_SIZE - ((offset) + (payload_size)) > \
|
||||
offsetof(struct cmd_chain_slot_execbuf_cf, args[0]))
|
||||
#define slot_has_space(slot, offset, payload_size) \
|
||||
(MAX_CHAIN_CMDBUF_SIZE >= (offset) + (payload_size) + \
|
||||
sizeof(typeof(slot)))
|
||||
|
||||
struct cmd_chain_slot_execbuf_cf {
|
||||
__u32 cu_idx;
|
||||
__u32 arg_cnt;
|
||||
__u32 args[] __counted_by(arg_cnt);
|
||||
};
|
||||
|
||||
#define slot_dpu_has_space(offset, payload_size) \
|
||||
(MAX_CHAIN_CMDBUF_SIZE - ((offset) + (payload_size)) > \
|
||||
offsetof(struct cmd_chain_slot_dpu, args[0]))
|
||||
struct cmd_chain_slot_dpu {
|
||||
__u64 inst_buf_addr;
|
||||
__u32 inst_size;
|
||||
|
|
|
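The two amdxdna hunks above switch from per-slot-type macros (slot_cf_has_space, slot_dpu_has_space) to a single generic slot_has_space() check. As a standalone sketch of the bound it enforces (plain user-space C with made-up offsets, not the driver code itself): the slot header plus its payload must fit inside the 4 KiB chained command buffer.

        #include <stdint.h>
        #include <stdio.h>

        #define MAX_CHAIN_CMDBUF_SIZE 4096      /* SZ_4K in the driver */

        struct cmd_chain_slot_execbuf_cf {
                uint32_t cu_idx;
                uint32_t arg_cnt;
                uint32_t args[];
        };

        /* Mirrors the shape of the new generic macro: header + payload must fit. */
        #define slot_has_space(slot, offset, payload_size) \
                (MAX_CHAIN_CMDBUF_SIZE >= (offset) + (payload_size) + sizeof(slot))

        int main(void)
        {
                struct cmd_chain_slot_execbuf_cf buf;

                printf("%d\n", (int)slot_has_space(buf, 0, 128));    /* 1: fits */
                printf("%d\n", (int)slot_has_space(buf, 4000, 128)); /* 0: would overflow */
                return 0;
        }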
@ -374,6 +374,9 @@ int ivpu_boot(struct ivpu_device *vdev)
|
|||
{
|
||||
int ret;
|
||||
|
||||
drm_WARN_ON(&vdev->drm, atomic_read(&vdev->job_timeout_counter));
|
||||
drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));
|
||||
|
||||
/* Update boot params located at first 4KB of FW memory */
|
||||
ivpu_fw_boot_params_setup(vdev, ivpu_bo_vaddr(vdev->fw->mem));
|
||||
|
||||
|
@ -573,6 +576,7 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
|
|||
vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID;
|
||||
vdev->context_xa_limit.max = IVPU_USER_CONTEXT_MAX_SSID;
|
||||
atomic64_set(&vdev->unique_id_counter, 0);
|
||||
atomic_set(&vdev->job_timeout_counter, 0);
|
||||
xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
|
||||
xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
|
||||
xa_init_flags(&vdev->db_xa, XA_FLAGS_ALLOC1);
|
||||
|
|
|
@ -154,6 +154,7 @@ struct ivpu_device {
|
|||
struct mutex submitted_jobs_lock; /* Protects submitted_jobs */
|
||||
struct xarray submitted_jobs_xa;
|
||||
struct ivpu_ipc_consumer job_done_consumer;
|
||||
atomic_t job_timeout_counter;
|
||||
|
||||
atomic64_t unique_id_counter;
|
||||
|
||||
|
|
|
@ -39,6 +39,7 @@ struct ivpu_fw_info {
|
|||
u64 read_only_addr;
|
||||
u32 read_only_size;
|
||||
u32 sched_mode;
|
||||
u64 last_heartbeat;
|
||||
};
|
||||
|
||||
int ivpu_fw_init(struct ivpu_device *vdev);
|
||||
|
|
|
@ -30,7 +30,7 @@ static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, con
|
|||
"%6s: bo %8p vpu_addr %9llx size %8zu ctx %d has_pages %d dma_mapped %d mmu_mapped %d wc %d imported %d\n",
|
||||
action, bo, bo->vpu_addr, ivpu_bo_size(bo), bo->ctx ? bo->ctx->id : 0,
|
||||
(bool)bo->base.pages, (bool)bo->base.sgt, bo->mmu_mapped, bo->base.map_wc,
|
||||
(bool)bo->base.base.import_attach);
|
||||
(bool)drm_gem_is_imported(&bo->base.base));
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -122,7 +122,7 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo)
|
|||
bo->ctx = NULL;
|
||||
}
|
||||
|
||||
if (bo->base.base.import_attach)
|
||||
if (drm_gem_is_imported(&bo->base.base))
|
||||
return;
|
||||
|
||||
dma_resv_lock(bo->base.base.resv, NULL);
|
||||
|
@ -461,7 +461,7 @@ static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
|
|||
if (bo->mmu_mapped)
|
||||
drm_printf(p, " mmu_mapped");
|
||||
|
||||
if (bo->base.base.import_attach)
|
||||
if (drm_gem_is_imported(&bo->base.base))
|
||||
drm_printf(p, " imported");
|
||||
|
||||
drm_printf(p, "\n");
|
||||
|
|
|
@ -34,6 +34,7 @@ module_param_named(tdr_timeout_ms, ivpu_tdr_timeout_ms, ulong, 0644);
|
|||
MODULE_PARM_DESC(tdr_timeout_ms, "Timeout for device hang detection, in milliseconds, 0 - default");
|
||||
|
||||
#define PM_RESCHEDULE_LIMIT 5
|
||||
#define PM_TDR_HEARTBEAT_LIMIT 30
|
||||
|
||||
static void ivpu_pm_prepare_cold_boot(struct ivpu_device *vdev)
|
||||
{
|
||||
|
@ -44,6 +45,7 @@ static void ivpu_pm_prepare_cold_boot(struct ivpu_device *vdev)
|
|||
ivpu_fw_log_reset(vdev);
|
||||
ivpu_fw_load(vdev);
|
||||
fw->entry_point = fw->cold_boot_entry_point;
|
||||
fw->last_heartbeat = 0;
|
||||
}
|
||||
|
||||
static void ivpu_pm_prepare_warm_boot(struct ivpu_device *vdev)
|
||||
|
@ -189,7 +191,24 @@ static void ivpu_job_timeout_work(struct work_struct *work)
|
|||
{
|
||||
struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, job_timeout_work.work);
|
||||
struct ivpu_device *vdev = pm->vdev;
|
||||
u64 heartbeat;
|
||||
|
||||
if (ivpu_jsm_get_heartbeat(vdev, 0, &heartbeat) || heartbeat <= vdev->fw->last_heartbeat) {
|
||||
ivpu_err(vdev, "Job timeout detected, heartbeat not progressed\n");
|
||||
goto recovery;
|
||||
}
|
||||
|
||||
if (atomic_fetch_inc(&vdev->job_timeout_counter) > PM_TDR_HEARTBEAT_LIMIT) {
|
||||
ivpu_err(vdev, "Job timeout detected, heartbeat limit exceeded\n");
|
||||
goto recovery;
|
||||
}
|
||||
|
||||
vdev->fw->last_heartbeat = heartbeat;
|
||||
ivpu_start_job_timeout_detection(vdev);
|
||||
return;
|
||||
|
||||
recovery:
|
||||
atomic_set(&vdev->job_timeout_counter, 0);
|
||||
ivpu_pm_trigger_recovery(vdev, "TDR");
|
||||
}
|
||||
|
||||
|
@ -204,6 +223,7 @@ void ivpu_start_job_timeout_detection(struct ivpu_device *vdev)
|
|||
void ivpu_stop_job_timeout_detection(struct ivpu_device *vdev)
|
||||
{
|
||||
cancel_delayed_work_sync(&vdev->pm->job_timeout_work);
|
||||
atomic_set(&vdev->job_timeout_counter, 0);
|
||||
}
|
||||
|
||||
int ivpu_pm_suspend_cb(struct device *dev)
|
||||
|
|
|
@ -609,7 +609,7 @@ static int qaic_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struc
|
|||
struct scatterlist *sg;
|
||||
int ret = 0;
|
||||
|
||||
if (obj->import_attach)
|
||||
if (drm_gem_is_imported(obj))
|
||||
return -EINVAL;
|
||||
|
||||
for (sg = bo->sgt->sgl; sg; sg = sg_next(sg)) {
|
||||
|
@ -630,7 +630,7 @@ static void qaic_free_object(struct drm_gem_object *obj)
|
|||
{
|
||||
struct qaic_bo *bo = to_qaic_bo(obj);
|
||||
|
||||
if (obj->import_attach) {
|
||||
if (drm_gem_is_imported(obj)) {
|
||||
/* DMABUF/PRIME Path */
|
||||
drm_prime_gem_destroy(obj, NULL);
|
||||
} else {
|
||||
|
@ -870,7 +870,7 @@ static int qaic_prepare_bo(struct qaic_device *qdev, struct qaic_bo *bo,
|
|||
{
|
||||
int ret;
|
||||
|
||||
if (bo->base.import_attach)
|
||||
if (drm_gem_is_imported(&bo->base))
|
||||
ret = qaic_prepare_import_bo(bo, hdr);
|
||||
else
|
||||
ret = qaic_prepare_export_bo(qdev, bo, hdr);
|
||||
|
@ -894,7 +894,7 @@ static void qaic_unprepare_export_bo(struct qaic_device *qdev, struct qaic_bo *b
|
|||
|
||||
static void qaic_unprepare_bo(struct qaic_device *qdev, struct qaic_bo *bo)
|
||||
{
|
||||
if (bo->base.import_attach)
|
||||
if (drm_gem_is_imported(&bo->base))
|
||||
qaic_unprepare_import_bo(bo);
|
||||
else
|
||||
qaic_unprepare_export_bo(qdev, bo);
|
||||
|
|
|
@ -27,7 +27,7 @@ config DRM_WERROR
|
|||
|
||||
config DRM_HEADER_TEST
|
||||
bool "Ensure DRM headers are self-contained and pass kernel-doc"
|
||||
depends on DRM && EXPERT
|
||||
depends on DRM && EXPERT && BROKEN
|
||||
default n
|
||||
help
|
||||
Ensure the DRM subsystem headers both under drivers/gpu/drm and
|
||||
|
|
|
@ -16,6 +16,7 @@ config DRM_AUX_BRIDGE
|
|||
tristate
|
||||
depends on DRM_BRIDGE && OF
|
||||
select AUXILIARY_BUS
|
||||
select DRM_KMS_HELPER
|
||||
select DRM_PANEL_BRIDGE
|
||||
help
|
||||
Simple transparent bridge that is used by several non-DRM drivers to
|
||||
|
|
|
@ -664,9 +664,10 @@ static int anx6345_i2c_probe(struct i2c_client *client)
|
|||
struct device *dev;
|
||||
int i, err;
|
||||
|
||||
anx6345 = devm_kzalloc(&client->dev, sizeof(*anx6345), GFP_KERNEL);
|
||||
if (!anx6345)
|
||||
return -ENOMEM;
|
||||
anx6345 = devm_drm_bridge_alloc(&client->dev, struct anx6345, bridge,
|
||||
&anx6345_bridge_funcs);
|
||||
if (IS_ERR(anx6345))
|
||||
return PTR_ERR(anx6345);
|
||||
|
||||
mutex_init(&anx6345->lock);
|
||||
|
||||
|
@ -738,7 +739,6 @@ static int anx6345_i2c_probe(struct i2c_client *client)
|
|||
/* Look for supported chip ID */
|
||||
anx6345_poweron(anx6345);
|
||||
if (anx6345_get_chip_id(anx6345)) {
|
||||
anx6345->bridge.funcs = &anx6345_bridge_funcs;
|
||||
drm_bridge_add(&anx6345->bridge);
|
||||
|
||||
return 0;
|
||||
|
|
|
@ -838,10 +838,7 @@ static int analogix_dp_commit(struct analogix_dp_device *dp)
|
|||
int ret;
|
||||
|
||||
/* Keep the panel disabled while we configure video */
|
||||
if (dp->plat_data->panel) {
|
||||
if (drm_panel_disable(dp->plat_data->panel))
|
||||
DRM_ERROR("failed to disable the panel\n");
|
||||
}
|
||||
drm_panel_disable(dp->plat_data->panel);
|
||||
|
||||
ret = analogix_dp_train_link(dp);
|
||||
if (ret) {
|
||||
|
@ -863,13 +860,7 @@ static int analogix_dp_commit(struct analogix_dp_device *dp)
|
|||
}
|
||||
|
||||
/* Safe to enable the panel now */
|
||||
if (dp->plat_data->panel) {
|
||||
ret = drm_panel_enable(dp->plat_data->panel);
|
||||
if (ret) {
|
||||
DRM_ERROR("failed to enable the panel\n");
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
drm_panel_enable(dp->plat_data->panel);
|
||||
|
||||
/* Check whether panel supports fast training */
|
||||
ret = analogix_dp_fast_link_train_detection(dp);
|
||||
|
@ -955,67 +946,15 @@ static int analogix_dp_disable_psr(struct analogix_dp_device *dp)
|
|||
return analogix_dp_send_psr_spd(dp, &psr_vsc, true);
|
||||
}
|
||||
|
||||
/*
|
||||
* This function is a bit of a catch-all for panel preparation, hopefully
|
||||
* simplifying the logic of functions that need to prepare/unprepare the panel
|
||||
* below.
|
||||
*
|
||||
* If @prepare is true, this function will prepare the panel. Conversely, if it
|
||||
* is false, the panel will be unprepared.
|
||||
*
|
||||
* If @is_modeset_prepare is true, the function will disregard the current state
|
||||
* of the panel and either prepare/unprepare the panel based on @prepare. Once
|
||||
* it finishes, it will update dp->panel_is_modeset to reflect the current state
|
||||
* of the panel.
|
||||
*/
|
||||
static int analogix_dp_prepare_panel(struct analogix_dp_device *dp,
|
||||
bool prepare, bool is_modeset_prepare)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
if (!dp->plat_data->panel)
|
||||
return 0;
|
||||
|
||||
mutex_lock(&dp->panel_lock);
|
||||
|
||||
/*
|
||||
* Exit early if this is a temporary prepare/unprepare and we're already
|
||||
* modeset (since we neither want to prepare twice or unprepare early).
|
||||
*/
|
||||
if (dp->panel_is_modeset && !is_modeset_prepare)
|
||||
goto out;
|
||||
|
||||
if (prepare)
|
||||
ret = drm_panel_prepare(dp->plat_data->panel);
|
||||
else
|
||||
ret = drm_panel_unprepare(dp->plat_data->panel);
|
||||
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
if (is_modeset_prepare)
|
||||
dp->panel_is_modeset = prepare;
|
||||
|
||||
out:
|
||||
mutex_unlock(&dp->panel_lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int analogix_dp_get_modes(struct drm_connector *connector)
|
||||
{
|
||||
struct analogix_dp_device *dp = to_dp(connector);
|
||||
const struct drm_edid *drm_edid;
|
||||
int ret, num_modes = 0;
|
||||
int num_modes = 0;
|
||||
|
||||
if (dp->plat_data->panel) {
|
||||
num_modes += drm_panel_get_modes(dp->plat_data->panel, connector);
|
||||
} else {
|
||||
ret = analogix_dp_prepare_panel(dp, true, false);
|
||||
if (ret) {
|
||||
DRM_ERROR("Failed to prepare panel (%d)\n", ret);
|
||||
return 0;
|
||||
}
|
||||
|
||||
drm_edid = drm_edid_read_ddc(connector, &dp->aux.ddc);
|
||||
|
||||
drm_edid_connector_update(&dp->connector, drm_edid);
|
||||
|
@ -1024,10 +963,6 @@ static int analogix_dp_get_modes(struct drm_connector *connector)
|
|||
num_modes += drm_edid_connector_add_modes(&dp->connector);
|
||||
drm_edid_free(drm_edid);
|
||||
}
|
||||
|
||||
ret = analogix_dp_prepare_panel(dp, false, false);
|
||||
if (ret)
|
||||
DRM_ERROR("Failed to unprepare panel (%d)\n", ret);
|
||||
}
|
||||
|
||||
if (dp->plat_data->get_modes)
|
||||
|
@ -1082,24 +1017,13 @@ analogix_dp_detect(struct drm_connector *connector, bool force)
|
|||
{
|
||||
struct analogix_dp_device *dp = to_dp(connector);
|
||||
enum drm_connector_status status = connector_status_disconnected;
|
||||
int ret;
|
||||
|
||||
if (dp->plat_data->panel)
|
||||
return connector_status_connected;
|
||||
|
||||
ret = analogix_dp_prepare_panel(dp, true, false);
|
||||
if (ret) {
|
||||
DRM_ERROR("Failed to prepare panel (%d)\n", ret);
|
||||
return connector_status_disconnected;
|
||||
}
|
||||
|
||||
if (!analogix_dp_detect_hpd(dp))
|
||||
status = connector_status_connected;
|
||||
|
||||
ret = analogix_dp_prepare_panel(dp, false, false);
|
||||
if (ret)
|
||||
DRM_ERROR("Failed to unprepare panel (%d)\n", ret);
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
|
@ -1203,7 +1127,6 @@ static void analogix_dp_bridge_atomic_pre_enable(struct drm_bridge *bridge,
|
|||
struct analogix_dp_device *dp = bridge->driver_private;
|
||||
struct drm_crtc *crtc;
|
||||
struct drm_crtc_state *old_crtc_state;
|
||||
int ret;
|
||||
|
||||
crtc = analogix_dp_get_new_crtc(dp, old_state);
|
||||
if (!crtc)
|
||||
|
@ -1214,9 +1137,7 @@ static void analogix_dp_bridge_atomic_pre_enable(struct drm_bridge *bridge,
|
|||
if (old_crtc_state && old_crtc_state->self_refresh_active)
|
||||
return;
|
||||
|
||||
ret = analogix_dp_prepare_panel(dp, true, true);
|
||||
if (ret)
|
||||
DRM_ERROR("failed to setup the panel ret = %d\n", ret);
|
||||
drm_panel_prepare(dp->plat_data->panel);
|
||||
}
|
||||
|
||||
static int analogix_dp_set_bridge(struct analogix_dp_device *dp)
|
||||
|
@ -1296,17 +1217,11 @@ static void analogix_dp_bridge_atomic_enable(struct drm_bridge *bridge,
|
|||
static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
|
||||
{
|
||||
struct analogix_dp_device *dp = bridge->driver_private;
|
||||
int ret;
|
||||
|
||||
if (dp->dpms_mode != DRM_MODE_DPMS_ON)
|
||||
return;
|
||||
|
||||
if (dp->plat_data->panel) {
|
||||
if (drm_panel_disable(dp->plat_data->panel)) {
|
||||
DRM_ERROR("failed to disable the panel\n");
|
||||
return;
|
||||
}
|
||||
}
|
||||
drm_panel_disable(dp->plat_data->panel);
|
||||
|
||||
disable_irq(dp->irq);
|
||||
|
||||
|
@ -1314,9 +1229,7 @@ static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
|
|||
|
||||
pm_runtime_put_sync(dp->dev);
|
||||
|
||||
ret = analogix_dp_prepare_panel(dp, false, true);
|
||||
if (ret)
|
||||
DRM_ERROR("failed to setup the panel ret = %d\n", ret);
|
||||
drm_panel_unprepare(dp->plat_data->panel);
|
||||
|
||||
dp->fast_train_enable = false;
|
||||
dp->psr_supported = false;
|
||||
|
@ -1505,6 +1418,10 @@ static int analogix_dp_dt_parse_pdata(struct analogix_dp_device *dp)
|
|||
video_info->max_link_rate = 0x0A;
|
||||
video_info->max_lane_count = 0x04;
|
||||
break;
|
||||
case RK3588_EDP:
|
||||
video_info->max_link_rate = 0x14;
|
||||
video_info->max_lane_count = 0x04;
|
||||
break;
|
||||
case EXYNOS_DP:
|
||||
/*
|
||||
* NOTE: those property parseing code is used for
|
||||
|
@ -1540,6 +1457,26 @@ out:
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int analogix_dpaux_wait_hpd_asserted(struct drm_dp_aux *aux, unsigned long wait_us)
|
||||
{
|
||||
struct analogix_dp_device *dp = to_dp(aux);
|
||||
int val;
|
||||
int ret;
|
||||
|
||||
if (dp->force_hpd)
|
||||
return 0;
|
||||
|
||||
pm_runtime_get_sync(dp->dev);
|
||||
|
||||
ret = readx_poll_timeout(analogix_dp_get_plug_in_status, dp, val, !val,
|
||||
wait_us / 100, wait_us);
|
||||
|
||||
pm_runtime_mark_last_busy(dp->dev);
|
||||
pm_runtime_put_autosuspend(dp->dev);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
struct analogix_dp_device *
|
||||
analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
|
||||
{
|
||||
|
@ -1560,9 +1497,6 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
|
|||
dp->dev = &pdev->dev;
|
||||
dp->dpms_mode = DRM_MODE_DPMS_OFF;
|
||||
|
||||
mutex_init(&dp->panel_lock);
|
||||
dp->panel_is_modeset = false;
|
||||
|
||||
/*
|
||||
* platform dp driver need containor_of the plat_data to get
|
||||
* the driver private data, so we need to store the point of
|
||||
|
@ -1625,10 +1559,10 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
|
|||
* that we can get the current state of the GPIO.
|
||||
*/
|
||||
dp->irq = gpiod_to_irq(dp->hpd_gpiod);
|
||||
irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;
|
||||
irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_NO_AUTOEN;
|
||||
} else {
|
||||
dp->irq = platform_get_irq(pdev, 0);
|
||||
irq_flags = 0;
|
||||
irq_flags = IRQF_NO_AUTOEN;
|
||||
}
|
||||
|
||||
if (dp->irq == -ENXIO) {
|
||||
|
@ -1645,7 +1579,18 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
|
|||
dev_err(&pdev->dev, "failed to request irq\n");
|
||||
goto err_disable_clk;
|
||||
}
|
||||
disable_irq(dp->irq);
|
||||
|
||||
dp->aux.name = "DP-AUX";
|
||||
dp->aux.transfer = analogix_dpaux_transfer;
|
||||
dp->aux.wait_hpd_asserted = analogix_dpaux_wait_hpd_asserted;
|
||||
dp->aux.dev = dp->dev;
|
||||
drm_dp_aux_init(&dp->aux);
|
||||
|
||||
pm_runtime_use_autosuspend(dp->dev);
|
||||
pm_runtime_set_autosuspend_delay(dp->dev, 100);
|
||||
ret = devm_pm_runtime_enable(dp->dev);
|
||||
if (ret)
|
||||
goto err_disable_clk;
|
||||
|
||||
return dp;
|
||||
|
||||
|
@ -1681,6 +1626,7 @@ int analogix_dp_resume(struct analogix_dp_device *dp)
|
|||
if (dp->plat_data->power_on)
|
||||
dp->plat_data->power_on(dp->plat_data);
|
||||
|
||||
phy_set_mode(dp->phy, PHY_MODE_DP);
|
||||
phy_power_on(dp->phy);
|
||||
|
||||
analogix_dp_init_dp(dp);
|
||||
|
@ -1696,25 +1642,12 @@ int analogix_dp_bind(struct analogix_dp_device *dp, struct drm_device *drm_dev)
|
|||
dp->drm_dev = drm_dev;
|
||||
dp->encoder = dp->plat_data->encoder;
|
||||
|
||||
if (IS_ENABLED(CONFIG_PM)) {
|
||||
pm_runtime_use_autosuspend(dp->dev);
|
||||
pm_runtime_set_autosuspend_delay(dp->dev, 100);
|
||||
pm_runtime_enable(dp->dev);
|
||||
} else {
|
||||
ret = analogix_dp_resume(dp);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
dp->aux.name = "DP-AUX";
|
||||
dp->aux.transfer = analogix_dpaux_transfer;
|
||||
dp->aux.dev = dp->dev;
|
||||
dp->aux.drm_dev = drm_dev;
|
||||
|
||||
ret = drm_dp_aux_register(&dp->aux);
|
||||
if (ret) {
|
||||
DRM_ERROR("failed to register AUX (%d)\n", ret);
|
||||
goto err_disable_pm_runtime;
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = analogix_dp_create_bridge(drm_dev, dp);
|
||||
|
@ -1727,13 +1660,6 @@ int analogix_dp_bind(struct analogix_dp_device *dp, struct drm_device *drm_dev)
|
|||
|
||||
err_unregister_aux:
|
||||
drm_dp_aux_unregister(&dp->aux);
|
||||
err_disable_pm_runtime:
|
||||
if (IS_ENABLED(CONFIG_PM)) {
|
||||
pm_runtime_dont_use_autosuspend(dp->dev);
|
||||
pm_runtime_disable(dp->dev);
|
||||
} else {
|
||||
analogix_dp_suspend(dp);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -1744,19 +1670,9 @@ void analogix_dp_unbind(struct analogix_dp_device *dp)
|
|||
analogix_dp_bridge_disable(dp->bridge);
|
||||
dp->connector.funcs->destroy(&dp->connector);
|
||||
|
||||
if (dp->plat_data->panel) {
|
||||
if (drm_panel_unprepare(dp->plat_data->panel))
|
||||
DRM_ERROR("failed to turnoff the panel\n");
|
||||
}
|
||||
drm_panel_unprepare(dp->plat_data->panel);
|
||||
|
||||
drm_dp_aux_unregister(&dp->aux);
|
||||
|
||||
if (IS_ENABLED(CONFIG_PM)) {
|
||||
pm_runtime_dont_use_autosuspend(dp->dev);
|
||||
pm_runtime_disable(dp->dev);
|
||||
} else {
|
||||
analogix_dp_suspend(dp);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(analogix_dp_unbind);
|
||||
|
||||
|
@ -1782,6 +1698,20 @@ int analogix_dp_stop_crc(struct drm_connector *connector)
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(analogix_dp_stop_crc);
|
||||
|
||||
struct analogix_dp_plat_data *analogix_dp_aux_to_plat_data(struct drm_dp_aux *aux)
|
||||
{
|
||||
struct analogix_dp_device *dp = to_dp(aux);
|
||||
|
||||
return dp->plat_data;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(analogix_dp_aux_to_plat_data);
|
||||
|
||||
struct drm_dp_aux *analogix_dp_get_aux(struct analogix_dp_device *dp)
|
||||
{
|
||||
return &dp->aux;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(analogix_dp_get_aux);
|
||||
|
||||
MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
|
||||
MODULE_DESCRIPTION("Analogix DP Core Driver");
|
||||
MODULE_LICENSE("GPL v2");
|
||||
|
|
|
@ -169,9 +169,6 @@ struct analogix_dp_device {
|
|||
bool fast_train_enable;
|
||||
bool psr_supported;
|
||||
|
||||
struct mutex panel_lock;
|
||||
bool panel_is_modeset;
|
||||
|
||||
struct analogix_dp_plat_data *plat_data;
|
||||
};
|
||||
|
||||
|
|
|
@ -11,6 +11,7 @@
|
|||
#include <linux/gpio/consumer.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/iopoll.h>
|
||||
#include <linux/phy/phy.h>
|
||||
|
||||
#include <drm/bridge/analogix_dp.h>
|
||||
|
||||
|
@ -513,10 +514,24 @@ void analogix_dp_enable_sw_function(struct analogix_dp_device *dp)
|
|||
void analogix_dp_set_link_bandwidth(struct analogix_dp_device *dp, u32 bwtype)
|
||||
{
|
||||
u32 reg;
|
||||
int ret;
|
||||
|
||||
reg = bwtype;
|
||||
if ((bwtype == DP_LINK_BW_2_7) || (bwtype == DP_LINK_BW_1_62))
|
||||
writel(reg, dp->reg_base + ANALOGIX_DP_LINK_BW_SET);
|
||||
|
||||
if (dp->phy) {
|
||||
union phy_configure_opts phy_cfg = {0};
|
||||
|
||||
phy_cfg.dp.link_rate =
|
||||
drm_dp_bw_code_to_link_rate(dp->link_train.link_rate) / 100;
|
||||
phy_cfg.dp.set_rate = true;
|
||||
ret = phy_configure(dp->phy, &phy_cfg);
|
||||
if (ret && ret != -EOPNOTSUPP) {
|
||||
dev_err(dp->dev, "%s: phy_configure() failed: %d\n", __func__, ret);
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void analogix_dp_get_link_bandwidth(struct analogix_dp_device *dp, u32 *bwtype)
|
||||
|
@ -530,9 +545,22 @@ void analogix_dp_get_link_bandwidth(struct analogix_dp_device *dp, u32 *bwtype)
|
|||
void analogix_dp_set_lane_count(struct analogix_dp_device *dp, u32 count)
|
||||
{
|
||||
u32 reg;
|
||||
int ret;
|
||||
|
||||
reg = count;
|
||||
writel(reg, dp->reg_base + ANALOGIX_DP_LANE_COUNT_SET);
|
||||
|
||||
if (dp->phy) {
|
||||
union phy_configure_opts phy_cfg = {0};
|
||||
|
||||
phy_cfg.dp.lanes = dp->link_train.lane_count;
|
||||
phy_cfg.dp.set_lanes = true;
|
||||
ret = phy_configure(dp->phy, &phy_cfg);
|
||||
if (ret && ret != -EOPNOTSUPP) {
|
||||
dev_err(dp->dev, "%s: phy_configure() failed: %d\n", __func__, ret);
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void analogix_dp_get_lane_count(struct analogix_dp_device *dp, u32 *count)
|
||||
|
@ -546,10 +574,34 @@ void analogix_dp_get_lane_count(struct analogix_dp_device *dp, u32 *count)
|
|||
void analogix_dp_set_lane_link_training(struct analogix_dp_device *dp)
|
||||
{
|
||||
u8 lane;
|
||||
int ret;
|
||||
|
||||
for (lane = 0; lane < dp->link_train.lane_count; lane++)
|
||||
writel(dp->link_train.training_lane[lane],
|
||||
dp->reg_base + ANALOGIX_DP_LN0_LINK_TRAINING_CTL + 4 * lane);
|
||||
|
||||
if (dp->phy) {
|
||||
union phy_configure_opts phy_cfg = {0};
|
||||
|
||||
for (lane = 0; lane < dp->link_train.lane_count; lane++) {
|
||||
u8 training_lane = dp->link_train.training_lane[lane];
|
||||
u8 vs, pe;
|
||||
|
||||
vs = (training_lane & DP_TRAIN_VOLTAGE_SWING_MASK) >>
|
||||
DP_TRAIN_VOLTAGE_SWING_SHIFT;
|
||||
pe = (training_lane & DP_TRAIN_PRE_EMPHASIS_MASK) >>
|
||||
DP_TRAIN_PRE_EMPHASIS_SHIFT;
|
||||
phy_cfg.dp.voltage[lane] = vs;
|
||||
phy_cfg.dp.pre[lane] = pe;
|
||||
}
|
||||
|
||||
phy_cfg.dp.set_voltages = true;
|
||||
ret = phy_configure(dp->phy, &phy_cfg);
|
||||
if (ret && ret != -EOPNOTSUPP) {
|
||||
dev_err(dp->dev, "%s: phy_configure() failed: %d\n", __func__, ret);
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
u32 analogix_dp_get_lane_link_training(struct analogix_dp_device *dp, u8 lane)
|
||||
|
|
|
@ -2569,12 +2569,6 @@ static const struct dev_pm_ops anx7625_pm_ops = {
|
|||
anx7625_runtime_pm_resume, NULL)
|
||||
};
|
||||
|
||||
static void anx7625_runtime_disable(void *data)
|
||||
{
|
||||
pm_runtime_dont_use_autosuspend(data);
|
||||
pm_runtime_disable(data);
|
||||
}
|
||||
|
||||
static int anx7625_link_bridge(struct drm_dp_aux *aux)
|
||||
{
|
||||
struct anx7625_data *platform = container_of(aux, struct anx7625_data, aux);
|
||||
|
@ -2708,11 +2702,10 @@ static int anx7625_i2c_probe(struct i2c_client *client)
|
|||
goto free_wq;
|
||||
}
|
||||
|
||||
pm_runtime_enable(dev);
|
||||
pm_runtime_set_autosuspend_delay(dev, 1000);
|
||||
pm_runtime_use_autosuspend(dev);
|
||||
pm_suspend_ignore_children(dev, true);
|
||||
ret = devm_add_action_or_reset(dev, anx7625_runtime_disable, dev);
|
||||
ret = devm_pm_runtime_enable(dev);
|
||||
if (ret)
|
||||
goto free_wq;
|
||||
|
||||
|
|
|
@ -210,9 +210,10 @@ static int display_connector_probe(struct platform_device *pdev)
|
|||
const char *label = NULL;
|
||||
int ret;
|
||||
|
||||
conn = devm_kzalloc(&pdev->dev, sizeof(*conn), GFP_KERNEL);
|
||||
if (!conn)
|
||||
return -ENOMEM;
|
||||
conn = devm_drm_bridge_alloc(&pdev->dev, struct display_connector, bridge,
|
||||
&display_connector_bridge_funcs);
|
||||
if (IS_ERR(conn))
|
||||
return PTR_ERR(conn);
|
||||
|
||||
platform_set_drvdata(pdev, conn);
|
||||
|
||||
|
@ -362,7 +363,6 @@ static int display_connector_probe(struct platform_device *pdev)
|
|||
}
|
||||
}
|
||||
|
||||
conn->bridge.funcs = &display_connector_bridge_funcs;
|
||||
conn->bridge.of_node = pdev->dev.of_node;
|
||||
|
||||
if (conn->bridge.ddc)
|
||||
|
|
|
@ -190,8 +190,7 @@ int ldb_find_next_bridge_helper(struct ldb *ldb)
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(ldb_find_next_bridge_helper);
|
||||
|
||||
void ldb_add_bridge_helper(struct ldb *ldb,
|
||||
const struct drm_bridge_funcs *bridge_funcs)
|
||||
void ldb_add_bridge_helper(struct ldb *ldb)
|
||||
{
|
||||
struct ldb_channel *ldb_ch;
|
||||
int i;
|
||||
|
@ -203,7 +202,6 @@ void ldb_add_bridge_helper(struct ldb *ldb,
|
|||
continue;
|
||||
|
||||
ldb_ch->bridge.driver_private = ldb_ch;
|
||||
ldb_ch->bridge.funcs = bridge_funcs;
|
||||
ldb_ch->bridge.of_node = ldb_ch->np;
|
||||
|
||||
drm_bridge_add(&ldb_ch->bridge);
|
||||
|
|
|
@ -88,8 +88,7 @@ int ldb_init_helper(struct ldb *ldb);
|
|||
|
||||
int ldb_find_next_bridge_helper(struct ldb *ldb);
|
||||
|
||||
void ldb_add_bridge_helper(struct ldb *ldb,
|
||||
const struct drm_bridge_funcs *bridge_funcs);
|
||||
void ldb_add_bridge_helper(struct ldb *ldb);
|
||||
|
||||
void ldb_remove_bridge_helper(struct ldb *ldb);
|
||||
|
||||
|
|
|
@ -47,7 +47,7 @@ struct imx8qm_ldb_channel {
|
|||
struct imx8qm_ldb {
|
||||
struct ldb base;
|
||||
struct device *dev;
|
||||
struct imx8qm_ldb_channel channel[MAX_LDB_CHAN_NUM];
|
||||
struct imx8qm_ldb_channel *channel[MAX_LDB_CHAN_NUM];
|
||||
struct clk *clk_pixel;
|
||||
struct clk *clk_bypass;
|
||||
int active_chno;
|
||||
|
@ -107,7 +107,7 @@ static int imx8qm_ldb_bridge_atomic_check(struct drm_bridge *bridge,
|
|||
|
||||
if (is_split) {
|
||||
imx8qm_ldb_ch =
|
||||
&imx8qm_ldb->channel[imx8qm_ldb->active_chno ^ 1];
|
||||
imx8qm_ldb->channel[imx8qm_ldb->active_chno ^ 1];
|
||||
imx8qm_ldb_set_phy_cfg(imx8qm_ldb, di_clk, is_split, true,
|
||||
phy_cfg);
|
||||
ret = phy_validate(imx8qm_ldb_ch->phy, PHY_MODE_LVDS, 0, &opts);
|
||||
|
@ -158,7 +158,7 @@ imx8qm_ldb_bridge_mode_set(struct drm_bridge *bridge,
|
|||
|
||||
if (is_split) {
|
||||
imx8qm_ldb_ch =
|
||||
&imx8qm_ldb->channel[imx8qm_ldb->active_chno ^ 1];
|
||||
imx8qm_ldb->channel[imx8qm_ldb->active_chno ^ 1];
|
||||
imx8qm_ldb_set_phy_cfg(imx8qm_ldb, di_clk, is_split, true,
|
||||
phy_cfg);
|
||||
ret = phy_configure(imx8qm_ldb_ch->phy, &opts);
|
||||
|
@ -226,13 +226,13 @@ static void imx8qm_ldb_bridge_atomic_enable(struct drm_bridge *bridge,
|
|||
}
|
||||
|
||||
if (is_split) {
|
||||
ret = phy_power_on(imx8qm_ldb->channel[0].phy);
|
||||
ret = phy_power_on(imx8qm_ldb->channel[0]->phy);
|
||||
if (ret)
|
||||
DRM_DEV_ERROR(dev,
|
||||
"failed to power on channel0 PHY: %d\n",
|
||||
ret);
|
||||
|
||||
ret = phy_power_on(imx8qm_ldb->channel[1].phy);
|
||||
ret = phy_power_on(imx8qm_ldb->channel[1]->phy);
|
||||
if (ret)
|
||||
DRM_DEV_ERROR(dev,
|
||||
"failed to power on channel1 PHY: %d\n",
|
||||
|
@ -261,12 +261,12 @@ static void imx8qm_ldb_bridge_atomic_disable(struct drm_bridge *bridge,
|
|||
ldb_bridge_disable_helper(bridge);
|
||||
|
||||
if (is_split) {
|
||||
ret = phy_power_off(imx8qm_ldb->channel[0].phy);
|
||||
ret = phy_power_off(imx8qm_ldb->channel[0]->phy);
|
||||
if (ret)
|
||||
DRM_DEV_ERROR(dev,
|
||||
"failed to power off channel0 PHY: %d\n",
|
||||
ret);
|
||||
ret = phy_power_off(imx8qm_ldb->channel[1].phy);
|
||||
ret = phy_power_off(imx8qm_ldb->channel[1]->phy);
|
||||
if (ret)
|
||||
DRM_DEV_ERROR(dev,
|
||||
"failed to power off channel1 PHY: %d\n",
|
||||
|
@ -412,7 +412,7 @@ static int imx8qm_ldb_get_phy(struct imx8qm_ldb *imx8qm_ldb)
|
|||
int i, ret;
|
||||
|
||||
for (i = 0; i < MAX_LDB_CHAN_NUM; i++) {
|
||||
imx8qm_ldb_ch = &imx8qm_ldb->channel[i];
|
||||
imx8qm_ldb_ch = imx8qm_ldb->channel[i];
|
||||
ldb_ch = &imx8qm_ldb_ch->base;
|
||||
|
||||
if (!ldb_ch->is_available)
|
||||
|
@ -448,6 +448,14 @@ static int imx8qm_ldb_probe(struct platform_device *pdev)
|
|||
if (!imx8qm_ldb)
|
||||
return -ENOMEM;
|
||||
|
||||
for (i = 0; i < MAX_LDB_CHAN_NUM; i++) {
|
||||
imx8qm_ldb->channel[i] =
|
||||
devm_drm_bridge_alloc(dev, struct imx8qm_ldb_channel, base.bridge,
|
||||
&imx8qm_ldb_bridge_funcs);
|
||||
if (IS_ERR(imx8qm_ldb->channel[i]))
|
||||
return PTR_ERR(imx8qm_ldb->channel[i]);
|
||||
}
|
||||
|
||||
imx8qm_ldb->clk_pixel = devm_clk_get(dev, "pixel");
|
||||
if (IS_ERR(imx8qm_ldb->clk_pixel)) {
|
||||
ret = PTR_ERR(imx8qm_ldb->clk_pixel);
|
||||
|
@ -473,7 +481,7 @@ static int imx8qm_ldb_probe(struct platform_device *pdev)
|
|||
ldb->ctrl_reg = 0xe0;
|
||||
|
||||
for (i = 0; i < MAX_LDB_CHAN_NUM; i++)
|
||||
ldb->channel[i] = &imx8qm_ldb->channel[i].base;
|
||||
ldb->channel[i] = &imx8qm_ldb->channel[i]->base;
|
||||
|
||||
ret = ldb_init_helper(ldb);
|
||||
if (ret)
|
||||
|
@ -499,12 +507,12 @@ static int imx8qm_ldb_probe(struct platform_device *pdev)
|
|||
}
|
||||
|
||||
imx8qm_ldb->active_chno = 0;
|
||||
imx8qm_ldb_ch = &imx8qm_ldb->channel[0];
|
||||
imx8qm_ldb_ch = imx8qm_ldb->channel[0];
|
||||
ldb_ch = &imx8qm_ldb_ch->base;
|
||||
ldb_ch->link_type = pixel_order;
|
||||
} else {
|
||||
for (i = 0; i < MAX_LDB_CHAN_NUM; i++) {
|
||||
imx8qm_ldb_ch = &imx8qm_ldb->channel[i];
|
||||
imx8qm_ldb_ch = imx8qm_ldb->channel[i];
|
||||
ldb_ch = &imx8qm_ldb_ch->base;
|
||||
|
||||
if (ldb_ch->is_available) {
|
||||
|
@ -525,7 +533,7 @@ static int imx8qm_ldb_probe(struct platform_device *pdev)
|
|||
platform_set_drvdata(pdev, imx8qm_ldb);
|
||||
pm_runtime_enable(dev);
|
||||
|
||||
ldb_add_bridge_helper(ldb, &imx8qm_ldb_bridge_funcs);
|
||||
ldb_add_bridge_helper(ldb);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -44,7 +44,7 @@ struct imx8qxp_ldb_channel {
|
|||
struct imx8qxp_ldb {
|
||||
struct ldb base;
|
||||
struct device *dev;
|
||||
struct imx8qxp_ldb_channel channel[MAX_LDB_CHAN_NUM];
|
||||
struct imx8qxp_ldb_channel *channel[MAX_LDB_CHAN_NUM];
|
||||
struct clk *clk_pixel;
|
||||
struct clk *clk_bypass;
|
||||
struct drm_bridge *companion;
|
||||
|
@ -410,7 +410,7 @@ static const struct drm_bridge_funcs imx8qxp_ldb_bridge_funcs = {
|
|||
static int imx8qxp_ldb_set_di_id(struct imx8qxp_ldb *imx8qxp_ldb)
|
||||
{
|
||||
struct imx8qxp_ldb_channel *imx8qxp_ldb_ch =
|
||||
&imx8qxp_ldb->channel[imx8qxp_ldb->active_chno];
|
||||
imx8qxp_ldb->channel[imx8qxp_ldb->active_chno];
|
||||
struct ldb_channel *ldb_ch = &imx8qxp_ldb_ch->base;
|
||||
struct device_node *ep, *remote;
|
||||
struct device *dev = imx8qxp_ldb->dev;
|
||||
|
@ -456,7 +456,7 @@ imx8qxp_ldb_check_chno_and_dual_link(struct ldb_channel *ldb_ch, int link)
|
|||
static int imx8qxp_ldb_parse_dt_companion(struct imx8qxp_ldb *imx8qxp_ldb)
|
||||
{
|
||||
struct imx8qxp_ldb_channel *imx8qxp_ldb_ch =
|
||||
&imx8qxp_ldb->channel[imx8qxp_ldb->active_chno];
|
||||
imx8qxp_ldb->channel[imx8qxp_ldb->active_chno];
|
||||
struct ldb_channel *ldb_ch = &imx8qxp_ldb_ch->base;
|
||||
struct ldb_channel *companion_ldb_ch;
|
||||
struct device_node *companion;
|
||||
|
@ -586,6 +586,14 @@ static int imx8qxp_ldb_probe(struct platform_device *pdev)
|
|||
if (!imx8qxp_ldb)
|
||||
return -ENOMEM;
|
||||
|
||||
for (i = 0; i < MAX_LDB_CHAN_NUM; i++) {
|
||||
imx8qxp_ldb->channel[i] =
|
||||
devm_drm_bridge_alloc(dev, struct imx8qxp_ldb_channel, base.bridge,
|
||||
&imx8qxp_ldb_bridge_funcs);
|
||||
if (IS_ERR(imx8qxp_ldb->channel[i]))
|
||||
return PTR_ERR(imx8qxp_ldb->channel[i]);
|
||||
}
|
||||
|
||||
imx8qxp_ldb->clk_pixel = devm_clk_get(dev, "pixel");
|
||||
if (IS_ERR(imx8qxp_ldb->clk_pixel)) {
|
||||
ret = PTR_ERR(imx8qxp_ldb->clk_pixel);
|
||||
|
@ -611,7 +619,7 @@ static int imx8qxp_ldb_probe(struct platform_device *pdev)
|
|||
ldb->ctrl_reg = 0xe0;
|
||||
|
||||
for (i = 0; i < MAX_LDB_CHAN_NUM; i++)
|
||||
ldb->channel[i] = &imx8qxp_ldb->channel[i].base;
|
||||
ldb->channel[i] = &imx8qxp_ldb->channel[i]->base;
|
||||
|
||||
ret = ldb_init_helper(ldb);
|
||||
if (ret)
|
||||
|
@ -627,7 +635,7 @@ static int imx8qxp_ldb_probe(struct platform_device *pdev)
|
|||
}
|
||||
|
||||
for (i = 0; i < MAX_LDB_CHAN_NUM; i++) {
|
||||
imx8qxp_ldb_ch = &imx8qxp_ldb->channel[i];
|
||||
imx8qxp_ldb_ch = imx8qxp_ldb->channel[i];
|
||||
ldb_ch = &imx8qxp_ldb_ch->base;
|
||||
|
||||
if (ldb_ch->is_available) {
|
||||
|
@ -660,7 +668,7 @@ static int imx8qxp_ldb_probe(struct platform_device *pdev)
|
|||
platform_set_drvdata(pdev, imx8qxp_ldb);
|
||||
pm_runtime_enable(dev);
|
||||
|
||||
ldb_add_bridge_helper(ldb, &imx8qxp_ldb_bridge_funcs);
|
||||
ldb_add_bridge_helper(ldb);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -775,9 +775,9 @@ static int lt9611uxc_probe(struct i2c_client *client)
|
|||
return -ENODEV;
|
||||
}
|
||||
|
||||
lt9611uxc = devm_kzalloc(dev, sizeof(*lt9611uxc), GFP_KERNEL);
|
||||
if (!lt9611uxc)
|
||||
return -ENOMEM;
|
||||
lt9611uxc = devm_drm_bridge_alloc(dev, struct lt9611uxc, bridge, <9611uxc_bridge_funcs);
|
||||
if (IS_ERR(lt9611uxc))
|
||||
return PTR_ERR(lt9611uxc);
|
||||
|
||||
lt9611uxc->dev = dev;
|
||||
lt9611uxc->client = client;
|
||||
|
@ -856,7 +856,6 @@ retry:
|
|||
|
||||
i2c_set_clientdata(client, lt9611uxc);
|
||||
|
||||
lt9611uxc->bridge.funcs = <9611uxc_bridge_funcs;
|
||||
lt9611uxc->bridge.of_node = client->dev.of_node;
|
||||
lt9611uxc->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID;
|
||||
if (lt9611uxc->hpd_supported)
|
||||
|
@ -881,7 +880,11 @@ retry:
|
|||
}
|
||||
}
|
||||
|
||||
return lt9611uxc_audio_init(dev, lt9611uxc);
|
||||
ret = lt9611uxc_audio_init(dev, lt9611uxc);
|
||||
if (ret)
|
||||
goto err_remove_bridge;
|
||||
|
||||
return 0;
|
||||
|
||||
err_remove_bridge:
|
||||
free_irq(client->irq, lt9611uxc);
|
||||
|
|
|
@ -22,8 +22,8 @@
|
|||
|
||||
#include <media/cec-notifier.h>
|
||||
|
||||
#include <uapi/linux/media-bus-format.h>
|
||||
#include <uapi/linux/videodev2.h>
|
||||
#include <linux/media-bus-format.h>
|
||||
#include <linux/videodev2.h>
|
||||
|
||||
#include <drm/bridge/dw_hdmi.h>
|
||||
#include <drm/display/drm_hdmi_helper.h>
|
||||
|
@ -3333,9 +3333,9 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
|
|||
u8 config0;
|
||||
u8 config3;
|
||||
|
||||
hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
|
||||
if (!hdmi)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
hdmi = devm_drm_bridge_alloc(dev, struct dw_hdmi, bridge, &dw_hdmi_bridge_funcs);
|
||||
if (IS_ERR(hdmi))
|
||||
return hdmi;
|
||||
|
||||
hdmi->plat_data = plat_data;
|
||||
hdmi->dev = dev;
|
||||
|
@ -3495,7 +3495,6 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
|
|||
}
|
||||
|
||||
hdmi->bridge.driver_private = hdmi;
|
||||
hdmi->bridge.funcs = &dw_hdmi_bridge_funcs;
|
||||
hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID
|
||||
| DRM_BRIDGE_OP_HPD;
|
||||
hdmi->bridge.interlace_allowed = true;
|
||||
|
|
|
@ -1781,9 +1781,9 @@ static int tda998x_create(struct device *dev)
|
|||
u32 video;
|
||||
int rev_lo, rev_hi, ret;
|
||||
|
||||
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
|
||||
if (!priv)
|
||||
return -ENOMEM;
|
||||
priv = devm_drm_bridge_alloc(dev, struct tda998x_priv, bridge, &tda998x_bridge_funcs);
|
||||
if (IS_ERR(priv))
|
||||
return PTR_ERR(priv);
|
||||
|
||||
dev_set_drvdata(dev, priv);
|
||||
|
||||
|
@ -1948,7 +1948,6 @@ static int tda998x_create(struct device *dev)
|
|||
tda998x_audio_codec_init(priv, &client->dev);
|
||||
}
|
||||
|
||||
priv->bridge.funcs = &tda998x_bridge_funcs;
|
||||
#ifdef CONFIG_OF
|
||||
priv->bridge.of_node = dev->of_node;
|
||||
#endif
|
||||
|
|
|
@ -1317,7 +1317,6 @@ static int ti_sn_bridge_probe(struct auxiliary_device *adev,
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
pdata->bridge.funcs = &ti_sn_bridge_funcs;
|
||||
pdata->bridge.of_node = np;
|
||||
pdata->bridge.type = pdata->next_bridge->type == DRM_MODE_CONNECTOR_DisplayPort
|
||||
? DRM_MODE_CONNECTOR_DisplayPort : DRM_MODE_CONNECTOR_eDP;
|
||||
|
@ -1907,9 +1906,9 @@ static int ti_sn65dsi86_probe(struct i2c_client *client)
|
|||
return -ENODEV;
|
||||
}
|
||||
|
||||
pdata = devm_kzalloc(dev, sizeof(struct ti_sn65dsi86), GFP_KERNEL);
|
||||
if (!pdata)
|
||||
return -ENOMEM;
|
||||
pdata = devm_drm_bridge_alloc(dev, struct ti_sn65dsi86, bridge, &ti_sn_bridge_funcs);
|
||||
if (IS_ERR(pdata))
|
||||
return PTR_ERR(pdata);
|
||||
dev_set_drvdata(dev, pdata);
|
||||
pdata->dev = dev;
|
||||
|
||||
|
|
|
@ -256,3 +256,171 @@ drm_hdmi_compute_mode_clock(const struct drm_display_mode *mode,
|
|||
return DIV_ROUND_CLOSEST_ULL(clock * bpc, 8);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_hdmi_compute_mode_clock);
|
||||
|
||||
struct drm_hdmi_acr_n_cts_entry {
|
||||
unsigned int n;
|
||||
unsigned int cts;
|
||||
};
|
||||
|
||||
struct drm_hdmi_acr_data {
|
||||
unsigned long tmds_clock_khz;
|
||||
struct drm_hdmi_acr_n_cts_entry n_cts_32k,
|
||||
n_cts_44k1,
|
||||
n_cts_48k;
|
||||
};
|
||||
|
||||
static const struct drm_hdmi_acr_data hdmi_acr_n_cts[] = {
|
||||
{
|
||||
/* "Other" entry */
|
||||
.n_cts_32k = { .n = 4096, },
|
||||
.n_cts_44k1 = { .n = 6272, },
|
||||
.n_cts_48k = { .n = 6144, },
|
||||
}, {
|
||||
.tmds_clock_khz = 25175,
|
||||
.n_cts_32k = { .n = 4576, .cts = 28125, },
|
||||
.n_cts_44k1 = { .n = 7007, .cts = 31250, },
|
||||
.n_cts_48k = { .n = 6864, .cts = 28125, },
|
||||
}, {
|
||||
.tmds_clock_khz = 25200,
|
||||
.n_cts_32k = { .n = 4096, .cts = 25200, },
|
||||
.n_cts_44k1 = { .n = 6272, .cts = 28000, },
|
||||
.n_cts_48k = { .n = 6144, .cts = 25200, },
|
||||
}, {
|
||||
.tmds_clock_khz = 27000,
|
||||
.n_cts_32k = { .n = 4096, .cts = 27000, },
|
||||
.n_cts_44k1 = { .n = 6272, .cts = 30000, },
|
||||
.n_cts_48k = { .n = 6144, .cts = 27000, },
|
||||
}, {
|
||||
.tmds_clock_khz = 27027,
|
||||
.n_cts_32k = { .n = 4096, .cts = 27027, },
|
||||
.n_cts_44k1 = { .n = 6272, .cts = 30030, },
|
||||
.n_cts_48k = { .n = 6144, .cts = 27027, },
|
||||
}, {
|
||||
.tmds_clock_khz = 54000,
|
||||
.n_cts_32k = { .n = 4096, .cts = 54000, },
|
||||
.n_cts_44k1 = { .n = 6272, .cts = 60000, },
|
||||
.n_cts_48k = { .n = 6144, .cts = 54000, },
|
||||
}, {
|
||||
.tmds_clock_khz = 54054,
|
||||
.n_cts_32k = { .n = 4096, .cts = 54054, },
|
||||
.n_cts_44k1 = { .n = 6272, .cts = 60060, },
|
||||
.n_cts_48k = { .n = 6144, .cts = 54054, },
|
||||
}, {
|
||||
.tmds_clock_khz = 74176,
|
||||
.n_cts_32k = { .n = 11648, .cts = 210937, }, /* and 210938 */
|
||||
.n_cts_44k1 = { .n = 17836, .cts = 234375, },
|
||||
.n_cts_48k = { .n = 11648, .cts = 140625, },
|
||||
}, {
|
||||
.tmds_clock_khz = 74250,
|
||||
.n_cts_32k = { .n = 4096, .cts = 74250, },
|
||||
.n_cts_44k1 = { .n = 6272, .cts = 82500, },
|
||||
.n_cts_48k = { .n = 6144, .cts = 74250, },
|
||||
}, {
|
||||
.tmds_clock_khz = 148352,
|
||||
.n_cts_32k = { .n = 11648, .cts = 421875, },
|
||||
.n_cts_44k1 = { .n = 8918, .cts = 234375, },
|
||||
.n_cts_48k = { .n = 5824, .cts = 140625, },
|
||||
}, {
|
||||
.tmds_clock_khz = 148500,
|
||||
.n_cts_32k = { .n = 4096, .cts = 148500, },
|
||||
.n_cts_44k1 = { .n = 6272, .cts = 165000, },
|
||||
.n_cts_48k = { .n = 6144, .cts = 148500, },
|
||||
}, {
|
||||
.tmds_clock_khz = 296703,
|
||||
.n_cts_32k = { .n = 5824, .cts = 421875, },
|
||||
.n_cts_44k1 = { .n = 4459, .cts = 234375, },
|
||||
.n_cts_48k = { .n = 5824, .cts = 281250, },
|
||||
}, {
|
||||
.tmds_clock_khz = 297000,
|
||||
.n_cts_32k = { .n = 3072, .cts = 222750, },
|
||||
.n_cts_44k1 = { .n = 4704, .cts = 247500, },
|
||||
.n_cts_48k = { .n = 5120, .cts = 247500, },
|
||||
}, {
|
||||
.tmds_clock_khz = 593407,
|
||||
.n_cts_32k = { .n = 5824, .cts = 843750, },
|
||||
.n_cts_44k1 = { .n = 8918, .cts = 937500, },
|
||||
.n_cts_48k = { .n = 5824, .cts = 562500, },
|
||||
}, {
|
||||
.tmds_clock_khz = 594000,
|
||||
.n_cts_32k = { .n = 3072, .cts = 445500, },
|
||||
.n_cts_44k1 = { .n = 9408, .cts = 990000, },
|
||||
.n_cts_48k = { .n = 6144, .cts = 594000, },
|
||||
},
|
||||
};
|
||||
|
||||
static int drm_hdmi_acr_find_tmds_entry(unsigned long tmds_clock_khz)
|
||||
{
|
||||
int i;
|
||||
|
||||
/* skip the "other" entry */
|
||||
for (i = 1; i < ARRAY_SIZE(hdmi_acr_n_cts); i++) {
|
||||
if (hdmi_acr_n_cts[i].tmds_clock_khz == tmds_clock_khz)
|
||||
return i;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* drm_hdmi_acr_get_n_cts() - get N and CTS values for Audio Clock Regeneration
|
||||
*
|
||||
* @tmds_char_rate: TMDS clock (char rate) as used by the HDMI connector
|
||||
* @sample_rate: audio sample rate
|
||||
* @out_n: a pointer to write the N value
|
||||
* @out_cts: a pointer to write the CTS value
|
||||
*
|
||||
* Get the N and CTS values (either by calculating them or by returning data
|
||||
* from the tables. This follows the HDMI 1.4b Section 7.2 "Audio Sample Clock
|
||||
* Capture and Regeneration".
|
||||
*
|
||||
* Note, @sample_rate corresponds to the Fs value, see sections 7.2.4 - 7.2.6
|
||||
* on how to select Fs for non-L-PCM formats.
|
||||
*/
|
||||
void
|
||||
drm_hdmi_acr_get_n_cts(unsigned long long tmds_char_rate,
|
||||
unsigned int sample_rate,
|
||||
unsigned int *out_n,
|
||||
unsigned int *out_cts)
|
||||
{
|
||||
/* be a bit more tolerant, especially for the 1.001 entries */
|
||||
unsigned long tmds_clock_khz = DIV_ROUND_CLOSEST_ULL(tmds_char_rate, 1000);
|
||||
const struct drm_hdmi_acr_n_cts_entry *entry;
|
||||
unsigned int n, cts, mult;
|
||||
int tmds_idx;
|
||||
|
||||
tmds_idx = drm_hdmi_acr_find_tmds_entry(tmds_clock_khz);
|
||||
|
||||
/*
|
||||
* Don't change the order, 192 kHz is divisible by 48k and 32k, but it
|
||||
* should use 48k entry.
|
||||
*/
|
||||
if (sample_rate % 48000 == 0) {
|
||||
entry = &hdmi_acr_n_cts[tmds_idx].n_cts_48k;
|
||||
mult = sample_rate / 48000;
|
||||
} else if (sample_rate % 44100 == 0) {
|
||||
entry = &hdmi_acr_n_cts[tmds_idx].n_cts_44k1;
|
||||
mult = sample_rate / 44100;
|
||||
} else if (sample_rate % 32000 == 0) {
|
||||
entry = &hdmi_acr_n_cts[tmds_idx].n_cts_32k;
|
||||
mult = sample_rate / 32000;
|
||||
} else {
|
||||
entry = NULL;
|
||||
}
|
||||
|
||||
if (entry) {
|
||||
n = entry->n * mult;
|
||||
cts = entry->cts;
|
||||
} else {
|
||||
/* Recommended optimal value, HDMI 1.4b, Section 7.2.1 */
|
||||
n = 128 * sample_rate / 1000;
|
||||
cts = 0;
|
||||
}
|
||||
|
||||
if (!cts)
|
||||
cts = DIV_ROUND_CLOSEST_ULL(tmds_char_rate * n,
|
||||
128 * sample_rate);
|
||||
|
||||
*out_n = n;
|
||||
*out_cts = cts;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_hdmi_acr_get_n_cts);
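The helper either takes N/CTS from the table above or, for rates not in the table, falls back to the HDMI 1.4b relation CTS = (f_TMDS * N) / (128 * Fs). A minimal, hypothetical usage sketch follows; the example_hdmi names are placeholders and not part of this series. For a 148.5 MHz TMDS character rate and 48 kHz audio the table entry gives N = 6144 and CTS = 148500.

/* Hypothetical driver-side usage sketch; example_hdmi_* are placeholders. */
static void example_hdmi_update_acr(struct example_hdmi *hdmi,
				    unsigned long long tmds_char_rate)
{
	unsigned int n, cts;

	/* e.g. tmds_char_rate = 148500000 (148.5 MHz), 48 kHz audio */
	drm_hdmi_acr_get_n_cts(tmds_char_rate, 48000, &n, &cts);

	example_hdmi_write_acr(hdmi, n, cts);	/* device-specific register write */
}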
|
||||
|
|
|
@@ -66,6 +66,7 @@ struct drm_edid;
 #define DATA_BLOCK_2_STEREO_DISPLAY_INTERFACE 0x27
 #define DATA_BLOCK_2_TILED_DISPLAY_TOPOLOGY 0x28
 #define DATA_BLOCK_2_CONTAINER_ID 0x29
+#define DATA_BLOCK_2_TYPE_10_FORMULA_TIMING 0x2a
 #define DATA_BLOCK_2_VENDOR_SPECIFIC 0x7e
 #define DATA_BLOCK_2_CTA_DISPLAY_ID 0x81

@@ -114,20 +115,32 @@ struct displayid_tiled_block {
 struct displayid_detailed_timings_1 {
 	u8 pixel_clock[3];
 	u8 flags;
-	u8 hactive[2];
-	u8 hblank[2];
-	u8 hsync[2];
-	u8 hsw[2];
-	u8 vactive[2];
-	u8 vblank[2];
-	u8 vsync[2];
-	u8 vsw[2];
+	__le16 hactive;
+	__le16 hblank;
+	__le16 hsync;
+	__le16 hsw;
+	__le16 vactive;
+	__le16 vblank;
+	__le16 vsync;
+	__le16 vsw;
 } __packed;

 struct displayid_detailed_timing_block {
 	struct displayid_block base;
 	struct displayid_detailed_timings_1 timings[];
-};
+} __packed;
+
+struct displayid_formula_timings_9 {
+	u8 flags;
+	__le16 hactive;
+	__le16 vactive;
+	u8 vrefresh;
+} __packed;
+
+struct displayid_formula_timing_block {
+	struct displayid_block base;
+	struct displayid_formula_timings_9 timings[];
+} __packed;

 #define DISPLAYID_VESA_MSO_OVERLAP GENMASK(3, 0)
 #define DISPLAYID_VESA_MSO_MODE GENMASK(6, 5)

@ -6760,23 +6760,23 @@ out:
|
|||
}
|
||||
|
||||
static struct drm_display_mode *drm_mode_displayid_detailed(struct drm_device *dev,
|
||||
struct displayid_detailed_timings_1 *timings,
|
||||
const struct displayid_detailed_timings_1 *timings,
|
||||
bool type_7)
|
||||
{
|
||||
struct drm_display_mode *mode;
|
||||
unsigned pixel_clock = (timings->pixel_clock[0] |
|
||||
(timings->pixel_clock[1] << 8) |
|
||||
(timings->pixel_clock[2] << 16)) + 1;
|
||||
unsigned hactive = (timings->hactive[0] | timings->hactive[1] << 8) + 1;
|
||||
unsigned hblank = (timings->hblank[0] | timings->hblank[1] << 8) + 1;
|
||||
unsigned hsync = (timings->hsync[0] | (timings->hsync[1] & 0x7f) << 8) + 1;
|
||||
unsigned hsync_width = (timings->hsw[0] | timings->hsw[1] << 8) + 1;
|
||||
unsigned vactive = (timings->vactive[0] | timings->vactive[1] << 8) + 1;
|
||||
unsigned vblank = (timings->vblank[0] | timings->vblank[1] << 8) + 1;
|
||||
unsigned vsync = (timings->vsync[0] | (timings->vsync[1] & 0x7f) << 8) + 1;
|
||||
unsigned vsync_width = (timings->vsw[0] | timings->vsw[1] << 8) + 1;
|
||||
bool hsync_positive = (timings->hsync[1] >> 7) & 0x1;
|
||||
bool vsync_positive = (timings->vsync[1] >> 7) & 0x1;
|
||||
unsigned int pixel_clock = (timings->pixel_clock[0] |
|
||||
(timings->pixel_clock[1] << 8) |
|
||||
(timings->pixel_clock[2] << 16)) + 1;
|
||||
unsigned int hactive = le16_to_cpu(timings->hactive) + 1;
|
||||
unsigned int hblank = le16_to_cpu(timings->hblank) + 1;
|
||||
unsigned int hsync = (le16_to_cpu(timings->hsync) & 0x7fff) + 1;
|
||||
unsigned int hsync_width = le16_to_cpu(timings->hsw) + 1;
|
||||
unsigned int vactive = le16_to_cpu(timings->vactive) + 1;
|
||||
unsigned int vblank = le16_to_cpu(timings->vblank) + 1;
|
||||
unsigned int vsync = (le16_to_cpu(timings->vsync) & 0x7fff) + 1;
|
||||
unsigned int vsync_width = le16_to_cpu(timings->vsw) + 1;
|
||||
bool hsync_positive = le16_to_cpu(timings->hsync) & (1 << 15);
|
||||
bool vsync_positive = le16_to_cpu(timings->vsync) & (1 << 15);
|
||||
|
||||
mode = drm_mode_create(dev);
|
||||
if (!mode)
|
||||
|
@ -6833,6 +6833,66 @@ static int add_displayid_detailed_1_modes(struct drm_connector *connector,
|
|||
return num_modes;
|
||||
}
|
||||
|
||||
static struct drm_display_mode *drm_mode_displayid_formula(struct drm_device *dev,
|
||||
const struct displayid_formula_timings_9 *timings,
|
||||
bool type_10)
|
||||
{
|
||||
struct drm_display_mode *mode;
|
||||
u16 hactive = le16_to_cpu(timings->hactive) + 1;
|
||||
u16 vactive = le16_to_cpu(timings->vactive) + 1;
|
||||
u8 timing_formula = timings->flags & 0x7;
|
||||
|
||||
/* TODO: support RB-v2 & RB-v3 */
|
||||
if (timing_formula > 1)
|
||||
return NULL;
|
||||
|
||||
/* TODO: support video-optimized refresh rate */
|
||||
if (timings->flags & (1 << 4))
|
||||
drm_dbg_kms(dev, "Fractional vrefresh is not implemented, proceeding with non-video-optimized refresh rate");
|
||||
|
||||
mode = drm_cvt_mode(dev, hactive, vactive, timings->vrefresh + 1, timing_formula == 1, false, false);
|
||||
if (!mode)
|
||||
return NULL;
|
||||
|
||||
/* TODO: interpret S3D flags */
|
||||
|
||||
mode->type = DRM_MODE_TYPE_DRIVER;
|
||||
drm_mode_set_name(mode);
|
||||
|
||||
return mode;
|
||||
}
|
||||
|
||||
static int add_displayid_formula_modes(struct drm_connector *connector,
|
||||
const struct displayid_block *block)
|
||||
{
|
||||
const struct displayid_formula_timing_block *formula_block = (struct displayid_formula_timing_block *)block;
|
||||
int num_timings;
|
||||
struct drm_display_mode *newmode;
|
||||
int num_modes = 0;
|
||||
bool type_10 = block->tag == DATA_BLOCK_2_TYPE_10_FORMULA_TIMING;
|
||||
int timing_size = 6 + ((formula_block->base.rev & 0x70) >> 4);
|
||||
|
||||
/* extended blocks are not supported yet */
|
||||
if (timing_size != 6)
|
||||
return 0;
|
||||
|
||||
if (block->num_bytes % timing_size)
|
||||
return 0;
|
||||
|
||||
num_timings = block->num_bytes / timing_size;
|
||||
for (int i = 0; i < num_timings; i++) {
|
||||
const struct displayid_formula_timings_9 *timings = &formula_block->timings[i];
|
||||
|
||||
newmode = drm_mode_displayid_formula(connector->dev, timings, type_10);
|
||||
if (!newmode)
|
||||
continue;
|
||||
|
||||
drm_mode_probed_add(connector, newmode);
|
||||
num_modes++;
|
||||
}
|
||||
return num_modes;
|
||||
}
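For reference, a sketch of one 6-byte Type-10 descriptor that the parser above would accept, assuming the field packing of struct displayid_formula_timings_9 shown earlier and the "stored value is one less than the real value" convention used in the decode. This is illustrative only, not taken from a real DisplayID dump; it would produce a CVT reduced-blanking 1920x1080@60 mode.

static const struct displayid_formula_timings_9 example_timing = {
	.flags    = 0x01,			/* bits 2:0 == 1: CVT reduced blanking */
	.hactive  = cpu_to_le16(1920 - 1),
	.vactive  = cpu_to_le16(1080 - 1),
	.vrefresh = 60 - 1,
};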
|
||||
|
||||
static int add_displayid_detailed_modes(struct drm_connector *connector,
|
||||
const struct drm_edid *drm_edid)
|
||||
{
|
||||
|
@ -6845,6 +6905,9 @@ static int add_displayid_detailed_modes(struct drm_connector *connector,
|
|||
if (block->tag == DATA_BLOCK_TYPE_1_DETAILED_TIMING ||
|
||||
block->tag == DATA_BLOCK_2_TYPE_7_DETAILED_TIMING)
|
||||
num_modes += add_displayid_detailed_1_modes(connector, block);
|
||||
else if (block->tag == DATA_BLOCK_2_TYPE_9_FORMULA_TIMING ||
|
||||
block->tag == DATA_BLOCK_2_TYPE_10_FORMULA_TIMING)
|
||||
num_modes += add_displayid_formula_modes(connector, block);
|
||||
}
|
||||
displayid_iter_end(&iter);
|
||||
|
||||
|
|
|
@ -106,21 +106,21 @@ EXPORT_SYMBOL(drm_panel_remove);
|
|||
*
|
||||
* Calling this function will enable power and deassert any reset signals to
|
||||
* the panel. After this has completed it is possible to communicate with any
|
||||
* integrated circuitry via a command bus.
|
||||
*
|
||||
* Return: 0 on success or a negative error code on failure.
|
||||
* integrated circuitry via a command bus. This function cannot fail (as it is
|
||||
* called from the pre_enable call chain). There will always be a call to
|
||||
* drm_panel_disable() afterwards.
|
||||
*/
|
||||
int drm_panel_prepare(struct drm_panel *panel)
|
||||
void drm_panel_prepare(struct drm_panel *panel)
|
||||
{
|
||||
struct drm_panel_follower *follower;
|
||||
int ret;
|
||||
|
||||
if (!panel)
|
||||
return -EINVAL;
|
||||
return;
|
||||
|
||||
if (panel->prepared) {
|
||||
dev_warn(panel->dev, "Skipping prepare of already prepared panel\n");
|
||||
return 0;
|
||||
return;
|
||||
}
|
||||
|
||||
mutex_lock(&panel->follower_lock);
|
||||
|
@ -139,11 +139,8 @@ int drm_panel_prepare(struct drm_panel *panel)
|
|||
follower->funcs->panel_prepared, ret);
|
||||
}
|
||||
|
||||
ret = 0;
|
||||
exit:
|
||||
mutex_unlock(&panel->follower_lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_panel_prepare);
|
||||
|
||||
|
@ -155,16 +152,14 @@ EXPORT_SYMBOL(drm_panel_prepare);
|
|||
* reset, turn off power supplies, ...). After this function has completed, it
|
||||
* is usually no longer possible to communicate with the panel until another
|
||||
* call to drm_panel_prepare().
|
||||
*
|
||||
* Return: 0 on success or a negative error code on failure.
|
||||
*/
|
||||
int drm_panel_unprepare(struct drm_panel *panel)
|
||||
void drm_panel_unprepare(struct drm_panel *panel)
|
||||
{
|
||||
struct drm_panel_follower *follower;
|
||||
int ret;
|
||||
|
||||
if (!panel)
|
||||
return -EINVAL;
|
||||
return;
|
||||
|
||||
/*
|
||||
* If you are seeing the warning below it likely means one of two things:
|
||||
|
@ -177,7 +172,7 @@ int drm_panel_unprepare(struct drm_panel *panel)
|
|||
*/
|
||||
if (!panel->prepared) {
|
||||
dev_warn(panel->dev, "Skipping unprepare of already unprepared panel\n");
|
||||
return 0;
|
||||
return;
|
||||
}
|
||||
|
||||
mutex_lock(&panel->follower_lock);
|
||||
|
@ -196,11 +191,8 @@ int drm_panel_unprepare(struct drm_panel *panel)
|
|||
}
|
||||
panel->prepared = false;
|
||||
|
||||
ret = 0;
|
||||
exit:
|
||||
mutex_unlock(&panel->follower_lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_panel_unprepare);
|
||||
|
||||
|
@ -210,26 +202,26 @@ EXPORT_SYMBOL(drm_panel_unprepare);
|
|||
*
|
||||
* Calling this function will cause the panel display drivers to be turned on
|
||||
* and the backlight to be enabled. Content will be visible on screen after
|
||||
* this call completes.
|
||||
*
|
||||
* Return: 0 on success or a negative error code on failure.
|
||||
* this call completes. This function cannot fail (as it is called from the
|
||||
* enable call chain). There will always be a call to drm_panel_disable()
|
||||
* afterwards.
|
||||
*/
|
||||
int drm_panel_enable(struct drm_panel *panel)
|
||||
void drm_panel_enable(struct drm_panel *panel)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (!panel)
|
||||
return -EINVAL;
|
||||
return;
|
||||
|
||||
if (panel->enabled) {
|
||||
dev_warn(panel->dev, "Skipping enable of already enabled panel\n");
|
||||
return 0;
|
||||
return;
|
||||
}
|
||||
|
||||
if (panel->funcs && panel->funcs->enable) {
|
||||
ret = panel->funcs->enable(panel);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
return;
|
||||
}
|
||||
panel->enabled = true;
|
||||
|
||||
|
@ -237,8 +229,6 @@ int drm_panel_enable(struct drm_panel *panel)
|
|||
if (ret < 0)
|
||||
DRM_DEV_INFO(panel->dev, "failed to enable backlight: %d\n",
|
||||
ret);
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_panel_enable);
|
||||
|
||||
|
@ -249,15 +239,13 @@ EXPORT_SYMBOL(drm_panel_enable);
|
|||
* This will typically turn off the panel's backlight or disable the display
|
||||
* drivers. For smart panels it should still be possible to communicate with
|
||||
* the integrated circuitry via any command bus after this call.
|
||||
*
|
||||
* Return: 0 on success or a negative error code on failure.
|
||||
*/
|
||||
int drm_panel_disable(struct drm_panel *panel)
|
||||
void drm_panel_disable(struct drm_panel *panel)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (!panel)
|
||||
return -EINVAL;
|
||||
return;
|
||||
|
||||
/*
|
||||
* If you are seeing the warning below it likely means one of two things:
|
||||
|
@ -270,7 +258,7 @@ int drm_panel_disable(struct drm_panel *panel)
|
|||
*/
|
||||
if (!panel->enabled) {
|
||||
dev_warn(panel->dev, "Skipping disable of already disabled panel\n");
|
||||
return 0;
|
||||
return;
|
||||
}
|
||||
|
||||
ret = backlight_disable(panel->backlight);
|
||||
|
@ -281,11 +269,9 @@ int drm_panel_disable(struct drm_panel *panel)
|
|||
if (panel->funcs && panel->funcs->disable) {
|
||||
ret = panel->funcs->disable(panel);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
return;
|
||||
}
|
||||
panel->enabled = false;
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_panel_disable);
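With the four helpers now returning void, callers in encoder/bridge enable paths drop their error handling entirely. A sketch of the resulting caller-side shape; the example_encoder names and the stream start/stop helpers are hypothetical and not part of this series.

static void example_encoder_atomic_enable(struct drm_encoder *encoder,
					  struct drm_atomic_state *state)
{
	struct example_encoder *enc = to_example_encoder(encoder);

	drm_panel_prepare(enc->panel);		/* can no longer fail */
	example_encoder_start_stream(enc);
	drm_panel_enable(enc->panel);
}

static void example_encoder_atomic_disable(struct drm_encoder *encoder,
					   struct drm_atomic_state *state)
{
	struct example_encoder *enc = to_example_encoder(encoder);

	drm_panel_disable(enc->panel);
	example_encoder_stop_stream(enc);
	drm_panel_unprepare(enc->panel);
}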
|
||||
|
||||
|
|
|
@ -7,6 +7,7 @@
|
|||
*/
|
||||
|
||||
#include <linux/font.h>
|
||||
#include <linux/highmem.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/iosys-map.h>
|
||||
#include <linux/kdebug.h>
|
||||
|
@ -154,6 +155,90 @@ static void drm_panic_blit_pixel(struct drm_scanout_buffer *sb, struct drm_rect
|
|||
sb->set_pixel(sb, clip->x1 + x, clip->y1 + y, fg_color);
|
||||
}
|
||||
|
||||
static void drm_panic_write_pixel16(void *vaddr, unsigned int offset, u16 color)
|
||||
{
|
||||
u16 *p = vaddr + offset;
|
||||
|
||||
*p = color;
|
||||
}
|
||||
|
||||
static void drm_panic_write_pixel24(void *vaddr, unsigned int offset, u32 color)
|
||||
{
|
||||
u8 *p = vaddr + offset;
|
||||
|
||||
*p++ = color & 0xff;
|
||||
color >>= 8;
|
||||
*p++ = color & 0xff;
|
||||
color >>= 8;
|
||||
*p = color & 0xff;
|
||||
}
|
||||
|
||||
static void drm_panic_write_pixel32(void *vaddr, unsigned int offset, u32 color)
|
||||
{
|
||||
u32 *p = vaddr + offset;
|
||||
|
||||
*p = color;
|
||||
}
|
||||
|
||||
static void drm_panic_write_pixel(void *vaddr, unsigned int offset, u32 color, unsigned int cpp)
|
||||
{
|
||||
switch (cpp) {
|
||||
case 2:
|
||||
drm_panic_write_pixel16(vaddr, offset, color);
|
||||
break;
|
||||
case 3:
|
||||
drm_panic_write_pixel24(vaddr, offset, color);
|
||||
break;
|
||||
case 4:
|
||||
drm_panic_write_pixel32(vaddr, offset, color);
|
||||
break;
|
||||
default:
|
||||
pr_debug_once("Can't blit with pixel width %d\n", cpp);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* The scanout buffer pages are not mapped, so for each pixel,
|
||||
* use kmap_local_page_try_from_panic() to map the page, and write the pixel.
|
||||
* Try to keep the map from the previous pixel, to avoid too much map/unmap.
|
||||
*/
|
||||
static void drm_panic_blit_page(struct page **pages, unsigned int dpitch,
|
||||
unsigned int cpp, const u8 *sbuf8,
|
||||
unsigned int spitch, struct drm_rect *clip,
|
||||
unsigned int scale, u32 fg32)
|
||||
{
|
||||
unsigned int y, x;
|
||||
unsigned int page = ~0;
|
||||
unsigned int height = drm_rect_height(clip);
|
||||
unsigned int width = drm_rect_width(clip);
|
||||
void *vaddr = NULL;
|
||||
|
||||
for (y = 0; y < height; y++) {
|
||||
for (x = 0; x < width; x++) {
|
||||
if (drm_draw_is_pixel_fg(sbuf8, spitch, x / scale, y / scale)) {
|
||||
unsigned int new_page;
|
||||
unsigned int offset;
|
||||
|
||||
offset = (y + clip->y1) * dpitch + (x + clip->x1) * cpp;
|
||||
new_page = offset >> PAGE_SHIFT;
|
||||
offset = offset % PAGE_SIZE;
|
||||
if (new_page != page) {
|
||||
if (!pages[new_page])
|
||||
continue;
|
||||
if (vaddr)
|
||||
kunmap_local(vaddr);
|
||||
page = new_page;
|
||||
vaddr = kmap_local_page_try_from_panic(pages[page]);
|
||||
}
|
||||
if (vaddr)
|
||||
drm_panic_write_pixel(vaddr, offset, fg32, cpp);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (vaddr)
|
||||
kunmap_local(vaddr);
|
||||
}
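To make the page/offset split above concrete, a short worked example under assumed values (4 KiB pages, XRGB8888, a 1920-pixel-wide destination):

/*
 * Worked example, assuming PAGE_SIZE = 4096, XRGB8888 (cpp = 4),
 * dpitch = 7680 (1920 * 4), clip->x1 = clip->y1 = 0 and a pixel at
 * x = 100, y = 50:
 *
 *	offset   = 50 * 7680 + 100 * 4 = 384400
 *	new_page = 384400 >> PAGE_SHIFT = 93
 *	offset   = 384400 % PAGE_SIZE   = 3472
 *
 * i.e. the pixel lands 3472 bytes into the 94th page of the buffer, and
 * the local mapping is only refreshed when new_page changes.
 */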
|
||||
|
||||
/*
|
||||
* drm_panic_blit - convert a monochrome image to a linear framebuffer
|
||||
* @sb: destination scanout buffer
|
||||
|
@ -177,6 +262,10 @@ static void drm_panic_blit(struct drm_scanout_buffer *sb, struct drm_rect *clip,
|
|||
if (sb->set_pixel)
|
||||
return drm_panic_blit_pixel(sb, clip, sbuf8, spitch, scale, fg_color);
|
||||
|
||||
if (sb->pages)
|
||||
return drm_panic_blit_page(sb->pages, sb->pitch[0], sb->format->cpp[0],
|
||||
sbuf8, spitch, clip, scale, fg_color);
|
||||
|
||||
map = sb->map[0];
|
||||
iosys_map_incr(&map, clip->y1 * sb->pitch[0] + clip->x1 * sb->format->cpp[0]);
|
||||
|
||||
|
@ -209,6 +298,35 @@ static void drm_panic_fill_pixel(struct drm_scanout_buffer *sb,
|
|||
sb->set_pixel(sb, clip->x1 + x, clip->y1 + y, color);
|
||||
}
|
||||
|
||||
static void drm_panic_fill_page(struct page **pages, unsigned int dpitch,
|
||||
unsigned int cpp, struct drm_rect *clip,
|
||||
u32 color)
|
||||
{
|
||||
unsigned int y, x;
|
||||
unsigned int page = ~0;
|
||||
void *vaddr = NULL;
|
||||
|
||||
for (y = clip->y1; y < clip->y2; y++) {
|
||||
for (x = clip->x1; x < clip->x2; x++) {
|
||||
unsigned int new_page;
|
||||
unsigned int offset;
|
||||
|
||||
offset = y * dpitch + x * cpp;
|
||||
new_page = offset >> PAGE_SHIFT;
|
||||
offset = offset % PAGE_SIZE;
|
||||
if (new_page != page) {
|
||||
if (vaddr)
|
||||
kunmap_local(vaddr);
|
||||
page = new_page;
|
||||
vaddr = kmap_local_page_try_from_panic(pages[page]);
|
||||
}
|
||||
drm_panic_write_pixel(vaddr, offset, color, cpp);
|
||||
}
|
||||
}
|
||||
if (vaddr)
|
||||
kunmap_local(vaddr);
|
||||
}
|
||||
|
||||
/*
|
||||
* drm_panic_fill - Fill a rectangle with a color
|
||||
* @sb: destination scanout buffer
|
||||
|
@ -225,6 +343,10 @@ static void drm_panic_fill(struct drm_scanout_buffer *sb, struct drm_rect *clip,
|
|||
if (sb->set_pixel)
|
||||
return drm_panic_fill_pixel(sb, clip, color);
|
||||
|
||||
if (sb->pages)
|
||||
return drm_panic_fill_page(sb->pages, sb->pitch[0], sb->format->cpp[0],
|
||||
clip, color);
|
||||
|
||||
map = sb->map[0];
|
||||
iosys_map_incr(&map, clip->y1 * sb->pitch[0] + clip->x1 * sb->format->cpp[0]);
|
||||
|
||||
|
@ -709,16 +831,24 @@ static void draw_panic_plane(struct drm_plane *plane, const char *description)
|
|||
if (!drm_panic_trylock(plane->dev, flags))
|
||||
return;
|
||||
|
||||
drm_panic_set_description(description);
|
||||
|
||||
ret = plane->helper_private->get_scanout_buffer(plane, &sb);
|
||||
|
||||
if (!ret && drm_panic_is_format_supported(sb.format)) {
|
||||
draw_panic_dispatch(&sb);
|
||||
if (plane->helper_private->panic_flush)
|
||||
plane->helper_private->panic_flush(plane);
|
||||
}
|
||||
if (ret || !drm_panic_is_format_supported(sb.format))
|
||||
goto unlock;
|
||||
|
||||
/* One of these should be set, or it can't draw pixels */
|
||||
if (!sb.set_pixel && !sb.pages && iosys_map_is_null(&sb.map[0]))
|
||||
goto unlock;
|
||||
|
||||
drm_panic_set_description(description);
|
||||
|
||||
draw_panic_dispatch(&sb);
|
||||
if (plane->helper_private->panic_flush)
|
||||
plane->helper_private->panic_flush(plane);
|
||||
|
||||
drm_panic_clear_description();
|
||||
|
||||
unlock:
|
||||
drm_panic_unlock(plane->dev, flags);
|
||||
}
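drm_panic now understands three framebuffer descriptions, tried in that order: a set_pixel() callback, a page array, and a kernel mapping. A rough sketch of a driver-side get_scanout_buffer() for an unmapped, page-backed framebuffer follows; example_fb and its pages member are hypothetical, and the drm_scanout_buffer fields used here are assumed from the checks above.

static int example_get_scanout_buffer(struct drm_plane *plane,
				      struct drm_scanout_buffer *sb)
{
	struct example_fb *efb = to_example_fb(plane->state->fb);

	if (!efb || !efb->pages)
		return -ENODEV;

	sb->format = plane->state->fb->format;
	sb->width = plane->state->fb->width;
	sb->height = plane->state->fb->height;
	sb->pitch[0] = plane->state->fb->pitches[0];
	sb->pages = efb->pages;		/* mapped one page at a time at panic time */

	return 0;
}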
|
||||
|
||||
|
|
|
@ -315,7 +315,7 @@ impl Segment<'_> {
|
|||
}
|
||||
}
|
||||
|
||||
// Returns the size of the length field in bits, depending on QR Version.
|
||||
/// Returns the size of the length field in bits, depending on QR Version.
|
||||
fn length_bits_count(&self, version: Version) -> usize {
|
||||
let Version(v) = version;
|
||||
match self {
|
||||
|
@ -331,7 +331,7 @@ impl Segment<'_> {
|
|||
}
|
||||
}
|
||||
|
||||
// Number of characters in the segment.
|
||||
/// Number of characters in the segment.
|
||||
fn character_count(&self) -> usize {
|
||||
match self {
|
||||
Segment::Binary(data) => data.len(),
|
||||
|
@ -569,8 +569,8 @@ struct EncodedMsgIterator<'a> {
|
|||
impl Iterator for EncodedMsgIterator<'_> {
|
||||
type Item = u8;
|
||||
|
||||
// Send the bytes in interleaved mode, first byte of first block of group1,
|
||||
// then first byte of second block of group1, ...
|
||||
/// Send the bytes in interleaved mode, first byte of first block of group1,
|
||||
/// then first byte of second block of group1, ...
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
let em = self.em;
|
||||
let blocks = em.g1_blocks + em.g2_blocks;
|
||||
|
@ -638,7 +638,7 @@ impl QrImage<'_> {
|
|||
self.data.fill(0);
|
||||
}
|
||||
|
||||
// Set pixel to light color.
|
||||
/// Set pixel to light color.
|
||||
fn set(&mut self, x: u8, y: u8) {
|
||||
let off = y as usize * self.stride as usize + x as usize / 8;
|
||||
let mut v = self.data[off];
|
||||
|
@ -646,13 +646,13 @@ impl QrImage<'_> {
|
|||
self.data[off] = v;
|
||||
}
|
||||
|
||||
// Invert a module color.
|
||||
/// Invert a module color.
|
||||
fn xor(&mut self, x: u8, y: u8) {
|
||||
let off = y as usize * self.stride as usize + x as usize / 8;
|
||||
self.data[off] ^= 0x80 >> (x % 8);
|
||||
}
|
||||
|
||||
// Draw a light square at (x, y) top left corner.
|
||||
/// Draw a light square at (x, y) top left corner.
|
||||
fn draw_square(&mut self, x: u8, y: u8, size: u8) {
|
||||
for k in 0..size {
|
||||
self.set(x + k, y);
|
||||
|
@ -784,7 +784,7 @@ impl QrImage<'_> {
|
|||
vinfo != 0 && ((x >= pos && x < pos + 3 && y < 6) || (y >= pos && y < pos + 3 && x < 6))
|
||||
}
|
||||
|
||||
// Returns true if the module is reserved (Not usable for data and EC).
|
||||
/// Returns true if the module is reserved (Not usable for data and EC).
|
||||
fn is_reserved(&self, x: u8, y: u8) -> bool {
|
||||
self.is_alignment(x, y)
|
||||
|| self.is_finder(x, y)
|
||||
|
@ -793,13 +793,14 @@ impl QrImage<'_> {
|
|||
|| self.is_version_info(x, y)
|
||||
}
|
||||
|
||||
// Last module to draw, at bottom left corner.
|
||||
/// Last module to draw, at bottom left corner.
|
||||
fn is_last(&self, x: u8, y: u8) -> bool {
|
||||
x == 0 && y == self.width - 1
|
||||
}
|
||||
|
||||
// Move to the next module according to QR code order.
|
||||
// From bottom right corner, to bottom left corner.
|
||||
/// Move to the next module according to QR code order.
|
||||
///
|
||||
/// From bottom right corner, to bottom left corner.
|
||||
fn next(&self, x: u8, y: u8) -> (u8, u8) {
|
||||
let x_adj = if x <= 6 { x + 1 } else { x };
|
||||
let column_type = (self.width - x_adj) % 4;
|
||||
|
@ -812,7 +813,7 @@ impl QrImage<'_> {
|
|||
}
|
||||
}
|
||||
|
||||
// Find next module that can hold data.
|
||||
/// Find next module that can hold data.
|
||||
fn next_available(&self, x: u8, y: u8) -> (u8, u8) {
|
||||
let (mut x, mut y) = self.next(x, y);
|
||||
while self.is_reserved(x, y) && !self.is_last(x, y) {
|
||||
|
@ -841,7 +842,7 @@ impl QrImage<'_> {
|
|||
}
|
||||
}
|
||||
|
||||
// Apply checkerboard mask to all non-reserved modules.
|
||||
/// Apply checkerboard mask to all non-reserved modules.
|
||||
fn apply_mask(&mut self) {
|
||||
for x in 0..self.width {
|
||||
for y in 0..self.width {
|
||||
|
@ -852,7 +853,7 @@ impl QrImage<'_> {
|
|||
}
|
||||
}
|
||||
|
||||
// Draw the QR code with the provided data iterator.
|
||||
/// Draw the QR code with the provided data iterator.
|
||||
fn draw_all(&mut self, data: impl Iterator<Item = u8>) {
|
||||
// First clear the table, as it may have already some data.
|
||||
self.clear();
|
||||
|
|
|
@@ -39,7 +39,7 @@ int etnaviv_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
 
 int etnaviv_gem_prime_pin(struct drm_gem_object *obj)
 {
-	if (!obj->import_attach) {
+	if (!drm_gem_is_imported(obj)) {
 		struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
 
 		mutex_lock(&etnaviv_obj->lock);

@@ -51,7 +51,7 @@ int etnaviv_gem_prime_pin(struct drm_gem_object *obj)
 
 void etnaviv_gem_prime_unpin(struct drm_gem_object *obj)
 {
-	if (!obj->import_attach) {
+	if (!drm_gem_is_imported(obj)) {
 		struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
 
 		mutex_lock(&etnaviv_obj->lock);

@@ -65,7 +65,7 @@ static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj)
 	struct iosys_map map = IOSYS_MAP_INIT_VADDR(etnaviv_obj->vaddr);
 
 	if (etnaviv_obj->vaddr)
-		dma_buf_vunmap_unlocked(etnaviv_obj->base.import_attach->dmabuf, &map);
+		dma_buf_vunmap_unlocked(etnaviv_obj->base.dma_buf, &map);
 
 	/* Don't drop the pages for imported dmabuf, as they are not
 	 * ours, just free the array we allocated:

@@ -82,7 +82,7 @@ static void *etnaviv_gem_prime_vmap_impl(struct etnaviv_gem_object *etnaviv_obj)
 
 	lockdep_assert_held(&etnaviv_obj->lock);
 
-	ret = dma_buf_vmap(etnaviv_obj->base.import_attach->dmabuf, &map);
+	ret = dma_buf_vmap(etnaviv_obj->base.dma_buf, &map);
 	if (ret)
 		return NULL;
 	return map.vaddr;
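The conversion pattern is generic: test import status with drm_gem_is_imported() and reach the dma-buf through the GEM object's dma_buf pointer rather than through the attachment. A sketch of the same guard in a hypothetical driver's vmap path; example_native_vmap() is a placeholder.

static int example_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
	if (drm_gem_is_imported(obj))
		return dma_buf_vmap(obj->dma_buf, map);

	return example_native_vmap(obj, map);
}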
|
||||
|
|
|
@@ -379,11 +379,11 @@ static int exynos_mic_probe(struct platform_device *pdev)
 	struct resource res;
 	int ret, i;
 
-	mic = devm_kzalloc(dev, sizeof(*mic), GFP_KERNEL);
-	if (!mic) {
+	mic = devm_drm_bridge_alloc(dev, struct exynos_mic, bridge, &mic_bridge_funcs);
+	if (IS_ERR(mic)) {
 		DRM_DEV_ERROR(dev,
 			"mic: Failed to allocate memory for MIC object\n");
-		ret = -ENOMEM;
+		ret = PTR_ERR(mic);
 		goto err;
 	}
 

@@ -421,7 +421,6 @@ static int exynos_mic_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, mic);
 
-	mic->bridge.funcs = &mic_bridge_funcs;
 	mic->bridge.of_node = dev->of_node;
 
 	drm_bridge_add(&mic->bridge);
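The same conversion shape applies to the other drivers moved to devm_drm_bridge_alloc(): the bridge is embedded in the driver struct, the funcs pointer is wired up by the allocator, and failure is propagated with PTR_ERR(). A generic sketch with hypothetical example_bridge names:

static int example_bridge_probe(struct platform_device *pdev)
{
	struct example_bridge *ctx;

	ctx = devm_drm_bridge_alloc(&pdev->dev, struct example_bridge, bridge,
				    &example_bridge_funcs);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	/* ctx->bridge.funcs is already set by the allocator */
	ctx->bridge.of_node = pdev->dev.of_node;
	drm_bridge_add(&ctx->bridge);

	return 0;
}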
|
||||
|
|
|
@@ -1,5 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0-only
 hibmc-drm-y := hibmc_drm_drv.o hibmc_drm_de.o hibmc_drm_vdac.o hibmc_drm_i2c.o \
-	       dp/dp_aux.o dp/dp_link.o dp/dp_hw.o hibmc_drm_dp.o
+	       dp/dp_aux.o dp/dp_link.o dp/dp_hw.o dp/dp_serdes.o hibmc_drm_dp.o \
+	       hibmc_drm_debugfs.o
 
 obj-$(CONFIG_DRM_HISI_HIBMC) += hibmc-drm.o
|
|
|
@ -8,6 +8,7 @@
|
|||
#include <drm/drm_print.h>
|
||||
#include "dp_comm.h"
|
||||
#include "dp_reg.h"
|
||||
#include "dp_hw.h"
|
||||
|
||||
#define HIBMC_AUX_CMD_REQ_LEN GENMASK(7, 4)
|
||||
#define HIBMC_AUX_CMD_ADDR GENMASK(27, 8)
|
||||
|
@ -124,7 +125,8 @@ static int hibmc_dp_aux_parse_xfer(struct hibmc_dp_dev *dp, struct drm_dp_aux_ms
|
|||
/* ret >= 0: ret is the transfer size; ret < 0: ret is an error code */
|
||||
static ssize_t hibmc_dp_aux_xfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
|
||||
{
|
||||
struct hibmc_dp_dev *dp = container_of(aux, struct hibmc_dp_dev, aux);
|
||||
struct hibmc_dp *dp_priv = container_of(aux, struct hibmc_dp, aux);
|
||||
struct hibmc_dp_dev *dp = dp_priv->dp_dev;
|
||||
u32 aux_cmd;
|
||||
int ret;
|
||||
u32 val; /* val will be assigned at the beginning of readl_poll_timeout function */
|
||||
|
@ -151,14 +153,16 @@ static ssize_t hibmc_dp_aux_xfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *
|
|||
return hibmc_dp_aux_parse_xfer(dp, msg);
|
||||
}
|
||||
|
||||
void hibmc_dp_aux_init(struct hibmc_dp_dev *dp)
|
||||
void hibmc_dp_aux_init(struct hibmc_dp *dp)
|
||||
{
|
||||
hibmc_dp_reg_write_field(dp, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_SYNC_LEN_SEL, 0x0);
|
||||
hibmc_dp_reg_write_field(dp, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_TIMER_TIMEOUT, 0x1);
|
||||
hibmc_dp_reg_write_field(dp, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_MIN_PULSE_NUM,
|
||||
hibmc_dp_reg_write_field(dp->dp_dev, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_SYNC_LEN_SEL, 0x0);
|
||||
hibmc_dp_reg_write_field(dp->dp_dev, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_TIMER_TIMEOUT, 0x1);
|
||||
hibmc_dp_reg_write_field(dp->dp_dev, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_MIN_PULSE_NUM,
|
||||
HIBMC_DP_MIN_PULSE_NUM);
|
||||
|
||||
dp->aux.transfer = hibmc_dp_aux_xfer;
|
||||
dp->aux.is_remote = 0;
|
||||
dp->aux.name = "HIBMC DRM dp aux";
|
||||
dp->aux.drm_dev = dp->drm_dev;
|
||||
drm_dp_aux_init(&dp->aux);
|
||||
dp->dp_dev->aux = &dp->aux;
|
||||
}
|
||||
|
|
|
@ -13,6 +13,8 @@
|
|||
#include <linux/io.h>
|
||||
#include <drm/display/drm_dp_helper.h>
|
||||
|
||||
#include "dp_hw.h"
|
||||
|
||||
#define HIBMC_DP_LANE_NUM_MAX 2
|
||||
|
||||
struct hibmc_link_status {
|
||||
|
@ -32,12 +34,13 @@ struct hibmc_dp_link {
|
|||
};
|
||||
|
||||
struct hibmc_dp_dev {
|
||||
struct drm_dp_aux aux;
|
||||
struct drm_dp_aux *aux;
|
||||
struct drm_device *dev;
|
||||
void __iomem *base;
|
||||
struct mutex lock; /* protects concurrent RW in hibmc_dp_reg_write_field() */
|
||||
struct hibmc_dp_link link;
|
||||
u8 dpcd[DP_RECEIVER_CAP_SIZE];
|
||||
void __iomem *serdes_base;
|
||||
};
|
||||
|
||||
#define dp_field_modify(reg_value, mask, val) \
|
||||
|
@ -57,7 +60,10 @@ struct hibmc_dp_dev {
|
|||
mutex_unlock(&_dp->lock); \
|
||||
} while (0)
|
||||
|
||||
void hibmc_dp_aux_init(struct hibmc_dp_dev *dp);
|
||||
void hibmc_dp_aux_init(struct hibmc_dp *dp);
|
||||
int hibmc_dp_link_training(struct hibmc_dp_dev *dp);
|
||||
int hibmc_dp_serdes_init(struct hibmc_dp_dev *dp);
|
||||
int hibmc_dp_serdes_rate_switch(u8 rate, struct hibmc_dp_dev *dp);
|
||||
int hibmc_dp_serdes_set_tx_cfg(struct hibmc_dp_dev *dp, u8 train_set[HIBMC_DP_LANE_NUM_MAX]);
|
||||
|
||||
#endif
|
||||
|
|
|
@ -15,5 +15,7 @@
|
|||
#define HIBMC_DP_CLK_EN 0x7
|
||||
#define HIBMC_DP_SYNC_EN_MASK 0x3
|
||||
#define HIBMC_DP_LINK_RATE_CAL 27
|
||||
#define HIBMC_DP_SYNC_DELAY(lanes) ((lanes) == 0x2 ? 86 : 46)
|
||||
#define HIBMC_DP_INT_ENABLE 0xc
|
||||
|
||||
#endif
|
||||
|
|
|
@ -72,6 +72,9 @@ static void hibmc_dp_set_sst(struct hibmc_dp_dev *dp, struct drm_display_mode *m
|
|||
HIBMC_DP_CFG_STREAM_HTOTAL_SIZE, htotal_size);
|
||||
hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_HORIZONTAL_SIZE,
|
||||
HIBMC_DP_CFG_STREAM_HBLANK_SIZE, hblank_size);
|
||||
hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_PACKET,
|
||||
HIBMC_DP_CFG_STREAM_SYNC_CALIBRATION,
|
||||
HIBMC_DP_SYNC_DELAY(dp->link.cap.lanes));
|
||||
}
|
||||
|
||||
static void hibmc_dp_link_cfg(struct hibmc_dp_dev *dp, struct drm_display_mode *mode)
|
||||
|
@ -151,6 +154,7 @@ int hibmc_dp_hw_init(struct hibmc_dp *dp)
|
|||
{
|
||||
struct drm_device *drm_dev = dp->drm_dev;
|
||||
struct hibmc_dp_dev *dp_dev;
|
||||
int ret;
|
||||
|
||||
dp_dev = devm_kzalloc(drm_dev->dev, sizeof(struct hibmc_dp_dev), GFP_KERNEL);
|
||||
if (!dp_dev)
|
||||
|
@ -163,10 +167,14 @@ int hibmc_dp_hw_init(struct hibmc_dp *dp)
|
|||
dp_dev->dev = drm_dev;
|
||||
dp_dev->base = dp->mmio + HIBMC_DP_OFFSET;
|
||||
|
||||
hibmc_dp_aux_init(dp_dev);
|
||||
hibmc_dp_aux_init(dp);
|
||||
|
||||
ret = hibmc_dp_serdes_init(dp_dev);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
dp_dev->link.cap.lanes = 0x2;
|
||||
dp_dev->link.cap.link_rate = DP_LINK_BW_2_7;
|
||||
dp_dev->link.cap.link_rate = DP_LINK_BW_8_1;
|
||||
|
||||
/* hdcp data */
|
||||
writel(HIBMC_DP_HDCP, dp_dev->base + HIBMC_DP_HDCP_CFG);
|
||||
|
@ -181,6 +189,36 @@ int hibmc_dp_hw_init(struct hibmc_dp *dp)
|
|||
return 0;
|
||||
}
|
||||
|
||||
void hibmc_dp_enable_int(struct hibmc_dp *dp)
|
||||
{
|
||||
struct hibmc_dp_dev *dp_dev = dp->dp_dev;
|
||||
|
||||
writel(HIBMC_DP_INT_ENABLE, dp_dev->base + HIBMC_DP_INTR_ENABLE);
|
||||
}
|
||||
|
||||
void hibmc_dp_disable_int(struct hibmc_dp *dp)
|
||||
{
|
||||
struct hibmc_dp_dev *dp_dev = dp->dp_dev;
|
||||
|
||||
writel(0, dp_dev->base + HIBMC_DP_INTR_ENABLE);
|
||||
writel(HIBMC_DP_INT_RST, dp_dev->base + HIBMC_DP_INTR_ORIGINAL_STATUS);
|
||||
}
|
||||
|
||||
void hibmc_dp_hpd_cfg(struct hibmc_dp *dp)
|
||||
{
|
||||
struct hibmc_dp_dev *dp_dev = dp->dp_dev;
|
||||
|
||||
hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_SYNC_LEN_SEL, 0x0);
|
||||
hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_TIMER_TIMEOUT, 0x1);
|
||||
hibmc_dp_reg_write_field(dp->dp_dev, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_MIN_PULSE_NUM, 0x9);
|
||||
writel(HIBMC_DP_HDCP, dp_dev->base + HIBMC_DP_HDCP_CFG);
|
||||
writel(0, dp_dev->base + HIBMC_DP_INTR_ENABLE);
|
||||
writel(HIBMC_DP_INT_RST, dp_dev->base + HIBMC_DP_INTR_ORIGINAL_STATUS);
|
||||
writel(HIBMC_DP_INT_ENABLE, dp_dev->base + HIBMC_DP_INTR_ENABLE);
|
||||
writel(HIBMC_DP_DPTX_RST, dp_dev->base + HIBMC_DP_DPTX_RST_CTRL);
|
||||
writel(HIBMC_DP_CLK_EN, dp_dev->base + HIBMC_DP_DPTX_CLK_CTRL);
|
||||
}
|
||||
|
||||
void hibmc_dp_display_en(struct hibmc_dp *dp, bool enable)
|
||||
{
|
||||
struct hibmc_dp_dev *dp_dev = dp->dp_dev;
|
||||
|
@ -218,3 +256,52 @@ int hibmc_dp_mode_set(struct hibmc_dp *dp, struct drm_display_mode *mode)
|
|||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void hibmc_dp_reset_link(struct hibmc_dp *dp)
|
||||
{
|
||||
dp->dp_dev->link.status.clock_recovered = false;
|
||||
dp->dp_dev->link.status.channel_equalized = false;
|
||||
}
|
||||
|
||||
static const struct hibmc_dp_color_raw g_rgb_raw[] = {
|
||||
{CBAR_COLOR_BAR, 0x000, 0x000, 0x000},
|
||||
{CBAR_WHITE, 0xfff, 0xfff, 0xfff},
|
||||
{CBAR_RED, 0xfff, 0x000, 0x000},
|
||||
{CBAR_ORANGE, 0xfff, 0x800, 0x000},
|
||||
{CBAR_YELLOW, 0xfff, 0xfff, 0x000},
|
||||
{CBAR_GREEN, 0x000, 0xfff, 0x000},
|
||||
{CBAR_CYAN, 0x000, 0x800, 0x800},
|
||||
{CBAR_BLUE, 0x000, 0x000, 0xfff},
|
||||
{CBAR_PURPLE, 0x800, 0x000, 0x800},
|
||||
{CBAR_BLACK, 0x000, 0x000, 0x000},
|
||||
};
|
||||
|
||||
void hibmc_dp_set_cbar(struct hibmc_dp *dp, const struct hibmc_dp_cbar_cfg *cfg)
|
||||
{
|
||||
struct hibmc_dp_dev *dp_dev = dp->dp_dev;
|
||||
struct hibmc_dp_color_raw raw_data;
|
||||
|
||||
if (cfg->enable) {
|
||||
hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL, BIT(9),
|
||||
cfg->self_timing);
|
||||
hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL, GENMASK(8, 1),
|
||||
cfg->dynamic_rate);
|
||||
if (cfg->pattern == CBAR_COLOR_BAR) {
|
||||
hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL, BIT(10), 0);
|
||||
} else {
|
||||
raw_data = g_rgb_raw[cfg->pattern];
|
||||
drm_dbg_dp(dp->drm_dev, "r:%x g:%x b:%x\n", raw_data.r_value,
|
||||
raw_data.g_value, raw_data.b_value);
|
||||
hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL, BIT(10), 1);
|
||||
hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL, GENMASK(23, 12),
|
||||
raw_data.r_value);
|
||||
hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL1, GENMASK(23, 12),
|
||||
raw_data.g_value);
|
||||
hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL1, GENMASK(11, 0),
|
||||
raw_data.b_value);
|
||||
}
|
||||
}
|
||||
|
||||
hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL, BIT(0), cfg->enable);
|
||||
writel(HIBMC_DP_SYNC_EN_MASK, dp_dev->base + HIBMC_DP_TIMING_SYNC_CTRL);
|
||||
}
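For reference, a small caller-side sketch filling the hibmc_dp_cbar_cfg declared in dp_hw.h and handing it to the function above; the values are arbitrary examples (a static, self-timed red test pattern), not taken from the driver.

	struct hibmc_dp_cbar_cfg cfg = {
		.enable = 1,
		.self_timing = 1,
		.dynamic_rate = 0,	/* static image */
		.pattern = CBAR_RED,
	};

	hibmc_dp_set_cbar(dp, &cfg);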
|
||||
|
|
|
@ -10,19 +10,55 @@
|
|||
#include <drm/drm_encoder.h>
|
||||
#include <drm/drm_connector.h>
|
||||
#include <drm/drm_print.h>
|
||||
#include <drm/display/drm_dp_helper.h>
|
||||
|
||||
struct hibmc_dp_dev;
|
||||
|
||||
enum hibmc_dp_cbar_pattern {
|
||||
CBAR_COLOR_BAR,
|
||||
CBAR_WHITE,
|
||||
CBAR_RED,
|
||||
CBAR_ORANGE,
|
||||
CBAR_YELLOW,
|
||||
CBAR_GREEN,
|
||||
CBAR_CYAN,
|
||||
CBAR_BLUE,
|
||||
CBAR_PURPLE,
|
||||
CBAR_BLACK,
|
||||
};
|
||||
|
||||
struct hibmc_dp_color_raw {
|
||||
enum hibmc_dp_cbar_pattern pattern;
|
||||
u32 r_value;
|
||||
u32 g_value;
|
||||
u32 b_value;
|
||||
};
|
||||
|
||||
struct hibmc_dp_cbar_cfg {
|
||||
u8 enable;
|
||||
u8 self_timing;
|
||||
u8 dynamic_rate; /* 0:static, 1-255(frame):dynamic */
|
||||
enum hibmc_dp_cbar_pattern pattern;
|
||||
};
|
||||
|
||||
struct hibmc_dp {
|
||||
struct hibmc_dp_dev *dp_dev;
|
||||
struct drm_device *drm_dev;
|
||||
struct drm_encoder encoder;
|
||||
struct drm_connector connector;
|
||||
void __iomem *mmio;
|
||||
struct drm_dp_aux aux;
|
||||
struct hibmc_dp_cbar_cfg cfg;
|
||||
u32 irq_status;
|
||||
};
|
||||
|
||||
int hibmc_dp_hw_init(struct hibmc_dp *dp);
|
||||
int hibmc_dp_mode_set(struct hibmc_dp *dp, struct drm_display_mode *mode);
|
||||
void hibmc_dp_display_en(struct hibmc_dp *dp, bool enable);
|
||||
void hibmc_dp_set_cbar(struct hibmc_dp *dp, const struct hibmc_dp_cbar_cfg *cfg);
|
||||
void hibmc_dp_reset_link(struct hibmc_dp *dp);
|
||||
void hibmc_dp_hpd_cfg(struct hibmc_dp *dp);
|
||||
void hibmc_dp_enable_int(struct hibmc_dp *dp);
|
||||
void hibmc_dp_disable_int(struct hibmc_dp *dp);
|
||||
|
||||
#endif
|
||||
|
|
|
@ -9,6 +9,22 @@
|
|||
|
||||
#define HIBMC_EQ_MAX_RETRY 5
|
||||
|
||||
static inline int hibmc_dp_get_serdes_rate_cfg(struct hibmc_dp_dev *dp)
|
||||
{
|
||||
switch (dp->link.cap.link_rate) {
|
||||
case DP_LINK_BW_1_62:
|
||||
return DP_SERDES_BW_1_62;
|
||||
case DP_LINK_BW_2_7:
|
||||
return DP_SERDES_BW_2_7;
|
||||
case DP_LINK_BW_5_4:
|
||||
return DP_SERDES_BW_5_4;
|
||||
case DP_LINK_BW_8_1:
|
||||
return DP_SERDES_BW_8_1;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
static int hibmc_dp_link_training_configure(struct hibmc_dp_dev *dp)
|
||||
{
|
||||
u8 buf[2];
|
||||
|
@ -26,7 +42,7 @@ static int hibmc_dp_link_training_configure(struct hibmc_dp_dev *dp)
|
|||
/* set rate and lane count */
|
||||
buf[0] = dp->link.cap.link_rate;
|
||||
buf[1] = DP_LANE_COUNT_ENHANCED_FRAME_EN | dp->link.cap.lanes;
|
||||
ret = drm_dp_dpcd_write(&dp->aux, DP_LINK_BW_SET, buf, sizeof(buf));
|
||||
ret = drm_dp_dpcd_write(dp->aux, DP_LINK_BW_SET, buf, sizeof(buf));
|
||||
if (ret != sizeof(buf)) {
|
||||
drm_dbg_dp(dp->dev, "dp aux write link rate and lanes failed, ret: %d\n", ret);
|
||||
return ret >= 0 ? -EIO : ret;
|
||||
|
@ -35,17 +51,13 @@ static int hibmc_dp_link_training_configure(struct hibmc_dp_dev *dp)
|
|||
/* set 8b/10b and downspread */
|
||||
buf[0] = DP_SPREAD_AMP_0_5;
|
||||
buf[1] = DP_SET_ANSI_8B10B;
|
||||
ret = drm_dp_dpcd_write(&dp->aux, DP_DOWNSPREAD_CTRL, buf, sizeof(buf));
|
||||
ret = drm_dp_dpcd_write(dp->aux, DP_DOWNSPREAD_CTRL, buf, sizeof(buf));
|
||||
if (ret != sizeof(buf)) {
|
||||
drm_dbg_dp(dp->dev, "dp aux write 8b/10b and downspread failed, ret: %d\n", ret);
|
||||
return ret >= 0 ? -EIO : ret;
|
||||
}
|
||||
|
||||
ret = drm_dp_read_dpcd_caps(&dp->aux, dp->dpcd);
|
||||
if (ret)
|
||||
drm_err(dp->dev, "dp aux read dpcd failed, ret: %d\n", ret);
|
||||
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int hibmc_dp_link_set_pattern(struct hibmc_dp_dev *dp, int pattern)
|
||||
|
@ -84,7 +96,7 @@ static int hibmc_dp_link_set_pattern(struct hibmc_dp_dev *dp, int pattern)
|
|||
|
||||
hibmc_dp_reg_write_field(dp, HIBMC_DP_PHYIF_CTRL0, HIBMC_DP_CFG_PAT_SEL, val);
|
||||
|
||||
ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_PATTERN_SET, &buf, sizeof(buf));
|
||||
ret = drm_dp_dpcd_write(dp->aux, DP_TRAINING_PATTERN_SET, &buf, sizeof(buf));
|
||||
if (ret != sizeof(buf)) {
|
||||
drm_dbg_dp(dp->dev, "dp aux write training pattern set failed\n");
|
||||
return ret >= 0 ? -EIO : ret;
|
||||
|
@ -108,9 +120,13 @@ static int hibmc_dp_link_training_cr_pre(struct hibmc_dp_dev *dp)
|
|||
return ret;
|
||||
|
||||
for (i = 0; i < dp->link.cap.lanes; i++)
|
||||
train_set[i] = DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
|
||||
train_set[i] = DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
|
||||
|
||||
ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, train_set, dp->link.cap.lanes);
|
||||
ret = hibmc_dp_serdes_set_tx_cfg(dp, dp->link.train_set);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = drm_dp_dpcd_write(dp->aux, DP_TRAINING_LANE0_SET, train_set, dp->link.cap.lanes);
|
||||
if (ret != dp->link.cap.lanes) {
|
||||
drm_dbg_dp(dp->dev, "dp aux write training lane set failed\n");
|
||||
return ret >= 0 ? -EIO : ret;
|
||||
|
@ -137,21 +153,29 @@ static bool hibmc_dp_link_get_adjust_train(struct hibmc_dp_dev *dp,
|
|||
return false;
|
||||
}
|
||||
|
||||
static inline int hibmc_dp_link_reduce_rate(struct hibmc_dp_dev *dp)
|
||||
static int hibmc_dp_link_reduce_rate(struct hibmc_dp_dev *dp)
|
||||
{
|
||||
int ret;
|
||||
|
||||
switch (dp->link.cap.link_rate) {
|
||||
case DP_LINK_BW_2_7:
|
||||
dp->link.cap.link_rate = DP_LINK_BW_1_62;
|
||||
return 0;
|
||||
break;
|
||||
case DP_LINK_BW_5_4:
|
||||
dp->link.cap.link_rate = DP_LINK_BW_2_7;
|
||||
return 0;
|
||||
break;
|
||||
case DP_LINK_BW_8_1:
|
||||
dp->link.cap.link_rate = DP_LINK_BW_5_4;
|
||||
return 0;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ret = hibmc_dp_get_serdes_rate_cfg(dp);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
return hibmc_dp_serdes_rate_switch(ret, dp);
|
||||
}
|
||||
|
||||
static inline int hibmc_dp_link_reduce_lane(struct hibmc_dp_dev *dp)
|
||||
|
@ -159,6 +183,7 @@ static inline int hibmc_dp_link_reduce_lane(struct hibmc_dp_dev *dp)
|
|||
switch (dp->link.cap.lanes) {
|
||||
case 0x2:
|
||||
dp->link.cap.lanes--;
|
||||
drm_dbg_dp(dp->dev, "dp link training reduce to 1 lane\n");
|
||||
break;
|
||||
case 0x1:
|
||||
drm_err(dp->dev, "dp link training reduce lane failed, already reach minimum\n");
|
||||
|
@ -185,9 +210,9 @@ static int hibmc_dp_link_training_cr(struct hibmc_dp_dev *dp)
|
|||
|
||||
voltage_tries = 1;
|
||||
for (cr_tries = 0; cr_tries < 80; cr_tries++) {
|
||||
drm_dp_link_train_clock_recovery_delay(&dp->aux, dp->dpcd);
|
||||
drm_dp_link_train_clock_recovery_delay(dp->aux, dp->dpcd);
|
||||
|
||||
ret = drm_dp_dpcd_read_link_status(&dp->aux, lane_status);
|
||||
ret = drm_dp_dpcd_read_link_status(dp->aux, lane_status);
|
||||
if (ret) {
|
||||
drm_err(dp->dev, "Get lane status failed\n");
|
||||
return ret;
|
||||
|
@ -206,7 +231,12 @@ static int hibmc_dp_link_training_cr(struct hibmc_dp_dev *dp)
|
|||
}
|
||||
|
||||
level_changed = hibmc_dp_link_get_adjust_train(dp, lane_status);
|
||||
ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, dp->link.train_set,
|
||||
|
||||
ret = hibmc_dp_serdes_set_tx_cfg(dp, dp->link.train_set);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = drm_dp_dpcd_write(dp->aux, DP_TRAINING_LANE0_SET, dp->link.train_set,
|
||||
dp->link.cap.lanes);
|
||||
if (ret != dp->link.cap.lanes) {
|
||||
drm_dbg_dp(dp->dev, "Update link training failed\n");
|
||||
|
@ -233,9 +263,9 @@ static int hibmc_dp_link_training_channel_eq(struct hibmc_dp_dev *dp)
|
|||
return ret;
|
||||
|
||||
for (eq_tries = 0; eq_tries < HIBMC_EQ_MAX_RETRY; eq_tries++) {
|
||||
drm_dp_link_train_channel_eq_delay(&dp->aux, dp->dpcd);
|
||||
drm_dp_link_train_channel_eq_delay(dp->aux, dp->dpcd);
|
||||
|
||||
ret = drm_dp_dpcd_read_link_status(&dp->aux, lane_status);
|
||||
ret = drm_dp_dpcd_read_link_status(dp->aux, lane_status);
|
||||
if (ret) {
|
||||
drm_err(dp->dev, "get lane status failed\n");
|
||||
break;
|
||||
|
@ -255,7 +285,12 @@ static int hibmc_dp_link_training_channel_eq(struct hibmc_dp_dev *dp)
|
|||
}
|
||||
|
||||
hibmc_dp_link_get_adjust_train(dp, lane_status);
|
||||
ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET,
|
||||
|
||||
ret = hibmc_dp_serdes_set_tx_cfg(dp, dp->link.train_set);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = drm_dp_dpcd_write(dp->aux, DP_TRAINING_LANE0_SET,
|
||||
dp->link.train_set, dp->link.cap.lanes);
|
||||
if (ret != dp->link.cap.lanes) {
|
||||
drm_dbg_dp(dp->dev, "Update link training failed\n");
|
||||
|
@ -295,6 +330,21 @@ int hibmc_dp_link_training(struct hibmc_dp_dev *dp)
|
|||
struct hibmc_dp_link *link = &dp->link;
|
||||
int ret;
|
||||
|
||||
ret = drm_dp_read_dpcd_caps(dp->aux, dp->dpcd);
|
||||
if (ret)
|
||||
drm_err(dp->dev, "dp aux read dpcd failed, ret: %d\n", ret);
|
||||
|
||||
dp->link.cap.link_rate = dp->dpcd[DP_MAX_LINK_RATE];
|
||||
dp->link.cap.lanes = 0x2;
|
||||
|
||||
ret = hibmc_dp_get_serdes_rate_cfg(dp);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
ret = hibmc_dp_serdes_rate_switch(ret, dp);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
while (true) {
|
||||
ret = hibmc_dp_link_training_cr_pre(dp);
|
||||
if (ret)
|
||||
|
|
|
@ -5,72 +5,128 @@
|
|||
#define DP_REG_H
|
||||
|
||||
#define HIBMC_DP_AUX_CMD_ADDR 0x50
|
||||
|
||||
#define HIBMC_DP_AUX_WR_DATA0 0x54
|
||||
#define HIBMC_DP_AUX_WR_DATA1 0x58
|
||||
#define HIBMC_DP_AUX_WR_DATA2 0x5c
|
||||
#define HIBMC_DP_AUX_WR_DATA3 0x60
|
||||
#define HIBMC_DP_AUX_RD_DATA0 0x64
|
||||
#define HIBMC_DP_AUX_REQ 0x74
|
||||
#define HIBMC_DP_AUX_STATUS 0x78
|
||||
#define HIBMC_DP_PHYIF_CTRL0 0xa0
|
||||
#define HIBMC_DP_VIDEO_CTRL 0x100
|
||||
#define HIBMC_DP_VIDEO_CONFIG0 0x104
|
||||
#define HIBMC_DP_VIDEO_CONFIG1 0x108
|
||||
#define HIBMC_DP_VIDEO_CONFIG2 0x10c
|
||||
#define HIBMC_DP_VIDEO_CONFIG3 0x110
|
||||
#define HIBMC_DP_VIDEO_PACKET 0x114
|
||||
#define HIBMC_DP_VIDEO_MSA0 0x118
|
||||
#define HIBMC_DP_VIDEO_MSA1 0x11c
|
||||
#define HIBMC_DP_VIDEO_MSA2 0x120
|
||||
#define HIBMC_DP_VIDEO_HORIZONTAL_SIZE 0X124
|
||||
#define HIBMC_DP_TIMING_GEN_CONFIG0 0x26c
|
||||
#define HIBMC_DP_TIMING_GEN_CONFIG2 0x274
|
||||
#define HIBMC_DP_TIMING_GEN_CONFIG3 0x278
|
||||
#define HIBMC_DP_HDCP_CFG 0x600
|
||||
#define HIBMC_DP_DPTX_RST_CTRL 0x700
|
||||
#define HIBMC_DP_DPTX_CLK_CTRL 0x704
|
||||
#define HIBMC_DP_DPTX_GCTL0 0x708
|
||||
#define HIBMC_DP_INTR_ENABLE 0x720
|
||||
#define HIBMC_DP_INTR_ORIGINAL_STATUS 0x728
|
||||
#define HIBMC_DP_TIMING_MODEL_CTRL 0x884
|
||||
#define HIBMC_DP_TIMING_SYNC_CTRL 0xFF0
|
||||
|
||||
#define HIBMC_DP_AUX_REQ 0x74
|
||||
#define HIBMC_DP_CFG_AUX_REQ BIT(0)
|
||||
#define HIBMC_DP_CFG_AUX_SYNC_LEN_SEL BIT(1)
|
||||
#define HIBMC_DP_CFG_AUX_TIMER_TIMEOUT BIT(2)
|
||||
#define HIBMC_DP_CFG_STREAM_FRAME_MODE BIT(6)
|
||||
#define HIBMC_DP_CFG_AUX_MIN_PULSE_NUM GENMASK(13, 9)
|
||||
#define HIBMC_DP_CFG_LANE_DATA_EN GENMASK(11, 8)
|
||||
#define HIBMC_DP_CFG_PHY_LANE_NUM GENMASK(2, 1)
|
||||
#define HIBMC_DP_CFG_AUX_REQ BIT(0)
|
||||
#define HIBMC_DP_CFG_AUX_RST_N BIT(4)
|
||||
|
||||
#define HIBMC_DP_AUX_STATUS 0x78
|
||||
#define HIBMC_DP_CFG_AUX_TIMEOUT BIT(0)
|
||||
#define HIBMC_DP_CFG_AUX_STATUS GENMASK(11, 4)
|
||||
#define HIBMC_DP_CFG_AUX_READY_DATA_BYTE GENMASK(16, 12)
|
||||
#define HIBMC_DP_CFG_AUX GENMASK(24, 17)
|
||||
#define HIBMC_DP_CFG_AUX_STATUS GENMASK(11, 4)
|
||||
|
||||
#define HIBMC_DP_PHYIF_CTRL0 0xa0
|
||||
#define HIBMC_DP_CFG_SCRAMBLE_EN BIT(0)
|
||||
#define HIBMC_DP_CFG_PAT_SEL GENMASK(7, 4)
|
||||
#define HIBMC_DP_CFG_TIMING_GEN0_HACTIVE GENMASK(31, 16)
|
||||
#define HIBMC_DP_CFG_TIMING_GEN0_HBLANK GENMASK(15, 0)
|
||||
#define HIBMC_DP_CFG_TIMING_GEN0_VACTIVE GENMASK(31, 16)
|
||||
#define HIBMC_DP_CFG_TIMING_GEN0_VBLANK GENMASK(15, 0)
|
||||
#define HIBMC_DP_CFG_TIMING_GEN0_VFRONT_PORCH GENMASK(31, 16)
|
||||
#define HIBMC_DP_CFG_STREAM_HACTIVE GENMASK(31, 16)
|
||||
#define HIBMC_DP_CFG_STREAM_HBLANK GENMASK(15, 0)
|
||||
#define HIBMC_DP_CFG_STREAM_HSYNC_WIDTH GENMASK(15, 0)
|
||||
#define HIBMC_DP_CFG_STREAM_VACTIVE GENMASK(31, 16)
|
||||
#define HIBMC_DP_CFG_STREAM_VBLANK GENMASK(15, 0)
|
||||
#define HIBMC_DP_CFG_STREAM_VFRONT_PORCH GENMASK(31, 16)
|
||||
#define HIBMC_DP_CFG_STREAM_VSYNC_WIDTH GENMASK(15, 0)
|
||||
#define HIBMC_DP_CFG_STREAM_VSTART GENMASK(31, 16)
|
||||
#define HIBMC_DP_CFG_STREAM_HSTART GENMASK(15, 0)
|
||||
#define HIBMC_DP_CFG_STREAM_VSYNC_POLARITY BIT(8)
|
||||
#define HIBMC_DP_CFG_STREAM_HSYNC_POLARITY BIT(7)
|
||||
#define HIBMC_DP_CFG_LANE_DATA_EN GENMASK(11, 8)
|
||||
|
||||
#define HIBMC_DP_VIDEO_CTRL 0x100
|
||||
#define HIBMC_DP_CFG_STREAM_RGB_ENABLE BIT(1)
|
||||
#define HIBMC_DP_CFG_STREAM_VIDEO_MAPPING GENMASK(5, 2)
|
||||
#define HIBMC_DP_CFG_PIXEL_NUM_TIMING_MODE_SEL1 GENMASK(31, 16)
|
||||
#define HIBMC_DP_CFG_STREAM_FRAME_MODE BIT(6)
|
||||
#define HIBMC_DP_CFG_STREAM_HSYNC_POLARITY BIT(7)
|
||||
#define HIBMC_DP_CFG_STREAM_VSYNC_POLARITY BIT(8)
|
||||
|
||||
#define HIBMC_DP_VIDEO_CONFIG0 0x104
|
||||
#define HIBMC_DP_CFG_STREAM_HACTIVE GENMASK(31, 16)
|
||||
#define HIBMC_DP_CFG_STREAM_HBLANK GENMASK(15, 0)
|
||||
|
||||
#define HIBMC_DP_VIDEO_CONFIG1 0x108
|
||||
#define HIBMC_DP_CFG_STREAM_VACTIVE GENMASK(31, 16)
|
||||
#define HIBMC_DP_CFG_STREAM_VBLANK GENMASK(15, 0)
|
||||
|
||||
#define HIBMC_DP_VIDEO_CONFIG2 0x10c
|
||||
#define HIBMC_DP_CFG_STREAM_HSYNC_WIDTH GENMASK(15, 0)
|
||||
|
||||
#define HIBMC_DP_VIDEO_CONFIG3 0x110
|
||||
#define HIBMC_DP_CFG_STREAM_VSYNC_WIDTH GENMASK(15, 0)
|
||||
#define HIBMC_DP_CFG_STREAM_VFRONT_PORCH GENMASK(31, 16)
|
||||
|
||||
#define HIBMC_DP_VIDEO_PACKET 0x114
|
||||
#define HIBMC_DP_CFG_STREAM_TU_SYMBOL_SIZE GENMASK(5, 0)
|
||||
#define HIBMC_DP_CFG_STREAM_TU_SYMBOL_FRAC_SIZE GENMASK(9, 6)
|
||||
#define HIBMC_DP_CFG_STREAM_SYNC_CALIBRATION GENMASK(31, 20)
|
||||
|
||||
#define HIBMC_DP_VIDEO_MSA0 0x118
|
||||
#define HIBMC_DP_CFG_STREAM_VSTART GENMASK(31, 16)
|
||||
#define HIBMC_DP_CFG_STREAM_HSTART GENMASK(15, 0)
|
||||
|
||||
#define HIBMC_DP_VIDEO_MSA1 0x11c
|
||||
#define HIBMC_DP_VIDEO_MSA2 0x120
|
||||
|
||||
#define HIBMC_DP_VIDEO_HORIZONTAL_SIZE 0X124
|
||||
#define HIBMC_DP_CFG_STREAM_HTOTAL_SIZE GENMASK(31, 16)
|
||||
#define HIBMC_DP_CFG_STREAM_HBLANK_SIZE GENMASK(15, 0)
|
||||
|
||||
#define HIBMC_DP_COLOR_BAR_CTRL 0x260
|
||||
#define HIBMC_DP_COLOR_BAR_CTRL1 0x264
|
||||
|
||||
#define HIBMC_DP_TIMING_GEN_CONFIG0 0x26c
|
||||
#define HIBMC_DP_CFG_TIMING_GEN0_HACTIVE GENMASK(31, 16)
|
||||
#define HIBMC_DP_CFG_TIMING_GEN0_HBLANK GENMASK(15, 0)
|
||||
|
||||
#define HIBMC_DP_TIMING_GEN_CONFIG2 0x274
|
||||
#define HIBMC_DP_CFG_TIMING_GEN0_VACTIVE GENMASK(31, 16)
|
||||
#define HIBMC_DP_CFG_TIMING_GEN0_VBLANK GENMASK(15, 0)
|
||||
|
||||
#define HIBMC_DP_TIMING_GEN_CONFIG3 0x278
|
||||
#define HIBMC_DP_CFG_TIMING_GEN0_VFRONT_PORCH GENMASK(31, 16)
|
||||
|
||||
#define HIBMC_DP_HDCP_CFG 0x600
|
||||
|
||||
#define HIBMC_DP_DPTX_RST_CTRL 0x700
|
||||
#define HIBMC_DP_CFG_AUX_RST_N BIT(4)
|
||||
|
||||
#define HIBMC_DP_DPTX_CLK_CTRL 0x704
|
||||
|
||||
#define HIBMC_DP_DPTX_GCTL0 0x708
|
||||
#define HIBMC_DP_CFG_PHY_LANE_NUM GENMASK(2, 1)
|
||||
|
||||
#define HIBMC_DP_INTR_ENABLE 0x720
|
||||
#define HIBMC_DP_INTR_ORIGINAL_STATUS 0x728
|
||||
|
||||
#define HIBMC_DP_TIMING_MODEL_CTRL 0x884
|
||||
#define HIBMC_DP_CFG_PIXEL_NUM_TIMING_MODE_SEL1 GENMASK(31, 16)
|
||||
|
||||
#define HIBMC_DP_TIMING_SYNC_CTRL 0xFF0
|
||||
|
||||
#define HIBMC_DP_INTSTAT 0x1e0724
|
||||
#define HIBMC_DP_INTCLR 0x1e0728
|
||||
|
||||
/* dp serdes reg */
|
||||
#define HIBMC_DP_HOST_OFFSET 0x10000
|
||||
#define HIBMC_DP_LANE0_RATE_OFFSET 0x4
|
||||
#define HIBMC_DP_LANE1_RATE_OFFSET 0xc
|
||||
#define HIBMC_DP_LANE_STATUS_OFFSET 0x10
|
||||
#define HIBMC_DP_PMA_LANE0_OFFSET 0x18
|
||||
#define HIBMC_DP_PMA_LANE1_OFFSET 0x1c
|
||||
#define HIBMC_DP_HOST_SERDES_CTRL 0x1f001c
|
||||
#define HIBMC_DP_PMA_TXDEEMPH GENMASK(18, 1)
|
||||
#define DP_SERDES_DONE 0x3
|
||||
|
||||
/* dp serdes TX-Deempth Configuration */
|
||||
#define DP_SERDES_VOL0_PRE0 0x280
|
||||
#define DP_SERDES_VOL0_PRE1 0x2300
|
||||
#define DP_SERDES_VOL0_PRE2 0x53c0
|
||||
#define DP_SERDES_VOL0_PRE3 0x8400
|
||||
#define DP_SERDES_VOL1_PRE0 0x380
|
||||
#define DP_SERDES_VOL1_PRE1 0x3440
|
||||
#define DP_SERDES_VOL1_PRE2 0x6480
|
||||
#define DP_SERDES_VOL2_PRE0 0x4c1
|
||||
#define DP_SERDES_VOL2_PRE1 0x4500
|
||||
#define DP_SERDES_VOL3_PRE0 0x600
|
||||
#define DP_SERDES_BW_8_1 0x3
|
||||
#define DP_SERDES_BW_5_4 0x2
|
||||
#define DP_SERDES_BW_2_7 0x1
|
||||
#define DP_SERDES_BW_1_62 0x0
|
||||
|
||||
#endif
|
||||
|
|
drivers/gpu/drm/hisilicon/hibmc/dp/dp_serdes.c (new file, 71 lines)
|
@ -0,0 +1,71 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
// Copyright (c) 2025 Hisilicon Limited.
|
||||
|
||||
#include <linux/delay.h>
|
||||
#include <drm/drm_device.h>
|
||||
#include <drm/drm_print.h>
|
||||
#include "dp_comm.h"
|
||||
#include "dp_config.h"
|
||||
#include "dp_reg.h"
|
||||
|
||||
int hibmc_dp_serdes_set_tx_cfg(struct hibmc_dp_dev *dp, u8 train_set[HIBMC_DP_LANE_NUM_MAX])
|
||||
{
|
||||
static const u32 serdes_tx_cfg[4][4] = { {DP_SERDES_VOL0_PRE0, DP_SERDES_VOL0_PRE1,
|
||||
DP_SERDES_VOL0_PRE2, DP_SERDES_VOL0_PRE3},
|
||||
{DP_SERDES_VOL1_PRE0, DP_SERDES_VOL1_PRE1,
|
||||
DP_SERDES_VOL1_PRE2}, {DP_SERDES_VOL2_PRE0,
|
||||
DP_SERDES_VOL2_PRE1}, {DP_SERDES_VOL3_PRE0}};
|
||||
int cfg[2];
|
||||
int i;
|
||||
|
||||
for (i = 0; i < HIBMC_DP_LANE_NUM_MAX; i++) {
|
||||
cfg[i] = serdes_tx_cfg[FIELD_GET(DP_TRAIN_VOLTAGE_SWING_MASK, train_set[i])]
|
||||
[FIELD_GET(DP_TRAIN_PRE_EMPHASIS_MASK, train_set[i])];
|
||||
if (!cfg[i])
|
||||
return -EINVAL;
|
||||
|
||||
/* lane1 offset is 4 */
|
||||
writel(FIELD_PREP(HIBMC_DP_PMA_TXDEEMPH, cfg[i]),
|
||||
dp->serdes_base + HIBMC_DP_PMA_LANE0_OFFSET + i * 4);
|
||||
}
|
||||
|
||||
usleep_range(300, 500);
|
||||
|
||||
if (readl(dp->serdes_base + HIBMC_DP_LANE_STATUS_OFFSET) != DP_SERDES_DONE) {
|
||||
drm_dbg_dp(dp->dev, "dp serdes cfg failed\n");
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int hibmc_dp_serdes_rate_switch(u8 rate, struct hibmc_dp_dev *dp)
|
||||
{
|
||||
writel(rate, dp->serdes_base + HIBMC_DP_LANE0_RATE_OFFSET);
|
||||
writel(rate, dp->serdes_base + HIBMC_DP_LANE1_RATE_OFFSET);
|
||||
|
||||
usleep_range(300, 500);
|
||||
|
||||
if (readl(dp->serdes_base + HIBMC_DP_LANE_STATUS_OFFSET) != DP_SERDES_DONE) {
|
||||
drm_dbg_dp(dp->dev, "dp serdes rate switching failed\n");
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
if (rate < DP_SERDES_BW_8_1)
|
||||
drm_dbg_dp(dp->dev, "reducing serdes rate to :%d\n",
|
||||
rate ? rate * HIBMC_DP_LINK_RATE_CAL * 10 : 162);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int hibmc_dp_serdes_init(struct hibmc_dp_dev *dp)
|
||||
{
|
||||
dp->serdes_base = dp->base + HIBMC_DP_HOST_OFFSET;
|
||||
|
||||
writel(FIELD_PREP(HIBMC_DP_PMA_TXDEEMPH, DP_SERDES_VOL0_PRE0),
|
||||
dp->serdes_base + HIBMC_DP_PMA_LANE0_OFFSET);
|
||||
writel(FIELD_PREP(HIBMC_DP_PMA_TXDEEMPH, DP_SERDES_VOL0_PRE0),
|
||||
dp->serdes_base + HIBMC_DP_PMA_LANE1_OFFSET);
|
||||
|
||||
return hibmc_dp_serdes_rate_switch(DP_SERDES_BW_8_1, dp);
|
||||
}
|
drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_debugfs.c (new file, 104 lines)
|
@ -0,0 +1,104 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
// Copyright (c) 2024 Hisilicon Limited.
|
||||
|
||||
#include <linux/debugfs.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/pci.h>
|
||||
|
||||
#include <drm/drm_drv.h>
|
||||
#include <drm/drm_file.h>
|
||||
#include <drm/drm_debugfs.h>
|
||||
#include <drm/drm_edid.h>
|
||||
|
||||
#include "hibmc_drm_drv.h"
|
||||
|
||||
#define MAX_BUF_SIZE 12
|
||||
|
||||
static ssize_t hibmc_control_write(struct file *file, const char __user *user_buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
struct hibmc_drm_private *priv = file_inode(file)->i_private;
|
||||
struct hibmc_dp_cbar_cfg *cfg = &priv->dp.cfg;
|
||||
int ret, idx;
|
||||
u8 buf[MAX_BUF_SIZE];
|
||||
|
||||
if (count >= MAX_BUF_SIZE)
|
||||
return -EINVAL;
|
||||
|
||||
if (copy_from_user(buf, user_buf, count))
|
||||
return -EFAULT;
|
||||
|
||||
buf[count] = '\0';
|
||||
|
||||
	/* Only 4 parameters are allowed; the ranges are as follows:
	 * [0] enable/disable the colorbar feature
	 *     0: enable colorbar, 1: disable colorbar
	 * [1] the timing source of colorbar displaying
	 *     0: timing follows XDP, 1: internal self timing
	 * [2] the movement of colorbar displaying
	 *     0: static colorbar image,
	 *     1~255: right-shift one color every (1~255) frames
	 * [3] the color type of colorbar displaying
	 *     0~9: color bar, white, red, orange,
	 *     yellow, green, cyan, blue, purple, black
	 */
|
||||
if (sscanf(buf, "%hhu %hhu %hhu %u", &cfg->enable, &cfg->self_timing,
|
||||
&cfg->dynamic_rate, &cfg->pattern) != 4) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (cfg->pattern > 9 || cfg->enable > 1 || cfg->self_timing > 1)
|
||||
return -EINVAL;
|
||||
|
||||
ret = drm_dev_enter(&priv->dev, &idx);
|
||||
if (!ret)
|
||||
return -ENODEV;
|
||||
|
||||
hibmc_dp_set_cbar(&priv->dp, cfg);
|
||||
|
||||
drm_dev_exit(idx);
|
||||
|
||||
return count;
|
||||
}
|
||||
|
||||
static int hibmc_dp_dbgfs_show(struct seq_file *m, void *arg)
|
||||
{
|
||||
struct hibmc_drm_private *priv = m->private;
|
||||
struct hibmc_dp_cbar_cfg *cfg = &priv->dp.cfg;
|
||||
int idx;
|
||||
|
||||
if (!drm_dev_enter(&priv->dev, &idx))
|
||||
return -ENODEV;
|
||||
|
||||
seq_printf(m, "hibmc dp colorbar cfg: %u %u %u %u\n", cfg->enable, cfg->self_timing,
|
||||
cfg->dynamic_rate, cfg->pattern);
|
||||
|
||||
drm_dev_exit(idx);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int hibmc_open(struct inode *inode, struct file *filp)
|
||||
{
|
||||
return single_open(filp, hibmc_dp_dbgfs_show, inode->i_private);
|
||||
}
|
||||
|
||||
static const struct file_operations hibmc_dbg_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.write = hibmc_control_write,
|
||||
.read = seq_read,
|
||||
.open = hibmc_open,
|
||||
.llseek = seq_lseek,
|
||||
.release = single_release,
|
||||
};
|
||||
|
||||
void hibmc_debugfs_init(struct drm_connector *connector, struct dentry *root)
|
||||
{
|
||||
struct drm_device *dev = connector->dev;
|
||||
struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
|
||||
|
||||
/* Create the file in the drm debugfs directory so we don't need to remove it manually. */
|
||||
debugfs_create_file("colorbar-cfg", 0200,
|
||||
root, priv, &hibmc_dbg_fops);
|
||||
}
|
|
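hibmc_control_write() above parses four space-separated values with sscanf("%hhu %hhu %hhu %u") and pushes them to the colorbar hardware. A minimal user-space sketch of exercising it; the exact path of the "colorbar-cfg" node depends on where DRM places the connector's debugfs directory, so the path below is only an assumption::

    /* Sketch only: the debugfs path below is an assumption, not a documented ABI. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            /* enable=1, self_timing=0, dynamic_rate=30, pattern=3 */
            const char *cfg = "1 0 30 3";
            int fd = open("/sys/kernel/debug/dri/0/DP-1/colorbar-cfg", O_WRONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (write(fd, cfg, strlen(cfg)) < 0)
                    perror("write");
            close(fd);
            return 0;
    }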
@ -13,27 +13,64 @@
|
|||
#include "hibmc_drm_drv.h"
|
||||
#include "dp/dp_hw.h"
|
||||
|
||||
#define DP_MASKED_SINK_HPD_PLUG_INT BIT(2)
|
||||
|
||||
static int hibmc_dp_connector_get_modes(struct drm_connector *connector)
|
||||
{
|
||||
const struct drm_edid *drm_edid;
|
||||
int count;
|
||||
|
||||
count = drm_add_modes_noedid(connector, connector->dev->mode_config.max_width,
|
||||
connector->dev->mode_config.max_height);
|
||||
drm_set_preferred_mode(connector, 1024, 768); // temporary implementation
|
||||
drm_edid = drm_edid_read(connector);
|
||||
|
||||
drm_edid_connector_update(connector, drm_edid);
|
||||
|
||||
count = drm_edid_connector_add_modes(connector);
|
||||
|
||||
drm_edid_free(drm_edid);
|
||||
|
||||
return count;
|
||||
}
|
||||
|
||||
static int hibmc_dp_detect(struct drm_connector *connector,
|
||||
struct drm_modeset_acquire_ctx *ctx, bool force)
|
||||
{
|
||||
mdelay(200);
|
||||
|
||||
return drm_connector_helper_detect_from_ddc(connector, ctx, force);
|
||||
}
|
||||
|
||||
static const struct drm_connector_helper_funcs hibmc_dp_conn_helper_funcs = {
|
||||
.get_modes = hibmc_dp_connector_get_modes,
|
||||
.detect_ctx = hibmc_dp_detect,
|
||||
};
|
||||
|
||||
static int hibmc_dp_late_register(struct drm_connector *connector)
|
||||
{
|
||||
struct hibmc_dp *dp = to_hibmc_dp(connector);
|
||||
|
||||
hibmc_dp_enable_int(dp);
|
||||
|
||||
return drm_dp_aux_register(&dp->aux);
|
||||
}
|
||||
|
||||
static void hibmc_dp_early_unregister(struct drm_connector *connector)
|
||||
{
|
||||
struct hibmc_dp *dp = to_hibmc_dp(connector);
|
||||
|
||||
drm_dp_aux_unregister(&dp->aux);
|
||||
|
||||
hibmc_dp_disable_int(dp);
|
||||
}
|
||||
|
||||
static const struct drm_connector_funcs hibmc_dp_conn_funcs = {
|
||||
.reset = drm_atomic_helper_connector_reset,
|
||||
.fill_modes = drm_helper_probe_single_connector_modes,
|
||||
.destroy = drm_connector_cleanup,
|
||||
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
|
||||
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
|
||||
.late_register = hibmc_dp_late_register,
|
||||
.early_unregister = hibmc_dp_early_unregister,
|
||||
.debugfs_init = hibmc_debugfs_init,
|
||||
};
|
||||
|
||||
static inline int hibmc_dp_prepare(struct hibmc_dp *dp, struct drm_display_mode *mode)
|
||||
|
@ -74,6 +111,31 @@ static const struct drm_encoder_helper_funcs hibmc_dp_encoder_helper_funcs = {
|
|||
.atomic_disable = hibmc_dp_encoder_disable,
|
||||
};
|
||||
|
||||
irqreturn_t hibmc_dp_hpd_isr(int irq, void *arg)
|
||||
{
|
||||
struct drm_device *dev = (struct drm_device *)arg;
|
||||
struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
|
||||
int idx;
|
||||
|
||||
if (!drm_dev_enter(dev, &idx))
|
||||
return -ENODEV;
|
||||
|
||||
if (priv->dp.irq_status & DP_MASKED_SINK_HPD_PLUG_INT) {
|
||||
drm_dbg_dp(&priv->dev, "HPD IN isr occur!\n");
|
||||
hibmc_dp_hpd_cfg(&priv->dp);
|
||||
} else {
|
||||
drm_dbg_dp(&priv->dev, "HPD OUT isr occur!\n");
|
||||
hibmc_dp_reset_link(&priv->dp);
|
||||
}
|
||||
|
||||
if (dev->registered)
|
||||
drm_connector_helper_hpd_irq_event(&priv->dp.connector);
|
||||
|
||||
drm_dev_exit(idx);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
int hibmc_dp_init(struct hibmc_drm_private *priv)
|
||||
{
|
||||
struct drm_device *dev = &priv->dev;
|
||||
|
@ -103,8 +165,8 @@ int hibmc_dp_init(struct hibmc_drm_private *priv)
|
|||
|
||||
drm_encoder_helper_add(encoder, &hibmc_dp_encoder_helper_funcs);
|
||||
|
||||
ret = drm_connector_init(dev, connector, &hibmc_dp_conn_funcs,
|
||||
DRM_MODE_CONNECTOR_DisplayPort);
|
||||
ret = drm_connector_init_with_ddc(dev, connector, &hibmc_dp_conn_funcs,
|
||||
DRM_MODE_CONNECTOR_DisplayPort, &dp->aux.ddc);
|
||||
if (ret) {
|
||||
drm_err(dev, "init dp connector failed: %d\n", ret);
|
||||
return ret;
|
||||
|
@ -114,5 +176,7 @@ int hibmc_dp_init(struct hibmc_drm_private *priv)
|
|||
|
||||
drm_connector_attach_encoder(connector, encoder);
|
||||
|
||||
connector->polled = DRM_CONNECTOR_POLL_HPD;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -28,12 +28,12 @@
|
|||
#include "hibmc_drm_drv.h"
|
||||
#include "hibmc_drm_regs.h"
|
||||
|
||||
#define HIBMC_DP_HOST_SERDES_CTRL 0x1f001c
|
||||
#define HIBMC_DP_HOST_SERDES_CTRL_VAL 0x8a00
|
||||
#define HIBMC_DP_HOST_SERDES_CTRL_MASK 0x7ffff
|
||||
#include "dp/dp_reg.h"
|
||||
|
||||
DEFINE_DRM_GEM_FOPS(hibmc_fops);
|
||||
|
||||
static const char *g_irqs_names_map[HIBMC_MAX_VECTORS] = { "vblank", "hpd" };
|
||||
|
||||
static irqreturn_t hibmc_interrupt(int irq, void *arg)
|
||||
{
|
||||
struct drm_device *dev = (struct drm_device *)arg;
|
||||
|
@ -51,6 +51,22 @@ static irqreturn_t hibmc_interrupt(int irq, void *arg)
|
|||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
static irqreturn_t hibmc_dp_interrupt(int irq, void *arg)
|
||||
{
|
||||
struct drm_device *dev = (struct drm_device *)arg;
|
||||
struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
|
||||
u32 status;
|
||||
|
||||
status = readl(priv->mmio + HIBMC_DP_INTSTAT);
|
||||
if (status) {
|
||||
priv->dp.irq_status = status;
|
||||
writel(status, priv->mmio + HIBMC_DP_INTCLR);
|
||||
return IRQ_WAKE_THREAD;
|
||||
}
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
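hibmc_dp_interrupt() above follows the usual threaded-IRQ split: the hard handler only latches and acks the status, then returns IRQ_WAKE_THREAD so the sleeping work runs in the thread handler (here hibmc_dp_hpd_isr()). A generic sketch of that split, with a hypothetical device and register layout rather than the HIBMC ones::

    /*
     * Sketch only: "my_dev", its register offsets and handle_hotplug() are
     * hypothetical stand-ins, not HIBMC definitions.
     */
    #include <linux/interrupt.h>
    #include <linux/io.h>
    #include <linux/types.h>

    #define MY_INT_STATUS 0x100
    #define MY_INT_CLEAR  0x104

    struct my_dev {
            void __iomem *mmio;
            u32 irq_status;
    };

    static void handle_hotplug(struct my_dev *mdev, u32 status)
    {
            /* Placeholder for the sleeping part (EDID read, link retraining, ...). */
    }

    static irqreturn_t my_hard_handler(int irq, void *arg)
    {
            struct my_dev *mdev = arg;
            u32 status = readl(mdev->mmio + MY_INT_STATUS);

            if (!status)
                    return IRQ_NONE;                        /* shared line, not ours */

            mdev->irq_status = status;                      /* stash for the thread */
            writel(status, mdev->mmio + MY_INT_CLEAR);      /* ack at the device */
            return IRQ_WAKE_THREAD;                         /* defer the sleeping work */
    }

    static irqreturn_t my_thread_handler(int irq, void *arg)
    {
            struct my_dev *mdev = arg;

            handle_hotplug(mdev, mdev->irq_status);         /* may sleep here */
            return IRQ_HANDLED;
    }

    /*
     * Registered much like hibmc_msi_init() below does it:
     * devm_request_threaded_irq(dev, irq, my_hard_handler, my_thread_handler,
     *                           IRQF_SHARED, name, mdev);
     */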
static int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
|
||||
struct drm_mode_create_dumb *args)
|
||||
{
|
||||
|
@ -121,9 +137,12 @@ static int hibmc_kms_init(struct hibmc_drm_private *priv)
|
|||
return ret;
|
||||
}
|
||||
|
||||
/* if DP existed, init DP */
|
||||
if ((readl(priv->mmio + HIBMC_DP_HOST_SERDES_CTRL) &
|
||||
HIBMC_DP_HOST_SERDES_CTRL_MASK) == HIBMC_DP_HOST_SERDES_CTRL_VAL) {
|
||||
/*
|
||||
* If the serdes reg is readable and is not equal to 0,
|
||||
* the DP block exists, so initialize it.
|
||||
*/
|
||||
ret = readl(priv->mmio + HIBMC_DP_HOST_SERDES_CTRL);
|
||||
if (ret) {
|
||||
ret = hibmc_dp_init(priv);
|
||||
if (ret)
|
||||
drm_err(dev, "failed to init dp: %d\n", ret);
|
||||
|
@ -250,15 +269,48 @@ static int hibmc_hw_init(struct hibmc_drm_private *priv)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int hibmc_unload(struct drm_device *dev)
|
||||
static void hibmc_unload(struct drm_device *dev)
|
||||
{
|
||||
drm_atomic_helper_shutdown(dev);
|
||||
}
|
||||
|
||||
static int hibmc_msi_init(struct drm_device *dev)
|
||||
{
|
||||
struct pci_dev *pdev = to_pci_dev(dev->dev);
|
||||
char name[32] = {0};
|
||||
int valid_irq_num;
|
||||
int irq;
|
||||
int ret;
|
||||
|
||||
drm_atomic_helper_shutdown(dev);
|
||||
ret = pci_alloc_irq_vectors(pdev, HIBMC_MIN_VECTORS,
|
||||
HIBMC_MAX_VECTORS, PCI_IRQ_MSI);
|
||||
if (ret < 0) {
|
||||
drm_err(dev, "enabling MSI failed: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
free_irq(pdev->irq, dev);
|
||||
valid_irq_num = ret;
|
||||
|
||||
pci_disable_msi(to_pci_dev(dev->dev));
|
||||
for (int i = 0; i < valid_irq_num; i++) {
|
||||
snprintf(name, ARRAY_SIZE(name) - 1, "%s-%s-%s",
|
||||
dev->driver->name, pci_name(pdev), g_irqs_names_map[i]);
|
||||
|
||||
irq = pci_irq_vector(pdev, i);
|
||||
|
||||
if (i)
|
||||
/* PCI devices require shared interrupts. */
|
||||
ret = devm_request_threaded_irq(&pdev->dev, irq,
|
||||
hibmc_dp_interrupt,
|
||||
hibmc_dp_hpd_isr,
|
||||
IRQF_SHARED, name, dev);
|
||||
else
|
||||
ret = devm_request_irq(&pdev->dev, irq, hibmc_interrupt,
|
||||
IRQF_SHARED, name, dev);
|
||||
if (ret) {
|
||||
drm_err(dev, "install irq failed: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -290,15 +342,10 @@ static int hibmc_load(struct drm_device *dev)
|
|||
goto err;
|
||||
}
|
||||
|
||||
ret = pci_enable_msi(pdev);
|
||||
ret = hibmc_msi_init(dev);
|
||||
if (ret) {
|
||||
drm_warn(dev, "enabling MSI failed: %d\n", ret);
|
||||
} else {
|
||||
/* PCI devices require shared interrupts. */
|
||||
ret = request_irq(pdev->irq, hibmc_interrupt, IRQF_SHARED,
|
||||
dev->driver->name, dev);
|
||||
if (ret)
|
||||
drm_warn(dev, "install irq failed: %d\n", ret);
|
||||
drm_err(dev, "hibmc msi init failed, ret:%d\n", ret);
|
||||
goto err;
|
||||
}
|
||||
|
||||
/* reset all the states of crtc/plane/encoder/connector */
|
||||
|
@ -374,7 +421,7 @@ static void hibmc_pci_remove(struct pci_dev *pdev)
|
|||
|
||||
static void hibmc_pci_shutdown(struct pci_dev *pdev)
|
||||
{
|
||||
drm_atomic_helper_shutdown(pci_get_drvdata(pdev));
|
||||
hibmc_pci_remove(pdev);
|
||||
}
|
||||
|
||||
static const struct pci_device_id hibmc_pci_table[] = {
|
||||
|
|
|
@ -22,6 +22,9 @@
|
|||
|
||||
#include "dp/dp_hw.h"
|
||||
|
||||
#define HIBMC_MIN_VECTORS 1
|
||||
#define HIBMC_MAX_VECTORS 2
|
||||
|
||||
struct hibmc_vdac {
|
||||
struct drm_device *dev;
|
||||
struct drm_encoder encoder;
|
||||
|
@ -47,6 +50,11 @@ static inline struct hibmc_vdac *to_hibmc_vdac(struct drm_connector *connector)
|
|||
return container_of(connector, struct hibmc_vdac, connector);
|
||||
}
|
||||
|
||||
static inline struct hibmc_dp *to_hibmc_dp(struct drm_connector *connector)
|
||||
{
|
||||
return container_of(connector, struct hibmc_dp, connector);
|
||||
}
|
||||
|
||||
static inline struct hibmc_drm_private *to_hibmc_drm_private(struct drm_device *dev)
|
||||
{
|
||||
return container_of(dev, struct hibmc_drm_private, dev);
|
||||
|
@ -64,4 +72,8 @@ int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_vdac *connector);
|
|||
|
||||
int hibmc_dp_init(struct hibmc_drm_private *priv);
|
||||
|
||||
void hibmc_debugfs_init(struct drm_connector *connector, struct dentry *root);
|
||||
|
||||
irqreturn_t hibmc_dp_hpd_isr(int irq, void *arg);
|
||||
|
||||
#endif
|
||||
|
|
|
@ -60,6 +60,7 @@ static void hibmc_connector_destroy(struct drm_connector *connector)
|
|||
static const struct drm_connector_helper_funcs
|
||||
hibmc_connector_helper_funcs = {
|
||||
.get_modes = hibmc_connector_get_modes,
|
||||
.detect_ctx = drm_connector_helper_detect_from_ddc,
|
||||
};
|
||||
|
||||
static const struct drm_connector_funcs hibmc_connector_funcs = {
|
||||
|
@ -127,5 +128,7 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv)
|
|||
|
||||
drm_connector_attach_encoder(connector, encoder);
|
||||
|
||||
connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -663,7 +663,8 @@ intel_dp_aux_supports_vesa_backlight(struct intel_connector *connector)
|
|||
struct intel_dp *intel_dp = intel_attached_dp(connector);
|
||||
struct intel_panel *panel = &connector->panel;
|
||||
|
||||
if ((intel_dp->edp_dpcd[3] & DP_EDP_PANEL_LUMINANCE_CONTROL_CAPABLE)) {
|
||||
if ((intel_dp->edp_dpcd[3] & DP_EDP_PANEL_LUMINANCE_CONTROL_CAPABLE) &&
|
||||
(intel_dp->edp_dpcd[3] & DP_EDP_SMOOTH_BRIGHTNESS_CAPABLE)) {
|
||||
drm_dbg_kms(display->drm,
|
||||
"[CONNECTOR:%d:%s] AUX Luminance Based Backlight Control Supported!\n",
|
||||
connector->base.base.id, connector->base.name);
|
||||
|
|
|
@ -12,8 +12,10 @@ powervr-y := \
|
|||
pvr_fw.o \
|
||||
pvr_fw_meta.o \
|
||||
pvr_fw_mips.o \
|
||||
pvr_fw_riscv.o \
|
||||
pvr_fw_startstop.o \
|
||||
pvr_fw_trace.o \
|
||||
pvr_fw_util.o \
|
||||
pvr_gem.o \
|
||||
pvr_hwrt.o \
|
||||
pvr_job.o \
|
||||
|
|
|
@ -25,6 +25,7 @@
|
|||
#include <linux/interrupt.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
#include <linux/reset.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/stddef.h>
|
||||
#include <linux/types.h>
|
||||
|
@ -120,6 +121,21 @@ static int pvr_device_clk_init(struct pvr_device *pvr_dev)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int pvr_device_reset_init(struct pvr_device *pvr_dev)
|
||||
{
|
||||
struct drm_device *drm_dev = from_pvr_device(pvr_dev);
|
||||
struct reset_control *reset;
|
||||
|
||||
reset = devm_reset_control_get_optional_exclusive(drm_dev->dev, NULL);
|
||||
if (IS_ERR(reset))
|
||||
return dev_err_probe(drm_dev->dev, PTR_ERR(reset),
|
||||
"failed to get gpu reset line\n");
|
||||
|
||||
pvr_dev->reset = reset;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* pvr_device_process_active_queues() - Process all queue related events.
|
||||
* @pvr_dev: PowerVR device to check
|
||||
|
@ -146,9 +162,61 @@ static void pvr_device_process_active_queues(struct pvr_device *pvr_dev)
|
|||
mutex_unlock(&pvr_dev->queues.lock);
|
||||
}
|
||||
|
||||
static bool pvr_device_safety_irq_pending(struct pvr_device *pvr_dev)
|
||||
{
|
||||
u32 events;
|
||||
|
||||
WARN_ON_ONCE(!pvr_dev->has_safety_events);
|
||||
|
||||
events = pvr_cr_read32(pvr_dev, ROGUE_CR_EVENT_STATUS);
|
||||
|
||||
return (events & ROGUE_CR_EVENT_STATUS_SAFETY_EN) != 0;
|
||||
}
|
||||
|
||||
static void pvr_device_safety_irq_clear(struct pvr_device *pvr_dev)
|
||||
{
|
||||
WARN_ON_ONCE(!pvr_dev->has_safety_events);
|
||||
|
||||
pvr_cr_write32(pvr_dev, ROGUE_CR_EVENT_CLEAR,
|
||||
ROGUE_CR_EVENT_CLEAR_SAFETY_EN);
|
||||
}
|
||||
|
||||
static void pvr_device_handle_safety_events(struct pvr_device *pvr_dev)
|
||||
{
|
||||
struct drm_device *drm_dev = from_pvr_device(pvr_dev);
|
||||
u32 events;
|
||||
|
||||
WARN_ON_ONCE(!pvr_dev->has_safety_events);
|
||||
|
||||
events = pvr_cr_read32(pvr_dev, ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE);
|
||||
|
||||
/* Handle only these events on the host and leave the rest to the FW. */
|
||||
events &= ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_EN |
|
||||
ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_EN;
|
||||
|
||||
pvr_cr_write32(pvr_dev, ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE, events);
|
||||
|
||||
if (events & ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_EN) {
|
||||
u32 fault_fw = pvr_cr_read32(pvr_dev, ROGUE_CR_FAULT_FW_STATUS);
|
||||
|
||||
pvr_cr_write32(pvr_dev, ROGUE_CR_FAULT_FW_CLEAR, fault_fw);
|
||||
|
||||
drm_info(drm_dev, "Safety event: FW fault (mask=0x%08x)\n", fault_fw);
|
||||
}
|
||||
|
||||
if (events & ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_EN) {
|
||||
/*
|
||||
* The watchdog timer is disabled by the driver so this event
|
||||
* should never be fired.
|
||||
*/
|
||||
drm_info(drm_dev, "Safety event: Watchdog timeout\n");
|
||||
}
|
||||
}
|
||||
|
||||
static irqreturn_t pvr_device_irq_thread_handler(int irq, void *data)
|
||||
{
|
||||
struct pvr_device *pvr_dev = data;
|
||||
struct drm_device *drm_dev = from_pvr_device(pvr_dev);
|
||||
irqreturn_t ret = IRQ_NONE;
|
||||
|
||||
/* We are in the threaded handler, we can keep dequeuing events until we
|
||||
|
@ -164,30 +232,76 @@ static irqreturn_t pvr_device_irq_thread_handler(int irq, void *data)
|
|||
pvr_device_process_active_queues(pvr_dev);
|
||||
}
|
||||
|
||||
pm_runtime_mark_last_busy(from_pvr_device(pvr_dev)->dev);
|
||||
pm_runtime_mark_last_busy(drm_dev->dev);
|
||||
|
||||
ret = IRQ_HANDLED;
|
||||
}
|
||||
|
||||
/* Unmask FW irqs before returning, so new interrupts can be received. */
|
||||
pvr_fw_irq_enable(pvr_dev);
|
||||
if (pvr_dev->has_safety_events) {
|
||||
int err;
|
||||
|
||||
/*
|
||||
* Ensure the GPU is powered on since some safety events (such
|
||||
* as ECC faults) can happen outside of job submissions, which
|
||||
* are otherwise the only time a power reference is held.
|
||||
*/
|
||||
err = pvr_power_get(pvr_dev);
|
||||
if (err) {
|
||||
drm_err_ratelimited(drm_dev,
|
||||
"%s: could not take power reference (%d)\n",
|
||||
__func__, err);
|
||||
return ret;
|
||||
}
|
||||
|
||||
while (pvr_device_safety_irq_pending(pvr_dev)) {
|
||||
pvr_device_safety_irq_clear(pvr_dev);
|
||||
pvr_device_handle_safety_events(pvr_dev);
|
||||
|
||||
ret = IRQ_HANDLED;
|
||||
}
|
||||
|
||||
pvr_power_put(pvr_dev);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static irqreturn_t pvr_device_irq_handler(int irq, void *data)
|
||||
{
|
||||
struct pvr_device *pvr_dev = data;
|
||||
bool safety_irq_pending = false;
|
||||
|
||||
if (!pvr_fw_irq_pending(pvr_dev))
|
||||
if (pvr_dev->has_safety_events)
|
||||
safety_irq_pending = pvr_device_safety_irq_pending(pvr_dev);
|
||||
|
||||
if (!pvr_fw_irq_pending(pvr_dev) && !safety_irq_pending)
|
||||
return IRQ_NONE; /* Spurious IRQ - ignore. */
|
||||
|
||||
/* Mask the FW interrupts before waking up the thread. Will be unmasked
|
||||
* when the thread handler is done processing events.
|
||||
*/
|
||||
pvr_fw_irq_disable(pvr_dev);
|
||||
return IRQ_WAKE_THREAD;
|
||||
}
|
||||
|
||||
static void pvr_device_safety_irq_init(struct pvr_device *pvr_dev)
|
||||
{
|
||||
u32 num_ecc_rams = 0;
|
||||
|
||||
/*
|
||||
* Safety events are an optional feature of the RogueXE platform. They
|
||||
* are only enabled if at least one of ECC memory or the watchdog timer
|
||||
* is present in HW. While safety events can be generated by other
|
||||
* systems, that will never happen if the above mentioned hardware is
|
||||
* not present.
|
||||
*/
|
||||
if (!PVR_HAS_FEATURE(pvr_dev, roguexe)) {
|
||||
pvr_dev->has_safety_events = false;
|
||||
return;
|
||||
}
|
||||
|
||||
PVR_FEATURE_VALUE(pvr_dev, ecc_rams, &num_ecc_rams);
|
||||
|
||||
pvr_dev->has_safety_events =
|
||||
num_ecc_rams > 0 || PVR_HAS_FEATURE(pvr_dev, watchdog_timer);
|
||||
}
|
||||
|
||||
/**
|
||||
* pvr_device_irq_init() - Initialise IRQ required by a PowerVR device
|
||||
* @pvr_dev: Target PowerVR device.
|
||||
|
@ -205,17 +319,25 @@ pvr_device_irq_init(struct pvr_device *pvr_dev)
|
|||
|
||||
init_waitqueue_head(&pvr_dev->kccb.rtn_q);
|
||||
|
||||
pvr_device_safety_irq_init(pvr_dev);
|
||||
|
||||
pvr_dev->irq = platform_get_irq(plat_dev, 0);
|
||||
if (pvr_dev->irq < 0)
|
||||
return pvr_dev->irq;
|
||||
|
||||
/* Clear any pending events before requesting the IRQ line. */
|
||||
pvr_fw_irq_clear(pvr_dev);
|
||||
pvr_fw_irq_enable(pvr_dev);
|
||||
|
||||
if (pvr_dev->has_safety_events)
|
||||
pvr_device_safety_irq_clear(pvr_dev);
|
||||
|
||||
/*
|
||||
* The ONESHOT flag ensures IRQs are masked while the thread handler is
|
||||
* running.
|
||||
*/
|
||||
return request_threaded_irq(pvr_dev->irq, pvr_device_irq_handler,
|
||||
pvr_device_irq_thread_handler,
|
||||
IRQF_SHARED, "gpu", pvr_dev);
|
||||
IRQF_SHARED | IRQF_ONESHOT, "gpu", pvr_dev);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -509,6 +631,11 @@ pvr_device_init(struct pvr_device *pvr_dev)
|
|||
if (err)
|
||||
return err;
|
||||
|
||||
/* Get the reset line for the GPU */
|
||||
err = pvr_device_reset_init(pvr_dev);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
/* Explicitly power the GPU so we can access control registers before the FW is booted. */
|
||||
err = pm_runtime_resume_and_get(dev);
|
||||
if (err)
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
#include <linux/bits.h>
|
||||
#include <linux/compiler_attributes.h>
|
||||
#include <linux/compiler_types.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/iopoll.h>
|
||||
#include <linux/kernel.h>
|
||||
|
@ -131,6 +132,22 @@ struct pvr_device {
|
|||
*/
|
||||
struct clk *mem_clk;
|
||||
|
||||
struct pvr_device_power {
|
||||
struct device **domain_devs;
|
||||
struct device_link **domain_links;
|
||||
|
||||
u32 domain_count;
|
||||
} power;
|
||||
|
||||
/**
|
||||
* @reset: Optional reset line.
|
||||
*
|
||||
* This may be used on some platforms to provide a reset line that needs to be de-asserted
|
||||
* after power-up procedure. It would also need to be asserted after the power-down
|
||||
* procedure.
|
||||
*/
|
||||
struct reset_control *reset;
|
||||
|
||||
/** @irq: IRQ number. */
|
||||
int irq;
|
||||
|
||||
|
@ -300,6 +317,9 @@ struct pvr_device {
|
|||
* struct pvr_file.
|
||||
*/
|
||||
spinlock_t ctx_list_lock;
|
||||
|
||||
/** @has_safety_events: Whether this device can raise safety events. */
|
||||
bool has_safety_events;
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -728,8 +748,22 @@ pvr_ioctl_union_padding_check(void *instance, size_t union_offset,
|
|||
__union_size, __member_size); \
|
||||
})
|
||||
|
||||
#define PVR_FW_PROCESSOR_TYPE_META 0
|
||||
#define PVR_FW_PROCESSOR_TYPE_MIPS 1
|
||||
#define PVR_FW_PROCESSOR_TYPE_RISCV 2
|
||||
/*
|
||||
* These utility functions should more properly be placed in pvr_fw.h, but that
|
||||
* would cause a dependency cycle between that header and this one. Since
|
||||
* they're primarily used in pvr_device.c, let's put them in here for now.
|
||||
*/
|
||||
|
||||
static __always_inline bool
|
||||
pvr_fw_irq_pending(struct pvr_device *pvr_dev)
|
||||
{
|
||||
return pvr_dev->fw_dev.defs->irq_pending(pvr_dev);
|
||||
}
|
||||
|
||||
static __always_inline void
|
||||
pvr_fw_irq_clear(struct pvr_device *pvr_dev)
|
||||
{
|
||||
pvr_dev->fw_dev.defs->irq_clear(pvr_dev);
|
||||
}
|
||||
|
||||
#endif /* PVR_DEVICE_H */
|
||||
|
|
|
@ -44,6 +44,7 @@
|
|||
* This driver supports the following PowerVR/IMG graphics cores from Imagination Technologies:
|
||||
*
|
||||
* * AXE-1-16M (found in Texas Instruments AM62)
|
||||
* * BXS-4-64 MC1 (found in Texas Instruments J721S2/AM68)
|
||||
*/
|
||||
|
||||
/**
|
||||
|
@ -1411,6 +1412,10 @@ pvr_probe(struct platform_device *plat_dev)
|
|||
|
||||
platform_set_drvdata(plat_dev, drm_dev);
|
||||
|
||||
err = pvr_power_domains_init(pvr_dev);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
init_rwsem(&pvr_dev->reset_sem);
|
||||
|
||||
pvr_context_device_init(pvr_dev);
|
||||
|
@ -1450,6 +1455,8 @@ err_watchdog_fini:
|
|||
err_context_fini:
|
||||
pvr_context_device_fini(pvr_dev);
|
||||
|
||||
pvr_power_domains_fini(pvr_dev);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -1470,9 +1477,17 @@ static void pvr_remove(struct platform_device *plat_dev)
|
|||
pvr_watchdog_fini(pvr_dev);
|
||||
pvr_queue_device_fini(pvr_dev);
|
||||
pvr_context_device_fini(pvr_dev);
|
||||
pvr_power_domains_fini(pvr_dev);
|
||||
}
|
||||
|
||||
static const struct of_device_id dt_match[] = {
|
||||
{ .compatible = "img,img-rogue", .data = NULL },
|
||||
|
||||
/*
|
||||
* This legacy compatible string was introduced early on before the more generic
|
||||
* "img,img-rogue" was added. Keep it around here for compatibility, but never use
|
||||
* "img,img-axe" in new devicetrees.
|
||||
*/
|
||||
{ .compatible = "img,img-axe", .data = NULL },
|
||||
{}
|
||||
};
|
||||
|
@ -1498,3 +1513,4 @@ MODULE_DESCRIPTION(PVR_DRIVER_DESC);
|
|||
MODULE_LICENSE("Dual MIT/GPL");
|
||||
MODULE_IMPORT_NS("DMA_BUF");
|
||||
MODULE_FIRMWARE("powervr/rogue_33.15.11.3_v1.fw");
|
||||
MODULE_FIRMWARE("powervr/rogue_36.53.104.796_v1.fw");
|
||||
|
|
|
@ -437,6 +437,9 @@ fw_runtime_cfg_init(void *cpu_ptr, void *priv)
|
|||
runtime_cfg->active_pm_latency_persistant = true;
|
||||
WARN_ON(PVR_FEATURE_VALUE(pvr_dev, num_clusters,
|
||||
&runtime_cfg->default_dusts_num_init) != 0);
|
||||
|
||||
/* Keep watchdog timer disabled. */
|
||||
runtime_cfg->wdg_period_us = 0;
|
||||
}
|
||||
|
||||
static void
|
||||
|
@ -659,7 +662,7 @@ pvr_fw_process(struct pvr_device *pvr_dev)
|
|||
return PTR_ERR(fw_code_ptr);
|
||||
}
|
||||
|
||||
if (pvr_dev->fw_dev.defs->has_fixed_data_addr()) {
|
||||
if (pvr_dev->fw_dev.defs->has_fixed_data_addr) {
|
||||
u32 base_addr = private_data->base_addr & pvr_dev->fw_dev.fw_heap_info.offset_mask;
|
||||
|
||||
fw_data_ptr =
|
||||
|
@ -935,18 +938,22 @@ pvr_fw_validate_init_device_info(struct pvr_device *pvr_dev)
|
|||
int
|
||||
pvr_fw_init(struct pvr_device *pvr_dev)
|
||||
{
|
||||
static const struct pvr_fw_defs *fw_defs[PVR_FW_PROCESSOR_TYPE_COUNT] = {
|
||||
[PVR_FW_PROCESSOR_TYPE_META] = &pvr_fw_defs_meta,
|
||||
[PVR_FW_PROCESSOR_TYPE_MIPS] = &pvr_fw_defs_mips,
|
||||
[PVR_FW_PROCESSOR_TYPE_RISCV] = &pvr_fw_defs_riscv,
|
||||
};
|
||||
|
||||
u32 kccb_size_log2 = ROGUE_FWIF_KCCB_NUMCMDS_LOG2_DEFAULT;
|
||||
u32 kccb_rtn_size = (1 << kccb_size_log2) * sizeof(*pvr_dev->kccb.rtn);
|
||||
struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
|
||||
int err;
|
||||
|
||||
if (fw_dev->processor_type == PVR_FW_PROCESSOR_TYPE_META)
|
||||
fw_dev->defs = &pvr_fw_defs_meta;
|
||||
else if (fw_dev->processor_type == PVR_FW_PROCESSOR_TYPE_MIPS)
|
||||
fw_dev->defs = &pvr_fw_defs_mips;
|
||||
else
|
||||
if (fw_dev->processor_type >= PVR_FW_PROCESSOR_TYPE_COUNT)
|
||||
return -EINVAL;
|
||||
|
||||
fw_dev->defs = fw_defs[fw_dev->processor_type];
|
||||
|
||||
err = fw_dev->defs->init(pvr_dev);
|
||||
if (err)
|
||||
return err;
|
||||
|
@ -1452,6 +1459,15 @@ void pvr_fw_object_get_fw_addr_offset(struct pvr_fw_object *fw_obj, u32 offset,
|
|||
*fw_addr_out = pvr_dev->fw_dev.defs->get_fw_addr_with_offset(fw_obj, offset);
|
||||
}
|
||||
|
||||
u64
|
||||
pvr_fw_obj_get_gpu_addr(struct pvr_fw_object *fw_obj)
|
||||
{
|
||||
struct pvr_device *pvr_dev = to_pvr_device(gem_from_pvr_gem(fw_obj->gem)->dev);
|
||||
struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
|
||||
|
||||
return fw_dev->fw_heap_info.gpu_addr + fw_obj->fw_addr_offset;
|
||||
}
|
||||
|
||||
/*
|
||||
* pvr_fw_hard_reset() - Re-initialise the FW code and data segments, and reset all global FW
|
||||
* structures
|
||||
|
|
|
@ -167,47 +167,30 @@ struct pvr_fw_defs {
|
|||
int (*wrapper_init)(struct pvr_device *pvr_dev);
|
||||
|
||||
/**
|
||||
* @has_fixed_data_addr:
|
||||
* @irq_pending: Check interrupt status register for pending interrupts.
|
||||
*
|
||||
* Called to check if firmware fixed data must be loaded at the address given by the
|
||||
* firmware layout table.
|
||||
* @pvr_dev: Target PowerVR device.
|
||||
*
|
||||
* This function is mandatory.
|
||||
*
|
||||
* Returns:
|
||||
* * %true if firmware fixed data must be loaded at the address given by the firmware
|
||||
* layout table.
|
||||
* * %false otherwise.
|
||||
*/
|
||||
bool (*has_fixed_data_addr)(void);
|
||||
bool (*irq_pending)(struct pvr_device *pvr_dev);
|
||||
|
||||
/**
|
||||
* @irq: FW Interrupt information.
|
||||
* @irq_clear: Clear pending interrupts.
|
||||
*
|
||||
* Those are processor dependent, and should be initialized by the
|
||||
* processor backend in pvr_fw_funcs::init().
|
||||
* @pvr_dev: Target PowerVR device.
|
||||
*
|
||||
* This function is mandatory.
|
||||
*/
|
||||
struct {
|
||||
/** @enable_reg: FW interrupt enable register. */
|
||||
u32 enable_reg;
|
||||
void (*irq_clear)(struct pvr_device *pvr_dev);
|
||||
|
||||
/** @status_reg: FW interrupt status register. */
|
||||
u32 status_reg;
|
||||
|
||||
/**
|
||||
* @clear_reg: FW interrupt clear register.
|
||||
*
|
||||
* If @status_reg == @clear_reg, we clear by write a bit to zero,
|
||||
* otherwise we clear by writing a bit to one.
|
||||
*/
|
||||
u32 clear_reg;
|
||||
|
||||
/** @event_mask: Bitmask of events to listen for. */
|
||||
u32 event_mask;
|
||||
|
||||
/** @clear_mask: Value to write to the clear_reg in order to clear FW IRQs. */
|
||||
u32 clear_mask;
|
||||
} irq;
|
||||
/**
|
||||
* @has_fixed_data_addr: Specify whether the firmware fixed data must be loaded at the
|
||||
* address given by the firmware layout table.
|
||||
*
|
||||
* This value is mandatory.
|
||||
*/
|
||||
bool has_fixed_data_addr;
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -400,26 +383,16 @@ struct pvr_fw_device {
|
|||
} fw_objs;
|
||||
};
|
||||
|
||||
#define pvr_fw_irq_read_reg(pvr_dev, name) \
|
||||
pvr_cr_read32((pvr_dev), (pvr_dev)->fw_dev.defs->irq.name ## _reg)
|
||||
|
||||
#define pvr_fw_irq_write_reg(pvr_dev, name, value) \
|
||||
pvr_cr_write32((pvr_dev), (pvr_dev)->fw_dev.defs->irq.name ## _reg, value)
|
||||
|
||||
#define pvr_fw_irq_pending(pvr_dev) \
|
||||
(pvr_fw_irq_read_reg(pvr_dev, status) & (pvr_dev)->fw_dev.defs->irq.event_mask)
|
||||
|
||||
#define pvr_fw_irq_clear(pvr_dev) \
|
||||
pvr_fw_irq_write_reg(pvr_dev, clear, (pvr_dev)->fw_dev.defs->irq.clear_mask)
|
||||
|
||||
#define pvr_fw_irq_enable(pvr_dev) \
|
||||
pvr_fw_irq_write_reg(pvr_dev, enable, (pvr_dev)->fw_dev.defs->irq.event_mask)
|
||||
|
||||
#define pvr_fw_irq_disable(pvr_dev) \
|
||||
pvr_fw_irq_write_reg(pvr_dev, enable, 0)
|
||||
enum pvr_fw_processor_type {
|
||||
PVR_FW_PROCESSOR_TYPE_META = 0,
|
||||
PVR_FW_PROCESSOR_TYPE_MIPS,
|
||||
PVR_FW_PROCESSOR_TYPE_RISCV,
|
||||
PVR_FW_PROCESSOR_TYPE_COUNT,
|
||||
};
|
||||
|
||||
extern const struct pvr_fw_defs pvr_fw_defs_meta;
|
||||
extern const struct pvr_fw_defs pvr_fw_defs_mips;
|
||||
extern const struct pvr_fw_defs pvr_fw_defs_riscv;
|
||||
|
||||
int pvr_fw_validate_init_device_info(struct pvr_device *pvr_dev);
|
||||
int pvr_fw_init(struct pvr_device *pvr_dev);
|
||||
|
@ -506,4 +479,18 @@ pvr_fw_object_get_fw_addr(struct pvr_fw_object *fw_obj, u32 *fw_addr_out)
|
|||
pvr_fw_object_get_fw_addr_offset(fw_obj, 0, fw_addr_out);
|
||||
}
|
||||
|
||||
u64
|
||||
pvr_fw_obj_get_gpu_addr(struct pvr_fw_object *fw_obj);
|
||||
|
||||
static __always_inline size_t
|
||||
pvr_fw_obj_get_object_size(struct pvr_fw_object *fw_obj)
|
||||
{
|
||||
return pvr_gem_object_size(fw_obj->gem);
|
||||
}
|
||||
|
||||
/* Util functions defined in pvr_fw_util.c. These are intended for use in pvr_fw_<arch>.c files. */
|
||||
int
|
||||
pvr_fw_process_elf_command_stream(struct pvr_device *pvr_dev, const u8 *fw, u8 *fw_code_ptr,
|
||||
u8 *fw_data_ptr, u8 *fw_core_code_ptr, u8 *fw_core_data_ptr);
|
||||
|
||||
#endif /* PVR_FW_H */
|
||||
|
|
|
@ -533,9 +533,17 @@ pvr_meta_vm_unmap(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
|
|||
}
|
||||
|
||||
static bool
|
||||
pvr_meta_has_fixed_data_addr(void)
|
||||
pvr_meta_irq_pending(struct pvr_device *pvr_dev)
|
||||
{
|
||||
return false;
|
||||
return pvr_cr_read32(pvr_dev, ROGUE_CR_META_SP_MSLVIRQSTATUS) &
|
||||
ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_EN;
|
||||
}
|
||||
|
||||
static void
|
||||
pvr_meta_irq_clear(struct pvr_device *pvr_dev)
|
||||
{
|
||||
pvr_cr_write32(pvr_dev, ROGUE_CR_META_SP_MSLVIRQSTATUS,
|
||||
ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK);
|
||||
}
|
||||
|
||||
const struct pvr_fw_defs pvr_fw_defs_meta = {
|
||||
|
@ -545,12 +553,7 @@ const struct pvr_fw_defs pvr_fw_defs_meta = {
|
|||
.vm_unmap = pvr_meta_vm_unmap,
|
||||
.get_fw_addr_with_offset = pvr_meta_get_fw_addr_with_offset,
|
||||
.wrapper_init = pvr_meta_wrapper_init,
|
||||
.has_fixed_data_addr = pvr_meta_has_fixed_data_addr,
|
||||
.irq = {
|
||||
.enable_reg = ROGUE_CR_META_SP_MSLVIRQENABLE,
|
||||
.status_reg = ROGUE_CR_META_SP_MSLVIRQSTATUS,
|
||||
.clear_reg = ROGUE_CR_META_SP_MSLVIRQSTATUS,
|
||||
.event_mask = ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_EN,
|
||||
.clear_mask = ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK,
|
||||
},
|
||||
.irq_pending = pvr_meta_irq_pending,
|
||||
.irq_clear = pvr_meta_irq_clear,
|
||||
.has_fixed_data_addr = false,
|
||||
};
|
||||
|
|
|
@ -8,7 +8,6 @@
|
|||
#include "pvr_rogue_mips.h"
|
||||
#include "pvr_vm_mips.h"
|
||||
|
||||
#include <linux/elf.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
|
@ -16,59 +15,6 @@
|
|||
#define ROGUE_FW_HEAP_MIPS_SHIFT 24 /* 16 MB */
|
||||
#define ROGUE_FW_HEAP_MIPS_RESERVED_SIZE SZ_1M
|
||||
|
||||
/**
|
||||
* process_elf_command_stream() - Process ELF firmware image and populate
|
||||
* firmware sections
|
||||
* @pvr_dev: Device pointer.
|
||||
* @fw: Pointer to firmware image.
|
||||
* @fw_code_ptr: Pointer to FW code section.
|
||||
* @fw_data_ptr: Pointer to FW data section.
|
||||
* @fw_core_code_ptr: Pointer to FW coremem code section.
|
||||
* @fw_core_data_ptr: Pointer to FW coremem data section.
|
||||
*
|
||||
* Returns :
|
||||
* * 0 on success, or
|
||||
* * -EINVAL on any error in ELF command stream.
|
||||
*/
|
||||
static int
|
||||
process_elf_command_stream(struct pvr_device *pvr_dev, const u8 *fw, u8 *fw_code_ptr,
|
||||
u8 *fw_data_ptr, u8 *fw_core_code_ptr, u8 *fw_core_data_ptr)
|
||||
{
|
||||
struct elf32_hdr *header = (struct elf32_hdr *)fw;
|
||||
struct elf32_phdr *program_header = (struct elf32_phdr *)(fw + header->e_phoff);
|
||||
struct drm_device *drm_dev = from_pvr_device(pvr_dev);
|
||||
int err;
|
||||
|
||||
for (u32 entry = 0; entry < header->e_phnum; entry++, program_header++) {
|
||||
void *write_addr;
|
||||
|
||||
/* Only consider loadable entries in the ELF segment table */
|
||||
if (program_header->p_type != PT_LOAD)
|
||||
continue;
|
||||
|
||||
err = pvr_fw_find_mmu_segment(pvr_dev, program_header->p_vaddr,
|
||||
program_header->p_memsz, fw_code_ptr, fw_data_ptr,
|
||||
fw_core_code_ptr, fw_core_data_ptr, &write_addr);
|
||||
if (err) {
|
||||
drm_err(drm_dev,
|
||||
"Addr 0x%x (size: %d) not found in any firmware segment",
|
||||
program_header->p_vaddr, program_header->p_memsz);
|
||||
return err;
|
||||
}
|
||||
|
||||
/* Write to FW allocation only if available */
|
||||
if (write_addr) {
|
||||
memcpy(write_addr, fw + program_header->p_offset,
|
||||
program_header->p_filesz);
|
||||
|
||||
memset((u8 *)write_addr + program_header->p_filesz, 0,
|
||||
program_header->p_memsz - program_header->p_filesz);
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
pvr_mips_init(struct pvr_device *pvr_dev)
|
||||
{
|
||||
|
@ -98,8 +44,8 @@ pvr_mips_fw_process(struct pvr_device *pvr_dev, const u8 *fw,
|
|||
dma_addr_t dma_addr;
|
||||
int err;
|
||||
|
||||
err = process_elf_command_stream(pvr_dev, fw, fw_code_ptr, fw_data_ptr, fw_core_code_ptr,
|
||||
fw_core_data_ptr);
|
||||
err = pvr_fw_process_elf_command_stream(pvr_dev, fw, fw_code_ptr, fw_data_ptr,
|
||||
fw_core_code_ptr, fw_core_data_ptr);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
|
@ -226,9 +172,17 @@ pvr_mips_get_fw_addr_with_offset(struct pvr_fw_object *fw_obj, u32 offset)
|
|||
}
|
||||
|
||||
static bool
|
||||
pvr_mips_has_fixed_data_addr(void)
|
||||
pvr_mips_irq_pending(struct pvr_device *pvr_dev)
|
||||
{
|
||||
return true;
|
||||
return pvr_cr_read32(pvr_dev, ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS) &
|
||||
ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_EN;
|
||||
}
|
||||
|
||||
static void
|
||||
pvr_mips_irq_clear(struct pvr_device *pvr_dev)
|
||||
{
|
||||
pvr_cr_write32(pvr_dev, ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR,
|
||||
ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_EN);
|
||||
}
|
||||
|
||||
const struct pvr_fw_defs pvr_fw_defs_mips = {
|
||||
|
@ -239,12 +193,7 @@ const struct pvr_fw_defs pvr_fw_defs_mips = {
|
|||
.vm_unmap = pvr_vm_mips_unmap,
|
||||
.get_fw_addr_with_offset = pvr_mips_get_fw_addr_with_offset,
|
||||
.wrapper_init = pvr_mips_wrapper_init,
|
||||
.has_fixed_data_addr = pvr_mips_has_fixed_data_addr,
|
||||
.irq = {
|
||||
.enable_reg = ROGUE_CR_MIPS_WRAPPER_IRQ_ENABLE,
|
||||
.status_reg = ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS,
|
||||
.clear_reg = ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR,
|
||||
.event_mask = ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_EN,
|
||||
.clear_mask = ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_EN,
|
||||
},
|
||||
.irq_pending = pvr_mips_irq_pending,
|
||||
.irq_clear = pvr_mips_irq_clear,
|
||||
.has_fixed_data_addr = true,
|
||||
};
|
||||
|
|
165
drivers/gpu/drm/imagination/pvr_fw_riscv.c
Normal file
|
@@ -0,0 +1,165 @@
|
|||
// SPDX-License-Identifier: GPL-2.0 OR MIT
|
||||
/* Copyright (c) 2024 Imagination Technologies Ltd. */
|
||||
|
||||
#include "pvr_device.h"
|
||||
#include "pvr_fw.h"
|
||||
#include "pvr_fw_info.h"
|
||||
#include "pvr_fw_mips.h"
|
||||
#include "pvr_gem.h"
|
||||
#include "pvr_rogue_cr_defs.h"
|
||||
#include "pvr_rogue_riscv.h"
|
||||
#include "pvr_vm.h"
|
||||
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/firmware.h>
|
||||
#include <linux/ktime.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
#define ROGUE_FW_HEAP_RISCV_SHIFT 25 /* 32 MB */
|
||||
#define ROGUE_FW_HEAP_RISCV_SIZE (1u << ROGUE_FW_HEAP_RISCV_SHIFT)
|
||||
|
||||
static int
|
||||
pvr_riscv_wrapper_init(struct pvr_device *pvr_dev)
|
||||
{
|
||||
const u64 common_opts =
|
||||
((u64)(ROGUE_FW_HEAP_RISCV_SIZE >> FWCORE_ADDR_REMAP_CONFIG0_SIZE_ALIGNSHIFT)
|
||||
<< ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_SHIFT) |
|
||||
((u64)MMU_CONTEXT_MAPPING_FWPRIV
|
||||
<< FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_SHIFT);
|
||||
|
||||
u64 code_addr = pvr_fw_obj_get_gpu_addr(pvr_dev->fw_dev.mem.code_obj);
|
||||
u64 data_addr = pvr_fw_obj_get_gpu_addr(pvr_dev->fw_dev.mem.data_obj);
|
||||
|
||||
/* This condition allows us to OR the addresses into the register directly. */
|
||||
static_assert(ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_SHIFT ==
|
||||
ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_ALIGNSHIFT);
|
||||
|
||||
WARN_ON(code_addr & ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_CLRMSK);
|
||||
WARN_ON(data_addr & ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_CLRMSK);
|
||||
|
||||
pvr_cr_write64(pvr_dev, ROGUE_RISCVFW_REGION_REMAP_CR(BOOTLDR_CODE),
|
||||
code_addr | common_opts | ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_EN);
|
||||
|
||||
pvr_cr_write64(pvr_dev, ROGUE_RISCVFW_REGION_REMAP_CR(BOOTLDR_DATA),
|
||||
data_addr | common_opts |
|
||||
ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_EN);
|
||||
|
||||
/* Garten IDLE bit controlled by RISC-V. */
|
||||
pvr_cr_write64(pvr_dev, ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG,
|
||||
ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct rogue_riscv_fw_boot_data {
|
||||
u64 coremem_code_dev_vaddr;
|
||||
u64 coremem_data_dev_vaddr;
|
||||
u32 coremem_code_fw_addr;
|
||||
u32 coremem_data_fw_addr;
|
||||
u32 coremem_code_size;
|
||||
u32 coremem_data_size;
|
||||
u32 flags;
|
||||
u32 reserved;
|
||||
};
|
||||
|
||||
static int
|
||||
pvr_riscv_fw_process(struct pvr_device *pvr_dev, const u8 *fw,
|
||||
u8 *fw_code_ptr, u8 *fw_data_ptr, u8 *fw_core_code_ptr, u8 *fw_core_data_ptr,
|
||||
u32 core_code_alloc_size)
|
||||
{
|
||||
struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
|
||||
struct pvr_fw_mem *fw_mem = &fw_dev->mem;
|
||||
struct rogue_riscv_fw_boot_data *boot_data;
|
||||
int err;
|
||||
|
||||
err = pvr_fw_process_elf_command_stream(pvr_dev, fw, fw_code_ptr, fw_data_ptr,
|
||||
fw_core_code_ptr, fw_core_data_ptr);
|
||||
if (err)
|
||||
goto err_out;
|
||||
|
||||
boot_data = (struct rogue_riscv_fw_boot_data *)fw_data_ptr;
|
||||
|
||||
if (fw_mem->core_code_obj) {
|
||||
boot_data->coremem_code_dev_vaddr = pvr_fw_obj_get_gpu_addr(fw_mem->core_code_obj);
|
||||
pvr_fw_object_get_fw_addr(fw_mem->core_code_obj, &boot_data->coremem_code_fw_addr);
|
||||
boot_data->coremem_code_size = pvr_fw_obj_get_object_size(fw_mem->core_code_obj);
|
||||
}
|
||||
|
||||
if (fw_mem->core_data_obj) {
|
||||
boot_data->coremem_data_dev_vaddr = pvr_fw_obj_get_gpu_addr(fw_mem->core_data_obj);
|
||||
pvr_fw_object_get_fw_addr(fw_mem->core_data_obj, &boot_data->coremem_data_fw_addr);
|
||||
boot_data->coremem_data_size = pvr_fw_obj_get_object_size(fw_mem->core_data_obj);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
err_out:
|
||||
return err;
|
||||
}
|
||||
|
||||
static int
|
||||
pvr_riscv_init(struct pvr_device *pvr_dev)
|
||||
{
|
||||
pvr_fw_heap_info_init(pvr_dev, ROGUE_FW_HEAP_RISCV_SHIFT, 0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static u32
|
||||
pvr_riscv_get_fw_addr_with_offset(struct pvr_fw_object *fw_obj, u32 offset)
|
||||
{
|
||||
u32 fw_addr = fw_obj->fw_addr_offset + offset;
|
||||
|
||||
/* RISC-V cacheability is determined by address. */
|
||||
if (fw_obj->gem->flags & PVR_BO_FW_FLAGS_DEVICE_UNCACHED)
|
||||
fw_addr |= ROGUE_RISCVFW_REGION_BASE(SHARED_UNCACHED_DATA);
|
||||
else
|
||||
fw_addr |= ROGUE_RISCVFW_REGION_BASE(SHARED_CACHED_DATA);
|
||||
|
||||
return fw_addr;
|
||||
}
|
||||
|
||||
static int
|
||||
pvr_riscv_vm_map(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
|
||||
{
|
||||
struct pvr_gem_object *pvr_obj = fw_obj->gem;
|
||||
|
||||
return pvr_vm_map(pvr_dev->kernel_vm_ctx, pvr_obj, 0, fw_obj->fw_mm_node.start,
|
||||
pvr_gem_object_size(pvr_obj));
|
||||
}
|
||||
|
||||
static void
|
||||
pvr_riscv_vm_unmap(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
|
||||
{
|
||||
struct pvr_gem_object *pvr_obj = fw_obj->gem;
|
||||
|
||||
pvr_vm_unmap_obj(pvr_dev->kernel_vm_ctx, pvr_obj,
|
||||
fw_obj->fw_mm_node.start, fw_obj->fw_mm_node.size);
|
||||
}
|
||||
|
||||
static bool
|
||||
pvr_riscv_irq_pending(struct pvr_device *pvr_dev)
|
||||
{
|
||||
return pvr_cr_read32(pvr_dev, ROGUE_CR_IRQ_OS0_EVENT_STATUS) &
|
||||
ROGUE_CR_IRQ_OS0_EVENT_STATUS_SOURCE_EN;
|
||||
}
|
||||
|
||||
static void
|
||||
pvr_riscv_irq_clear(struct pvr_device *pvr_dev)
|
||||
{
|
||||
pvr_cr_write32(pvr_dev, ROGUE_CR_IRQ_OS0_EVENT_CLEAR,
|
||||
ROGUE_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_EN);
|
||||
}
|
||||
|
||||
const struct pvr_fw_defs pvr_fw_defs_riscv = {
|
||||
.init = pvr_riscv_init,
|
||||
.fw_process = pvr_riscv_fw_process,
|
||||
.vm_map = pvr_riscv_vm_map,
|
||||
.vm_unmap = pvr_riscv_vm_unmap,
|
||||
.get_fw_addr_with_offset = pvr_riscv_get_fw_addr_with_offset,
|
||||
.wrapper_init = pvr_riscv_wrapper_init,
|
||||
.irq_pending = pvr_riscv_irq_pending,
|
||||
.irq_clear = pvr_riscv_irq_clear,
|
||||
.has_fixed_data_addr = false,
|
||||
};
|
|
@ -49,6 +49,14 @@ rogue_bif_init(struct pvr_device *pvr_dev)
|
|||
|
||||
pvr_cr_write64(pvr_dev, BIF_CAT_BASEX(MMU_CONTEXT_MAPPING_FWPRIV),
|
||||
pc_addr);
|
||||
|
||||
if (pvr_dev->fw_dev.processor_type == PVR_FW_PROCESSOR_TYPE_RISCV) {
|
||||
pc_addr = (((u64)pc_dma_addr >> ROGUE_CR_FWCORE_MEM_CAT_BASE0_ADDR_ALIGNSHIFT)
|
||||
<< ROGUE_CR_FWCORE_MEM_CAT_BASE0_ADDR_SHIFT) &
|
||||
~ROGUE_CR_FWCORE_MEM_CAT_BASE0_ADDR_CLRMSK;
|
||||
|
||||
pvr_cr_write64(pvr_dev, FWCORE_MEM_CAT_BASEX(MMU_CONTEXT_MAPPING_FWPRIV), pc_addr);
|
||||
}
|
||||
}
|
||||
|
||||
static int
|
||||
|
@ -114,6 +122,9 @@ pvr_fw_start(struct pvr_device *pvr_dev)
|
|||
(void)pvr_cr_read32(pvr_dev, ROGUE_CR_SYS_BUS_SECURE); /* Fence write */
|
||||
}
|
||||
|
||||
if (pvr_dev->fw_dev.processor_type == PVR_FW_PROCESSOR_TYPE_RISCV)
|
||||
pvr_cr_write32(pvr_dev, ROGUE_CR_FWCORE_BOOT, 0);
|
||||
|
||||
/* Set Rogue in soft-reset. */
|
||||
pvr_cr_write64(pvr_dev, ROGUE_CR_SOFT_RESET, soft_reset_mask);
|
||||
if (has_reset2)
|
||||
|
@ -167,6 +178,12 @@ pvr_fw_start(struct pvr_device *pvr_dev)
|
|||
/* ... and afterwards. */
|
||||
udelay(3);
|
||||
|
||||
if (pvr_dev->fw_dev.processor_type == PVR_FW_PROCESSOR_TYPE_RISCV) {
|
||||
/* Boot the FW. */
|
||||
pvr_cr_write32(pvr_dev, ROGUE_CR_FWCORE_BOOT, 1);
|
||||
udelay(3);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
err_reset:
|
||||
|
|
|
@ -119,8 +119,6 @@ void pvr_fw_trace_fini(struct pvr_device *pvr_dev)
|
|||
pvr_fw_object_unmap_and_destroy(fw_trace->tracebuf_ctrl_obj);
|
||||
}
|
||||
|
||||
#if defined(CONFIG_DEBUG_FS)
|
||||
|
||||
/**
|
||||
* update_logtype() - Send KCCB command to trigger FW to update logtype
|
||||
* @pvr_dev: Target PowerVR device
|
||||
|
@ -441,7 +439,7 @@ static const struct file_operations pvr_fw_trace_fops = {
|
|||
void
|
||||
pvr_fw_trace_mask_update(struct pvr_device *pvr_dev, u32 old_mask, u32 new_mask)
|
||||
{
|
||||
if (old_mask != new_mask)
|
||||
if (IS_ENABLED(CONFIG_DEBUG_FS) && old_mask != new_mask)
|
||||
update_logtype(pvr_dev, new_mask);
|
||||
}
|
||||
|
||||
|
@ -450,6 +448,9 @@ pvr_fw_trace_debugfs_init(struct pvr_device *pvr_dev, struct dentry *dir)
|
|||
{
|
||||
struct pvr_fw_trace *fw_trace = &pvr_dev->fw_dev.fw_trace;
|
||||
|
||||
if (!IS_ENABLED(CONFIG_DEBUG_FS))
|
||||
return;
|
||||
|
||||
static_assert(ARRAY_SIZE(fw_trace->buffers) <= 10,
|
||||
"The filename buffer is only large enough for a single-digit thread count");
|
||||
|
||||
|
@ -462,4 +463,3 @@ pvr_fw_trace_debugfs_init(struct pvr_device *pvr_dev, struct dentry *dir)
|
|||
&pvr_fw_trace_fops);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -65,7 +65,6 @@ struct pvr_fw_trace {
|
|||
int pvr_fw_trace_init(struct pvr_device *pvr_dev);
|
||||
void pvr_fw_trace_fini(struct pvr_device *pvr_dev);
|
||||
|
||||
#if defined(CONFIG_DEBUG_FS)
|
||||
/* Forward declaration from <linux/dcache.h>. */
|
||||
struct dentry;
|
||||
|
||||
|
@ -73,6 +72,5 @@ void pvr_fw_trace_mask_update(struct pvr_device *pvr_dev, u32 old_mask,
|
|||
u32 new_mask);
|
||||
|
||||
void pvr_fw_trace_debugfs_init(struct pvr_device *pvr_dev, struct dentry *dir);
|
||||
#endif /* defined(CONFIG_DEBUG_FS) */
|
||||
|
||||
#endif /* PVR_FW_TRACE_H */
|
||||
|
|
66
drivers/gpu/drm/imagination/pvr_fw_util.c
Normal file
|
@@ -0,0 +1,66 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-only OR MIT
|
||||
/* Copyright (c) 2024 Imagination Technologies Ltd. */
|
||||
|
||||
#include "pvr_device.h"
|
||||
#include "pvr_fw.h"
|
||||
|
||||
#include <drm/drm_device.h>
|
||||
#include <drm/drm_print.h>
|
||||
|
||||
#include <linux/elf.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
/**
|
||||
* pvr_fw_process_elf_command_stream() - Process ELF firmware image and populate
|
||||
* firmware sections
|
||||
* @pvr_dev: Device pointer.
|
||||
* @fw: Pointer to firmware image.
|
||||
* @fw_code_ptr: Pointer to FW code section.
|
||||
* @fw_data_ptr: Pointer to FW data section.
|
||||
* @fw_core_code_ptr: Pointer to FW coremem code section.
|
||||
* @fw_core_data_ptr: Pointer to FW coremem data section.
|
||||
*
|
||||
* Returns:
|
||||
* * 0 on success, or
|
||||
* * -EINVAL on any error in ELF command stream.
|
||||
*/
|
||||
int
|
||||
pvr_fw_process_elf_command_stream(struct pvr_device *pvr_dev, const u8 *fw,
|
||||
u8 *fw_code_ptr, u8 *fw_data_ptr,
|
||||
u8 *fw_core_code_ptr, u8 *fw_core_data_ptr)
|
||||
{
|
||||
struct elf32_hdr *header = (struct elf32_hdr *)fw;
|
||||
struct elf32_phdr *program_header = (struct elf32_phdr *)(fw + header->e_phoff);
|
||||
struct drm_device *drm_dev = from_pvr_device(pvr_dev);
|
||||
int err;
|
||||
|
||||
for (u32 entry = 0; entry < header->e_phnum; entry++, program_header++) {
|
||||
void *write_addr;
|
||||
|
||||
/* Only consider loadable entries in the ELF segment table */
|
||||
if (program_header->p_type != PT_LOAD)
|
||||
continue;
|
||||
|
||||
err = pvr_fw_find_mmu_segment(pvr_dev, program_header->p_vaddr,
|
||||
program_header->p_memsz, fw_code_ptr, fw_data_ptr,
|
||||
fw_core_code_ptr, fw_core_data_ptr, &write_addr);
|
||||
if (err) {
|
||||
drm_err(drm_dev,
|
||||
"Addr 0x%x (size: %d) not found in any firmware segment",
|
||||
program_header->p_vaddr, program_header->p_memsz);
|
||||
return err;
|
||||
}
|
||||
|
||||
/* Write to FW allocation only if available */
|
||||
if (write_addr) {
|
||||
memcpy(write_addr, fw + program_header->p_offset,
|
||||
program_header->p_filesz);
|
||||
|
||||
memset((u8 *)write_addr + program_header->p_filesz, 0,
|
||||
program_header->p_memsz - program_header->p_filesz);
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
|
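pvr_fw_process_elf_command_stream() above copies only p_filesz bytes from the image and zero-fills the rest of the segment up to p_memsz, which is what gives .bss-style data its cleared initial state. A tiny stand-alone sketch of that copy-then-zero step with made-up sizes::

    /* Sketch only: sizes and buffers are illustrative, not a real firmware image. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            uint8_t fw_segment[0x40];       /* bytes present in the file (p_filesz) */
            uint8_t dest[0x60];             /* in-memory segment size (p_memsz) */
            size_t p_filesz = sizeof(fw_segment);
            size_t p_memsz = sizeof(dest);

            memset(fw_segment, 0xaa, sizeof(fw_segment));
            memset(dest, 0xff, sizeof(dest));               /* pretend-dirty allocation */

            memcpy(dest, fw_segment, p_filesz);             /* file-backed part */
            memset(dest + p_filesz, 0, p_memsz - p_filesz); /* zero the .bss-style tail */

            printf("dest[0x3f]=0x%02x dest[0x40]=0x%02x\n", dest[0x3f], dest[0x40]);
            /* prints dest[0x3f]=0xaa dest[0x40]=0x00 */
            return 0;
    }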
@ -19,6 +19,7 @@
|
|||
#include <linux/log2.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/pagemap.h>
|
||||
#include <linux/property.h>
|
||||
#include <linux/refcount.h>
|
||||
#include <linux/scatterlist.h>
|
||||
|
||||
|
@ -334,6 +335,7 @@ struct drm_gem_object *pvr_gem_create_object(struct drm_device *drm_dev, size_t
|
|||
struct pvr_gem_object *
|
||||
pvr_gem_object_create(struct pvr_device *pvr_dev, size_t size, u64 flags)
|
||||
{
|
||||
struct drm_device *drm_dev = from_pvr_device(pvr_dev);
|
||||
struct drm_gem_shmem_object *shmem_obj;
|
||||
struct pvr_gem_object *pvr_obj;
|
||||
struct sg_table *sgt;
|
||||
|
@ -343,7 +345,10 @@ pvr_gem_object_create(struct pvr_device *pvr_dev, size_t size, u64 flags)
|
|||
if (size == 0 || !pvr_gem_object_flags_validate(flags))
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
shmem_obj = drm_gem_shmem_create(from_pvr_device(pvr_dev), size);
|
||||
if (device_get_dma_attr(drm_dev->dev) == DEV_DMA_COHERENT)
|
||||
flags |= PVR_BO_CPU_CACHED;
|
||||
|
||||
shmem_obj = drm_gem_shmem_create(drm_dev, size);
|
||||
if (IS_ERR(shmem_obj))
|
||||
return ERR_CAST(shmem_obj);
|
||||
|
||||
|
@ -358,8 +363,7 @@ pvr_gem_object_create(struct pvr_device *pvr_dev, size_t size, u64 flags)
|
|||
goto err_shmem_object_free;
|
||||
}
|
||||
|
||||
dma_sync_sgtable_for_device(shmem_obj->base.dev->dev, sgt,
|
||||
DMA_BIDIRECTIONAL);
|
||||
dma_sync_sgtable_for_device(drm_dev->dev, sgt, DMA_BIDIRECTIONAL);
|
||||
|
||||
/*
|
||||
* Do this last because pvr_gem_object_zero() requires a fully
|
||||
|
|
|
@ -44,8 +44,10 @@ struct pvr_file;
|
|||
* Bits not defined anywhere are "undefined".
|
||||
*
|
||||
* CPU mapping options
|
||||
* :PVR_BO_CPU_CACHED: By default, all GEM objects are mapped write-combined on the CPU. Set this
|
||||
* flag to override this behaviour and map the object cached.
|
||||
* :PVR_BO_CPU_CACHED: By default, all GEM objects are mapped write-combined on the CPU. Set
|
||||
* this flag to override this behaviour and map the object cached. If the dma_coherent
|
||||
* property is present in devicetree, all allocations will be mapped as if this flag was set.
|
||||
* This does not require any additional consideration at allocation time.
|
||||
*
|
||||
* Firmware options
|
||||
* :PVR_BO_FW_NO_CLEAR_ON_RESET: By default, all FW objects are cleared and reinitialised on hard
|
||||
|
|
|
@ -17,6 +17,7 @@
|
|||
#include <linux/dma-mapping.h>
|
||||
#include <linux/kmemleak.h>
|
||||
#include <linux/minmax.h>
|
||||
#include <linux/property.h>
|
||||
#include <linux/sizes.h>
|
||||
|
||||
#define PVR_SHIFT_FROM_SIZE(size_) (__builtin_ctzll(size_))
|
||||
|
@ -259,6 +260,7 @@ pvr_mmu_backing_page_init(struct pvr_mmu_backing_page *page,
|
|||
struct device *dev = from_pvr_device(pvr_dev)->dev;
|
||||
|
||||
struct page *raw_page;
|
||||
pgprot_t prot;
|
||||
int err;
|
||||
|
||||
dma_addr_t dma_addr;
|
||||
|
@ -268,7 +270,11 @@ pvr_mmu_backing_page_init(struct pvr_mmu_backing_page *page,
|
|||
if (!raw_page)
|
||||
return -ENOMEM;
|
||||
|
||||
host_ptr = vmap(&raw_page, 1, VM_MAP, pgprot_writecombine(PAGE_KERNEL));
|
||||
prot = PAGE_KERNEL;
|
||||
if (device_get_dma_attr(dev) != DEV_DMA_COHERENT)
|
||||
prot = pgprot_writecombine(prot);
|
||||
|
||||
host_ptr = vmap(&raw_page, 1, VM_MAP, prot);
|
||||
if (!host_ptr) {
|
||||
err = -ENOMEM;
|
||||
goto err_free_page;
|
||||
|
|
|
@ -10,11 +10,15 @@
|
|||
|
||||
#include <drm/drm_drv.h>
|
||||
#include <drm/drm_managed.h>
|
||||
#include <linux/cleanup.h>
|
||||
#include <linux/clk.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/pm_domain.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
#include <linux/reset.h>
|
||||
#include <linux/timer.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/workqueue.h>
|
||||
|
@ -252,6 +256,8 @@ pvr_power_device_suspend(struct device *dev)
|
|||
clk_disable_unprepare(pvr_dev->sys_clk);
|
||||
clk_disable_unprepare(pvr_dev->core_clk);
|
||||
|
||||
err = reset_control_assert(pvr_dev->reset);
|
||||
|
||||
err_drm_dev_exit:
|
||||
drm_dev_exit(idx);
|
||||
|
||||
|
@ -282,16 +288,33 @@ pvr_power_device_resume(struct device *dev)
|
|||
if (err)
|
||||
goto err_sys_clk_disable;
|
||||
|
||||
/*
|
||||
* According to the hardware manual, a delay of at least 32 clock
|
||||
* cycles is required between de-asserting the clkgen reset and
|
||||
* de-asserting the GPU reset. Assuming a worst-case scenario with
|
||||
* a very high GPU clock frequency, a delay of 1 microsecond is
|
||||
* sufficient to ensure this requirement is met across all
|
||||
* feasible GPU clock speeds.
|
||||
*/
|
||||
udelay(1);
|
||||
|
||||
err = reset_control_deassert(pvr_dev->reset);
|
||||
if (err)
|
||||
goto err_mem_clk_disable;
|
||||
|
||||
if (pvr_dev->fw_dev.booted) {
|
||||
err = pvr_power_fw_enable(pvr_dev);
|
||||
if (err)
|
||||
goto err_mem_clk_disable;
|
||||
goto err_reset_assert;
|
||||
}
|
||||
|
||||
drm_dev_exit(idx);
|
||||
|
||||
return 0;
|
||||
|
||||
err_reset_assert:
|
||||
reset_control_assert(pvr_dev->reset);
|
||||
|
||||
err_mem_clk_disable:
|
||||
clk_disable_unprepare(pvr_dev->mem_clk);
|
||||
|
||||
|
@ -431,3 +454,114 @@ pvr_watchdog_fini(struct pvr_device *pvr_dev)
|
|||
{
|
||||
cancel_delayed_work_sync(&pvr_dev->watchdog.work);
|
||||
}
|
||||
|
||||
int pvr_power_domains_init(struct pvr_device *pvr_dev)
|
||||
{
|
||||
struct device *dev = from_pvr_device(pvr_dev)->dev;
|
||||
|
||||
struct device_link **domain_links __free(kfree) = NULL;
|
||||
struct device **domain_devs __free(kfree) = NULL;
|
||||
int domain_count;
|
||||
int link_count;
|
||||
|
||||
char dev_name[2] = "a";
|
||||
int err;
|
||||
int i;
|
||||
|
||||
domain_count = of_count_phandle_with_args(dev->of_node, "power-domains",
|
||||
"#power-domain-cells");
|
||||
if (domain_count < 0)
|
||||
return domain_count;
|
||||
|
||||
if (domain_count <= 1)
|
||||
return 0;
|
||||
|
||||
link_count = domain_count + (domain_count - 1);
|
||||
|
||||
domain_devs = kcalloc(domain_count, sizeof(*domain_devs), GFP_KERNEL);
|
||||
if (!domain_devs)
|
||||
return -ENOMEM;
|
||||
|
||||
domain_links = kcalloc(link_count, sizeof(*domain_links), GFP_KERNEL);
|
||||
if (!domain_links)
|
||||
return -ENOMEM;
|
||||
|
||||
for (i = 0; i < domain_count; i++) {
|
||||
struct device *domain_dev;
|
||||
|
||||
dev_name[0] = 'a' + i;
|
||||
domain_dev = dev_pm_domain_attach_by_name(dev, dev_name);
|
||||
if (IS_ERR_OR_NULL(domain_dev)) {
|
||||
err = domain_dev ? PTR_ERR(domain_dev) : -ENODEV;
|
||||
goto err_detach;
|
||||
}
|
||||
|
||||
domain_devs[i] = domain_dev;
|
||||
}
|
||||
|
||||
for (i = 0; i < domain_count; i++) {
|
||||
struct device_link *link;
|
||||
|
||||
link = device_link_add(dev, domain_devs[i], DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
|
||||
if (!link) {
|
||||
err = -ENODEV;
|
||||
goto err_unlink;
|
||||
}
|
||||
|
||||
domain_links[i] = link;
|
||||
}
|
||||
|
||||
for (i = domain_count; i < link_count; i++) {
|
||||
struct device_link *link;
|
||||
|
||||
link = device_link_add(domain_devs[i - domain_count + 1],
|
||||
domain_devs[i - domain_count],
|
||||
DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
|
||||
if (!link) {
|
||||
err = -ENODEV;
|
||||
goto err_unlink;
|
||||
}
|
||||
|
||||
domain_links[i] = link;
|
||||
}
|
||||
|
||||
pvr_dev->power = (struct pvr_device_power){
|
||||
.domain_devs = no_free_ptr(domain_devs),
|
||||
.domain_links = no_free_ptr(domain_links),
|
||||
.domain_count = domain_count,
|
||||
};
|
||||
|
||||
return 0;
|
||||
|
||||
err_unlink:
|
||||
while (--i >= 0)
|
||||
device_link_del(domain_links[i]);
|
||||
|
||||
i = domain_count;
|
||||
|
||||
err_detach:
|
||||
while (--i >= 0)
|
||||
dev_pm_domain_detach(domain_devs[i], true);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
void pvr_power_domains_fini(struct pvr_device *pvr_dev)
|
||||
{
|
||||
const int domain_count = pvr_dev->power.domain_count;
|
||||
|
||||
int i = domain_count + (domain_count - 1);
|
||||
|
||||
while (--i >= 0)
|
||||
device_link_del(pvr_dev->power.domain_links[i]);
|
||||
|
||||
i = domain_count;
|
||||
|
||||
while (--i >= 0)
|
||||
dev_pm_domain_detach(pvr_dev->power.domain_devs[i], true);
|
||||
|
||||
kfree(pvr_dev->power.domain_links);
|
||||
kfree(pvr_dev->power.domain_devs);
|
||||
|
||||
pvr_dev->power = (struct pvr_device_power){ 0 };
|
||||
}
|
||||
|
|
|
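In pvr_power_domains_init() above, link_count = domain_count + (domain_count - 1): one consumer link from the GPU device to every domain, plus a chain link between each pair of consecutive domains, with the domains attached by the single-letter names "a", "b", "c", ... via dev_pm_domain_attach_by_name(). A small sketch that just prints the link layout this builds for a hypothetical three-domain GPU::

    /* Sketch only: prints the link layout for a hypothetical three-domain GPU. */
    #include <stdio.h>

    int main(void)
    {
            int domain_count = 3;
            int link_count = domain_count + (domain_count - 1);     /* 5 */

            /* One consumer link from the GPU device to each domain... */
            for (int i = 0; i < domain_count; i++)
                    printf("link %d: gpu consumes domain '%c'\n", i, 'a' + i);

            /* ...plus chain links so domain N+1 consumes domain N. */
            for (int i = domain_count; i < link_count; i++)
                    printf("link %d: domain '%c' consumes domain '%c'\n", i,
                           'a' + (i - domain_count + 1), 'a' + (i - domain_count));

            return 0;
    }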
@ -38,4 +38,7 @@ pvr_power_put(struct pvr_device *pvr_dev)
|
|||
return pm_runtime_put(drm_dev->dev);
|
||||
}
|
||||
|
||||
int pvr_power_domains_init(struct pvr_device *pvr_dev);
|
||||
void pvr_power_domains_fini(struct pvr_device *pvr_dev);
|
||||
|
||||
#endif /* PVR_POWER_H */
|
||||
|
|
|
@ -827,6 +827,120 @@
|
|||
#define ROGUE_CR_EVENT_STATUS_TLA_COMPLETE_CLRMSK 0xFFFFFFFEU
|
||||
#define ROGUE_CR_EVENT_STATUS_TLA_COMPLETE_EN 0x00000001U
|
||||
|
||||
/* Register ROGUE_CR_EVENT_CLEAR */
|
||||
#define ROGUE_CR_EVENT_CLEAR 0x0138U
|
||||
#define ROGUE_CR_EVENT_CLEAR__ROGUEXE__MASKFULL 0x00000000E01DFFFFULL
|
||||
#define ROGUE_CR_EVENT_CLEAR__SIGNALS__MASKFULL 0x00000000E007FFFFULL
|
||||
#define ROGUE_CR_EVENT_CLEAR_MASKFULL 0x00000000FFFFFFFFULL
|
||||
#define ROGUE_CR_EVENT_CLEAR_TDM_FENCE_FINISHED_SHIFT 31U
|
||||
#define ROGUE_CR_EVENT_CLEAR_TDM_FENCE_FINISHED_CLRMSK 0x7FFFFFFFU
|
||||
#define ROGUE_CR_EVENT_CLEAR_TDM_FENCE_FINISHED_EN 0x80000000U
|
||||
#define ROGUE_CR_EVENT_CLEAR_TDM_BUFFER_STALL_SHIFT 30U
|
||||
#define ROGUE_CR_EVENT_CLEAR_TDM_BUFFER_STALL_CLRMSK 0xBFFFFFFFU
|
||||
#define ROGUE_CR_EVENT_CLEAR_TDM_BUFFER_STALL_EN 0x40000000U
|
||||
#define ROGUE_CR_EVENT_CLEAR_COMPUTE_SIGNAL_FAILURE_SHIFT 29U
|
||||
#define ROGUE_CR_EVENT_CLEAR_COMPUTE_SIGNAL_FAILURE_CLRMSK 0xDFFFFFFFU
|
||||
#define ROGUE_CR_EVENT_CLEAR_COMPUTE_SIGNAL_FAILURE_EN 0x20000000U
|
||||
#define ROGUE_CR_EVENT_CLEAR_DPX_OUT_OF_MEMORY_SHIFT 28U
|
||||
#define ROGUE_CR_EVENT_CLEAR_DPX_OUT_OF_MEMORY_CLRMSK 0xEFFFFFFFU
|
||||
#define ROGUE_CR_EVENT_CLEAR_DPX_OUT_OF_MEMORY_EN 0x10000000U
|
||||
#define ROGUE_CR_EVENT_CLEAR_DPX_MMU_PAGE_FAULT_SHIFT 27U
|
||||
#define ROGUE_CR_EVENT_CLEAR_DPX_MMU_PAGE_FAULT_CLRMSK 0xF7FFFFFFU
|
||||
#define ROGUE_CR_EVENT_CLEAR_DPX_MMU_PAGE_FAULT_EN 0x08000000U
|
||||
#define ROGUE_CR_EVENT_CLEAR_RPM_OUT_OF_MEMORY_SHIFT 26U
|
||||
#define ROGUE_CR_EVENT_CLEAR_RPM_OUT_OF_MEMORY_CLRMSK 0xFBFFFFFFU
|
||||
#define ROGUE_CR_EVENT_CLEAR_RPM_OUT_OF_MEMORY_EN 0x04000000U
|
||||
#define ROGUE_CR_EVENT_CLEAR_FBA_FC3_FINISHED_SHIFT 25U
|
||||
#define ROGUE_CR_EVENT_CLEAR_FBA_FC3_FINISHED_CLRMSK 0xFDFFFFFFU
|
||||
#define ROGUE_CR_EVENT_CLEAR_FBA_FC3_FINISHED_EN 0x02000000U
|
||||
#define ROGUE_CR_EVENT_CLEAR_FBA_FC2_FINISHED_SHIFT 24U
|
||||
#define ROGUE_CR_EVENT_CLEAR_FBA_FC2_FINISHED_CLRMSK 0xFEFFFFFFU
|
||||
#define ROGUE_CR_EVENT_CLEAR_FBA_FC2_FINISHED_EN 0x01000000U
|
||||
#define ROGUE_CR_EVENT_CLEAR_FBA_FC1_FINISHED_SHIFT 23U
|
||||
#define ROGUE_CR_EVENT_CLEAR_FBA_FC1_FINISHED_CLRMSK 0xFF7FFFFFU
|
||||
#define ROGUE_CR_EVENT_CLEAR_FBA_FC1_FINISHED_EN 0x00800000U
|
||||
#define ROGUE_CR_EVENT_CLEAR_FBA_FC0_FINISHED_SHIFT 22U
|
||||
#define ROGUE_CR_EVENT_CLEAR_FBA_FC0_FINISHED_CLRMSK 0xFFBFFFFFU
|
||||
#define ROGUE_CR_EVENT_CLEAR_FBA_FC0_FINISHED_EN 0x00400000U
|
||||
#define ROGUE_CR_EVENT_CLEAR_RDM_FC3_FINISHED_SHIFT 21U
|
||||
#define ROGUE_CR_EVENT_CLEAR_RDM_FC3_FINISHED_CLRMSK 0xFFDFFFFFU
|
||||
#define ROGUE_CR_EVENT_CLEAR_RDM_FC3_FINISHED_EN 0x00200000U
|
||||
#define ROGUE_CR_EVENT_CLEAR_RDM_FC2_FINISHED_SHIFT 20U
|
||||
#define ROGUE_CR_EVENT_CLEAR_RDM_FC2_FINISHED_CLRMSK 0xFFEFFFFFU
|
||||
#define ROGUE_CR_EVENT_CLEAR_RDM_FC2_FINISHED_EN 0x00100000U
|
||||
#define ROGUE_CR_EVENT_CLEAR_SAFETY_SHIFT 20U
|
||||
#define ROGUE_CR_EVENT_CLEAR_SAFETY_CLRMSK 0xFFEFFFFFU
|
||||
#define ROGUE_CR_EVENT_CLEAR_SAFETY_EN 0x00100000U
|
||||
#define ROGUE_CR_EVENT_CLEAR_RDM_FC1_FINISHED_SHIFT 19U
|
||||
#define ROGUE_CR_EVENT_CLEAR_RDM_FC1_FINISHED_CLRMSK 0xFFF7FFFFU
|
||||
#define ROGUE_CR_EVENT_CLEAR_RDM_FC1_FINISHED_EN 0x00080000U
|
||||
#define ROGUE_CR_EVENT_CLEAR_SLAVE_REQ_SHIFT 19U
|
||||
#define ROGUE_CR_EVENT_CLEAR_SLAVE_REQ_CLRMSK 0xFFF7FFFFU
|
||||
#define ROGUE_CR_EVENT_CLEAR_SLAVE_REQ_EN 0x00080000U
|
||||
#define ROGUE_CR_EVENT_CLEAR_RDM_FC0_FINISHED_SHIFT 18U
|
||||
#define ROGUE_CR_EVENT_CLEAR_RDM_FC0_FINISHED_CLRMSK 0xFFFBFFFFU
|
||||
#define ROGUE_CR_EVENT_CLEAR_RDM_FC0_FINISHED_EN 0x00040000U
|
||||
#define ROGUE_CR_EVENT_CLEAR_TDM_CONTEXT_STORE_FINISHED_SHIFT 18U
|
||||
#define ROGUE_CR_EVENT_CLEAR_TDM_CONTEXT_STORE_FINISHED_CLRMSK 0xFFFBFFFFU
|
||||
#define ROGUE_CR_EVENT_CLEAR_TDM_CONTEXT_STORE_FINISHED_EN 0x00040000U
|
||||
#define ROGUE_CR_EVENT_CLEAR_SHG_FINISHED_SHIFT 17U
|
||||
#define ROGUE_CR_EVENT_CLEAR_SHG_FINISHED_CLRMSK 0xFFFDFFFFU
|
||||
#define ROGUE_CR_EVENT_CLEAR_SHG_FINISHED_EN 0x00020000U
|
||||
#define ROGUE_CR_EVENT_CLEAR_SPFILTER_SIGNAL_UPDATE_SHIFT 17U
|
||||
#define ROGUE_CR_EVENT_CLEAR_SPFILTER_SIGNAL_UPDATE_CLRMSK 0xFFFDFFFFU
|
||||
#define ROGUE_CR_EVENT_CLEAR_SPFILTER_SIGNAL_UPDATE_EN 0x00020000U
|
||||
#define ROGUE_CR_EVENT_CLEAR_COMPUTE_BUFFER_STALL_SHIFT 16U
|
||||
#define ROGUE_CR_EVENT_CLEAR_COMPUTE_BUFFER_STALL_CLRMSK 0xFFFEFFFFU
|
||||
#define ROGUE_CR_EVENT_CLEAR_COMPUTE_BUFFER_STALL_EN 0x00010000U
|
||||
#define ROGUE_CR_EVENT_CLEAR_USC_TRIGGER_SHIFT 15U
|
||||
#define ROGUE_CR_EVENT_CLEAR_USC_TRIGGER_CLRMSK 0xFFFF7FFFU
|
||||
#define ROGUE_CR_EVENT_CLEAR_USC_TRIGGER_EN 0x00008000U
|
||||
#define ROGUE_CR_EVENT_CLEAR_ZLS_FINISHED_SHIFT 14U
|
||||
#define ROGUE_CR_EVENT_CLEAR_ZLS_FINISHED_CLRMSK 0xFFFFBFFFU
|
||||
#define ROGUE_CR_EVENT_CLEAR_ZLS_FINISHED_EN 0x00004000U
|
||||
#define ROGUE_CR_EVENT_CLEAR_GPIO_ACK_SHIFT 13U
|
||||
#define ROGUE_CR_EVENT_CLEAR_GPIO_ACK_CLRMSK 0xFFFFDFFFU
|
||||
#define ROGUE_CR_EVENT_CLEAR_GPIO_ACK_EN 0x00002000U
|
||||
#define ROGUE_CR_EVENT_CLEAR_GPIO_REQ_SHIFT 12U
|
||||
#define ROGUE_CR_EVENT_CLEAR_GPIO_REQ_CLRMSK 0xFFFFEFFFU
|
||||
#define ROGUE_CR_EVENT_CLEAR_GPIO_REQ_EN 0x00001000U
|
||||
#define ROGUE_CR_EVENT_CLEAR_POWER_ABORT_SHIFT 11U
|
||||
#define ROGUE_CR_EVENT_CLEAR_POWER_ABORT_CLRMSK 0xFFFFF7FFU
|
||||
#define ROGUE_CR_EVENT_CLEAR_POWER_ABORT_EN 0x00000800U
|
||||
#define ROGUE_CR_EVENT_CLEAR_POWER_COMPLETE_SHIFT 10U
|
||||
#define ROGUE_CR_EVENT_CLEAR_POWER_COMPLETE_CLRMSK 0xFFFFFBFFU
|
||||
#define ROGUE_CR_EVENT_CLEAR_POWER_COMPLETE_EN 0x00000400U
|
||||
#define ROGUE_CR_EVENT_CLEAR_MMU_PAGE_FAULT_SHIFT 9U
|
||||
#define ROGUE_CR_EVENT_CLEAR_MMU_PAGE_FAULT_CLRMSK 0xFFFFFDFFU
|
||||
#define ROGUE_CR_EVENT_CLEAR_MMU_PAGE_FAULT_EN 0x00000200U
|
||||
#define ROGUE_CR_EVENT_CLEAR_PM_3D_MEM_FREE_SHIFT 8U
|
||||
#define ROGUE_CR_EVENT_CLEAR_PM_3D_MEM_FREE_CLRMSK 0xFFFFFEFFU
|
||||
#define ROGUE_CR_EVENT_CLEAR_PM_3D_MEM_FREE_EN 0x00000100U
|
||||
#define ROGUE_CR_EVENT_CLEAR_PM_OUT_OF_MEMORY_SHIFT 7U
|
||||
#define ROGUE_CR_EVENT_CLEAR_PM_OUT_OF_MEMORY_CLRMSK 0xFFFFFF7FU
|
||||
#define ROGUE_CR_EVENT_CLEAR_PM_OUT_OF_MEMORY_EN 0x00000080U
|
||||
#define ROGUE_CR_EVENT_CLEAR_TA_TERMINATE_SHIFT 6U
|
||||
#define ROGUE_CR_EVENT_CLEAR_TA_TERMINATE_CLRMSK 0xFFFFFFBFU
|
||||
#define ROGUE_CR_EVENT_CLEAR_TA_TERMINATE_EN 0x00000040U
|
||||
#define ROGUE_CR_EVENT_CLEAR_TA_FINISHED_SHIFT 5U
|
||||
#define ROGUE_CR_EVENT_CLEAR_TA_FINISHED_CLRMSK 0xFFFFFFDFU
|
||||
#define ROGUE_CR_EVENT_CLEAR_TA_FINISHED_EN 0x00000020U
|
||||
#define ROGUE_CR_EVENT_CLEAR_ISP_END_MACROTILE_SHIFT 4U
|
||||
#define ROGUE_CR_EVENT_CLEAR_ISP_END_MACROTILE_CLRMSK 0xFFFFFFEFU
|
||||
#define ROGUE_CR_EVENT_CLEAR_ISP_END_MACROTILE_EN 0x00000010U
|
||||
#define ROGUE_CR_EVENT_CLEAR_PIXELBE_END_RENDER_SHIFT 3U
|
||||
#define ROGUE_CR_EVENT_CLEAR_PIXELBE_END_RENDER_CLRMSK 0xFFFFFFF7U
|
||||
#define ROGUE_CR_EVENT_CLEAR_PIXELBE_END_RENDER_EN 0x00000008U
|
||||
#define ROGUE_CR_EVENT_CLEAR_COMPUTE_FINISHED_SHIFT 2U
|
||||
#define ROGUE_CR_EVENT_CLEAR_COMPUTE_FINISHED_CLRMSK 0xFFFFFFFBU
|
||||
#define ROGUE_CR_EVENT_CLEAR_COMPUTE_FINISHED_EN 0x00000004U
|
||||
#define ROGUE_CR_EVENT_CLEAR_KERNEL_FINISHED_SHIFT 1U
|
||||
#define ROGUE_CR_EVENT_CLEAR_KERNEL_FINISHED_CLRMSK 0xFFFFFFFDU
|
||||
#define ROGUE_CR_EVENT_CLEAR_KERNEL_FINISHED_EN 0x00000002U
|
||||
#define ROGUE_CR_EVENT_CLEAR_TLA_COMPLETE_SHIFT 0U
|
||||
#define ROGUE_CR_EVENT_CLEAR_TLA_COMPLETE_CLRMSK 0xFFFFFFFEU
|
||||
#define ROGUE_CR_EVENT_CLEAR_TLA_COMPLETE_EN 0x00000001U
|
||||
|
||||
/* Register ROGUE_CR_TIMER */
|
||||
#define ROGUE_CR_TIMER 0x0160U
|
||||
#define ROGUE_CR_TIMER_MASKFULL 0x8000FFFFFFFFFFFFULL
|
||||
|
@ -6031,25 +6145,6 @@
|
|||
#define ROGUE_CR_MULTICORE_COMPUTE_CTRL_COMMON_GPU_ENABLE_SHIFT 0U
|
||||
#define ROGUE_CR_MULTICORE_COMPUTE_CTRL_COMMON_GPU_ENABLE_CLRMSK 0xFFFFFF00U
|
||||
|
||||
/* Register ROGUE_CR_ECC_RAM_ERR_INJ */
|
||||
#define ROGUE_CR_ECC_RAM_ERR_INJ 0xF340U
|
||||
#define ROGUE_CR_ECC_RAM_ERR_INJ_MASKFULL 0x000000000000001FULL
|
||||
#define ROGUE_CR_ECC_RAM_ERR_INJ_SLC_SIDEKICK_SHIFT 4U
|
||||
#define ROGUE_CR_ECC_RAM_ERR_INJ_SLC_SIDEKICK_CLRMSK 0xFFFFFFEFU
|
||||
#define ROGUE_CR_ECC_RAM_ERR_INJ_SLC_SIDEKICK_EN 0x00000010U
|
||||
#define ROGUE_CR_ECC_RAM_ERR_INJ_USC_SHIFT 3U
|
||||
#define ROGUE_CR_ECC_RAM_ERR_INJ_USC_CLRMSK 0xFFFFFFF7U
|
||||
#define ROGUE_CR_ECC_RAM_ERR_INJ_USC_EN 0x00000008U
|
||||
#define ROGUE_CR_ECC_RAM_ERR_INJ_TPU_MCU_L0_SHIFT 2U
|
||||
#define ROGUE_CR_ECC_RAM_ERR_INJ_TPU_MCU_L0_CLRMSK 0xFFFFFFFBU
|
||||
#define ROGUE_CR_ECC_RAM_ERR_INJ_TPU_MCU_L0_EN 0x00000004U
|
||||
#define ROGUE_CR_ECC_RAM_ERR_INJ_RASCAL_SHIFT 1U
|
||||
#define ROGUE_CR_ECC_RAM_ERR_INJ_RASCAL_CLRMSK 0xFFFFFFFDU
|
||||
#define ROGUE_CR_ECC_RAM_ERR_INJ_RASCAL_EN 0x00000002U
|
||||
#define ROGUE_CR_ECC_RAM_ERR_INJ_MARS_SHIFT 0U
|
||||
#define ROGUE_CR_ECC_RAM_ERR_INJ_MARS_CLRMSK 0xFFFFFFFEU
|
||||
#define ROGUE_CR_ECC_RAM_ERR_INJ_MARS_EN 0x00000001U
|
||||
|
||||
/* Register ROGUE_CR_ECC_RAM_INIT_KICK */
|
||||
#define ROGUE_CR_ECC_RAM_INIT_KICK 0xF348U
|
||||
#define ROGUE_CR_ECC_RAM_INIT_KICK_MASKFULL 0x000000000000001FULL
|
||||
|
@ -6163,6 +6258,26 @@
|
|||
#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_PAGE_FAULT_CLRMSK 0xFFFFFFFEU
|
||||
#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_PAGE_FAULT_EN 0x00000001U
|
||||
|
||||
/* Register ROGUE_CR_FAULT_FW_STATUS */
|
||||
#define ROGUE_CR_FAULT_FW_STATUS 0xF3B0U
|
||||
#define ROGUE_CR_FAULT_FW_STATUS_MASKFULL 0x0000000000010001ULL
|
||||
#define ROGUE_CR_FAULT_FW_STATUS_CPU_CORRECT_SHIFT 16U
|
||||
#define ROGUE_CR_FAULT_FW_STATUS_CPU_CORRECT_CLRMSK 0xFFFEFFFFU
|
||||
#define ROGUE_CR_FAULT_FW_STATUS_CPU_CORRECT_EN 0x00010000U
|
||||
#define ROGUE_CR_FAULT_FW_STATUS_CPU_DETECT_SHIFT 0U
|
||||
#define ROGUE_CR_FAULT_FW_STATUS_CPU_DETECT_CLRMSK 0xFFFFFFFEU
|
||||
#define ROGUE_CR_FAULT_FW_STATUS_CPU_DETECT_EN 0x00000001U
|
||||
|
||||
/* Register ROGUE_CR_FAULT_FW_CLEAR */
|
||||
#define ROGUE_CR_FAULT_FW_CLEAR 0xF3B8U
|
||||
#define ROGUE_CR_FAULT_FW_CLEAR_MASKFULL 0x0000000000010001ULL
|
||||
#define ROGUE_CR_FAULT_FW_CLEAR_CPU_CORRECT_SHIFT 16U
|
||||
#define ROGUE_CR_FAULT_FW_CLEAR_CPU_CORRECT_CLRMSK 0xFFFEFFFFU
|
||||
#define ROGUE_CR_FAULT_FW_CLEAR_CPU_CORRECT_EN 0x00010000U
|
||||
#define ROGUE_CR_FAULT_FW_CLEAR_CPU_DETECT_SHIFT 0U
|
||||
#define ROGUE_CR_FAULT_FW_CLEAR_CPU_DETECT_CLRMSK 0xFFFFFFFEU
|
||||
#define ROGUE_CR_FAULT_FW_CLEAR_CPU_DETECT_EN 0x00000001U
|
||||
|
||||
/* Register ROGUE_CR_MTS_SAFETY_EVENT_ENABLE */
|
||||
#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE 0xF3D8U
|
||||
#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__MASKFULL 0x000000000000007FULL
|
||||
|
|
41
drivers/gpu/drm/imagination/pvr_rogue_riscv.h
Normal file
|
@@ -0,0 +1,41 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/* Copyright (c) 2024 Imagination Technologies Ltd. */

#ifndef PVR_ROGUE_RISCV_H
#define PVR_ROGUE_RISCV_H

#include "pvr_rogue_cr_defs.h"

#include <linux/bitops.h>
#include <linux/sizes.h>
#include <linux/types.h>

#define ROGUE_RISCVFW_REGION_SIZE SZ_256M
#define ROGUE_RISCVFW_REGION_SHIFT __ffs(ROGUE_RISCVFW_REGION_SIZE)

enum rogue_riscvfw_region {
	ROGUE_RISCV_REGION__RESERVED_0 = 0,
	ROGUE_RISCV_REGION__RESERVED_1,
	ROGUE_RISCV_REGION_SOCIF,
	ROGUE_RISCV_REGION__RESERVED_3,
	ROGUE_RISCV_REGION__RESERVED_4,
	ROGUE_RISCV_REGION_BOOTLDR_DATA,
	ROGUE_RISCV_REGION_SHARED_CACHED_DATA,
	ROGUE_RISCV_REGION__RESERVED_7,
	ROGUE_RISCV_REGION_COREMEM,
	ROGUE_RISCV_REGION__RESERVED_9,
	ROGUE_RISCV_REGION__RESERVED_A,
	ROGUE_RISCV_REGION__RESERVED_B,
	ROGUE_RISCV_REGION_BOOTLDR_CODE,
	ROGUE_RISCV_REGION_SHARED_UNCACHED_DATA,
	ROGUE_RISCV_REGION__RESERVED_E,
	ROGUE_RISCV_REGION__RESERVED_F,

	ROGUE_RISCV_REGION__COUNT,
};

#define ROGUE_RISCVFW_REGION_BASE(r) ((u32)(ROGUE_RISCV_REGION_##r) << ROGUE_RISCVFW_REGION_SHIFT)
#define ROGUE_RISCVFW_REGION_REMAP_CR(r) \
	(ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0 + (u32)(ROGUE_RISCV_REGION_##r) * 8U)

#endif /* PVR_ROGUE_RISCV_H */
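The two macros at the end of the new header only combine a region index with the fixed 256 MiB region size; a short worked expansion, derived solely from the definitions in the header above, is:

	/* ROGUE_RISCVFW_REGION_SIZE = SZ_256M = 0x10000000, so ROGUE_RISCVFW_REGION_SHIFT = 28. */
	u32 base = ROGUE_RISCVFW_REGION_BASE(BOOTLDR_CODE);
	/* BOOTLDR_CODE is enum index 12, so base = 12 << 28 = 0xC0000000. */
	u32 remap = ROGUE_RISCVFW_REGION_REMAP_CR(BOOTLDR_CODE);
	/* remap = ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0 + 12 * 8, i.e. the thirteenth remap register. */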
@@ -1138,7 +1138,6 @@ static int mcde_dsi_bind(struct device *dev, struct device *master,
	d->bridge_out = bridge;

	/* Create a bridge for this DSI channel */
	d->bridge.funcs = &mcde_dsi_bridge_funcs;
	d->bridge.of_node = dev->of_node;
	drm_bridge_add(&d->bridge);

@@ -1174,9 +1173,9 @@ static int mcde_dsi_probe(struct platform_device *pdev)
	u32 dsi_id;
	int ret;

	d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;
	d = devm_drm_bridge_alloc(dev, struct mcde_dsi, bridge, &mcde_dsi_bridge_funcs);
	if (IS_ERR(d))
		return PTR_ERR(d);
	d->dev = dev;
	platform_set_drvdata(pdev, d);

@@ -296,14 +296,15 @@ int msm_dp_bridge_init(struct msm_dp *msm_dp_display, struct drm_device *dev,
	struct msm_dp_bridge *msm_dp_bridge;
	struct drm_bridge *bridge;

	msm_dp_bridge = devm_kzalloc(dev->dev, sizeof(*msm_dp_bridge), GFP_KERNEL);
	if (!msm_dp_bridge)
		return -ENOMEM;
	msm_dp_bridge = devm_drm_bridge_alloc(dev->dev, struct msm_dp_bridge, bridge,
					      msm_dp_display->is_edp ? &msm_edp_bridge_ops :
					      &msm_dp_bridge_ops);
	if (IS_ERR(msm_dp_bridge))
		return PTR_ERR(msm_dp_bridge);

	msm_dp_bridge->msm_dp_display = msm_dp_display;

	bridge = &msm_dp_bridge->bridge;
	bridge->funcs = msm_dp_display->is_edp ? &msm_edp_bridge_ops : &msm_dp_bridge_ops;
	bridge->type = msm_dp_display->connector_type;
	bridge->ycbcr_420_allowed = yuv_supported;

@@ -462,15 +462,14 @@ int msm_dsi_manager_connector_init(struct msm_dsi *msm_dsi,
	struct drm_connector *connector;
	int ret;

	dsi_bridge = devm_kzalloc(msm_dsi->dev->dev,
				  sizeof(*dsi_bridge), GFP_KERNEL);
	if (!dsi_bridge)
		return -ENOMEM;
	dsi_bridge = devm_drm_bridge_alloc(msm_dsi->dev->dev, struct dsi_bridge, base,
					   &dsi_mgr_bridge_funcs);
	if (IS_ERR(dsi_bridge))
		return PTR_ERR(dsi_bridge);

	dsi_bridge->id = msm_dsi->id;

	bridge = &dsi_bridge->base;
	bridge->funcs = &dsi_mgr_bridge_funcs;

	ret = devm_drm_bridge_add(msm_dsi->dev->dev, bridge);
	if (ret)
@@ -498,16 +498,15 @@ int msm_hdmi_bridge_init(struct hdmi *hdmi)
	struct hdmi_bridge *hdmi_bridge;
	int ret;

	hdmi_bridge = devm_kzalloc(hdmi->dev->dev,
				   sizeof(*hdmi_bridge), GFP_KERNEL);
	if (!hdmi_bridge)
		return -ENOMEM;
	hdmi_bridge = devm_drm_bridge_alloc(hdmi->dev->dev, struct hdmi_bridge, base,
					    &msm_hdmi_bridge_funcs);
	if (IS_ERR(hdmi_bridge))
		return PTR_ERR(hdmi_bridge);

	hdmi_bridge->hdmi = hdmi;
	INIT_WORK(&hdmi_bridge->hpd_work, msm_hdmi_hotplug_work);

	bridge = &hdmi_bridge->base;
	bridge->funcs = &msm_hdmi_bridge_funcs;
	bridge->ddc = hdmi->i2c;
	bridge->type = DRM_MODE_CONNECTOR_HDMIA;
	bridge->vendor = "Qualcomm";
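The mcde and msm conversions above all follow the same shape: the devm_kzalloc() plus manual funcs assignment is replaced by a single devm_drm_bridge_alloc() call that allocates the wrapper struct and wires up the embedded drm_bridge. A minimal sketch of the pattern with a hypothetical foo_bridge wrapper (only the devm_drm_bridge_alloc() call shape is taken from the hunks above):

	struct foo_bridge {
		struct drm_bridge base;	/* embedded bridge, named in the macro call */
		void *priv;
	};

	static const struct drm_bridge_funcs foo_bridge_funcs = { /* ... */ };

	static int foo_bridge_init(struct device *dev)
	{
		struct foo_bridge *foo;

		/* Allocates struct foo_bridge, sets foo->base.funcs, devres-managed. */
		foo = devm_drm_bridge_alloc(dev, struct foo_bridge, base, &foo_bridge_funcs);
		if (IS_ERR(foo))
			return PTR_ERR(foo);

		/* Remaining bridge fields are still set by hand, as in the hunks above. */
		foo->base.of_node = dev->of_node;
		drm_bridge_add(&foo->base);
		return 0;
	}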
@@ -671,7 +671,7 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
		ret = msm_ioctl_gem_info_set_iova(dev, file, obj, args->value);
		break;
	case MSM_INFO_GET_FLAGS:
		if (obj->import_attach) {
		if (drm_gem_is_imported(obj)) {
			ret = -EINVAL;
			break;
		}
@@ -735,7 +735,7 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)

	msm_gem_assert_locked(obj);

	if (obj->import_attach)
	if (drm_gem_is_imported(obj))
		return ERR_PTR(-ENODEV);

	pages = msm_gem_get_pages_locked(obj, madv);
@@ -1074,7 +1074,7 @@ static void msm_gem_free_object(struct drm_gem_object *obj)

	put_iova_spaces(obj, true);

	if (obj->import_attach) {
	if (drm_gem_is_imported(obj)) {
		GEM_WARN_ON(msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
@@ -224,7 +224,7 @@ msm_gem_assert_locked(struct drm_gem_object *obj)
/* imported/exported objects are not purgeable: */
static inline bool is_unpurgeable(struct msm_gem_object *msm_obj)
{
	return msm_obj->base.import_attach || msm_obj->pin_count;
	return drm_gem_is_imported(&msm_obj->base) || msm_obj->pin_count;
}

static inline bool is_purgeable(struct msm_gem_object *msm_obj)
@@ -50,7 +50,7 @@ int msm_gem_prime_pin(struct drm_gem_object *obj)
	struct page **pages;
	int ret = 0;

	if (obj->import_attach)
	if (drm_gem_is_imported(obj))
		return 0;

	pages = msm_gem_pin_pages_locked(obj);
@@ -62,7 +62,7 @@ int msm_gem_prime_pin(struct drm_gem_object *obj)

void msm_gem_prime_unpin(struct drm_gem_object *obj)
{
	if (obj->import_attach)
	if (drm_gem_is_imported(obj))
		return;

	msm_gem_unpin_pages_locked(obj);
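These msm hunks are part of the tree-wide move from peeking at obj->import_attach to the drm_gem_is_imported() helper. A minimal sketch of the intended call pattern in a hypothetical driver helper (only drm_gem_is_imported() comes from the hunks; the function around it is illustrative):

	#include <drm/drm_gem.h>

	/* Refuse CPU-side page operations on imported dma-buf objects. */
	static int foo_gem_sync_to_cpu(struct drm_gem_object *obj)
	{
		if (drm_gem_is_imported(obj))
			return -EINVAL;	/* the importer does not own the backing pages */

		/* ... operate on locally allocated pages ... */
		return 0;
	}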
@@ -776,7 +776,7 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc,
	const u8 rekey = 56; /* binary driver, and tegra, constant */
	u32 max_ac_packet;
	DEFINE_RAW_FLEX(struct nvif_outp_infoframe_v0, args, data, 17);
	const u8 data_len = __struct_size(args) - sizeof(*args);
	const u8 data_len = __member_size(args->data);
	int ret, size;

	max_ac_packet = mode->htotal - mode->hdisplay;
@ -270,10 +270,7 @@ nouveau_channel_ctor(struct nouveau_cli *cli, bool priv, u64 runm,
|
|||
{ NV03_CHANNEL_DMA , 0 },
|
||||
{}
|
||||
};
|
||||
struct {
|
||||
struct nvif_chan_v0 chan;
|
||||
char name[TASK_COMM_LEN+16];
|
||||
} args;
|
||||
DEFINE_RAW_FLEX(struct nvif_chan_v0, args, name, TASK_COMM_LEN + 16);
|
||||
struct nvif_device *device = &cli->device;
|
||||
struct nouveau_channel *chan;
|
||||
const u64 plength = 0x10000;
|
||||
|
@ -298,28 +295,28 @@ nouveau_channel_ctor(struct nouveau_cli *cli, bool priv, u64 runm,
|
|||
return ret;
|
||||
|
||||
/* create channel object */
|
||||
args.chan.version = 0;
|
||||
args.chan.namelen = sizeof(args.name);
|
||||
args.chan.runlist = __ffs64(runm);
|
||||
args.chan.runq = 0;
|
||||
args.chan.priv = priv;
|
||||
args.chan.devm = BIT(0);
|
||||
args->version = 0;
|
||||
args->namelen = __member_size(args->name);
|
||||
args->runlist = __ffs64(runm);
|
||||
args->runq = 0;
|
||||
args->priv = priv;
|
||||
args->devm = BIT(0);
|
||||
if (hosts[cid].oclass < NV50_CHANNEL_GPFIFO) {
|
||||
args.chan.vmm = 0;
|
||||
args.chan.ctxdma = nvif_handle(&chan->push.ctxdma);
|
||||
args.chan.offset = chan->push.addr;
|
||||
args.chan.length = 0;
|
||||
args->vmm = 0;
|
||||
args->ctxdma = nvif_handle(&chan->push.ctxdma);
|
||||
args->offset = chan->push.addr;
|
||||
args->length = 0;
|
||||
} else {
|
||||
args.chan.vmm = nvif_handle(&chan->vmm->vmm.object);
|
||||
args->vmm = nvif_handle(&chan->vmm->vmm.object);
|
||||
if (hosts[cid].oclass < FERMI_CHANNEL_GPFIFO)
|
||||
args.chan.ctxdma = nvif_handle(&chan->push.ctxdma);
|
||||
args->ctxdma = nvif_handle(&chan->push.ctxdma);
|
||||
else
|
||||
args.chan.ctxdma = 0;
|
||||
args.chan.offset = ioffset + chan->push.addr;
|
||||
args.chan.length = ilength;
|
||||
args->ctxdma = 0;
|
||||
args->offset = ioffset + chan->push.addr;
|
||||
args->length = ilength;
|
||||
}
|
||||
args.chan.huserd = 0;
|
||||
args.chan.ouserd = 0;
|
||||
args->huserd = 0;
|
||||
args->ouserd = 0;
|
||||
|
||||
/* allocate userd */
|
||||
if (hosts[cid].oclass >= VOLTA_CHANNEL_GPFIFO_A) {
|
||||
|
@ -329,27 +326,28 @@ nouveau_channel_ctor(struct nouveau_cli *cli, bool priv, u64 runm,
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
args.chan.huserd = nvif_handle(&chan->mem_userd.object);
|
||||
args.chan.ouserd = 0;
|
||||
args->huserd = nvif_handle(&chan->mem_userd.object);
|
||||
args->ouserd = 0;
|
||||
|
||||
chan->userd = &chan->mem_userd.object;
|
||||
} else {
|
||||
chan->userd = &chan->user;
|
||||
}
|
||||
|
||||
snprintf(args.name, sizeof(args.name), "%s[%d]", current->comm, task_pid_nr(current));
|
||||
snprintf(args->name, __member_size(args->name), "%s[%d]",
|
||||
current->comm, task_pid_nr(current));
|
||||
|
||||
ret = nvif_object_ctor(&device->object, "abi16ChanUser", 0, hosts[cid].oclass,
|
||||
&args, sizeof(args), &chan->user);
|
||||
args, __struct_size(args), &chan->user);
|
||||
if (ret) {
|
||||
nouveau_channel_del(pchan);
|
||||
return ret;
|
||||
}
|
||||
|
||||
chan->runlist = args.chan.runlist;
|
||||
chan->chid = args.chan.chid;
|
||||
chan->inst = args.chan.inst;
|
||||
chan->token = args.chan.token;
|
||||
chan->runlist = args->runlist;
|
||||
chan->chid = args->chid;
|
||||
chan->inst = args->inst;
|
||||
chan->token = args->token;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -367,17 +365,17 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
|
|||
return ret;
|
||||
|
||||
if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO) {
|
||||
struct {
|
||||
struct nvif_event_v0 base;
|
||||
struct nvif_chan_event_v0 host;
|
||||
} args;
|
||||
DEFINE_RAW_FLEX(struct nvif_event_v0, args, data,
|
||||
sizeof(struct nvif_chan_event_v0));
|
||||
struct nvif_chan_event_v0 *host =
|
||||
(struct nvif_chan_event_v0 *)args->data;
|
||||
|
||||
args.host.version = 0;
|
||||
args.host.type = NVIF_CHAN_EVENT_V0_KILLED;
|
||||
host->version = 0;
|
||||
host->type = NVIF_CHAN_EVENT_V0_KILLED;
|
||||
|
||||
ret = nvif_event_ctor(&chan->user, "abi16ChanKilled", chan->chid,
|
||||
nouveau_channel_killed, false,
|
||||
&args.base, sizeof(args), &chan->kill);
|
||||
args, __struct_size(args), &chan->kill);
|
||||
if (ret == 0)
|
||||
ret = nvif_event_allow(&chan->kill);
|
||||
if (ret) {
|
||||
|
@ -520,46 +518,44 @@ nouveau_channels_fini(struct nouveau_drm *drm)
|
|||
int
|
||||
nouveau_channels_init(struct nouveau_drm *drm)
|
||||
{
|
||||
struct {
|
||||
struct nv_device_info_v1 m;
|
||||
struct {
|
||||
struct nv_device_info_v1_data channels;
|
||||
struct nv_device_info_v1_data runlists;
|
||||
} v;
|
||||
} args = {
|
||||
.m.version = 1,
|
||||
.m.count = sizeof(args.v) / sizeof(args.v.channels),
|
||||
.v.channels.mthd = NV_DEVICE_HOST_CHANNELS,
|
||||
.v.runlists.mthd = NV_DEVICE_HOST_RUNLISTS,
|
||||
};
|
||||
DEFINE_RAW_FLEX(struct nv_device_info_v1, args, data, 2);
|
||||
struct nv_device_info_v1_data *channels = &args->data[0];
|
||||
struct nv_device_info_v1_data *runlists = &args->data[1];
|
||||
struct nvif_object *device = &drm->client.device.object;
|
||||
int ret, i;
|
||||
|
||||
ret = nvif_object_mthd(device, NV_DEVICE_V0_INFO, &args, sizeof(args));
|
||||
args->version = 1;
|
||||
args->count = __member_size(args->data) / sizeof(*args->data);
|
||||
channels->mthd = NV_DEVICE_HOST_CHANNELS;
|
||||
runlists->mthd = NV_DEVICE_HOST_RUNLISTS;
|
||||
|
||||
ret = nvif_object_mthd(device, NV_DEVICE_V0_INFO, args,
|
||||
__struct_size(args));
|
||||
if (ret ||
|
||||
args.v.runlists.mthd == NV_DEVICE_INFO_INVALID || !args.v.runlists.data ||
|
||||
args.v.channels.mthd == NV_DEVICE_INFO_INVALID)
|
||||
runlists->mthd == NV_DEVICE_INFO_INVALID || !runlists->data ||
|
||||
channels->mthd == NV_DEVICE_INFO_INVALID)
|
||||
return -ENODEV;
|
||||
|
||||
drm->chan_nr = drm->chan_total = args.v.channels.data;
|
||||
drm->runl_nr = fls64(args.v.runlists.data);
|
||||
drm->chan_nr = drm->chan_total = channels->data;
|
||||
drm->runl_nr = fls64(runlists->data);
|
||||
drm->runl = kcalloc(drm->runl_nr, sizeof(*drm->runl), GFP_KERNEL);
|
||||
if (!drm->runl)
|
||||
return -ENOMEM;
|
||||
|
||||
if (drm->chan_nr == 0) {
|
||||
for (i = 0; i < drm->runl_nr; i++) {
|
||||
if (!(args.v.runlists.data & BIT(i)))
|
||||
if (!(runlists->data & BIT(i)))
|
||||
continue;
|
||||
|
||||
args.v.channels.mthd = NV_DEVICE_HOST_RUNLIST_CHANNELS;
|
||||
args.v.channels.data = i;
|
||||
channels->mthd = NV_DEVICE_HOST_RUNLIST_CHANNELS;
|
||||
channels->data = i;
|
||||
|
||||
ret = nvif_object_mthd(device, NV_DEVICE_V0_INFO, &args, sizeof(args));
|
||||
if (ret || args.v.channels.mthd == NV_DEVICE_INFO_INVALID)
|
||||
ret = nvif_object_mthd(device, NV_DEVICE_V0_INFO, args,
|
||||
__struct_size(args));
|
||||
if (ret || channels->mthd == NV_DEVICE_INFO_INVALID)
|
||||
return -ENODEV;
|
||||
|
||||
drm->runl[i].chan_nr = args.v.channels.data;
|
||||
drm->runl[i].chan_nr = channels->data;
|
||||
drm->runl[i].chan_id_base = drm->chan_total;
|
||||
drm->runl[i].context_base = dma_fence_context_alloc(drm->runl[i].chan_nr);
|
||||
|
||||
|
|
|
@@ -198,7 +198,7 @@ nvif_outp_hda_eld(struct nvif_outp *outp, int head, void *data, u32 size)
	DEFINE_RAW_FLEX(struct nvif_outp_hda_eld_v0, mthd, data, 128);
	int ret;

	if (WARN_ON(size > (__struct_size(mthd) - sizeof(*mthd))))
	if (WARN_ON(size > __member_size(mthd->data)))
		return -EINVAL;

	mthd->version = 0;
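The nouveau/nvif changes in this series replace open-coded "header struct plus trailing buffer" stack layouts with DEFINE_RAW_FLEX() and size them via __struct_size()/__member_size(). A compact sketch of the idiom using a hypothetical struct with a flexible array member (the macro and the two size helpers are the ones used in the hunks above; foo_msg and foo_send() are placeholders):

	struct foo_msg {
		u8 version;
		u8 count;
		u8 data[];			/* flexible array member */
	};

	/* On-stack object with room for 16 bytes of trailing data. */
	DEFINE_RAW_FLEX(struct foo_msg, msg, data, 16);

	msg->version = 0;
	msg->count = __member_size(msg->data);	/* 16: size of the trailing buffer */
	/* __struct_size(msg) is sizeof(struct foo_msg) + 16, the size handed to the consumer. */
	foo_send(msg, __struct_size(msg));	/* foo_send() is a placeholder */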
@@ -154,6 +154,17 @@ config DRM_PANEL_LVDS
	  handling of power supplies or control signals. It implements automatic
	  backlight handling if the panel is attached to a backlight controller.

config DRM_PANEL_HIMAX_HX8279
	tristate "Himax HX8279-based panels"
	depends on OF
	depends on DRM_MIPI_DSI
	depends on BACKLIGHT_CLASS_DEVICE
	help
	  Say Y if you want to enable support for panels based on the
	  Himax HX8279 controller, such as the Startek KD070FHFID078
	  7.0" 1200x1920 IPS LCD panel that uses a MIPI-DSI interface
	  and others.

config DRM_PANEL_HIMAX_HX83102
	tristate "Himax HX83102-based panels"
	depends on OF
@@ -996,6 +1007,15 @@ config DRM_PANEL_TRULY_NT35597_WQXGA
	  Say Y here if you want to enable support for Truly NT35597 WQXGA Dual DSI
	  Video Mode panel

config DRM_PANEL_VISIONOX_G2647FB105
	tristate "Visionox G2647FB105"
	depends on OF
	depends on DRM_MIPI_DSI
	depends on BACKLIGHT_CLASS_DEVICE
	help
	  Say Y here if you want to enable support for the Visionox
	  G2647FB105 (2340x1080@60Hz) AMOLED DSI cmd mode panel.

config DRM_PANEL_VISIONOX_R66451
	tristate "Visionox R66451"
	depends on OF
@@ -16,6 +16,7 @@ obj-$(CONFIG_DRM_PANEL_EBBG_FT8719) += panel-ebbg-ft8719.o
obj-$(CONFIG_DRM_PANEL_ELIDA_KD35T133) += panel-elida-kd35t133.o
obj-$(CONFIG_DRM_PANEL_FEIXIN_K101_IM2BA02) += panel-feixin-k101-im2ba02.o
obj-$(CONFIG_DRM_PANEL_FEIYANG_FY07024DI26A30D) += panel-feiyang-fy07024di26a30d.o
obj-$(CONFIG_DRM_PANEL_HIMAX_HX8279) += panel-himax-hx8279.o
obj-$(CONFIG_DRM_PANEL_HIMAX_HX83102) += panel-himax-hx83102.o
obj-$(CONFIG_DRM_PANEL_HIMAX_HX83112A) += panel-himax-hx83112a.o
obj-$(CONFIG_DRM_PANEL_HIMAX_HX8394) += panel-himax-hx8394.o
@@ -101,6 +102,7 @@ obj-$(CONFIG_DRM_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o
obj-$(CONFIG_DRM_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o
obj-$(CONFIG_DRM_PANEL_TPO_TPG110) += panel-tpo-tpg110.o
obj-$(CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA) += panel-truly-nt35597.o
obj-$(CONFIG_DRM_PANEL_VISIONOX_G2647FB105) += panel-visionox-g2647fb105.o
obj-$(CONFIG_DRM_PANEL_VISIONOX_RM69299) += panel-visionox-rm69299.o
obj-$(CONFIG_DRM_PANEL_VISIONOX_RM692E5) += panel-visionox-rm692e5.o
obj-$(CONFIG_DRM_PANEL_VISIONOX_VTDR6130) += panel-visionox-vtdr6130.o
@ -55,77 +55,56 @@ static void boe_bf060y8m_aj0_reset(struct boe_bf060y8m_aj0 *boe)
|
|||
static int boe_bf060y8m_aj0_on(struct boe_bf060y8m_aj0 *boe)
|
||||
{
|
||||
struct mipi_dsi_device *dsi = boe->dsi;
|
||||
struct device *dev = &dsi->dev;
|
||||
int ret;
|
||||
struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
|
||||
|
||||
mipi_dsi_dcs_write_seq(dsi, 0xb0, 0xa5, 0x00);
|
||||
mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x00, 0x4c);
|
||||
mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_3D_CONTROL, 0x10);
|
||||
mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, DCS_ALLOW_HBM_RANGE);
|
||||
mipi_dsi_dcs_write_seq(dsi, 0xf8,
|
||||
0x00, 0x08, 0x10, 0x00, 0x22, 0x00, 0x00, 0x2d);
|
||||
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0xa5, 0x00);
|
||||
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb2, 0x00, 0x4c);
|
||||
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_3D_CONTROL, 0x10);
|
||||
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_POWER_SAVE, DCS_ALLOW_HBM_RANGE);
|
||||
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf8,
|
||||
0x00, 0x08, 0x10, 0x00, 0x22, 0x00, 0x00, 0x2d);
|
||||
|
||||
ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
|
||||
if (ret < 0) {
|
||||
dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
msleep(30);
|
||||
mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
|
||||
mipi_dsi_msleep(&dsi_ctx, 30);
|
||||
|
||||
mipi_dsi_dcs_write_seq(dsi, 0xb0, 0xa5, 0x00);
|
||||
mipi_dsi_dcs_write_seq(dsi, 0xc0,
|
||||
0x08, 0x48, 0x65, 0x33, 0x33, 0x33,
|
||||
0x2a, 0x31, 0x39, 0x20, 0x09);
|
||||
mipi_dsi_dcs_write_seq(dsi, 0xc1, 0x00, 0x00, 0x00, 0x1f, 0x1f,
|
||||
0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f,
|
||||
0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
|
||||
mipi_dsi_dcs_write_seq(dsi, 0xe2, 0x20, 0x04, 0x10, 0x12, 0x92,
|
||||
0x4f, 0x8f, 0x44, 0x84, 0x83, 0x83, 0x83,
|
||||
0x5c, 0x5c, 0x5c);
|
||||
mipi_dsi_dcs_write_seq(dsi, 0xde, 0x01, 0x2c, 0x00, 0x77, 0x3e);
|
||||
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0xa5, 0x00);
|
||||
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc0,
|
||||
0x08, 0x48, 0x65, 0x33, 0x33, 0x33,
|
||||
0x2a, 0x31, 0x39, 0x20, 0x09);
|
||||
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc1, 0x00, 0x00, 0x00, 0x1f, 0x1f,
|
||||
0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f,
|
||||
0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
|
||||
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe2, 0x20, 0x04, 0x10, 0x12, 0x92,
|
||||
0x4f, 0x8f, 0x44, 0x84, 0x83, 0x83, 0x83,
|
||||
0x5c, 0x5c, 0x5c);
|
||||
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xde, 0x01, 0x2c, 0x00, 0x77, 0x3e);
|
||||
|
||||
msleep(30);
|
||||
mipi_dsi_msleep(&dsi_ctx, 30);
|
||||
|
||||
ret = mipi_dsi_dcs_set_display_on(dsi);
|
||||
if (ret < 0) {
|
||||
dev_err(dev, "Failed to set display on: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
msleep(50);
|
||||
mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
|
||||
mipi_dsi_msleep(&dsi_ctx, 50);
|
||||
|
||||
return 0;
|
||||
return dsi_ctx.accum_err;
|
||||
}
|
||||
|
||||
static int boe_bf060y8m_aj0_off(struct boe_bf060y8m_aj0 *boe)
|
||||
static void boe_bf060y8m_aj0_off(struct boe_bf060y8m_aj0 *boe)
|
||||
{
|
||||
struct mipi_dsi_device *dsi = boe->dsi;
|
||||
struct device *dev = &dsi->dev;
|
||||
int ret;
|
||||
struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
|
||||
|
||||
/* OFF commands sent in HS mode */
|
||||
dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
|
||||
ret = mipi_dsi_dcs_set_display_off(dsi);
|
||||
if (ret < 0) {
|
||||
dev_err(dev, "Failed to set display off: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
msleep(20);
|
||||
mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
|
||||
mipi_dsi_msleep(&dsi_ctx, 20);
|
||||
|
||||
ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
|
||||
if (ret < 0) {
|
||||
dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
usleep_range(1000, 2000);
|
||||
mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
|
||||
mipi_dsi_usleep_range(&dsi_ctx, 1000, 2000);
|
||||
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int boe_bf060y8m_aj0_prepare(struct drm_panel *panel)
|
||||
{
|
||||
struct boe_bf060y8m_aj0 *boe = to_boe_bf060y8m_aj0(panel);
|
||||
struct device *dev = &boe->dsi->dev;
|
||||
int ret;
|
||||
|
||||
/*
|
||||
|
@ -157,13 +136,14 @@ static int boe_bf060y8m_aj0_prepare(struct drm_panel *panel)
|
|||
|
||||
ret = boe_bf060y8m_aj0_on(boe);
|
||||
if (ret < 0) {
|
||||
dev_err(dev, "Failed to initialize panel: %d\n", ret);
|
||||
gpiod_set_value_cansleep(boe->reset_gpio, 1);
|
||||
return ret;
|
||||
goto err_on;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
err_on:
|
||||
regulator_disable(boe->vregs[BF060Y8M_VREG_VCI].consumer);
|
||||
err_vci:
|
||||
regulator_disable(boe->vregs[BF060Y8M_VREG_VDDIO].consumer);
|
||||
err_vddio:
|
||||
|
@ -178,15 +158,11 @@ err_elvss:
|
|||
static int boe_bf060y8m_aj0_unprepare(struct drm_panel *panel)
|
||||
{
|
||||
struct boe_bf060y8m_aj0 *boe = to_boe_bf060y8m_aj0(panel);
|
||||
struct device *dev = &boe->dsi->dev;
|
||||
int ret;
|
||||
|
||||
ret = boe_bf060y8m_aj0_off(boe);
|
||||
if (ret < 0)
|
||||
dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
|
||||
boe_bf060y8m_aj0_off(boe);
|
||||
|
||||
gpiod_set_value_cansleep(boe->reset_gpio, 1);
|
||||
ret = regulator_bulk_disable(ARRAY_SIZE(boe->vregs), boe->vregs);
|
||||
regulator_bulk_disable(ARRAY_SIZE(boe->vregs), boe->vregs);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -234,13 +210,11 @@ static int boe_bf060y8m_aj0_bl_update_status(struct backlight_device *bl)
|
|||
{
|
||||
struct mipi_dsi_device *dsi = bl_get_data(bl);
|
||||
u16 brightness = backlight_get_brightness(bl);
|
||||
int ret;
|
||||
struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
|
||||
|
||||
ret = mipi_dsi_dcs_set_display_brightness(dsi, brightness);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, brightness);
|
||||
|
||||
return 0;
|
||||
return dsi_ctx.accum_err;
|
||||
}
|
||||
|
||||
static int boe_bf060y8m_aj0_bl_get_brightness(struct backlight_device *bl)
|
||||
|
|
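The boe-bf060y8m-aj0 conversion above (and the sofef00 and visionox conversions further down) all rely on the same mipi_dsi_multi_context pattern: once an error is recorded in accum_err, each *_multi helper becomes a no-op, so the init sequence only needs a single error check at the end. A minimal sketch of that shape for a hypothetical panel (the command bytes are placeholders, not taken from any datasheet):

	static int foo_panel_on(struct mipi_dsi_device *dsi)
	{
		struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };

		/* Each call skips itself if dsi_ctx.accum_err is already set. */
		mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0xa5, 0x00);	/* placeholder command */
		mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
		mipi_dsi_msleep(&dsi_ctx, 30);
		mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);

		/* One check instead of one per command. */
		return dsi_ctx.accum_err;
	}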
1296
drivers/gpu/drm/panel/panel-himax-hx8279.c
Normal file
File diff suppressed because it is too large
|
@ -413,15 +413,10 @@ static int panel_nv3051d_probe(struct mipi_dsi_device *dsi)
|
|||
static void panel_nv3051d_shutdown(struct mipi_dsi_device *dsi)
|
||||
{
|
||||
struct panel_nv3051d *ctx = mipi_dsi_get_drvdata(dsi);
|
||||
int ret;
|
||||
|
||||
ret = drm_panel_unprepare(&ctx->panel);
|
||||
if (ret < 0)
|
||||
dev_err(&dsi->dev, "Failed to unprepare panel: %d\n", ret);
|
||||
drm_panel_unprepare(&ctx->panel);
|
||||
|
||||
ret = drm_panel_disable(&ctx->panel);
|
||||
if (ret < 0)
|
||||
dev_err(&dsi->dev, "Failed to disable panel: %d\n", ret);
|
||||
drm_panel_disable(&ctx->panel);
|
||||
}
|
||||
|
||||
static void panel_nv3051d_remove(struct mipi_dsi_device *dsi)
|
||||
|
|
|
@ -22,7 +22,6 @@ struct sofef00_panel {
|
|||
struct mipi_dsi_device *dsi;
|
||||
struct regulator *supply;
|
||||
struct gpio_desc *reset_gpio;
|
||||
const struct drm_display_mode *mode;
|
||||
};
|
||||
|
||||
static inline
|
||||
|
@ -44,66 +43,44 @@ static void sofef00_panel_reset(struct sofef00_panel *ctx)
|
|||
static int sofef00_panel_on(struct sofef00_panel *ctx)
|
||||
{
|
||||
struct mipi_dsi_device *dsi = ctx->dsi;
|
||||
struct device *dev = &dsi->dev;
|
||||
int ret;
|
||||
struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
|
||||
|
||||
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
|
||||
|
||||
ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
|
||||
if (ret < 0) {
|
||||
dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
usleep_range(10000, 11000);
|
||||
mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
|
||||
mipi_dsi_usleep_range(&dsi_ctx, 10000, 11000);
|
||||
|
||||
mipi_dsi_dcs_write_seq(dsi, 0xf0, 0x5a, 0x5a);
|
||||
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0x5a, 0x5a);
|
||||
|
||||
ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
|
||||
if (ret < 0) {
|
||||
dev_err(dev, "Failed to set tear on: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
|
||||
|
||||
mipi_dsi_dcs_write_seq(dsi, 0xf0, 0xa5, 0xa5);
|
||||
mipi_dsi_dcs_write_seq(dsi, 0xf0, 0x5a, 0x5a);
|
||||
mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x07);
|
||||
mipi_dsi_dcs_write_seq(dsi, 0xb6, 0x12);
|
||||
mipi_dsi_dcs_write_seq(dsi, 0xf0, 0xa5, 0xa5);
|
||||
mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20);
|
||||
mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
|
||||
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0xa5, 0xa5);
|
||||
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0x5a, 0x5a);
|
||||
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x07);
|
||||
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb6, 0x12);
|
||||
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0xa5, 0xa5);
|
||||
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20);
|
||||
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
|
||||
|
||||
ret = mipi_dsi_dcs_set_display_on(dsi);
|
||||
if (ret < 0) {
|
||||
dev_err(dev, "Failed to set display on: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
|
||||
|
||||
return 0;
|
||||
return dsi_ctx.accum_err;
|
||||
}
|
||||
|
||||
static int sofef00_panel_off(struct sofef00_panel *ctx)
|
||||
{
|
||||
struct mipi_dsi_device *dsi = ctx->dsi;
|
||||
struct device *dev = &dsi->dev;
|
||||
int ret;
|
||||
struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
|
||||
|
||||
dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
|
||||
|
||||
ret = mipi_dsi_dcs_set_display_off(dsi);
|
||||
if (ret < 0) {
|
||||
dev_err(dev, "Failed to set display off: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
msleep(40);
|
||||
mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
|
||||
mipi_dsi_msleep(&dsi_ctx, 40);
|
||||
|
||||
ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
|
||||
if (ret < 0) {
|
||||
dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
msleep(160);
|
||||
mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
|
||||
mipi_dsi_msleep(&dsi_ctx, 160);
|
||||
|
||||
return 0;
|
||||
return dsi_ctx.accum_err;
|
||||
}
|
||||
|
||||
static int sofef00_panel_prepare(struct drm_panel *panel)
|
||||
|
@ -122,7 +99,6 @@ static int sofef00_panel_prepare(struct drm_panel *panel)
|
|||
|
||||
ret = sofef00_panel_on(ctx);
|
||||
if (ret < 0) {
|
||||
dev_err(dev, "Failed to initialize panel: %d\n", ret);
|
||||
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
|
||||
return ret;
|
||||
}
|
||||
|
@ -133,13 +109,8 @@ static int sofef00_panel_prepare(struct drm_panel *panel)
|
|||
static int sofef00_panel_unprepare(struct drm_panel *panel)
|
||||
{
|
||||
struct sofef00_panel *ctx = to_sofef00_panel(panel);
|
||||
struct device *dev = &ctx->dsi->dev;
|
||||
int ret;
|
||||
|
||||
ret = sofef00_panel_off(ctx);
|
||||
if (ret < 0)
|
||||
dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
|
||||
|
||||
sofef00_panel_off(ctx);
|
||||
regulator_disable(ctx->supply);
|
||||
|
||||
return 0;
|
||||
|
@ -159,26 +130,11 @@ static const struct drm_display_mode enchilada_panel_mode = {
|
|||
.height_mm = 145,
|
||||
};
|
||||
|
||||
static const struct drm_display_mode fajita_panel_mode = {
|
||||
.clock = (1080 + 72 + 16 + 36) * (2340 + 32 + 4 + 18) * 60 / 1000,
|
||||
.hdisplay = 1080,
|
||||
.hsync_start = 1080 + 72,
|
||||
.hsync_end = 1080 + 72 + 16,
|
||||
.htotal = 1080 + 72 + 16 + 36,
|
||||
.vdisplay = 2340,
|
||||
.vsync_start = 2340 + 32,
|
||||
.vsync_end = 2340 + 32 + 4,
|
||||
.vtotal = 2340 + 32 + 4 + 18,
|
||||
.width_mm = 68,
|
||||
.height_mm = 145,
|
||||
};
|
||||
|
||||
static int sofef00_panel_get_modes(struct drm_panel *panel, struct drm_connector *connector)
|
||||
{
|
||||
struct drm_display_mode *mode;
|
||||
struct sofef00_panel *ctx = to_sofef00_panel(panel);
|
||||
|
||||
mode = drm_mode_duplicate(connector->dev, ctx->mode);
|
||||
mode = drm_mode_duplicate(connector->dev, &enchilada_panel_mode);
|
||||
if (!mode)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -239,13 +195,6 @@ static int sofef00_panel_probe(struct mipi_dsi_device *dsi)
|
|||
if (!ctx)
|
||||
return -ENOMEM;
|
||||
|
||||
ctx->mode = of_device_get_match_data(dev);
|
||||
|
||||
if (!ctx->mode) {
|
||||
dev_err(dev, "Missing device mode\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
ctx->supply = devm_regulator_get(dev, "vddio");
|
||||
if (IS_ERR(ctx->supply))
|
||||
return dev_err_probe(dev, PTR_ERR(ctx->supply),
|
||||
|
@ -295,14 +244,7 @@ static void sofef00_panel_remove(struct mipi_dsi_device *dsi)
|
|||
}
|
||||
|
||||
static const struct of_device_id sofef00_panel_of_match[] = {
|
||||
{ // OnePlus 6 / enchilada
|
||||
.compatible = "samsung,sofef00",
|
||||
.data = &enchilada_panel_mode,
|
||||
},
|
||||
{ // OnePlus 6T / fajita
|
||||
.compatible = "samsung,s6e3fc2x01",
|
||||
.data = &fajita_panel_mode,
|
||||
},
|
||||
{ .compatible = "samsung,sofef00" },
|
||||
{ /* sentinel */ }
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, sofef00_panel_of_match);
|
||||
|
|
|
@ -3526,6 +3526,30 @@ static const struct panel_desc newhaven_nhd_43_480272ef_atxl = {
|
|||
.connector_type = DRM_MODE_CONNECTOR_DPI,
|
||||
};
|
||||
|
||||
static const struct drm_display_mode nlt_nl13676bc25_03f_mode = {
|
||||
.clock = 75400,
|
||||
.hdisplay = 1366,
|
||||
.hsync_start = 1366 + 14,
|
||||
.hsync_end = 1366 + 14 + 56,
|
||||
.htotal = 1366 + 14 + 56 + 64,
|
||||
.vdisplay = 768,
|
||||
.vsync_start = 768 + 1,
|
||||
.vsync_end = 768 + 1 + 3,
|
||||
.vtotal = 768 + 1 + 3 + 22,
|
||||
};
|
||||
|
||||
static const struct panel_desc nlt_nl13676bc25_03f = {
|
||||
.modes = &nlt_nl13676bc25_03f_mode,
|
||||
.num_modes = 1,
|
||||
.bpc = 8,
|
||||
.size = {
|
||||
.width = 363,
|
||||
.height = 215,
|
||||
},
|
||||
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
|
||||
.connector_type = DRM_MODE_CONNECTOR_LVDS,
|
||||
};
|
||||
|
||||
static const struct display_timing nlt_nl192108ac18_02d_timing = {
|
||||
.pixelclock = { 130000000, 148350000, 163000000 },
|
||||
.hactive = { 1920, 1920, 1920 },
|
||||
|
@ -4418,10 +4442,10 @@ static const struct panel_desc tianma_tm070jvhg33 = {
|
|||
};
|
||||
|
||||
/*
|
||||
* The datasheet computes total blanking as back porch + front porch, not
|
||||
* including sync pulse width. This is for both H and V. To make the total
|
||||
* blanking and period correct, subtract the pulse width from the front
|
||||
* porch.
|
||||
* The TM070JDHG34-00 datasheet computes total blanking as back porch +
|
||||
* front porch, not including sync pulse width. This is for both H and
|
||||
* V. To make the total blanking and period correct, subtract the pulse
|
||||
* width from the front porch.
|
||||
*
|
||||
* This works well for the Min and Typ values, but for Max values the sync
|
||||
* pulse width is higher than back porch + front porch, so work around that
|
||||
|
@ -4430,6 +4454,10 @@ static const struct panel_desc tianma_tm070jvhg33 = {
|
|||
*
|
||||
* Exact datasheet values are added as a comment where they differ from the
|
||||
* ones implemented for the above reason.
|
||||
*
|
||||
* The P0700WXF1MBAA datasheet is even less detailed, only listing period
|
||||
* and total blanking time, however the resulting values are the same as
|
||||
* the TM070JDHG34-00.
|
||||
*/
|
||||
static const struct display_timing tianma_tm070jdhg34_00_timing = {
|
||||
.pixelclock = { 68400000, 71900000, 78100000 },
|
||||
|
@ -4452,6 +4480,30 @@ static const struct panel_desc tianma_tm070jdhg34_00 = {
|
|||
.width = 150, /* 149.76 */
|
||||
.height = 94, /* 93.60 */
|
||||
},
|
||||
.delay = {
|
||||
.prepare = 15, /* Tp1 */
|
||||
.enable = 150, /* Tp2 */
|
||||
.disable = 150, /* Tp4 */
|
||||
.unprepare = 120, /* Tp3 */
|
||||
},
|
||||
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
|
||||
.connector_type = DRM_MODE_CONNECTOR_LVDS,
|
||||
};
|
||||
|
||||
static const struct panel_desc tianma_p0700wxf1mbaa = {
|
||||
.timings = &tianma_tm070jdhg34_00_timing,
|
||||
.num_timings = 1,
|
||||
.bpc = 8,
|
||||
.size = {
|
||||
.width = 150, /* 149.76 */
|
||||
.height = 94, /* 93.60 */
|
||||
},
|
||||
.delay = {
|
||||
.prepare = 18, /* Tr + Tp1 */
|
||||
.enable = 152, /* Tp2 + Tp5 */
|
||||
.disable = 152, /* Tp6 + Tp4 */
|
||||
.unprepare = 120, /* Tp3 */
|
||||
},
|
||||
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
|
||||
.connector_type = DRM_MODE_CONNECTOR_LVDS,
|
||||
};
|
||||
|
@ -5145,6 +5197,9 @@ static const struct of_device_id platform_of_match[] = {
|
|||
}, {
|
||||
.compatible = "newhaven,nhd-4.3-480272ef-atxl",
|
||||
.data = &newhaven_nhd_43_480272ef_atxl,
|
||||
}, {
|
||||
.compatible = "nlt,nl13676bc25-03f",
|
||||
.data = &nlt_nl13676bc25_03f,
|
||||
}, {
|
||||
.compatible = "nlt,nl192108ac18-02d",
|
||||
.data = &nlt_nl192108ac18_02d,
|
||||
|
@ -5241,6 +5296,9 @@ static const struct of_device_id platform_of_match[] = {
|
|||
}, {
|
||||
.compatible = "tfc,s9700rtwv43tr-01b",
|
||||
.data = &tfc_s9700rtwv43tr_01b,
|
||||
}, {
|
||||
.compatible = "tianma,p0700wxf1mbaa",
|
||||
.data = &tianma_p0700wxf1mbaa,
|
||||
}, {
|
||||
.compatible = "tianma,tm070jdhg30",
|
||||
.data = &tianma_tm070jdhg30,
|
||||
|
|
280
drivers/gpu/drm/panel/panel-visionox-g2647fb105.c
Normal file
|
@ -0,0 +1,280 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Generated with linux-mdss-dsi-panel-driver-generator from vendor device tree:
|
||||
* Copyright (c) 2013, The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2025, Alexander Baransky <sanyapilot496@gmail.com>
|
||||
*/
|
||||
|
||||
#include <linux/backlight.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/gpio/consumer.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/regulator/consumer.h>
|
||||
|
||||
#include <drm/drm_mipi_dsi.h>
|
||||
#include <drm/drm_modes.h>
|
||||
#include <drm/drm_panel.h>
|
||||
|
||||
struct visionox_g2647fb105 {
|
||||
struct drm_panel panel;
|
||||
struct mipi_dsi_device *dsi;
|
||||
struct gpio_desc *reset_gpio;
|
||||
struct regulator_bulk_data *supplies;
|
||||
};
|
||||
|
||||
static const struct regulator_bulk_data visionox_g2647fb105_supplies[] = {
|
||||
{ .supply = "vdd3p3" },
|
||||
{ .supply = "vddio" },
|
||||
{ .supply = "vsn" },
|
||||
{ .supply = "vsp" },
|
||||
};
|
||||
|
||||
static inline
|
||||
struct visionox_g2647fb105 *to_visionox_g2647fb105(struct drm_panel *panel)
|
||||
{
|
||||
return container_of(panel, struct visionox_g2647fb105, panel);
|
||||
}
|
||||
|
||||
static void visionox_g2647fb105_reset(struct visionox_g2647fb105 *ctx)
|
||||
{
|
||||
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
|
||||
usleep_range(1000, 2000);
|
||||
gpiod_set_value_cansleep(ctx->reset_gpio, 0);
|
||||
usleep_range(10000, 11000);
|
||||
}
|
||||
|
||||
static int visionox_g2647fb105_on(struct visionox_g2647fb105 *ctx)
|
||||
{
|
||||
struct mipi_dsi_device *dsi = ctx->dsi;
|
||||
struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
|
||||
|
||||
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x70, 0x04);
|
||||
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0x40);
|
||||
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4d, 0x32);
|
||||
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0x40);
|
||||
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xbe, 0x17);
|
||||
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xbf, 0xbb);
|
||||
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc0, 0xdd);
|
||||
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc1, 0xff);
|
||||
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0xd0);
|
||||
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x03, 0x24);
|
||||
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x04, 0x03);
|
||||
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0x00);
|
||||
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc2, 0x08);
|
||||
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0x00);
|
||||
|
||||
mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
|
||||
mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, 0x0000);
|
||||
mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
|
||||
mipi_dsi_msleep(&dsi_ctx, 100);
|
||||
|
||||
mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
|
||||
|
||||
return dsi_ctx.accum_err;
|
||||
}
|
||||
|
||||
static int visionox_g2647fb105_off(struct visionox_g2647fb105 *ctx)
|
||||
{
|
||||
struct mipi_dsi_device *dsi = ctx->dsi;
|
||||
struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
|
||||
|
||||
mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
|
||||
mipi_dsi_msleep(&dsi_ctx, 50);
|
||||
|
||||
mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
|
||||
mipi_dsi_msleep(&dsi_ctx, 20);
|
||||
|
||||
return dsi_ctx.accum_err;
|
||||
}
|
||||
|
||||
static int visionox_g2647fb105_prepare(struct drm_panel *panel)
|
||||
{
|
||||
struct visionox_g2647fb105 *ctx = to_visionox_g2647fb105(panel);
|
||||
struct device *dev = &ctx->dsi->dev;
|
||||
int ret;
|
||||
|
||||
ret = regulator_bulk_enable(ARRAY_SIZE(visionox_g2647fb105_supplies), ctx->supplies);
|
||||
if (ret < 0) {
|
||||
dev_err(dev, "Failed to enable regulators: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
visionox_g2647fb105_reset(ctx);
|
||||
|
||||
ret = visionox_g2647fb105_on(ctx);
|
||||
if (ret < 0) {
|
||||
dev_err(dev, "Failed to initialize panel: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int visionox_g2647fb105_unprepare(struct drm_panel *panel)
|
||||
{
|
||||
struct visionox_g2647fb105 *ctx = to_visionox_g2647fb105(panel);
|
||||
struct device *dev = &ctx->dsi->dev;
|
||||
int ret;
|
||||
|
||||
ret = visionox_g2647fb105_off(ctx);
|
||||
if (ret < 0)
|
||||
dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
|
||||
|
||||
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
|
||||
regulator_bulk_disable(ARRAY_SIZE(visionox_g2647fb105_supplies), ctx->supplies);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct drm_display_mode visionox_g2647fb105_mode = {
|
||||
.clock = (1080 + 28 + 4 + 36) * (2340 + 8 + 4 + 4) * 60 / 1000,
|
||||
.hdisplay = 1080,
|
||||
.hsync_start = 1080 + 28,
|
||||
.hsync_end = 1080 + 28 + 4,
|
||||
.htotal = 1080 + 28 + 4 + 36,
|
||||
.vdisplay = 2340,
|
||||
.vsync_start = 2340 + 8,
|
||||
.vsync_end = 2340 + 8 + 4,
|
||||
.vtotal = 2340 + 8 + 4 + 4,
|
||||
.width_mm = 69,
|
||||
.height_mm = 149,
|
||||
};
|
||||
|
||||
static int visionox_g2647fb105_get_modes(struct drm_panel *panel,
|
||||
struct drm_connector *connector)
|
||||
{
|
||||
struct drm_display_mode *mode;
|
||||
|
||||
mode = drm_mode_duplicate(connector->dev, &visionox_g2647fb105_mode);
|
||||
if (!mode)
|
||||
return -ENOMEM;
|
||||
|
||||
drm_mode_set_name(mode);
|
||||
|
||||
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
|
||||
connector->display_info.width_mm = mode->width_mm;
|
||||
connector->display_info.height_mm = mode->height_mm;
|
||||
drm_mode_probed_add(connector, mode);
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
static const struct drm_panel_funcs visionox_g2647fb105_panel_funcs = {
|
||||
.prepare = visionox_g2647fb105_prepare,
|
||||
.unprepare = visionox_g2647fb105_unprepare,
|
||||
.get_modes = visionox_g2647fb105_get_modes,
|
||||
};
|
||||
|
||||
static int visionox_g2647fb105_bl_update_status(struct backlight_device *bl)
|
||||
{
|
||||
struct mipi_dsi_device *dsi = bl_get_data(bl);
|
||||
u16 brightness = backlight_get_brightness(bl);
|
||||
int ret;
|
||||
|
||||
dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
|
||||
|
||||
ret = mipi_dsi_dcs_set_display_brightness_large(dsi, brightness);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct backlight_ops visionox_g2647fb105_bl_ops = {
|
||||
.update_status = visionox_g2647fb105_bl_update_status,
|
||||
};
|
||||
|
||||
static struct backlight_device *
|
||||
visionox_g2647fb105_create_backlight(struct mipi_dsi_device *dsi)
|
||||
{
|
||||
struct device *dev = &dsi->dev;
|
||||
const struct backlight_properties props = {
|
||||
.type = BACKLIGHT_RAW,
|
||||
.brightness = 1023,
|
||||
.max_brightness = 2047,
|
||||
};
|
||||
|
||||
return devm_backlight_device_register(dev, dev_name(dev), dev, dsi,
|
||||
&visionox_g2647fb105_bl_ops, &props);
|
||||
}
|
||||
|
||||
static int visionox_g2647fb105_probe(struct mipi_dsi_device *dsi)
|
||||
{
|
||||
struct device *dev = &dsi->dev;
|
||||
struct visionox_g2647fb105 *ctx;
|
||||
int ret;
|
||||
|
||||
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
|
||||
if (!ctx)
|
||||
return -ENOMEM;
|
||||
|
||||
ret = devm_regulator_bulk_get_const(dev,
|
||||
ARRAY_SIZE(visionox_g2647fb105_supplies),
|
||||
visionox_g2647fb105_supplies,
|
||||
&ctx->supplies);
|
||||
if (ret < 0)
|
||||
return dev_err_probe(dev, ret, "Failed to get regulators\n");
|
||||
|
||||
ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
|
||||
if (IS_ERR(ctx->reset_gpio))
|
||||
return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
|
||||
"Failed to get reset-gpios\n");
|
||||
|
||||
ctx->dsi = dsi;
|
||||
mipi_dsi_set_drvdata(dsi, ctx);
|
||||
|
||||
dsi->lanes = 4;
|
||||
dsi->format = MIPI_DSI_FMT_RGB888;
|
||||
dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST |
|
||||
MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM;
|
||||
|
||||
ctx->panel.prepare_prev_first = true;
|
||||
|
||||
drm_panel_init(&ctx->panel, dev, &visionox_g2647fb105_panel_funcs,
|
||||
DRM_MODE_CONNECTOR_DSI);
|
||||
ctx->panel.prepare_prev_first = true;
|
||||
|
||||
ctx->panel.backlight = visionox_g2647fb105_create_backlight(dsi);
|
||||
if (IS_ERR(ctx->panel.backlight))
|
||||
return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight),
|
||||
"Failed to create backlight\n");
|
||||
|
||||
drm_panel_add(&ctx->panel);
|
||||
|
||||
ret = devm_mipi_dsi_attach(dev, dsi);
|
||||
if (ret < 0) {
|
||||
drm_panel_remove(&ctx->panel);
|
||||
return dev_err_probe(dev, ret, "Failed to attach to DSI host\n");
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void visionox_g2647fb105_remove(struct mipi_dsi_device *dsi)
|
||||
{
|
||||
struct visionox_g2647fb105 *ctx = mipi_dsi_get_drvdata(dsi);
|
||||
drm_panel_remove(&ctx->panel);
|
||||
}
|
||||
|
||||
static const struct of_device_id visionox_g2647fb105_of_match[] = {
|
||||
{ .compatible = "visionox,g2647fb105" },
|
||||
{ /* sentinel */ }
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, visionox_g2647fb105_of_match);
|
||||
|
||||
static struct mipi_dsi_driver visionox_g2647fb105_driver = {
|
||||
.probe = visionox_g2647fb105_probe,
|
||||
.remove = visionox_g2647fb105_remove,
|
||||
.driver = {
|
||||
.name = "panel-visionox-g2647fb105",
|
||||
.of_match_table = visionox_g2647fb105_of_match,
|
||||
},
|
||||
};
|
||||
module_mipi_dsi_driver(visionox_g2647fb105_driver);
|
||||
|
||||
MODULE_AUTHOR("Alexander Baransky <sanyapilot496@gmail.com>");
|
||||
MODULE_DESCRIPTION("DRM driver for Visionox G2647FB105 AMOLED DSI panel");
|
||||
MODULE_LICENSE("GPL");
|
|
@ -209,10 +209,20 @@ int panfrost_device_init(struct panfrost_device *pfdev)
|
|||
|
||||
spin_lock_init(&pfdev->cycle_counter.lock);
|
||||
|
||||
err = panfrost_pm_domain_init(pfdev);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
err = panfrost_reset_init(pfdev);
|
||||
if (err) {
|
||||
dev_err(pfdev->dev, "reset init failed %d\n", err);
|
||||
goto out_pm_domain;
|
||||
}
|
||||
|
||||
err = panfrost_clk_init(pfdev);
|
||||
if (err) {
|
||||
dev_err(pfdev->dev, "clk init failed %d\n", err);
|
||||
return err;
|
||||
goto out_reset;
|
||||
}
|
||||
|
||||
err = panfrost_devfreq_init(pfdev);
|
||||
|
@ -229,25 +239,15 @@ int panfrost_device_init(struct panfrost_device *pfdev)
|
|||
goto out_devfreq;
|
||||
}
|
||||
|
||||
err = panfrost_reset_init(pfdev);
|
||||
if (err) {
|
||||
dev_err(pfdev->dev, "reset init failed %d\n", err);
|
||||
goto out_regulator;
|
||||
}
|
||||
|
||||
err = panfrost_pm_domain_init(pfdev);
|
||||
if (err)
|
||||
goto out_reset;
|
||||
|
||||
pfdev->iomem = devm_platform_ioremap_resource(pfdev->pdev, 0);
|
||||
if (IS_ERR(pfdev->iomem)) {
|
||||
err = PTR_ERR(pfdev->iomem);
|
||||
goto out_pm_domain;
|
||||
goto out_regulator;
|
||||
}
|
||||
|
||||
err = panfrost_gpu_init(pfdev);
|
||||
if (err)
|
||||
goto out_pm_domain;
|
||||
goto out_regulator;
|
||||
|
||||
err = panfrost_mmu_init(pfdev);
|
||||
if (err)
|
||||
|
@@ -268,16 +268,16 @@ out_mmu:
 	panfrost_mmu_fini(pfdev);
 out_gpu:
 	panfrost_gpu_fini(pfdev);
-out_pm_domain:
-	panfrost_pm_domain_fini(pfdev);
-out_reset:
-	panfrost_reset_fini(pfdev);
 out_regulator:
 	panfrost_regulator_fini(pfdev);
 out_devfreq:
 	panfrost_devfreq_fini(pfdev);
 out_clk:
 	panfrost_clk_fini(pfdev);
+out_reset:
+	panfrost_reset_fini(pfdev);
+out_pm_domain:
+	panfrost_pm_domain_fini(pfdev);
 	return err;
 }
 
@@ -287,11 +287,11 @@ void panfrost_device_fini(struct panfrost_device *pfdev)
 	panfrost_job_fini(pfdev);
 	panfrost_mmu_fini(pfdev);
 	panfrost_gpu_fini(pfdev);
-	panfrost_pm_domain_fini(pfdev);
-	panfrost_reset_fini(pfdev);
 	panfrost_devfreq_fini(pfdev);
 	panfrost_regulator_fini(pfdev);
 	panfrost_clk_fini(pfdev);
+	panfrost_reset_fini(pfdev);
+	panfrost_pm_domain_fini(pfdev);
 }
 
 #define PANFROST_EXCEPTION(id) \
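For context on the panfrost_device.c hunks above: the init path and the error/fini paths are kept as mirror images, so each goto label undoes only the steps that had already succeeded, in reverse order of initialization. A minimal sketch of that unwind pattern, using hypothetical init_a()/init_b()/init_c() helpers rather than the actual panfrost functions::

	/* Hypothetical helpers standing in for the panfrost init/fini calls. */
	int init_a(void); int init_b(void); int init_c(void);
	void fini_a(void); void fini_b(void);

	int example_device_init(void)
	{
		int err;

		err = init_a();
		if (err)
			return err;	/* nothing acquired yet, plain return */

		err = init_b();
		if (err)
			goto out_a;	/* undo init_a() only */

		err = init_c();
		if (err)
			goto out_b;	/* undo init_b(), then init_a() */

		return 0;

	out_b:				/* labels run in reverse init order */
		fini_b();
	out_a:
		fini_a();
		return err;
	}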
@@ -406,11 +406,36 @@ void panfrost_device_reset(struct panfrost_device *pfdev)
 static int panfrost_device_runtime_resume(struct device *dev)
 {
 	struct panfrost_device *pfdev = dev_get_drvdata(dev);
+	int ret;
 
+	if (pfdev->comp->pm_features & BIT(GPU_PM_RT)) {
+		ret = reset_control_deassert(pfdev->rstc);
+		if (ret)
+			return ret;
+
+		ret = clk_enable(pfdev->clock);
+		if (ret)
+			goto err_clk;
+
+		if (pfdev->bus_clock) {
+			ret = clk_enable(pfdev->bus_clock);
+			if (ret)
+				goto err_bus_clk;
+		}
+	}
+
 	panfrost_device_reset(pfdev);
 	panfrost_devfreq_resume(pfdev);
 
 	return 0;
+
+err_bus_clk:
+	if (pfdev->comp->pm_features & BIT(GPU_PM_RT))
+		clk_disable(pfdev->clock);
+err_clk:
+	if (pfdev->comp->pm_features & BIT(GPU_PM_RT))
+		reset_control_assert(pfdev->rstc);
+	return ret;
 }
 
 static int panfrost_device_runtime_suspend(struct device *dev)
@@ -426,6 +451,14 @@ static int panfrost_device_runtime_suspend(struct device *dev)
 	panfrost_gpu_suspend_irq(pfdev);
 	panfrost_gpu_power_off(pfdev);
 
+	if (pfdev->comp->pm_features & BIT(GPU_PM_RT)) {
+		if (pfdev->bus_clock)
+			clk_disable(pfdev->bus_clock);
+
+		clk_disable(pfdev->clock);
+		reset_control_assert(pfdev->rstc);
+	}
+
 	return 0;
 }
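The two runtime-PM hunks are deliberately symmetric: resume deasserts the reset line and enables the clocks only when the compatible sets GPU_PM_RT, and suspend releases the same resources in reverse order, so every clk_enable()/reset_control_deassert() is balanced by exactly one clk_disable()/reset_control_assert(). A condensed sketch of that pairing, with a hypothetical struct my_gpu standing in for panfrost_device::

	#include <linux/clk.h>
	#include <linux/reset.h>

	/* Hypothetical device struct; panfrost_device carries the same handles. */
	struct my_gpu {
		struct clk *clock;
		struct reset_control *rstc;
	};

	static int my_gpu_runtime_resume(struct my_gpu *gpu)
	{
		int ret;

		ret = reset_control_deassert(gpu->rstc);	/* 1. leave reset */
		if (ret)
			return ret;

		ret = clk_enable(gpu->clock);			/* 2. clock on */
		if (ret)
			reset_control_assert(gpu->rstc);	/* undo step 1 */
		return ret;
	}

	static void my_gpu_runtime_suspend(struct my_gpu *gpu)
	{
		clk_disable(gpu->clock);			/* undo 2 first */
		reset_control_assert(gpu->rstc);		/* then undo 1 */
	}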
@@ -36,10 +36,13 @@ enum panfrost_drv_comp_bits {
  * enum panfrost_gpu_pm - Supported kernel power management features
  * @GPU_PM_CLK_DIS: Allow disabling clocks during system suspend
  * @GPU_PM_VREG_OFF: Allow turning off regulators during system suspend
+ * @GPU_PM_RT: Allow disabling clocks and asserting the reset control during
+ *             system runtime suspend
  */
 enum panfrost_gpu_pm {
 	GPU_PM_CLK_DIS,
 	GPU_PM_VREG_OFF,
+	GPU_PM_RT
 };
 
 /**
@@ -776,6 +776,13 @@ static const struct panfrost_compatible default_data = {
 	.pm_domain_names = NULL,
 };
 
+static const struct panfrost_compatible allwinner_h616_data = {
+	.num_supplies = ARRAY_SIZE(default_supplies) - 1,
+	.supply_names = default_supplies,
+	.num_pm_domains = 1,
+	.pm_features = BIT(GPU_PM_RT),
+};
+
 static const struct panfrost_compatible amlogic_data = {
 	.num_supplies = ARRAY_SIZE(default_supplies) - 1,
 	.supply_names = default_supplies,
@@ -861,6 +868,7 @@ static const struct of_device_id dt_match[] = {
 	{ .compatible = "mediatek,mt8186-mali", .data = &mediatek_mt8186_data },
 	{ .compatible = "mediatek,mt8188-mali", .data = &mediatek_mt8188_data },
 	{ .compatible = "mediatek,mt8192-mali", .data = &mediatek_mt8192_data },
+	{ .compatible = "allwinner,sun50i-h616-mali", .data = &allwinner_h616_data },
 	{}
 };
 MODULE_DEVICE_TABLE(of, dt_match);
@@ -200,7 +200,7 @@ static enum drm_gem_object_status panfrost_gem_status(struct drm_gem_object *obj
 	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
 	enum drm_gem_object_status res = 0;
 
-	if (bo->base.base.import_attach || bo->base.pages)
+	if (drm_gem_is_imported(&bo->base.base) || bo->base.pages)
 		res |= DRM_GEM_OBJECT_RESIDENT;
 
 	if (bo->base.madv == PANFROST_MADV_DONTNEED)
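The panfrost_gem.c hunk above is part of the tree-wide conversion called out in the merge description: callers test drm_gem_is_imported() instead of dereferencing obj->import_attach directly. A minimal sketch of the same check in a hypothetical helper (my_bo_is_resident is not a real panfrost function)::

	#include <drm/drm_gem.h>

	/*
	 * Hypothetical helper: a BO counts as resident once it is backed by
	 * an imported dma-buf or by locally allocated pages.
	 */
	static bool my_bo_is_resident(struct drm_gem_object *obj, bool has_pages)
	{
		return drm_gem_is_imported(obj) || has_pages;
	}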