summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJohnnLee <johnnlee@google.com>2021-04-08 17:48:36 +0800
committerJohnnLee <johnnlee@google.com>2021-04-08 17:48:50 +0800
commit4a9a34b444975776aa275f729f33cd7bdf151a6a (patch)
treef31321c6c76e05f1b9d856e45cfdbceac132f29e
parent7b81ba77601c1cf4b5468257e8f6e83c377e2813 (diff)
parente471ed45e9ffa031b3a34f3fdb18d48f187cd52b (diff)
downloaddisplay-drivers-android-msm-barbet-4.19-s-qpr3-beta-2.tar.gz
Bug: 184813985 Change-Id: If99a2dc3123c7f322cd1877f8e9cee4064380307
-rw-r--r--msm/Makefile3
-rw-r--r--msm/dp/dp_ctrl.c10
-rw-r--r--msm/dp/dp_debug.c4
-rw-r--r--msm/dp/dp_display.c46
-rw-r--r--msm/dp/dp_panel.c71
-rw-r--r--msm/dp/dp_panel.h5
-rw-r--r--msm/dp/dp_usbpd.c5
-rw-r--r--msm/dsi/dsi_catalog.c8
-rw-r--r--msm/dsi/dsi_catalog.h7
-rw-r--r--msm/dsi/dsi_clk.h10
-rw-r--r--msm/dsi/dsi_ctrl.c195
-rw-r--r--msm/dsi/dsi_ctrl.h17
-rw-r--r--msm/dsi/dsi_ctrl_hw.h29
-rw-r--r--msm/dsi/dsi_ctrl_hw_2_2.c64
-rw-r--r--msm/dsi/dsi_ctrl_hw_cmn.c12
-rw-r--r--msm/dsi/dsi_defs.h5
-rw-r--r--msm/dsi/dsi_display.c247
-rw-r--r--msm/dsi/dsi_display.h16
-rw-r--r--msm/dsi/dsi_drm.c27
-rw-r--r--msm/dsi/dsi_panel.c95
-rw-r--r--msm/dsi/dsi_panel.h10
-rw-r--r--msm/dsi/dsi_phy_hw_v4_0.c10
-rw-r--r--msm/dsi/dsi_phy_timing_calc.c250
-rw-r--r--msm/dsi/dsi_phy_timing_calc.h22
-rw-r--r--msm/dsi/dsi_phy_timing_v2_0.c7
-rw-r--r--msm/dsi/dsi_phy_timing_v3_0.c6
-rw-r--r--msm/dsi/dsi_phy_timing_v4_0.c79
-rw-r--r--msm/msm_atomic.c35
-rw-r--r--msm/msm_drv.c29
-rw-r--r--msm/msm_drv.h8
-rw-r--r--msm/msm_fb.c31
-rw-r--r--msm/msm_kms.h4
-rw-r--r--msm/msm_notifier.c7
-rw-r--r--msm/sde/sde_connector.c27
-rw-r--r--msm/sde/sde_connector.h8
-rw-r--r--msm/sde/sde_core_perf.c23
-rw-r--r--msm/sde/sde_crtc.c167
-rw-r--r--msm/sde/sde_crtc.h28
-rw-r--r--msm/sde/sde_encoder.c169
-rw-r--r--msm/sde/sde_encoder.h38
-rw-r--r--msm/sde/sde_encoder_phys.h24
-rw-r--r--msm/sde/sde_encoder_phys_cmd.c2
-rw-r--r--msm/sde/sde_encoder_phys_vid.c20
-rw-r--r--msm/sde/sde_encoder_phys_wb.c112
-rw-r--r--msm/sde/sde_hw_catalog.c23
-rw-r--r--msm/sde/sde_hw_color_proc_v4.c8
-rw-r--r--msm/sde/sde_hw_reg_dma_v1_color_proc.c65
-rw-r--r--msm/sde/sde_hw_util.c3
-rw-r--r--msm/sde/sde_kms.c151
-rw-r--r--msm/sde/sde_plane.c55
-rw-r--r--msm/sde/sde_rm.c52
-rw-r--r--msm/sde_dbg.c94
-rw-r--r--msm/sde_dbg.h194
-rw-r--r--msm/sde_dbg_evtlog.c48
-rw-r--r--msm/sde_io_util.c3
-rw-r--r--msm/sde_rsc_hw_v3.c12
-rw-r--r--pll/dsi_pll_7nm.c187
-rw-r--r--rotator/sde_rotator_core.c25
58 files changed, 2278 insertions, 634 deletions
diff --git a/msm/Makefile b/msm/Makefile
index e6e4e9bd..3a0e02c5 100644
--- a/msm/Makefile
+++ b/msm/Makefile
@@ -78,8 +78,7 @@ msm_drm-$(CONFIG_DRM_MSM_SDE) += sde/sde_crtc.o \
sde/sde_hw_ds.o \
sde/sde_fence.o \
sde/sde_hw_qdss.o \
-
-msm_drm-$(CONFIG_DEBUG_FS) += sde_dbg.o \
+ sde_dbg.o \
sde_dbg_evtlog.o \
msm_drm-$(CONFIG_DRM_SDE_WB) += sde/sde_wb.o \
diff --git a/msm/dp/dp_ctrl.c b/msm/dp/dp_ctrl.c
index 2870cda6..88c41929 100644
--- a/msm/dp/dp_ctrl.c
+++ b/msm/dp/dp_ctrl.c
@@ -996,14 +996,15 @@ static void dp_ctrl_mst_calculate_rg(struct dp_ctrl_private *ctrl,
u64 raw_target_sc, target_sc_fixp;
u64 ts_denom, ts_enum, ts_int;
u64 pclk = panel->pinfo.pixel_clk_khz;
- u64 lclk = panel->link_info.rate;
- u64 lanes = panel->link_info.num_lanes;
+ u64 lclk = 0;
+ u64 lanes = ctrl->link->link_params.lane_count;
u64 bpp = panel->pinfo.bpp;
u64 pbn = panel->pbn;
u64 numerator, denominator, temp, temp1, temp2;
u32 x_int = 0, y_frac_enum = 0;
u64 target_strm_sym, ts_int_fixp, ts_frac_fixp, y_frac_enum_fixp;
+ lclk = drm_dp_bw_code_to_link_rate(ctrl->link->link_params.bw_code);
if (panel->pinfo.comp_info.comp_ratio)
bpp = panel->pinfo.comp_info.dsc_info.bpp;
@@ -1170,6 +1171,11 @@ static int dp_ctrl_stream_on(struct dp_ctrl *dp_ctrl, struct dp_panel *panel)
ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+ if (!ctrl->power_on) {
+ DP_ERR("ctrl off\n");
+ return -EINVAL;
+ }
+
rc = dp_ctrl_enable_stream_clocks(ctrl, panel);
if (rc) {
DP_ERR("failure on stream clock enable\n");
diff --git a/msm/dp/dp_debug.c b/msm/dp/dp_debug.c
index 0459adce..6303c1cf 100644
--- a/msm/dp/dp_debug.c
+++ b/msm/dp/dp_debug.c
@@ -154,7 +154,7 @@ static ssize_t dp_debug_write_edid(struct file *file,
edid = debug->edid;
bail:
kfree(buf);
- debug->panel->set_edid(debug->panel, edid);
+ debug->panel->set_edid(debug->panel, edid, debug->edid_size);
/*
* print edid status as this code is executed
@@ -1628,7 +1628,7 @@ static void dp_debug_set_sim_mode(struct dp_debug_private *debug, bool sim)
debug->aux->set_sim_mode(debug->aux, false, NULL, NULL);
debug->dp_debug.sim_mode = false;
- debug->panel->set_edid(debug->panel, 0);
+ debug->panel->set_edid(debug->panel, 0, 0);
if (debug->edid) {
devm_kfree(debug->dev, debug->edid);
debug->edid = NULL;
diff --git a/msm/dp/dp_display.c b/msm/dp/dp_display.c
index c58c7f7c..2ff34baf 100644
--- a/msm/dp/dp_display.c
+++ b/msm/dp/dp_display.c
@@ -712,7 +712,7 @@ static void dp_display_send_hpd_event(struct dp_display_private *dp)
snprintf(pattern, HPD_STRING_SIZE, "pattern=%d",
dp->link->test_video.test_video_pattern);
- DP_DEBUG("[%s]:[%s] [%s] [%s]\n", name, status, bpp, pattern);
+ DP_INFO("[%s]:[%s] [%s] [%s]\n", name, status, bpp, pattern);
envp[0] = name;
envp[1] = status;
envp[2] = bpp;
@@ -1167,6 +1167,12 @@ static void dp_display_stream_disable(struct dp_display_private *dp,
return;
}
+ if (dp_panel->stream_id == DP_STREAM_MAX ||
+ !dp->active_panels[dp_panel->stream_id]) {
+ DP_ERR("panel is already disabled\n");
+ return;
+ }
+
DP_DEBUG("stream_id=%d, active_stream_cnt=%d\n",
dp_panel->stream_id, dp->active_stream_cnt);
@@ -1334,6 +1340,7 @@ static void dp_display_attention_work(struct work_struct *work)
{
struct dp_display_private *dp = container_of(work,
struct dp_display_private, attention_work);
+ int rc = 0;
SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state);
mutex_lock(&dp->session_lock);
@@ -1397,16 +1404,20 @@ static void dp_display_attention_work(struct work_struct *work)
if (dp->link->sink_request & DP_TEST_LINK_TRAINING) {
SDE_EVT32_EXTERNAL(dp->state, DP_TEST_LINK_TRAINING);
dp->link->send_test_response(dp->link);
- dp->ctrl->link_maintenance(dp->ctrl);
+ rc = dp->ctrl->link_maintenance(dp->ctrl);
}
if (dp->link->sink_request & DP_LINK_STATUS_UPDATED) {
SDE_EVT32_EXTERNAL(dp->state, DP_LINK_STATUS_UPDATED);
- dp->ctrl->link_maintenance(dp->ctrl);
+ rc = dp->ctrl->link_maintenance(dp->ctrl);
}
- dp_audio_enable(dp, true);
+ if (!rc)
+ dp_audio_enable(dp, true);
+
mutex_unlock(&dp->session_lock);
+ if (rc)
+ goto end;
if (dp->link->sink_request & (DP_TEST_LINK_PHY_TEST_PATTERN |
DP_TEST_LINK_TRAINING))
@@ -1430,6 +1441,8 @@ cp_irq:
mst_attention:
dp_display_mst_attention(dp);
+
+end:
SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state);
}
@@ -1893,7 +1906,7 @@ end:
mutex_unlock(&dp->session_lock);
SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state);
- return 0;
+ return rc;
}
static int dp_display_set_stream_info(struct dp_display *dp_display,
@@ -2309,7 +2322,6 @@ static enum drm_mode_status dp_display_validate_mode(
const struct msm_resource_caps_info *avail_res)
{
struct dp_display_private *dp;
- struct drm_dp_link *link_info;
u32 mode_rate_khz = 0, supported_rate_khz = 0, mode_bpp = 0;
struct dp_panel *dp_panel;
struct dp_debug *debug;
@@ -2338,8 +2350,6 @@ static enum drm_mode_status dp_display_validate_mode(
goto end;
}
- link_info = &dp->panel->link_info;
-
debug = dp->debug;
if (!debug)
goto end;
@@ -2352,7 +2362,7 @@ static enum drm_mode_status dp_display_validate_mode(
mode_rate_khz = mode->clock * mode_bpp;
rate = drm_dp_bw_code_to_link_rate(dp->link->link_params.bw_code);
- supported_rate_khz = link_info->num_lanes * rate * 8;
+ supported_rate_khz = dp->link->link_params.lane_count * rate * 8;
tmds_max_clock = dp_panel->connector->display_info.max_tmds_clock;
if (mode_rate_khz > supported_rate_khz) {
@@ -2539,6 +2549,11 @@ static int dp_display_config_hdr(struct dp_display *dp_display, void *panel,
return -EINVAL;
}
+ if (!dp_display_state_is(DP_STATE_ENABLED)) {
+ dp_display_state_show("[not enabled]");
+ return 0;
+ }
+
/*
* In rare cases where HDR metadata is updated independently
* flush the HDR metadata immediately instead of relying on
@@ -2560,12 +2575,20 @@ static int dp_display_setup_colospace(struct dp_display *dp_display,
u32 colorspace)
{
struct dp_panel *dp_panel;
+ struct dp_display_private *dp;
if (!dp_display || !panel) {
pr_err("invalid input\n");
return -EINVAL;
}
+ dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+ if (!dp_display_state_is(DP_STATE_ENABLED)) {
+ dp_display_state_show("[not enabled]");
+ return 0;
+ }
+
dp_panel = panel;
return dp_panel->set_colorspace(dp_panel, colorspace);
@@ -2898,6 +2921,11 @@ static int dp_display_update_pps(struct dp_display *dp_display,
return -EINVAL;
}
+ if (!dp_display_state_is(DP_STATE_ENABLED)) {
+ dp_display_state_show("[not enabled]");
+ return 0;
+ }
+
dp_panel = sde_conn->drv_panel;
dp_panel->update_pps(dp_panel, pps_cmd);
return 0;
diff --git a/msm/dp/dp_panel.c b/msm/dp/dp_panel.c
index b31103a6..1c01b0ee 100644
--- a/msm/dp/dp_panel.c
+++ b/msm/dp/dp_panel.c
@@ -7,6 +7,7 @@
#include <linux/unistd.h>
#include <drm/drm_fixed.h>
#include "dp_debug.h"
+#include <drm/drm_edid.h>
#define DP_KHZ_TO_HZ 1000
#define DP_PANEL_DEFAULT_BPP 24
@@ -1355,8 +1356,11 @@ static void _dp_panel_dsc_bw_overhead_calc(struct dp_panel *dp_panel,
int tot_num_hor_bytes, tot_num_dummy_bytes;
int dwidth_dsc_bytes, eoc_bytes;
u32 num_lanes;
+ struct dp_panel_private *panel;
+
+ panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
- num_lanes = dp_panel->link_info.num_lanes;
+ num_lanes = panel->link->link_params.lane_count;
num_slices = dsc->slice_per_pkt;
eoc_bytes = dsc_byte_cnt % num_lanes;
@@ -1934,7 +1938,25 @@ static int dp_panel_set_default_link_params(struct dp_panel *dp_panel)
return 0;
}
-static int dp_panel_set_edid(struct dp_panel *dp_panel, u8 *edid)
+static bool dp_panel_validate_edid(struct edid *edid, size_t edid_size)
+{
+ if (!edid || (edid_size < EDID_LENGTH))
+ return false;
+
+ if (EDID_LENGTH * (edid->extensions + 1) > edid_size) {
+ DP_ERR("edid size does not match allocated.\n");
+ return false;
+ }
+
+ if (!drm_edid_is_valid(edid)) {
+ DP_ERR("invalid edid.\n");
+ return false;
+ }
+ return true;
+}
+
+static int dp_panel_set_edid(struct dp_panel *dp_panel, u8 *edid,
+ size_t edid_size)
{
struct dp_panel_private *panel;
@@ -1945,7 +1967,7 @@ static int dp_panel_set_edid(struct dp_panel *dp_panel, u8 *edid)
panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
- if (edid) {
+ if (edid && dp_panel_validate_edid((struct edid *)edid, edid_size)) {
dp_panel->edid_ctrl->edid = (struct edid *)edid;
panel->custom_edid = true;
} else {
@@ -2171,18 +2193,23 @@ end:
static u32 dp_panel_get_supported_bpp(struct dp_panel *dp_panel,
u32 mode_edid_bpp, u32 mode_pclk_khz)
{
- struct drm_dp_link *link_info;
+ struct dp_link_params *link_params;
+ struct dp_panel_private *panel;
const u32 max_supported_bpp = 30;
u32 min_supported_bpp = 18;
u32 bpp = 0, data_rate_khz = 0, tmds_max_clock = 0;
+ panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+
if (dp_panel->dsc_en)
min_supported_bpp = 24;
bpp = min_t(u32, mode_edid_bpp, max_supported_bpp);
- link_info = &dp_panel->link_info;
- data_rate_khz = link_info->num_lanes * link_info->rate * 8;
+ link_params = &panel->link->link_params;
+
+ data_rate_khz = link_params->lane_count *
+ drm_dp_bw_code_to_link_rate(link_params->bw_code) * 8;
tmds_max_clock = dp_panel->connector->display_info.max_tmds_clock;
for (; bpp > min_supported_bpp; bpp -= 6) {
@@ -2680,32 +2707,6 @@ static int dp_panel_deinit_panel_info(struct dp_panel *dp_panel, u32 flags)
return rc;
}
-static u32 dp_panel_get_min_req_link_rate(struct dp_panel *dp_panel)
-{
- const u32 encoding_factx10 = 8;
- u32 min_link_rate_khz = 0, lane_cnt;
- struct dp_panel_info *pinfo;
-
- if (!dp_panel) {
- DP_ERR("invalid input\n");
- goto end;
- }
-
- lane_cnt = dp_panel->link_info.num_lanes;
- pinfo = &dp_panel->pinfo;
-
- /* num_lanes * lane_count * 8 >= pclk * bpp * 10 */
- min_link_rate_khz = pinfo->pixel_clk_khz /
- (lane_cnt * encoding_factx10);
- min_link_rate_khz *= pinfo->bpp;
-
- DP_DEBUG("min lclk req=%d khz for pclk=%d khz, lanes=%d, bpp=%d\n",
- min_link_rate_khz, pinfo->pixel_clk_khz, lane_cnt,
- pinfo->bpp);
-end:
- return min_link_rate_khz;
-}
-
static bool dp_panel_hdr_supported(struct dp_panel *dp_panel)
{
struct dp_panel_private *panel;
@@ -2966,8 +2967,9 @@ cached:
dp_panel_setup_dhdr_vsif(panel);
input.mdp_clk = core_clk_rate;
- input.lclk = dp_panel->link_info.rate;
- input.nlanes = dp_panel->link_info.num_lanes;
+ input.lclk = drm_dp_bw_code_to_link_rate(
+ panel->link->link_params.bw_code);
+ input.nlanes = panel->link->link_params.lane_count;
input.pclk = dp_panel->pinfo.pixel_clk_khz;
input.h_active = dp_panel->pinfo.h_active;
input.mst_target_sc = dp_panel->mst_target_sc;
@@ -3371,7 +3373,6 @@ struct dp_panel *dp_panel_get(struct dp_panel_in *in)
dp_panel->deinit = dp_panel_deinit_panel_info;
dp_panel->hw_cfg = dp_panel_hw_cfg;
dp_panel->read_sink_caps = dp_panel_read_sink_caps;
- dp_panel->get_min_req_link_rate = dp_panel_get_min_req_link_rate;
dp_panel->get_mode_bpp = dp_panel_get_mode_bpp;
dp_panel->get_modes = dp_panel_get_modes;
dp_panel->handle_sink_request = dp_panel_handle_sink_request;
diff --git a/msm/dp/dp_panel.h b/msm/dp/dp_panel.h
index 212b6152..36629c3c 100644
--- a/msm/dp/dp_panel.h
+++ b/msm/dp/dp_panel.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
*/
#ifndef _DP_PANEL_H_
@@ -141,13 +141,12 @@ struct dp_panel {
int (*hw_cfg)(struct dp_panel *dp_panel, bool enable);
int (*read_sink_caps)(struct dp_panel *dp_panel,
struct drm_connector *connector, bool multi_func);
- u32 (*get_min_req_link_rate)(struct dp_panel *dp_panel);
u32 (*get_mode_bpp)(struct dp_panel *dp_panel, u32 mode_max_bpp,
u32 mode_pclk_khz);
int (*get_modes)(struct dp_panel *dp_panel,
struct drm_connector *connector, struct dp_display_mode *mode);
void (*handle_sink_request)(struct dp_panel *dp_panel);
- int (*set_edid)(struct dp_panel *dp_panel, u8 *edid);
+ int (*set_edid)(struct dp_panel *dp_panel, u8 *edid, size_t edid_size);
int (*set_dpcd)(struct dp_panel *dp_panel, u8 *dpcd);
int (*setup_hdr)(struct dp_panel *dp_panel,
struct drm_msm_ext_hdr_metadata *hdr_meta,
diff --git a/msm/dp/dp_usbpd.c b/msm/dp/dp_usbpd.c
index 030fe618..f49df593 100644
--- a/msm/dp/dp_usbpd.c
+++ b/msm/dp/dp_usbpd.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
*/
#include <linux/usb/usbpd.h>
@@ -246,7 +246,7 @@ static void dp_usbpd_connect_cb(struct usbpd_svid_handler *hdlr,
return;
}
- DP_DEBUG("peer_usb_comm: %d\n");
+ DP_DEBUG("peer_usb_comm: %d\n", peer_usb_comm);
pd->dp_usbpd.base.peer_usb_comm = peer_usb_comm;
dp_usbpd_send_event(pd, DP_USBPD_EVT_DISCOVER);
}
@@ -403,6 +403,7 @@ static void dp_usbpd_response_cb(struct usbpd_svid_handler *hdlr, u8 cmd,
case DP_USBPD_VDM_CONFIGURE:
pd->alt_mode |= DP_USBPD_ALT_MODE_CONFIGURE;
pd->dp_usbpd.base.alt_mode_cfg_done = true;
+ pd->forced_disconnect = false;
dp_usbpd_get_status(pd);
pd->dp_usbpd.base.orientation =
diff --git a/msm/dsi/dsi_catalog.c b/msm/dsi/dsi_catalog.c
index f26c7296..19fb900c 100644
--- a/msm/dsi/dsi_catalog.c
+++ b/msm/dsi/dsi_catalog.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
*/
#include <linux/errno.h>
@@ -81,6 +81,8 @@ static void dsi_catalog_cmn_init(struct dsi_ctrl_hw *ctrl,
ctrl->ops.schedule_dma_cmd = NULL;
ctrl->ops.kickoff_command_non_embedded_mode = NULL;
ctrl->ops.config_clk_gating = NULL;
+ ctrl->ops.map_mdp_regs = NULL;
+ ctrl->ops.log_line_count = NULL;
break;
case DSI_CTRL_VERSION_2_0:
ctrl->ops.setup_lane_map = dsi_ctrl_hw_20_setup_lane_map;
@@ -96,6 +98,8 @@ static void dsi_catalog_cmn_init(struct dsi_ctrl_hw *ctrl,
ctrl->ops.schedule_dma_cmd = NULL;
ctrl->ops.kickoff_command_non_embedded_mode = NULL;
ctrl->ops.config_clk_gating = NULL;
+ ctrl->ops.map_mdp_regs = NULL;
+ ctrl->ops.log_line_count = NULL;
break;
case DSI_CTRL_VERSION_2_2:
case DSI_CTRL_VERSION_2_3:
@@ -116,6 +120,8 @@ static void dsi_catalog_cmn_init(struct dsi_ctrl_hw *ctrl,
ctrl->ops.schedule_dma_cmd = dsi_ctrl_hw_22_schedule_dma_cmd;
ctrl->ops.kickoff_command_non_embedded_mode =
dsi_ctrl_hw_kickoff_non_embedded_mode;
+ ctrl->ops.map_mdp_regs = dsi_ctrl_hw_22_map_mdp_regs;
+ ctrl->ops.log_line_count = dsi_ctrl_hw_22_log_line_count;
break;
default:
break;
diff --git a/msm/dsi/dsi_catalog.h b/msm/dsi/dsi_catalog.h
index ed047e9e..3118cb57 100644
--- a/msm/dsi/dsi_catalog.h
+++ b/msm/dsi/dsi_catalog.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
*/
#ifndef _DSI_CATALOG_H_
@@ -270,4 +270,9 @@ void dsi_phy_hw_v4_0_dyn_refresh_pipe_delay(struct dsi_phy_hw *phy,
int dsi_phy_hw_v4_0_cache_phy_timings(struct dsi_phy_per_lane_cfgs *timings,
u32 *dst, u32 size);
+
+int dsi_ctrl_hw_22_map_mdp_regs(struct platform_device *pdev,
+ struct dsi_ctrl_hw *ctrl);
+
+u32 dsi_ctrl_hw_22_log_line_count(struct dsi_ctrl_hw *ctrl, bool cmd_mode);
#endif /* _DSI_CATALOG_H_ */
diff --git a/msm/dsi/dsi_clk.h b/msm/dsi/dsi_clk.h
index 1a3928a3..ccc1ba7d 100644
--- a/msm/dsi/dsi_clk.h
+++ b/msm/dsi/dsi_clk.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
*/
#ifndef _DSI_CLK_H_
@@ -105,10 +105,10 @@ struct dsi_link_lp_clk_info {
/**
* struct link_clk_freq - Clock frequency information for Link clocks
- * @byte_clk_rate: Frequency of DSI byte_clk in KHz.
- * @byte_intf_clk_rate: Frequency of DSI byte_intf_clk in KHz.
- * @pixel_clk_rate: Frequency of DSI pixel_clk in KHz.
- * @esc_clk_rate: Frequency of DSI escape clock in KHz.
+ * @byte_clk_rate: Frequency of DSI byte_clk in Hz.
+ * @byte_intf_clk_rate: Frequency of DSI byte_intf_clk in Hz.
+ * @pixel_clk_rate: Frequency of DSI pixel_clk in Hz.
+ * @esc_clk_rate: Frequency of DSI escape clock in Hz.
*/
struct link_clk_freq {
u32 byte_clk_rate;
diff --git a/msm/dsi/dsi_ctrl.c b/msm/dsi/dsi_ctrl.c
index 8493fdf2..54d5dbfd 100644
--- a/msm/dsi/dsi_ctrl.c
+++ b/msm/dsi/dsi_ctrl.c
@@ -263,6 +263,13 @@ static int dsi_ctrl_debugfs_deinit(struct dsi_ctrl *dsi_ctrl)
static int dsi_ctrl_debugfs_init(struct dsi_ctrl *dsi_ctrl,
struct dentry *parent)
{
+ char dbg_name[DSI_DEBUG_NAME_LEN];
+
+ snprintf(dbg_name, DSI_DEBUG_NAME_LEN, "dsi%d_ctrl",
+ dsi_ctrl->cell_index);
+ sde_dbg_reg_register_base(dbg_name,
+ dsi_ctrl->hw.base,
+ msm_iomap_size(dsi_ctrl->pdev, "dsi_ctrl"));
return 0;
}
static int dsi_ctrl_debugfs_deinit(struct dsi_ctrl *dsi_ctrl)
@@ -295,6 +302,7 @@ static void dsi_ctrl_flush_cmd_dma_queue(struct dsi_ctrl *dsi_ctrl)
cancel_work_sync(&dsi_ctrl->dma_cmd_wait);
} else {
flush_workqueue(dsi_ctrl->dma_cmd_workq);
+ SDE_EVT32(SDE_EVTLOG_FUNC_CASE2);
}
}
@@ -347,7 +355,7 @@ static int dsi_ctrl_check_state(struct dsi_ctrl *dsi_ctrl,
int rc = 0;
struct dsi_ctrl_state_info *state = &dsi_ctrl->current_state;
- SDE_EVT32(dsi_ctrl->cell_index, op);
+ SDE_EVT32(dsi_ctrl->cell_index, op, op_state);
switch (op) {
case DSI_CTRL_OP_POWER_STATE_CHANGE:
@@ -971,6 +979,7 @@ static int dsi_ctrl_update_link_freqs(struct dsi_ctrl *dsi_ctrl,
DSI_CTRL_DEBUG(dsi_ctrl, "byte_clk_rate = %llu, byte_intf_clk = %llu\n",
byte_clk_rate, byte_intf_clk_rate);
DSI_CTRL_DEBUG(dsi_ctrl, "pclk_rate = %llu\n", pclk_rate);
+ SDE_EVT32(dsi_ctrl->cell_index, bit_rate, byte_clk_rate, pclk_rate);
dsi_ctrl->clk_freq.byte_clk_rate = byte_clk_rate;
dsi_ctrl->clk_freq.byte_intf_clk_rate = byte_intf_clk_rate;
@@ -1144,6 +1153,7 @@ void dsi_message_setup_tx_mode(struct dsi_ctrl *dsi_ctrl,
* override cmd fetch mode during secure session
*/
if (dsi_ctrl->secure_mode) {
+ SDE_EVT32(dsi_ctrl->cell_index, SDE_EVTLOG_FUNC_CASE1);
*flags &= ~DSI_CTRL_CMD_FETCH_MEMORY;
*flags |= DSI_CTRL_CMD_FIFO_STORE;
DSI_CTRL_DEBUG(dsi_ctrl,
@@ -1204,28 +1214,40 @@ int dsi_message_validate_tx_mode(struct dsi_ctrl *dsi_ctrl,
return rc;
}
-
-static void dsi_kickoff_msg_tx(struct dsi_ctrl *dsi_ctrl,
- const struct mipi_dsi_msg *msg,
- struct dsi_ctrl_cmd_dma_fifo_info *cmd,
- struct dsi_ctrl_cmd_dma_info *cmd_mem,
- u32 flags)
+static u32 calculate_schedule_line(struct dsi_ctrl *dsi_ctrl, u32 flags)
{
- u32 hw_flags = 0;
u32 line_no = 0x1;
struct dsi_mode_info *timing;
- struct dsi_ctrl_hw_ops dsi_hw_ops = dsi_ctrl->hw.ops;
- SDE_EVT32(dsi_ctrl->cell_index, SDE_EVTLOG_FUNC_ENTRY, flags);
/* check if custom dma scheduling line needed */
if ((dsi_ctrl->host_config.panel_mode == DSI_OP_VIDEO_MODE) &&
(flags & DSI_CTRL_CMD_CUSTOM_DMA_SCHED))
line_no = dsi_ctrl->host_config.u.video_engine.dma_sched_line;
timing = &(dsi_ctrl->host_config.video_timing);
+
if (timing)
line_no += timing->v_back_porch + timing->v_sync_width +
timing->v_active;
+
+ return line_no;
+}
+
+static void dsi_kickoff_msg_tx(struct dsi_ctrl *dsi_ctrl,
+ const struct mipi_dsi_msg *msg,
+ struct dsi_ctrl_cmd_dma_fifo_info *cmd,
+ struct dsi_ctrl_cmd_dma_info *cmd_mem,
+ u32 flags)
+{
+ u32 hw_flags = 0;
+ u32 line_no = 0x1;
+ struct dsi_ctrl_hw_ops dsi_hw_ops = dsi_ctrl->hw.ops;
+
+ SDE_EVT32(dsi_ctrl->cell_index, SDE_EVTLOG_FUNC_ENTRY, flags,
+ msg->flags);
+
+ line_no = calculate_schedule_line(dsi_ctrl, flags);
+
if ((dsi_ctrl->host_config.panel_mode == DSI_OP_VIDEO_MODE) &&
dsi_hw_ops.schedule_dma_cmd &&
(dsi_ctrl->current_state.vid_engine_state ==
@@ -1233,6 +1255,8 @@ static void dsi_kickoff_msg_tx(struct dsi_ctrl *dsi_ctrl,
dsi_hw_ops.schedule_dma_cmd(&dsi_ctrl->hw,
line_no);
+ dsi_ctrl->cmd_mode = (dsi_ctrl->host_config.panel_mode ==
+ DSI_OP_CMD_MODE);
hw_flags |= (flags & DSI_CTRL_CMD_DEFER_TRIGGER) ?
DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER : 0;
@@ -1261,9 +1285,7 @@ static void dsi_kickoff_msg_tx(struct dsi_ctrl *dsi_ctrl,
if (!(flags & DSI_CTRL_CMD_DEFER_TRIGGER)) {
dsi_ctrl_wait_for_video_done(dsi_ctrl);
- if (dsi_hw_ops.mask_error_intr)
- dsi_hw_ops.mask_error_intr(&dsi_ctrl->hw,
- BIT(DSI_FIFO_OVERFLOW), true);
+ dsi_ctrl_mask_overflow(dsi_ctrl, true);
atomic_set(&dsi_ctrl->dma_irq_trig, 0);
dsi_ctrl_enable_status_interrupt(dsi_ctrl,
@@ -1296,9 +1318,8 @@ static void dsi_kickoff_msg_tx(struct dsi_ctrl *dsi_ctrl,
dsi_ctrl_dma_cmd_wait_for_done(&dsi_ctrl->dma_cmd_wait);
}
- if (dsi_hw_ops.mask_error_intr && !dsi_ctrl->esd_check_underway)
- dsi_hw_ops.mask_error_intr(&dsi_ctrl->hw,
- BIT(DSI_FIFO_OVERFLOW), false);
+ dsi_ctrl_mask_overflow(dsi_ctrl, false);
+
dsi_hw_ops.reset_cmd_fifo(&dsi_ctrl->hw);
/*
@@ -2031,6 +2052,9 @@ static int dsi_ctrl_dev_probe(struct platform_device *pdev)
DSI_CTRL_DEBUG(dsi_ctrl, "failed to init axi bus client, rc = %d\n",
rc);
+ if (dsi_ctrl->hw.ops.map_mdp_regs)
+ dsi_ctrl->hw.ops.map_mdp_regs(pdev, &dsi_ctrl->hw);
+
item->ctrl = dsi_ctrl;
mutex_lock(&dsi_ctrl_list_lock);
@@ -2111,7 +2135,6 @@ static struct platform_driver dsi_ctrl_driver = {
},
};
-#if defined(CONFIG_DEBUG_FS)
void dsi_ctrl_debug_dump(u32 *entries, u32 size)
{
@@ -2133,7 +2156,6 @@ void dsi_ctrl_debug_dump(u32 *entries, u32 size)
mutex_unlock(&dsi_ctrl_list_lock);
}
-#endif
/**
* dsi_ctrl_get() - get a dsi_ctrl handle from an of_node
* @of_node: of_node of the DSI controller.
@@ -2555,6 +2577,7 @@ static bool dsi_ctrl_check_for_spurious_error_interrupts(
if ((jiffies_now - dsi_ctrl->jiffies_start) < intr_check_interval) {
if (dsi_ctrl->error_interrupt_count > interrupt_threshold) {
DSI_CTRL_WARN(dsi_ctrl, "Detected spurious interrupts on dsi ctrl\n");
+ SDE_EVT32_IRQ(dsi_ctrl->error_interrupt_count);
return true;
}
} else {
@@ -2814,7 +2837,7 @@ void dsi_ctrl_enable_status_interrupt(struct dsi_ctrl *dsi_ctrl,
intr_idx >= DSI_STATUS_INTERRUPT_COUNT)
return;
- SDE_EVT32(dsi_ctrl->cell_index, SDE_EVTLOG_FUNC_ENTRY);
+ SDE_EVT32(dsi_ctrl->cell_index, SDE_EVTLOG_FUNC_ENTRY, intr_idx);
spin_lock_irqsave(&dsi_ctrl->irq_info.irq_lock, flags);
if (dsi_ctrl->irq_info.irq_stat_refcount[intr_idx] == 0) {
@@ -2847,7 +2870,7 @@ void dsi_ctrl_disable_status_interrupt(struct dsi_ctrl *dsi_ctrl,
if (!dsi_ctrl || intr_idx >= DSI_STATUS_INTERRUPT_COUNT)
return;
- SDE_EVT32(dsi_ctrl->cell_index, SDE_EVTLOG_FUNC_ENTRY);
+ SDE_EVT32_IRQ(dsi_ctrl->cell_index, SDE_EVTLOG_FUNC_ENTRY, intr_idx);
spin_lock_irqsave(&dsi_ctrl->irq_info.irq_lock, flags);
if (dsi_ctrl->irq_info.irq_stat_refcount[intr_idx])
@@ -3281,6 +3304,78 @@ error:
}
/**
+ * dsi_ctrl_mask_overflow() - API to mask/unmask overflow error.
+ * @dsi_ctrl: DSI controller handle.
+ * @enable: variable to control masking/unmasking.
+ */
+void dsi_ctrl_mask_overflow(struct dsi_ctrl *dsi_ctrl, bool enable)
+{
+ struct dsi_ctrl_hw_ops dsi_hw_ops;
+
+ dsi_hw_ops = dsi_ctrl->hw.ops;
+
+ if (enable) {
+ if (dsi_hw_ops.mask_error_intr)
+ dsi_hw_ops.mask_error_intr(&dsi_ctrl->hw,
+ BIT(DSI_FIFO_OVERFLOW), true);
+ } else {
+ if (dsi_hw_ops.mask_error_intr && !dsi_ctrl->esd_check_underway)
+ dsi_hw_ops.mask_error_intr(&dsi_ctrl->hw,
+ BIT(DSI_FIFO_OVERFLOW), false);
+ }
+}
+
+/**
+ * dsi_ctrl_clear_slave_dma_status - API to clear slave DMA status
+ * @dsi_ctrl: DSI controller handle.
+ * @flags: Modifiers
+ */
+int dsi_ctrl_clear_slave_dma_status(struct dsi_ctrl *dsi_ctrl, u32 flags)
+{
+ struct dsi_ctrl_hw_ops dsi_hw_ops;
+ u32 status;
+ u32 mask = DSI_CMD_MODE_DMA_DONE;
+ int rc = 0, wait_for_done = 5;
+
+ if (!dsi_ctrl) {
+ DSI_CTRL_ERR(dsi_ctrl, "Invalid params\n");
+ return -EINVAL;
+ }
+
+ /* Return if this is not the last command */
+ if (!(flags & DSI_CTRL_CMD_LAST_COMMAND))
+ return rc;
+
+ SDE_EVT32(dsi_ctrl->cell_index, SDE_EVTLOG_FUNC_ENTRY);
+
+ mutex_lock(&dsi_ctrl->ctrl_lock);
+
+ dsi_hw_ops = dsi_ctrl->hw.ops;
+
+ while (wait_for_done > 0) {
+ status = dsi_hw_ops.get_interrupt_status(&dsi_ctrl->hw);
+ if (status & mask) {
+ status |= (DSI_CMD_MODE_DMA_DONE | DSI_BTA_DONE);
+ dsi_hw_ops.clear_interrupt_status(&dsi_ctrl->hw,
+ status);
+ SDE_EVT32(dsi_ctrl->cell_index, status);
+ wait_for_done = 1;
+ break;
+ }
+ udelay(10);
+ wait_for_done--;
+ }
+
+ if (wait_for_done == 0)
+ DSI_CTRL_ERR(dsi_ctrl,
+ "DSI1 CMD_MODE_DMA_DONE failed\n");
+
+ mutex_unlock(&dsi_ctrl->ctrl_lock);
+
+ return rc;
+}
+
+/**
* dsi_ctrl_cmd_tx_trigger() - Trigger a deferred command.
* @dsi_ctrl: DSI controller handle.
* @flags: Modifiers.
@@ -3291,6 +3386,10 @@ int dsi_ctrl_cmd_tx_trigger(struct dsi_ctrl *dsi_ctrl, u32 flags)
{
int rc = 0;
struct dsi_ctrl_hw_ops dsi_hw_ops;
+ u32 v_total = 0, fps = 0, cur_line = 0, mem_latency_us = 100;
+ u32 line_time = 0, schedule_line = 0x1, latency_by_line = 0;
+ struct dsi_mode_info *timing;
+ unsigned long flag;
if (!dsi_ctrl) {
DSI_CTRL_ERR(dsi_ctrl, "Invalid params\n");
@@ -3306,22 +3405,60 @@ int dsi_ctrl_cmd_tx_trigger(struct dsi_ctrl *dsi_ctrl, u32 flags)
mutex_lock(&dsi_ctrl->ctrl_lock);
+ timing = &(dsi_ctrl->host_config.video_timing);
+
+ if (timing &&
+ (dsi_ctrl->host_config.panel_mode == DSI_OP_VIDEO_MODE)) {
+ v_total = timing->v_sync_width + timing->v_back_porch +
+ timing->v_front_porch + timing->v_active;
+ fps = timing->refresh_rate;
+ schedule_line = calculate_schedule_line(dsi_ctrl, flags);
+ line_time = (1000000 / fps) / v_total;
+ latency_by_line = CEIL(mem_latency_us, line_time);
+ }
+
if (!(flags & DSI_CTRL_CMD_BROADCAST_MASTER))
dsi_hw_ops.trigger_command_dma(&dsi_ctrl->hw);
if ((flags & DSI_CTRL_CMD_BROADCAST) &&
(flags & DSI_CTRL_CMD_BROADCAST_MASTER)) {
dsi_ctrl_wait_for_video_done(dsi_ctrl);
- if (dsi_hw_ops.mask_error_intr)
- dsi_hw_ops.mask_error_intr(&dsi_ctrl->hw,
- BIT(DSI_FIFO_OVERFLOW), true);
atomic_set(&dsi_ctrl->dma_irq_trig, 0);
dsi_ctrl_enable_status_interrupt(dsi_ctrl,
DSI_SINT_CMD_MODE_DMA_DONE, NULL);
reinit_completion(&dsi_ctrl->irq_info.cmd_dma_done);
/* trigger command */
- dsi_hw_ops.trigger_command_dma(&dsi_ctrl->hw);
+ if ((dsi_ctrl->host_config.panel_mode == DSI_OP_VIDEO_MODE) &&
+ dsi_hw_ops.schedule_dma_cmd &&
+ (dsi_ctrl->current_state.vid_engine_state ==
+ DSI_CTRL_ENGINE_ON)) {
+ /*
+ * This change reads the video line count from
+ * MDP_INTF_LINE_COUNT register and checks whether
+ * DMA trigger happens close to the schedule line.
+ * If it is not close to the schedule line, then DMA
+ * command transfer is triggered.
+ */
+ while (1) {
+ local_irq_save(flag);
+ cur_line =
+ dsi_hw_ops.log_line_count(&dsi_ctrl->hw,
+ dsi_ctrl->cmd_mode);
+ if (cur_line <
+ (schedule_line - latency_by_line) ||
+ cur_line > (schedule_line + 1)) {
+ dsi_hw_ops.trigger_command_dma(
+ &dsi_ctrl->hw);
+ local_irq_restore(flag);
+ break;
+ }
+ local_irq_restore(flag);
+ udelay(1000);
+ }
+ } else
+ dsi_hw_ops.trigger_command_dma(&dsi_ctrl->hw);
+
if (flags & DSI_CTRL_CMD_ASYNC_WAIT) {
dsi_ctrl->dma_wait_queued = true;
queue_work(dsi_ctrl->dma_cmd_workq,
@@ -3331,11 +3468,6 @@ int dsi_ctrl_cmd_tx_trigger(struct dsi_ctrl *dsi_ctrl, u32 flags)
dsi_ctrl_dma_cmd_wait_for_done(&dsi_ctrl->dma_cmd_wait);
}
- if (dsi_hw_ops.mask_error_intr &&
- !dsi_ctrl->esd_check_underway)
- dsi_hw_ops.mask_error_intr(&dsi_ctrl->hw,
- BIT(DSI_FIFO_OVERFLOW), false);
-
if (flags & DSI_CTRL_CMD_NON_EMBEDDED_MODE) {
if (dsi_ctrl->version < DSI_CTRL_VERSION_2_4)
dsi_hw_ops.soft_reset(&dsi_ctrl->hw);
@@ -3562,6 +3694,7 @@ int dsi_ctrl_set_host_engine_state(struct dsi_ctrl *dsi_ctrl,
else
dsi_ctrl->hw.ops.ctrl_en(&dsi_ctrl->hw, false);
+ SDE_EVT32(dsi_ctrl->cell_index, state);
DSI_CTRL_DEBUG(dsi_ctrl, "Set host engine state = %d\n", state);
dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_HOST_ENGINE, state);
error:
@@ -3601,6 +3734,7 @@ int dsi_ctrl_set_cmd_engine_state(struct dsi_ctrl *dsi_ctrl,
else
dsi_ctrl->hw.ops.cmd_engine_en(&dsi_ctrl->hw, false);
+ SDE_EVT32(dsi_ctrl->cell_index, state);
DSI_CTRL_DEBUG(dsi_ctrl, "Set cmd engine state = %d\n", state);
dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_CMD_ENGINE, state);
error:
@@ -3641,9 +3775,10 @@ int dsi_ctrl_set_vid_engine_state(struct dsi_ctrl *dsi_ctrl,
dsi_ctrl->hw.ops.video_engine_en(&dsi_ctrl->hw, on);
/* perform a reset when turning off video engine */
- if (!on)
+ if (!on && dsi_ctrl->version < DSI_CTRL_VERSION_1_3)
dsi_ctrl->hw.ops.soft_reset(&dsi_ctrl->hw);
+ SDE_EVT32(dsi_ctrl->cell_index, state);
DSI_CTRL_DEBUG(dsi_ctrl, "Set video engine state = %d\n", state);
dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_VID_ENGINE, state);
error:
diff --git a/msm/dsi/dsi_ctrl.h b/msm/dsi/dsi_ctrl.h
index b509cae0..3f73ae11 100644
--- a/msm/dsi/dsi_ctrl.h
+++ b/msm/dsi/dsi_ctrl.h
@@ -239,6 +239,8 @@ struct dsi_ctrl_interrupts {
* insert null packet.
* @modeupdated: Boolean to send new roi if mode is updated.
* @split_link_supported: Boolean to check if hw supports split link.
+ * @cmd_mode: Boolean to indicate if panel is running in
+ command mode.
*/
struct dsi_ctrl {
struct platform_device *pdev;
@@ -299,6 +301,7 @@ struct dsi_ctrl {
bool null_insertion_enabled;
bool modeupdated;
bool split_link_supported;
+ bool cmd_mode;
};
/**
@@ -861,4 +864,18 @@ void dsi_ctrl_set_continuous_clk(struct dsi_ctrl *dsi_ctrl, bool enable);
* @dsi_ctrl: DSI controller handle.
*/
int dsi_ctrl_wait4dynamic_refresh_done(struct dsi_ctrl *ctrl);
+
+/**
+ * dsi_ctrl_mask_overflow() - API to mask/unmask overflow errors.
+ * @dsi_ctrl: DSI controller handle.
+ * @enable: variable to control masking/unmasking.
+ */
+void dsi_ctrl_mask_overflow(struct dsi_ctrl *dsi_ctrl, bool enable);
+
+/**
+ * dsi_ctrl_clear_slave_dma_status - API to clear slave DMA status
+ * @dsi_ctrl: DSI controller handle.
+ * @flags: Modifiers
+ */
+int dsi_ctrl_clear_slave_dma_status(struct dsi_ctrl *dsi_ctrl, u32 flags);
#endif /* _DSI_CTRL_H_ */
diff --git a/msm/dsi/dsi_ctrl_hw.h b/msm/dsi/dsi_ctrl_hw.h
index 2f551922..a6c7f2fd 100644
--- a/msm/dsi/dsi_ctrl_hw.h
+++ b/msm/dsi/dsi_ctrl_hw.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
*/
#ifndef _DSI_CTRL_HW_H_
@@ -31,6 +31,7 @@
/**
* enum dsi_ctrl_version - version of the dsi host controller
* @DSI_CTRL_VERSION_UNKNOWN: Unknown controller version
+ * @DSI_CTRL_VERSION_1_3: DSI host v1.3 controller
* @DSI_CTRL_VERSION_1_4: DSI host v1.4 controller
* @DSI_CTRL_VERSION_2_0: DSI host v2.0 controller
* @DSI_CTRL_VERSION_2_2: DSI host v2.2 controller
@@ -40,6 +41,7 @@
*/
enum dsi_ctrl_version {
DSI_CTRL_VERSION_UNKNOWN,
+ DSI_CTRL_VERSION_1_3,
DSI_CTRL_VERSION_1_4,
DSI_CTRL_VERSION_2_0,
DSI_CTRL_VERSION_2_2,
@@ -836,6 +838,22 @@ struct dsi_ctrl_hw_ops {
* @sel_phy: Bool to control whether to select phy or controller
*/
void (*hs_req_sel)(struct dsi_ctrl_hw *ctrl, bool sel_phy);
+
+ /**
+ * hw.ops.map_mdp_regs() - maps MDP interface line count registers.
+ * @pdev:» Pointer to platform device.
+ * @ctrl:» Pointer to the controller host hardware.
+ */
+ int (*map_mdp_regs)(struct platform_device *pdev,
+ struct dsi_ctrl_hw *ctrl);
+
+ /**
+ * hw.ops.log_line_count() - reads the MDP interface line count
+ * registers.
+ * @ctrl:» Pointer to the controller host hardware.
+ * @cmd_mode:» Boolean to indicate command mode operation.
+ */
+ u32 (*log_line_count)(struct dsi_ctrl_hw *ctrl, bool cmd_mode);
};
/*
@@ -846,6 +864,13 @@ struct dsi_ctrl_hw_ops {
* @mmss_misc_length: Length of mmss_misc register map.
* @disp_cc_base: Base address of disp_cc register map.
* @disp_cc_length: Length of disp_cc register map.
+ * @te_rd_ptr_reg: Address of MDP_TEAR_INTF_TEAR_LINE_COUNT. This
+ * register is used for testing and validating the RD
+ * ptr value when a CMD is triggered and it succeeds.
+ * @line_count_reg: Address of MDP_TEAR_INTF_LINE_COUNT. This
+ * register is used for testing and validating the
+ * line count value when a CMD is triggered and it
+ * succeeds.
* @index: Instance ID of the controller.
* @feature_map: Features supported by the DSI controller.
* @ops: Function pointers to the operations supported by the
@@ -863,6 +888,8 @@ struct dsi_ctrl_hw {
void __iomem *mmss_misc_base;
u32 mmss_misc_length;
void __iomem *disp_cc_base;
+ void __iomem *te_rd_ptr_reg;
+ void __iomem *line_count_reg;
u32 disp_cc_length;
u32 index;
diff --git a/msm/dsi/dsi_ctrl_hw_2_2.c b/msm/dsi/dsi_ctrl_hw_2_2.c
index 0c6d3404..56821e85 100644
--- a/msm/dsi/dsi_ctrl_hw_2_2.c
+++ b/msm/dsi/dsi_ctrl_hw_2_2.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
*/
#include "dsi_ctrl_hw.h"
@@ -13,6 +13,10 @@
/* register to configure DMA scheduling */
#define DSI_DMA_SCHEDULE_CTRL 0x100
+/* MDP INTF registers to be mapped*/
+#define MDP_INTF1_TEAR_LINE_COUNT 0xAE6BAB0
+#define MDP_INTF1_LINE_COUNT 0xAE6B8B0
+
/**
* dsi_ctrl_hw_22_phy_reset_config() - to configure clamp control during ulps
* @ctrl: Pointer to the controller host hardware.
@@ -126,3 +130,61 @@ void dsi_ctrl_hw_22_config_clk_gating(struct dsi_ctrl_hw *ctrl, bool enable,
DSI_DISP_CC_W32(ctrl, DISP_CC_MISC_CMD_REG_OFF, reg);
}
+
+/**
+ * dsi_ctrl_hw_22_map_mdp_regs() - maps MDP interface line count registers.
+ * @pdev: Pointer to platform device.
+ * @ctrl: Pointer to the controller host hardware.
+ *
+ * Return: 0 on success and error on failure.
+ */
+int dsi_ctrl_hw_22_map_mdp_regs(struct platform_device *pdev,
+ struct dsi_ctrl_hw *ctrl)
+{
+ int rc = 0;
+ void __iomem *ptr = NULL, *ptr1 = NULL;
+
+ if (ctrl->index == 0) {
+ ptr = devm_ioremap(&pdev->dev, MDP_INTF1_TEAR_LINE_COUNT, 1);
+ if (IS_ERR_OR_NULL(ptr)) {
+ DSI_CTRL_HW_ERR(ctrl,
+ "MDP TE LINE COUNT address not found\n");
+ rc = PTR_ERR(ptr);
+ return rc;
+ }
+
+ ptr1 = devm_ioremap(&pdev->dev, MDP_INTF1_LINE_COUNT, 1);
+ if (IS_ERR_OR_NULL(ptr1)) {
+ DSI_CTRL_HW_ERR(ctrl,
+ "MDP TE LINE COUNT address not found\n");
+ rc = PTR_ERR(ptr1);
+ return rc;
+ }
+ }
+
+ ctrl->te_rd_ptr_reg = ptr;
+ ctrl->line_count_reg = ptr1;
+
+ return rc;
+}
+
+/**
+ * dsi_ctrl_hw_22_log_line_count() - reads the MDP interface line count
+ * registers.
+ * @ctrl: Pointer to the controller host hardware.
+ * @cmd_mode: Boolean to indicate command mode operation.
+ *
+ * Return: INTF register value.
+ */
+u32 dsi_ctrl_hw_22_log_line_count(struct dsi_ctrl_hw *ctrl, bool cmd_mode)
+{
+
+ u32 reg = 0;
+
+ if (cmd_mode && ctrl->te_rd_ptr_reg)
+ reg = readl_relaxed(ctrl->te_rd_ptr_reg);
+ else if (ctrl->line_count_reg)
+ reg = readl_relaxed(ctrl->line_count_reg);
+
+ return reg;
+}
diff --git a/msm/dsi/dsi_ctrl_hw_cmn.c b/msm/dsi/dsi_ctrl_hw_cmn.c
index 4177d286..e3e715c2 100644
--- a/msm/dsi/dsi_ctrl_hw_cmn.c
+++ b/msm/dsi/dsi_ctrl_hw_cmn.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
*/
#include <linux/delay.h>
@@ -92,6 +92,14 @@ void dsi_ctrl_hw_cmn_host_setup(struct dsi_ctrl_hw *ctrl,
dsi_setup_trigger_controls(ctrl, cfg);
dsi_split_link_setup(ctrl, cfg);
+ /* Setup T_CLK_PRE extend register */
+ reg_value = DSI_R32(ctrl, DSI_TEST_PATTERN_GEN_VIDEO_ENABLE);
+ if (cfg->t_clk_pre_extend)
+ reg_value |= BIT(0);
+ else
+ reg_value &= ~BIT(0);
+ DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_VIDEO_ENABLE, reg_value);
+
/* Setup clocking timing controls */
reg_value = ((cfg->t_clk_post & 0x3F) << 8);
reg_value |= (cfg->t_clk_pre & 0x3F);
@@ -181,6 +189,7 @@ void dsi_ctrl_hw_cmn_soft_reset(struct dsi_ctrl_hw *ctrl)
DSI_W32(ctrl, DSI_CTRL, reg_ctrl);
wmb(); /* make sure DSI controller is enabled again */
DSI_CTRL_HW_DBG(ctrl, "ctrl soft reset done\n");
+ SDE_EVT32(ctrl->index);
}
/**
@@ -769,7 +778,6 @@ void dsi_ctrl_hw_cmn_reset_cmd_fifo(struct dsi_ctrl_hw *ctrl)
void dsi_ctrl_hw_cmn_trigger_command_dma(struct dsi_ctrl_hw *ctrl)
{
DSI_W32(ctrl, DSI_CMD_MODE_DMA_SW_TRIGGER, 0x1);
- DSI_CTRL_HW_DBG(ctrl, "CMD DMA triggered\n");
}
/**
diff --git a/msm/dsi/dsi_defs.h b/msm/dsi/dsi_defs.h
index b1c3857c..30fb2206 100644
--- a/msm/dsi/dsi_defs.h
+++ b/msm/dsi/dsi_defs.h
@@ -480,6 +480,8 @@ struct dsi_split_link_config {
* @t_clk_pre: Number of byte clock cycles that the high spped clock
* shall be driven prior to data lane transitions from LP
* to HS mode.
+ * @t_clk_pre_extend: Increment t_clk_pre counter by 2 byteclk if set to
+ * true, otherwise increment by 1 byteclk.
* @ignore_rx_eot: Ignore Rx EOT packets if set to true.
* @append_tx_eot: Append EOT packets for forward transmissions if set to
* true.
@@ -506,6 +508,7 @@ struct dsi_host_common_cfg {
bool bit_swap_blue;
u32 t_clk_post;
u32 t_clk_pre;
+ bool t_clk_pre_extend;
bool ignore_rx_eot;
bool append_tx_eot;
bool ext_bridge_mode;
@@ -568,7 +571,7 @@ struct dsi_cmd_engine_cfg {
* @common_config: Host configuration common to both Video and Cmd mode.
* @video_engine: Video engine configuration if panel is in video mode.
* @cmd_engine: Cmd engine configuration if panel is in cmd mode.
- * @esc_clk_rate_khz: Esc clock frequency in Hz.
+ * @esc_clk_rate_hz: Esc clock frequency in Hz.
* @bit_clk_rate_hz: Bit clock frequency in Hz.
* @bit_clk_rate_hz_override: DSI bit clk rate override from dt/sysfs.
* @video_timing: Video timing information of a frame.
diff --git a/msm/dsi/dsi_display.c b/msm/dsi/dsi_display.c
index 54f07ed1..2cf726b9 100644
--- a/msm/dsi/dsi_display.c
+++ b/msm/dsi/dsi_display.c
@@ -795,7 +795,7 @@ int dsi_display_check_status(struct drm_connector *connector, void *display,
rc = -EINVAL;
goto release_panel_lock;
}
- SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY);
+ SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY, status_mode, te_check_override);
if (te_check_override && gpio_is_valid(dsi_display->disp_te_gpio))
status_mode = ESD_MODE_PANEL_TE;
@@ -843,7 +843,7 @@ exit:
release_panel_lock:
dsi_panel_release_panel_lock(panel);
- SDE_EVT32(SDE_EVTLOG_FUNC_EXIT);
+ SDE_EVT32(SDE_EVTLOG_FUNC_EXIT, rc);
return rc;
}
@@ -868,6 +868,9 @@ static int dsi_display_cmd_prepare(const char *cmd_buf, u32 cmd_buf_len,
return -EINVAL;
}
+ if (cmd->last_command)
+ cmd->msg.flags |= MIPI_DSI_MSG_LASTCOMMAND;
+
for (i = 0; i < cmd->msg.tx_len; i++)
payload[i] = cmd_buf[7 + i];
@@ -1049,6 +1052,7 @@ int dsi_display_set_power(struct drm_connector *connector,
return rc;
}
+ SDE_EVT32(display->panel->power_mode, power_mode, rc);
DSI_DEBUG("Power mode transition from %d to %d %s",
display->panel->power_mode, power_mode,
rc ? "failed" : "successful");
@@ -2681,6 +2685,23 @@ static int dsi_display_wake_up(struct dsi_display *display)
return 0;
}
+static void dsi_display_mask_overflow(struct dsi_display *display, u32 flags,
+ bool enable)
+{
+ struct dsi_display_ctrl *ctrl;
+ int i;
+
+ if (!(flags & DSI_CTRL_CMD_LAST_COMMAND))
+ return;
+
+ display_for_each_ctrl(i, display) {
+ ctrl = &display->ctrl[i];
+ if (!ctrl)
+ continue;
+ dsi_ctrl_mask_overflow(ctrl->ctrl, enable);
+ }
+}
+
static int dsi_display_broadcast_cmd(struct dsi_display *display,
const struct mipi_dsi_msg *msg)
{
@@ -2710,6 +2731,7 @@ static int dsi_display_broadcast_cmd(struct dsi_display *display,
* 2. Trigger commands
*/
m_ctrl = &display->ctrl[display->cmd_master_idx];
+ dsi_display_mask_overflow(display, m_flags, true);
rc = dsi_ctrl_cmd_transfer(m_ctrl->ctrl, msg, &m_flags);
if (rc) {
DSI_ERR("[%s] cmd transfer failed on master,rc=%d\n",
@@ -2744,7 +2766,21 @@ static int dsi_display_broadcast_cmd(struct dsi_display *display,
goto error;
}
+ display_for_each_ctrl(i, display) {
+ ctrl = &display->ctrl[i];
+ if (ctrl == m_ctrl)
+ continue;
+
+ rc = dsi_ctrl_clear_slave_dma_status(ctrl->ctrl, flags);
+ if (rc) {
+ DSI_ERR("[%s] clear interrupt status failed, rc=%d\n",
+ display->name, rc);
+ goto error;
+ }
+ }
+
error:
+ dsi_display_mask_overflow(display, m_flags, false);
return rc;
}
@@ -3006,11 +3042,15 @@ static int dsi_display_clocks_init(struct dsi_display *display)
const char *mux_byte = "mux_byte", *mux_pixel = "mux_pixel";
const char *cphy_byte = "cphy_byte", *cphy_pixel = "cphy_pixel";
const char *shadow_byte = "shadow_byte", *shadow_pixel = "shadow_pixel";
+ const char *shadow_cphybyte = "shadow_cphybyte",
+ *shadow_cphypixel = "shadow_cphypixel";
struct clk *dsi_clk;
struct dsi_clk_link_set *src = &display->clock_info.src_clks;
struct dsi_clk_link_set *mux = &display->clock_info.mux_clks;
struct dsi_clk_link_set *cphy = &display->clock_info.cphy_clks;
struct dsi_clk_link_set *shadow = &display->clock_info.shadow_clks;
+ struct dsi_clk_link_set *shadow_cphy =
+ &display->clock_info.shadow_cphy_clks;
struct dsi_dyn_clk_caps *dyn_clk_caps = &(display->panel->dyn_clk_caps);
char *dsi_clock_name;
@@ -3069,6 +3109,12 @@ static int dsi_display_clocks_init(struct dsi_display *display)
if (dsi_display_check_prefix(shadow_pixel,
clk_name))
shadow->pixel_clk = NULL;
+ if (dsi_display_check_prefix(shadow_cphybyte,
+ clk_name))
+ shadow_cphy->byte_clk = NULL;
+ if (dsi_display_check_prefix(shadow_cphypixel,
+ clk_name))
+ shadow_cphy->pixel_clk = NULL;
dyn_clk_caps->dyn_clk_support = false;
}
@@ -3113,6 +3159,16 @@ static int dsi_display_clocks_init(struct dsi_display *display)
shadow->pixel_clk = dsi_clk;
continue;
}
+
+ if (dsi_display_check_prefix(shadow_cphybyte, clk_name)) {
+ shadow_cphy->byte_clk = dsi_clk;
+ continue;
+ }
+
+ if (dsi_display_check_prefix(shadow_cphypixel, clk_name)) {
+ shadow_cphy->pixel_clk = dsi_clk;
+ continue;
+ }
}
return 0;
@@ -3967,12 +4023,12 @@ static int dsi_display_update_dsi_bitrate(struct dsi_display *display,
byte_intf_clk_div = host_cfg->byte_intf_clk_div;
do_div(byte_intf_clk_rate, byte_intf_clk_div);
} else {
- do_div(bit_rate, bits_per_symbol);
- bit_rate *= num_of_symbols;
- bit_rate_per_lane = bit_rate;
- do_div(bit_rate_per_lane, num_of_lanes);
- byte_clk_rate = bit_rate_per_lane;
- do_div(byte_clk_rate, 7);
+ bit_rate_per_lane = bit_clk_rate;
+ pclk_rate *= bits_per_symbol;
+ do_div(pclk_rate, num_of_symbols);
+ byte_clk_rate = bit_clk_rate;
+ do_div(byte_clk_rate, num_of_symbols);
+
/* For CPHY, byte_intf_clk is same as byte_clk */
byte_intf_clk_rate = byte_clk_rate;
}
@@ -3982,6 +4038,7 @@ static int dsi_display_update_dsi_bitrate(struct dsi_display *display,
DSI_DEBUG("byte_clk_rate = %llu, byte_intf_clk_rate = %llu\n",
byte_clk_rate, byte_intf_clk_rate);
DSI_DEBUG("pclk_rate = %llu\n", pclk_rate);
+ SDE_EVT32(i, bit_rate, byte_clk_rate, pclk_rate);
ctrl->clk_freq.byte_clk_rate = byte_clk_rate;
ctrl->clk_freq.byte_intf_clk_rate = byte_intf_clk_rate;
@@ -4015,18 +4072,19 @@ static void _dsi_display_calc_pipe_delay(struct dsi_display *display,
struct dsi_display_ctrl *m_ctrl;
struct dsi_ctrl *dsi_ctrl;
struct dsi_phy_cfg *cfg;
+ int phy_ver;
m_ctrl = &display->ctrl[display->clk_master_idx];
dsi_ctrl = m_ctrl->ctrl;
cfg = &(m_ctrl->phy->cfg);
- esc_clk_rate_hz = dsi_ctrl->clk_freq.esc_clk_rate * 1000;
- pclk_to_esc_ratio = ((dsi_ctrl->clk_freq.pix_clk_rate * 1000) /
+ esc_clk_rate_hz = dsi_ctrl->clk_freq.esc_clk_rate;
+ pclk_to_esc_ratio = (dsi_ctrl->clk_freq.pix_clk_rate /
esc_clk_rate_hz);
- byte_to_esc_ratio = ((dsi_ctrl->clk_freq.byte_clk_rate * 1000) /
+ byte_to_esc_ratio = (dsi_ctrl->clk_freq.byte_clk_rate /
esc_clk_rate_hz);
- hr_bit_to_esc_ratio = ((dsi_ctrl->clk_freq.byte_clk_rate * 4 * 1000) /
+ hr_bit_to_esc_ratio = ((dsi_ctrl->clk_freq.byte_clk_rate * 4) /
esc_clk_rate_hz);
hsync_period = DSI_H_TOTAL_DSC(&mode->timing);
@@ -4052,8 +4110,28 @@ static void _dsi_display_calc_pipe_delay(struct dsi_display *display,
((cfg->timing.lane_v3[4] >> 1) + 1)) /
hr_bit_to_esc_ratio);
- /* 130 us pll delay recommended by h/w doc */
- delay->pll_delay = ((130 * esc_clk_rate_hz) / 1000000) * 2;
+ /*
+ * 100us pll delay recommended for phy ver 2.0 and 3.0
+ * 25us pll delay recommended for phy ver 4.0
+ */
+ phy_ver = dsi_phy_get_version(m_ctrl->phy);
+ if (phy_ver <= DSI_PHY_VERSION_3_0)
+ delay->pll_delay = 100;
+ else
+ delay->pll_delay = 25;
+
+ delay->pll_delay = ((delay->pll_delay * esc_clk_rate_hz) / 1000000);
+}
+
+/*
+ * dsi_display_is_type_cphy - check if panel type is cphy
+ * @display: Pointer to private display structure
+ * Returns: True if panel type is cphy
+ */
+static inline bool dsi_display_is_type_cphy(struct dsi_display *display)
+{
+ return (display->panel->host_config.phy_type ==
+ DSI_PHY_TYPE_CPHY) ? true : false;
}
static int _dsi_display_dyn_update_clks(struct dsi_display *display,
@@ -4061,15 +4139,24 @@ static int _dsi_display_dyn_update_clks(struct dsi_display *display,
{
int rc = 0, i;
struct dsi_display_ctrl *m_ctrl, *ctrl;
+ struct dsi_clk_link_set *parent_clk, *enable_clk;
m_ctrl = &display->ctrl[display->clk_master_idx];
- dsi_clk_prepare_enable(&display->clock_info.src_clks);
+ if (dsi_display_is_type_cphy(display)) {
+ enable_clk = &display->clock_info.cphy_clks;
+ parent_clk = &display->clock_info.shadow_cphy_clks;
+ } else {
+ enable_clk = &display->clock_info.src_clks;
+ parent_clk = &display->clock_info.shadow_clks;
+ }
+
+ dsi_clk_prepare_enable(enable_clk);
- rc = dsi_clk_update_parent(&display->clock_info.shadow_clks,
- &display->clock_info.mux_clks);
+ rc = dsi_clk_update_parent(parent_clk,
+ &display->clock_info.mux_clks);
if (rc) {
- DSI_ERR("failed update mux parent to shadow\n");
+ DSI_ERR("failed to update mux parent\n");
goto exit;
}
@@ -4118,12 +4205,13 @@ static int _dsi_display_dyn_update_clks(struct dsi_display *display,
dsi_phy_dynamic_refresh_clear(ctrl->phy);
}
- rc = dsi_clk_update_parent(&display->clock_info.src_clks,
- &display->clock_info.mux_clks);
+ rc = dsi_clk_update_parent(enable_clk,
+ &display->clock_info.mux_clks);
+
if (rc)
DSI_ERR("could not switch back to src clks %d\n", rc);
- dsi_clk_disable_unprepare(&display->clock_info.src_clks);
+ dsi_clk_disable_unprepare(enable_clk);
return rc;
@@ -4441,6 +4529,9 @@ static int dsi_display_get_dfps_timing(struct dsi_display *display,
DSI_V_TOTAL(timing),
timing->v_front_porch,
&adj_mode->timing.v_front_porch);
+ SDE_EVT32(SDE_EVTLOG_FUNC_CASE1, DSI_DFPS_IMMEDIATE_VFP,
+ curr_refresh_rate, timing->refresh_rate,
+ timing->v_front_porch, adj_mode->timing.v_front_porch);
break;
case DSI_DFPS_IMMEDIATE_HFP:
@@ -4451,6 +4542,9 @@ static int dsi_display_get_dfps_timing(struct dsi_display *display,
DSI_H_TOTAL_DSC(timing),
timing->h_front_porch,
&adj_mode->timing.h_front_porch);
+ SDE_EVT32(SDE_EVTLOG_FUNC_CASE2, DSI_DFPS_IMMEDIATE_HFP,
+ curr_refresh_rate, timing->refresh_rate,
+ timing->h_front_porch, adj_mode->timing.h_front_porch);
if (!rc)
adj_mode->timing.h_front_porch *= display->ctrl_count;
break;
@@ -4503,7 +4597,7 @@ static int dsi_display_set_mode_sub(struct dsi_display *display,
return -EINVAL;
}
- SDE_EVT32(mode->dsi_mode_flags);
+ SDE_EVT32(mode->dsi_mode_flags, mode->panel_mode);
if (mode->dsi_mode_flags & DSI_MODE_FLAG_POMS) {
display->config.panel_mode = mode->panel_mode;
display->panel->panel_mode = mode->panel_mode;
@@ -4629,7 +4723,7 @@ static int _dsi_display_dev_init(struct dsi_display *display)
return -EINVAL;
}
- if (!display->panel_node)
+ if (!display->panel_node && !display->fw)
return 0;
mutex_lock(&display->display_lock);
@@ -4714,6 +4808,7 @@ int dsi_display_cont_splash_config(void *dsi_display)
/* Update splash status for clock manager */
dsi_display_clk_mngr_update_splash_status(display->clk_mngr,
display->is_cont_splash_enabled);
+ SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY, display->is_cont_splash_enabled);
/* Set up ctrl isr before enabling core clk */
dsi_display_ctrl_isr_configure(display, true);
@@ -4782,6 +4877,7 @@ int dsi_display_splash_res_cleanup(struct dsi_display *display)
dsi_display_clk_mngr_update_splash_status(display->clk_mngr,
display->is_cont_splash_enabled);
+ SDE_EVT32(SDE_EVTLOG_FUNC_EXIT, display->is_cont_splash_enabled);
return rc;
}
@@ -4875,7 +4971,7 @@ static int dsi_display_bind(struct device *dev,
drm, display);
return -EINVAL;
}
- if (!display->panel_node)
+ if (!display->panel_node && !display->fw)
return 0;
if (!display->fw)
@@ -5170,11 +5266,19 @@ static void dsi_display_firmware_display(const struct firmware *fw,
struct dsi_display *display = context;
if (fw) {
- DSI_DEBUG("reading data from firmware, size=%zd\n",
+ DSI_INFO("reading data from firmware, size=%zd\n",
fw->size);
display->fw = fw;
- display->name = "dsi_firmware_display";
+
+ if (!strcmp(display->display_type, "primary"))
+ display->name = "dsi_firmware_display";
+
+ else if (!strcmp(display->display_type, "secondary"))
+ display->name = "dsi_firmware_display_secondary";
+
+ } else {
+ DSI_INFO("no firmware available, fallback to device node\n");
}
if (dsi_display_init(display))
@@ -5238,12 +5342,6 @@ int dsi_display_dev_probe(struct platform_device *pdev)
"qcom,dsi-default-panel", 0);
if (!panel_node)
DSI_WARN("default panel not found\n");
-
- if (IS_ENABLED(CONFIG_DSI_PARSER))
- firm_req = !request_firmware_nowait(
- THIS_MODULE, 1, "dsi_prop",
- &pdev->dev, GFP_KERNEL, display,
- dsi_display_firmware_display);
}
boot_disp->node = pdev->dev.of_node;
@@ -5258,6 +5356,20 @@ int dsi_display_dev_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, display);
/* initialize display in firmware callback */
+ if (!boot_disp->boot_disp_en && IS_ENABLED(CONFIG_DSI_PARSER)) {
+ if (!strcmp(display->display_type, "primary"))
+ firm_req = !request_firmware_nowait(
+ THIS_MODULE, 1, "dsi_prop",
+ &pdev->dev, GFP_KERNEL, display,
+ dsi_display_firmware_display);
+
+ else if (!strcmp(display->display_type, "secondary"))
+ firm_req = !request_firmware_nowait(
+ THIS_MODULE, 1, "dsi_prop_sec",
+ &pdev->dev, GFP_KERNEL, display,
+ dsi_display_firmware_display);
+ }
+
if (!firm_req) {
rc = dsi_display_init(display);
if (rc)
@@ -5314,7 +5426,8 @@ int dsi_display_get_num_of_displays(void)
for (i = 0; i < MAX_DSI_ACTIVE_DISPLAY; i++) {
struct dsi_display *display = boot_displays[i].disp;
- if (display && display->panel_node)
+ if ((display && display->panel_node) ||
+ (display && display->fw))
count++;
}
@@ -5333,7 +5446,8 @@ int dsi_display_get_active_displays(void **display_array, u32 max_display_count)
for (index = 0; index < MAX_DSI_ACTIVE_DISPLAY; index++) {
struct dsi_display *display = boot_displays[index].disp;
- if (display && display->panel_node)
+ if ((display && display->panel_node) ||
+ (display && display->fw))
display_array[count++] = display;
}
@@ -5884,7 +5998,10 @@ int dsi_display_get_info(struct drm_connector *connector,
info->max_width = 1920;
info->max_height = 1080;
info->qsync_min_fps =
- display->panel->qsync_min_fps;
+ display->panel->qsync_caps.qsync_min_fps;
+ info->has_qsync_min_fps_list =
+ (display->panel->qsync_caps.qsync_min_fps_list_len > 0) ?
+ true : false;
switch (display->panel->panel_mode) {
case DSI_OP_VIDEO_MODE:
@@ -5937,12 +6054,15 @@ int dsi_display_get_mode_count(struct dsi_display *display,
return 0;
}
-void dsi_display_adjust_mode_timing(
- struct dsi_dyn_clk_caps *dyn_clk_caps,
+void dsi_display_adjust_mode_timing(struct dsi_display *display,
struct dsi_display_mode *dsi_mode,
int lanes, int bpp)
{
u64 new_htotal, new_vtotal, htotal, vtotal, old_htotal, div;
+ struct dsi_dyn_clk_caps *dyn_clk_caps;
+ u32 bits_per_symbol = 16, num_of_symbols = 7; /* For Cphy */
+
+ dyn_clk_caps = &(display->panel->dyn_clk_caps);
/* Constant FPS is not supported on command mode */
if (dsi_mode->panel_mode == DSI_OP_CMD_MODE)
@@ -5961,21 +6081,31 @@ void dsi_display_adjust_mode_timing(
case DSI_DYN_CLK_TYPE_CONST_FPS_ADJUST_HFP:
vtotal = DSI_V_TOTAL(&dsi_mode->timing);
old_htotal = DSI_H_TOTAL_DSC(&dsi_mode->timing);
+ do_div(old_htotal, display->ctrl_count);
new_htotal = dsi_mode->timing.clk_rate_hz * lanes;
div = bpp * vtotal * dsi_mode->timing.refresh_rate;
+ if (dsi_display_is_type_cphy(display)) {
+ new_htotal = new_htotal * bits_per_symbol;
+ div = div * num_of_symbols;
+ }
do_div(new_htotal, div);
if (old_htotal > new_htotal)
dsi_mode->timing.h_front_porch -=
- (old_htotal - new_htotal);
+ ((old_htotal - new_htotal) * display->ctrl_count);
else
dsi_mode->timing.h_front_porch +=
- (new_htotal - old_htotal);
+ ((new_htotal - old_htotal) * display->ctrl_count);
break;
case DSI_DYN_CLK_TYPE_CONST_FPS_ADJUST_VFP:
htotal = DSI_H_TOTAL_DSC(&dsi_mode->timing);
+ do_div(htotal, display->ctrl_count);
new_vtotal = dsi_mode->timing.clk_rate_hz * lanes;
div = bpp * htotal * dsi_mode->timing.refresh_rate;
+ if (dsi_display_is_type_cphy(display)) {
+ new_vtotal = new_vtotal * bits_per_symbol;
+ div = div * num_of_symbols;
+ }
do_div(new_vtotal, div);
dsi_mode->timing.v_front_porch = new_vtotal -
dsi_mode->timing.v_back_porch -
@@ -6028,7 +6158,7 @@ static void _dsi_display_populate_bit_clks(struct dsi_display *display,
*/
src->timing.clk_rate_hz = dyn_clk_caps->bit_clk_list[0];
- dsi_display_adjust_mode_timing(dyn_clk_caps, src, lanes, bpp);
+ dsi_display_adjust_mode_timing(display, src, lanes, bpp);
src->pixel_clk_khz =
div_u64(src->timing.clk_rate_hz * lanes, bpp);
@@ -6050,7 +6180,7 @@ static void _dsi_display_populate_bit_clks(struct dsi_display *display,
memcpy(dst, src, sizeof(struct dsi_display_mode));
dst->timing.clk_rate_hz = dyn_clk_caps->bit_clk_list[i];
- dsi_display_adjust_mode_timing(dyn_clk_caps, dst, lanes,
+ dsi_display_adjust_mode_timing(display, dst, lanes,
bpp);
dst->pixel_clk_khz =
@@ -6333,6 +6463,25 @@ void dsi_display_set_idle_hint(void *dsi_display, bool is_idle)
dsi_panel_wakeup(display->panel);
}
+int dsi_display_get_qsync_min_fps(void *display_dsi, u32 mode_fps)
+{
+ struct dsi_display *display = (struct dsi_display *)display_dsi;
+ struct dsi_panel *panel;
+ u32 i;
+
+ if (display == NULL || display->panel == NULL)
+ return -EINVAL;
+
+ panel = display->panel;
+ for (i = 0; i < panel->dfps_caps.dfps_list_len; i++) {
+ if (panel->dfps_caps.dfps_list[i] == mode_fps)
+ return panel->qsync_caps.qsync_min_fps_list[i];
+ }
+ SDE_EVT32(mode_fps);
+ DSI_DEBUG("Invalid mode_fps %d\n", mode_fps);
+ return -EINVAL;
+}
+
int dsi_display_find_mode(struct dsi_display *display,
const struct dsi_display_mode *cmp,
struct dsi_display_mode **out_mode)
@@ -6440,10 +6589,13 @@ int dsi_display_validate_mode_change(struct dsi_display *display,
dyn_clk_caps->maintain_const_fps) {
DSI_DEBUG("Mode switch is seamless variable refresh\n");
adj_mode->dsi_mode_flags |= DSI_MODE_FLAG_VRR;
- SDE_EVT32(cur_mode->timing.refresh_rate,
+ SDE_EVT32(SDE_EVTLOG_FUNC_CASE1,
+ cur_mode->timing.refresh_rate,
adj_mode->timing.refresh_rate,
cur_mode->timing.h_front_porch,
- adj_mode->timing.h_front_porch);
+ adj_mode->timing.h_front_porch,
+ cur_mode->timing.v_front_porch,
+ adj_mode->timing.v_front_porch);
} else {
DSI_DEBUG("Switching to %d FPS with mode switch\n",
adj_mode->timing.refresh_rate);
@@ -6466,8 +6618,9 @@ int dsi_display_validate_mode_change(struct dsi_display *display,
adj_mode->dsi_mode_flags |=
DSI_MODE_FLAG_DYN_CLK;
- SDE_EVT32(cur_mode->pixel_clk_khz,
- adj_mode->pixel_clk_khz);
+ SDE_EVT32(SDE_EVTLOG_FUNC_CASE2,
+ cur_mode->pixel_clk_khz,
+ adj_mode->pixel_clk_khz);
}
}
}
@@ -7173,7 +7326,7 @@ static int dsi_display_qsync(struct dsi_display *display, bool enable)
int i;
int rc = 0;
- if (!display->panel->qsync_min_fps) {
+ if (!display->panel->qsync_caps.qsync_min_fps) {
DSI_ERR("%s:ERROR: qsync set, but no fps\n", __func__);
return 0;
}
@@ -7201,7 +7354,7 @@ static int dsi_display_qsync(struct dsi_display *display, bool enable)
}
exit:
- SDE_EVT32(enable, display->panel->qsync_min_fps, rc);
+ SDE_EVT32(enable, display->panel->qsync_caps.qsync_min_fps, rc);
mutex_unlock(&display->display_lock);
return rc;
}
diff --git a/msm/dsi/dsi_display.h b/msm/dsi/dsi_display.h
index e2c73c62..f7d64489 100644
--- a/msm/dsi/dsi_display.h
+++ b/msm/dsi/dsi_display.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
*/
#ifndef _DSI_DISPLAY_H_
@@ -106,13 +106,15 @@ struct dsi_display_boot_param {
* struct dsi_display_clk_info - dsi display clock source information
* @src_clks: Source clocks for DSI display.
* @mux_clks: Mux clocks used for DFPS.
- * @shadow_clks: Used for DFPS.
+ * @shadow_clks: Used for D-phy clock switch.
+ * @shadow_cphy_clks: Used for C-phy clock switch.
*/
struct dsi_display_clk_info {
struct dsi_clk_link_set src_clks;
struct dsi_clk_link_set mux_clks;
struct dsi_clk_link_set cphy_clks;
struct dsi_clk_link_set shadow_clks;
+ struct dsi_clk_link_set shadow_cphy_clks;
};
/**
@@ -436,6 +438,16 @@ void dsi_display_put_mode(struct dsi_display *display,
int dsi_display_get_default_lms(void *dsi_display, u32 *num_lm);
/**
+ * dsi_display_get_qsync_min_fps() - get qsync min fps for given fps
+ * @display: Handle to display.
+ * @mode_fps: Fps value of current mode
+ *
+ * Return: error code.
+ */
+int dsi_display_get_qsync_min_fps(void *dsi_display, u32 mode_fps);
+
+
+/**
* dsi_display_find_mode() - retrieve cached DSI mode given relevant params
* @display: Handle to display.
* @cmp: Mode to use as comparison to find original
diff --git a/msm/dsi/dsi_drm.c b/msm/dsi/dsi_drm.c
index 584b37bc..9ea51375 100644
--- a/msm/dsi/dsi_drm.c
+++ b/msm/dsi/dsi_drm.c
@@ -11,6 +11,7 @@
#include "sde_connector.h"
#include "dsi_drm.h"
#include "sde_trace.h"
+#include "sde_dbg.h"
#define to_dsi_bridge(x) container_of((x), struct dsi_bridge, base)
#define to_dsi_state(x) container_of((x), struct dsi_connector_state, base)
@@ -415,16 +416,32 @@ static bool dsi_bridge_mode_fixup(struct drm_bridge *bridge,
if ((dsi_mode.panel_mode != cur_dsi_mode.panel_mode) &&
(!(dsi_mode.dsi_mode_flags & DSI_MODE_FLAG_VRR)) &&
(crtc_state->enable ==
- crtc_state->crtc->state->enable))
+ crtc_state->crtc->state->enable)) {
dsi_mode.dsi_mode_flags |= DSI_MODE_FLAG_POMS;
+
+ SDE_EVT32(SDE_EVTLOG_FUNC_CASE1,
+ dsi_mode.timing.h_active,
+ dsi_mode.timing.v_active,
+ dsi_mode.timing.refresh_rate,
+ dsi_mode.pixel_clk_khz,
+ dsi_mode.panel_mode);
+ }
/* No DMS/VRR when drm pipeline is changing */
if (!drm_mode_equal(cur_mode, adjusted_mode) &&
(!(dsi_mode.dsi_mode_flags & DSI_MODE_FLAG_VRR)) &&
(!(dsi_mode.dsi_mode_flags & DSI_MODE_FLAG_POMS)) &&
(!(dsi_mode.dsi_mode_flags & DSI_MODE_FLAG_DYN_CLK)) &&
(!crtc_state->active_changed ||
- display->is_cont_splash_enabled))
+ display->is_cont_splash_enabled)) {
dsi_mode.dsi_mode_flags |= DSI_MODE_FLAG_DMS;
+
+ SDE_EVT32(SDE_EVTLOG_FUNC_CASE2,
+ dsi_mode.timing.h_active,
+ dsi_mode.timing.v_active,
+ dsi_mode.timing.refresh_rate,
+ dsi_mode.pixel_clk_khz,
+ dsi_mode.panel_mode);
+ }
}
/* Reject seamless transition when active changed */
@@ -577,14 +594,16 @@ int dsi_conn_set_info_blob(struct drm_connector *connector,
case DSI_OP_VIDEO_MODE:
sde_kms_info_add_keystr(info, "panel mode", "video");
sde_kms_info_add_keystr(info, "qsync support",
- panel->qsync_min_fps ? "true" : "false");
+ panel->qsync_caps.qsync_min_fps ?
+ "true" : "false");
break;
case DSI_OP_CMD_MODE:
sde_kms_info_add_keystr(info, "panel mode", "command");
sde_kms_info_add_keyint(info, "mdp_transfer_time_us",
mode_info->mdp_transfer_time_us);
sde_kms_info_add_keystr(info, "qsync support",
- panel->qsync_min_fps ? "true" : "false");
+ panel->qsync_caps.qsync_min_fps ?
+ "true" : "false");
break;
default:
DSI_DEBUG("invalid panel type:%d\n", panel->panel_mode);
diff --git a/msm/dsi/dsi_panel.c b/msm/dsi/dsi_panel.c
index 603f4bf9..ef6605b0 100644
--- a/msm/dsi/dsi_panel.c
+++ b/msm/dsi/dsi_panel.c
@@ -13,6 +13,7 @@
#include "dsi_panel.h"
#include "dsi_ctrl_hw.h"
#include "dsi_parser.h"
+#include "sde_dbg.h"
/**
* topology is currently defined by a set of following 3 values:
@@ -352,6 +353,7 @@ int dsi_panel_trigger_esd_attack(struct dsi_panel *panel)
if (gpio_is_valid(r_config->reset_gpio)) {
gpio_set_value(r_config->reset_gpio, 0);
+ SDE_EVT32(SDE_EVTLOG_FUNC_CASE1);
DSI_INFO("GPIO pulled low to simulate ESD\n");
return 0;
}
@@ -491,7 +493,8 @@ static int dsi_panel_power_off(struct dsi_panel *panel)
if (gpio_is_valid(panel->reset_config.disp_en_gpio))
gpio_set_value(panel->reset_config.disp_en_gpio, 0);
- if (gpio_is_valid(panel->reset_config.reset_gpio))
+ if (gpio_is_valid(panel->reset_config.reset_gpio) &&
+ !panel->reset_gpio_always_on)
gpio_set_value(panel->reset_config.reset_gpio, 0);
if (gpio_is_valid(panel->reset_config.lcd_mode_sel_gpio))
@@ -945,6 +948,9 @@ static int dsi_panel_parse_misc_host_config(struct dsi_host_common_cfg *host,
DSI_DEBUG("[%s] t_clk_pre = %d\n", name, val);
}
+ host->t_clk_pre_extend = utils->read_bool(utils->data,
+ "qcom,mdss-dsi-t-clk-pre-extend");
+
host->ignore_rx_eot = utils->read_bool(utils->data,
"qcom,mdss-dsi-rx-eot-ignore");
@@ -1429,8 +1435,15 @@ static int dsi_panel_parse_qsync_caps(struct dsi_panel *panel,
struct device_node *of_node)
{
int rc = 0;
- u32 val = 0;
+ u32 val = 0, i;
+ struct dsi_qsync_capabilities *qsync_caps = &panel->qsync_caps;
+ struct dsi_parser_utils *utils = &panel->utils;
+ const char *name = panel->name;
+ /**
+ * "mdss-dsi-qsync-min-refresh-rate" is defined in cmd mode and
+ * video mode when there is only one qsync min fps present.
+ */
rc = of_property_read_u32(of_node,
"qcom,mdss-dsi-qsync-min-refresh-rate",
&val);
@@ -1438,8 +1451,75 @@ static int dsi_panel_parse_qsync_caps(struct dsi_panel *panel,
DSI_DEBUG("[%s] qsync min fps not defined rc:%d\n",
panel->name, rc);
- panel->qsync_min_fps = val;
+ qsync_caps->qsync_min_fps = val;
+
+ /**
+ * "dsi-supported-qsync-min-fps-list" may be defined in video
+ * mode, only in dfps case when "qcom,dsi-supported-dfps-list"
+ * is defined.
+ */
+ qsync_caps->qsync_min_fps_list_len = utils->count_u32_elems(utils->data,
+ "qcom,dsi-supported-qsync-min-fps-list");
+ if (qsync_caps->qsync_min_fps_list_len < 1)
+ goto qsync_support;
+
+ /**
+ * qcom,dsi-supported-qsync-min-fps-list cannot be defined
+ * along with qcom,mdss-dsi-qsync-min-refresh-rate.
+ */
+ if (qsync_caps->qsync_min_fps_list_len >= 1 &&
+ qsync_caps->qsync_min_fps) {
+ DSI_ERR("[%s] Both qsync nodes are defined\n",
+ name);
+ rc = -EINVAL;
+ goto error;
+ }
+ if (panel->dfps_caps.dfps_list_len !=
+ qsync_caps->qsync_min_fps_list_len) {
+ DSI_ERR("[%s] Qsync min fps list mismatch with dfps\n", name);
+ rc = -EINVAL;
+ goto error;
+ }
+
+ qsync_caps->qsync_min_fps_list =
+ kcalloc(qsync_caps->qsync_min_fps_list_len, sizeof(u32),
+ GFP_KERNEL);
+ if (!qsync_caps->qsync_min_fps_list) {
+ rc = -ENOMEM;
+ goto error;
+ }
+
+ rc = utils->read_u32_array(utils->data,
+ "qcom,dsi-supported-qsync-min-fps-list",
+ qsync_caps->qsync_min_fps_list,
+ qsync_caps->qsync_min_fps_list_len);
+ if (rc) {
+ DSI_ERR("[%s] Qsync min fps list parse failed\n", name);
+ rc = -EINVAL;
+ goto error;
+ }
+
+ qsync_caps->qsync_min_fps = qsync_caps->qsync_min_fps_list[0];
+
+ for (i = 1; i < qsync_caps->qsync_min_fps_list_len; i++) {
+ if (qsync_caps->qsync_min_fps_list[i] <
+ qsync_caps->qsync_min_fps)
+ qsync_caps->qsync_min_fps =
+ qsync_caps->qsync_min_fps_list[i];
+ }
+
+qsync_support:
+ /* allow qsync support only if DFPS is with VFP approach */
+ if ((panel->dfps_caps.dfps_support) &&
+ !(panel->dfps_caps.type == DSI_DFPS_IMMEDIATE_VFP))
+ panel->qsync_caps.qsync_min_fps = 0;
+
+error:
+ if (rc < 0) {
+ qsync_caps->qsync_min_fps = 0;
+ qsync_caps->qsync_min_fps_list_len = 0;
+ }
return rc;
}
@@ -2217,6 +2297,9 @@ static int dsi_panel_parse_misc_features(struct dsi_panel *panel)
panel->lp11_init = utils->read_bool(utils->data,
"qcom,mdss-dsi-lp11-init");
+ panel->reset_gpio_always_on = utils->read_bool(utils->data,
+ "qcom,platform-reset-gpio-always-on");
+
if (!utils->read_u32(utils->data,
"qcom,mdss-dsi-init-delay-us",
&val)) {
@@ -3426,11 +3509,6 @@ struct dsi_panel *dsi_panel_get(struct device *parent,
if (rc)
DSI_DEBUG("failed to parse qsync features, rc=%d\n", rc);
- /* allow qsync support only if DFPS is with VFP approach */
- if ((panel->dfps_caps.dfps_support) &&
- !(panel->dfps_caps.type == DSI_DFPS_IMMEDIATE_VFP))
- panel->qsync_min_fps = 0;
-
rc = dsi_panel_parse_dyn_clk_caps(panel);
if (rc)
DSI_ERR("failed to parse dynamic clk config, rc=%d\n", rc);
@@ -4922,6 +5000,7 @@ int dsi_panel_send_roi_dcs(struct dsi_panel *panel, int ctrl_idx,
}
DSI_DEBUG("[%s] send roi x %d y %d w %d h %d\n", panel->name,
roi->x, roi->y, roi->w, roi->h);
+ SDE_EVT32(roi->x, roi->y, roi->w, roi->h);
mutex_lock(&panel->panel_lock);
diff --git a/msm/dsi/dsi_panel.h b/msm/dsi/dsi_panel.h
index 524a5654..51694f38 100644
--- a/msm/dsi/dsi_panel.h
+++ b/msm/dsi/dsi_panel.h
@@ -97,6 +97,13 @@ struct dsi_dfps_capabilities {
bool dfps_support;
};
+struct dsi_qsync_capabilities {
+ /* qsync disabled if qsync_min_fps = 0 */
+ u32 qsync_min_fps;
+ u32 *qsync_min_fps_list;
+ int qsync_min_fps_list_len;
+};
+
struct dsi_dyn_clk_caps {
bool dyn_clk_support;
u32 *bit_clk_list;
@@ -391,11 +398,12 @@ struct dsi_panel {
bool ulps_feature_enabled;
bool ulps_suspend_enabled;
bool allow_phy_power_off;
+ bool reset_gpio_always_on;
atomic_t esd_recovery_pending;
bool panel_initialized;
bool te_using_watchdog_timer;
- u32 qsync_min_fps;
+ struct dsi_qsync_capabilities qsync_caps;
char dsc_pps_cmd[DSI_CMD_PPS_SIZE];
enum dsi_dms_mode dms_mode;
diff --git a/msm/dsi/dsi_phy_hw_v4_0.c b/msm/dsi/dsi_phy_hw_v4_0.c
index d780f9fe..220e9de5 100644
--- a/msm/dsi/dsi_phy_hw_v4_0.c
+++ b/msm/dsi/dsi_phy_hw_v4_0.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
*/
#include <linux/math64.h>
@@ -652,6 +652,8 @@ void dsi_phy_hw_v4_0_dyn_refresh_config(struct dsi_phy_hw *phy,
struct dsi_phy_cfg *cfg, bool is_master)
{
u32 reg;
+ bool is_cphy = (cfg->phy_type == DSI_PHY_TYPE_CPHY) ?
+ true : false;
if (is_master) {
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL19,
@@ -677,7 +679,7 @@ void dsi_phy_hw_v4_0_dyn_refresh_config(struct dsi_phy_hw *phy,
cfg->timing.lane_v4[12], cfg->timing.lane_v4[13]);
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL26,
DSIPHY_CMN_CTRL_0, DSIPHY_CMN_LANE_CTRL0,
- 0x7f, 0x1f);
+ 0x7f, is_cphy ? 0x17 : 0x1f);
} else {
reg = DSI_R32(phy, DSIPHY_CMN_CLK_CFG1);
@@ -712,7 +714,7 @@ void dsi_phy_hw_v4_0_dyn_refresh_config(struct dsi_phy_hw *phy,
cfg->timing.lane_v4[13], 0x7f);
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL9,
DSIPHY_CMN_LANE_CTRL0, DSIPHY_CMN_CTRL_2,
- 0x1f, 0x40);
+ is_cphy ? 0x17 : 0x1f, 0x40);
/*
* fill with dummy register writes since controller will blindly
* send these values to DSI PHY.
@@ -721,7 +723,7 @@ void dsi_phy_hw_v4_0_dyn_refresh_config(struct dsi_phy_hw *phy,
while (reg <= DSI_DYN_REFRESH_PLL_CTRL29) {
DSI_DYN_REF_REG_W(phy->dyn_pll_base, reg,
DSIPHY_CMN_LANE_CTRL0, DSIPHY_CMN_CTRL_0,
- 0x1f, 0x7f);
+ is_cphy ? 0x17 : 0x1f, 0x7f);
reg += 0x4;
}
diff --git a/msm/dsi/dsi_phy_timing_calc.c b/msm/dsi/dsi_phy_timing_calc.c
index 948e2038..655b8178 100644
--- a/msm/dsi/dsi_phy_timing_calc.c
+++ b/msm/dsi/dsi_phy_timing_calc.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
*/
#include "dsi_phy_timing_calc.h"
@@ -37,6 +37,12 @@ static int calc_clk_prepare(struct dsi_phy_hw *phy,
s64 intermediate;
s64 clk_prep_actual;
+ t->rec_min = DIV_ROUND_UP((t->mipi_min * clk_params->bitclk_mbps),
+ (8 * clk_params->tlpx_numer_ns));
+ t->rec_max = rounddown(
+ mult_frac((t->mipi_max * clk_params->bitclk_mbps),
+ 1, (8 * clk_params->tlpx_numer_ns)), 1);
+
dividend = ((t->rec_max - t->rec_min) *
clk_params->clk_prep_buf * multiplier);
temp = roundup(div_s64(dividend, 100), multiplier);
@@ -623,6 +629,218 @@ error:
}
/**
+ * calc_cphy_clk_prepare - calculates cphy_clk_prepare parameter for cphy.
+ */
+static int calc_cphy_clk_prepare(struct dsi_phy_hw *phy,
+ struct phy_clk_params *clk_params,
+ struct phy_timing_desc *desc)
+{
+ u64 multiplier = BIT(20);
+ struct timing_entry *t = &desc->clk_prepare;
+ int rc = 0;
+ u64 dividend, temp;
+
+ t->rec_min = DIV_ROUND_UP((t->mipi_min * clk_params->bitclk_mbps),
+ (7 * clk_params->tlpx_numer_ns));
+ t->rec_max = rounddown(
+ mult_frac((t->mipi_max * clk_params->bitclk_mbps),
+ 1, (7 * clk_params->tlpx_numer_ns)), 1);
+
+ dividend = ((t->rec_max - t->rec_min) *
+ clk_params->clk_prep_buf * multiplier);
+ temp = roundup(div_s64(dividend, 100), multiplier);
+ temp += (t->rec_min * multiplier);
+ t->rec = div_s64(temp, multiplier);
+
+ rc = dsi_phy_cmn_validate_and_set(t, "cphy_clk_prepare");
+
+ DSI_DEBUG("CPHY_CLK_PREPARE: rec_min=%d, rec_max=%d, reg_val=%d\n",
+ t->rec_min, t->rec_max, t->reg_value);
+
+ return rc;
+}
+
+/**
+ * calc_cphy_clk_pre - calculates cphy_clk_pre parameter for cphy.
+ */
+static int calc_cphy_clk_pre(struct dsi_phy_hw *phy,
+ struct phy_clk_params *clk_params,
+ struct phy_timing_desc *desc)
+{
+ u64 multiplier = BIT(20);
+ struct timing_entry *t = &desc->clk_pre;
+ int rc = 0;
+ u64 dividend, temp;
+
+ t->mipi_min = min(300 - 38 - mult_frac(7, clk_params->tlpx_numer_ns,
+ clk_params->bitclk_mbps),
+ mult_frac(448, clk_params->tlpx_numer_ns,
+ clk_params->bitclk_mbps));
+ t->mipi_max = mult_frac(448, clk_params->tlpx_numer_ns,
+ clk_params->bitclk_mbps);
+
+ t->rec_min = DIV_ROUND_UP((t->mipi_min * clk_params->bitclk_mbps),
+ (7 * clk_params->tlpx_numer_ns));
+ t->rec_max = rounddown(
+ mult_frac((t->mipi_max * clk_params->bitclk_mbps),
+ 1, (7 * clk_params->tlpx_numer_ns)), 1);
+
+ dividend = ((t->rec_max - t->rec_min) * clk_params->clk_pre_buf
+ * multiplier);
+ temp = roundup(div_s64(dividend, 100), multiplier);
+ temp += (t->rec_min * multiplier);
+ t->rec = div_s64(temp, multiplier);
+
+ rc = dsi_phy_cmn_validate_and_set(t, "cphy_clk_pre");
+
+ DSI_DEBUG("CPHY_CLK_PRE: rec_min=%d, rec_max=%d, reg_val=%d\n",
+ t->rec_min, t->rec_max, t->reg_value);
+
+ return rc;
+}
+
+/**
+ * calc_cphy_clk_post - calculates cphy_clk_post parameter for cphy.
+ */
+static int calc_cphy_clk_post(struct dsi_phy_hw *phy,
+ struct phy_clk_params *clk_params,
+ struct phy_timing_desc *desc)
+{
+ u64 multiplier = BIT(20);
+ struct timing_entry *t = &desc->clk_post;
+ int rc = 0;
+ u64 dividend, temp;
+
+ t->mipi_min = mult_frac(7, clk_params->tlpx_numer_ns,
+ clk_params->bitclk_mbps);
+ t->mipi_max = mult_frac(224, clk_params->tlpx_numer_ns,
+ clk_params->bitclk_mbps);
+
+ t->rec_min = DIV_ROUND_UP((t->mipi_min * clk_params->bitclk_mbps),
+ (7 * clk_params->tlpx_numer_ns));
+ t->rec_max = rounddown(
+ mult_frac((t->mipi_max * clk_params->bitclk_mbps),
+ 1, (7 * clk_params->tlpx_numer_ns)), 1);
+
+ dividend = ((t->rec_max - t->rec_min) * clk_params->clk_post_buf
+ * multiplier);
+ temp = roundup(div_s64(dividend, 100), multiplier);
+ temp += (t->rec_min * multiplier);
+ t->rec = div_s64(temp, multiplier);
+
+ rc = dsi_phy_cmn_validate_and_set(t, "cphy_clk_post");
+
+ DSI_DEBUG("CPHY_CLK_POST: rec_min=%d, rec_max=%d, reg_val=%d\n",
+ t->rec_min, t->rec_max, t->reg_value);
+
+ return rc;
+}
+
+/**
+ * calc_cphy_hs_rqst - calculates cphy_hs_rqst parameter for cphy.
+ */
+static int calc_cphy_hs_rqst(struct dsi_phy_hw *phy,
+ struct phy_clk_params *clk_params,
+ struct phy_timing_desc *desc)
+{
+ u64 multiplier = BIT(20);
+ struct timing_entry *t = &desc->hs_rqst;
+ int rc = 0;
+ u64 dividend, temp;
+
+ t->rec_min = DIV_ROUND_UP(
+ ((t->mipi_min * clk_params->bitclk_mbps) -
+ (7 * clk_params->tlpx_numer_ns)),
+ (7 * clk_params->tlpx_numer_ns));
+
+ dividend = ((t->rec_max - t->rec_min) *
+ clk_params->hs_rqst_buf * multiplier);
+ temp = roundup(div_s64(dividend, 100), multiplier);
+ temp += t->rec_min * multiplier;
+ t->rec = div_s64(temp, multiplier);
+
+ rc = dsi_phy_cmn_validate_and_set(t, "cphy_hs_rqst");
+
+ DSI_DEBUG("CPHY_HS_RQST: rec_min=%d, rec_max=%d, reg_val=%d\n",
+ t->rec_min, t->rec_max, t->reg_value);
+
+ return rc;
+}
+
+/**
+ * calc_cphy_hs_exit - calculates cphy_hs_exit parameter for cphy.
+ */
+static int calc_cphy_hs_exit(struct dsi_phy_hw *phy,
+ struct phy_clk_params *clk_params,
+ struct phy_timing_desc *desc)
+{
+ int rc = 0;
+ u64 multiplier = BIT(20);
+ u64 dividend, temp;
+ struct timing_entry *t = &desc->hs_exit;
+
+ t->rec_min = (DIV_ROUND_UP(
+ (t->mipi_min * clk_params->bitclk_mbps),
+ (7 * clk_params->tlpx_numer_ns)) - 1);
+
+ dividend = ((t->rec_max - t->rec_min) *
+ clk_params->hs_exit_buf * multiplier);
+ temp = roundup(div_s64(dividend, 100), multiplier);
+ temp += t->rec_min * multiplier;
+ t->rec = div_s64(temp, multiplier);
+
+ rc = dsi_phy_cmn_validate_and_set(t, "cphy_hs_exit");
+
+ DSI_DEBUG("CPHY_HS_EXIT: rec_min=%d, rec_max=%d, reg_val=%d\n",
+ t->rec_min, t->rec_max, t->reg_value);
+
+ return rc;
+}
+
+/**
+ * dsi_phy_calc_cphy_timing_params - calculates cphy timing parameters
+ * for a given bit clock
+ */
+static int dsi_phy_cmn_calc_cphy_timing_params(struct dsi_phy_hw *phy,
+ struct phy_clk_params *clk_params, struct phy_timing_desc *desc)
+{
+ int rc = 0;
+
+ rc = calc_cphy_clk_prepare(phy, clk_params, desc);
+ if (rc) {
+ DSI_ERR("clk_prepare calculations failed, rc=%d\n", rc);
+ goto error;
+ }
+
+ rc = calc_cphy_clk_pre(phy, clk_params, desc);
+ if (rc) {
+ DSI_ERR("clk_pre calculations failed, rc=%d\n", rc);
+ goto error;
+ }
+
+ rc = calc_cphy_clk_post(phy, clk_params, desc);
+ if (rc) {
+ DSI_ERR("clk_zero calculations failed, rc=%d\n", rc);
+ goto error;
+ }
+
+ rc = calc_cphy_hs_rqst(phy, clk_params, desc);
+ if (rc) {
+ DSI_ERR("hs_rqst calculations failed, rc=%d\n", rc);
+ goto error;
+ }
+
+ rc = calc_cphy_hs_exit(phy, clk_params, desc);
+ if (rc) {
+ DSI_ERR("hs_exit calculations failed, rc=%d\n", rc);
+ goto error;
+ }
+
+error:
+ return rc;
+}
+
+/**
* calculate_timing_params() - calculates timing parameters.
* @phy: Pointer to DSI PHY hardware object.
* @mode: Mode information for which timing has to be calculated.
@@ -648,6 +866,7 @@ int dsi_phy_hw_calculate_timing_params(struct dsi_phy_hw *phy,
u32 const hs_exit_spec_min = 100;
u32 const hs_exit_reco_max = 255;
u32 const hs_rqst_spec_min = 50;
+ u32 const hs_rqst_reco_max = 255;
/* local vars */
int rc = 0;
@@ -660,6 +879,8 @@ int dsi_phy_hw_calculate_timing_params(struct dsi_phy_hw *phy,
struct phy_clk_params clk_params = {0};
struct phy_timing_ops *ops = phy->ops.timing_ops;
+ u32 phy_type = host->phy_type;
+
memset(&desc, 0x0, sizeof(desc));
h_total = DSI_H_TOTAL_DSC(mode);
v_total = DSI_V_TOTAL(mode);
@@ -680,8 +901,11 @@ int dsi_phy_hw_calculate_timing_params(struct dsi_phy_hw *phy,
if (use_mode_bit_clk)
x = mode->clk_rate_hz;
- else
+ else {
x = mult_frac(v_total * h_total, inter_num, num_of_lanes);
+ if (phy_type == DSI_PHY_TYPE_CPHY)
+ x = mult_frac(x, 7, 16);
+ }
y = rounddown(x, 1);
clk_params.bitclk_mbps = rounddown(DIV_ROUND_UP_ULL(y, 1000000), 1);
@@ -699,35 +923,31 @@ int dsi_phy_hw_calculate_timing_params(struct dsi_phy_hw *phy,
desc.hs_exit.rec_max = hs_exit_reco_max;
desc.hs_rqst.mipi_min = hs_rqst_spec_min;
desc.hs_rqst_clk.mipi_min = hs_rqst_spec_min;
+ desc.hs_rqst.rec_max = hs_rqst_reco_max;
if (ops->get_default_phy_params) {
- ops->get_default_phy_params(&clk_params);
+ ops->get_default_phy_params(&clk_params, phy_type);
} else {
rc = -EINVAL;
goto error;
}
- desc.clk_prepare.rec_min = DIV_ROUND_UP(
- (desc.clk_prepare.mipi_min * clk_params.bitclk_mbps),
- (8 * clk_params.tlpx_numer_ns)
- );
-
- desc.clk_prepare.rec_max = rounddown(
- mult_frac((desc.clk_prepare.mipi_max * clk_params.bitclk_mbps),
- 1, (8 * clk_params.tlpx_numer_ns)),
- 1);
-
DSI_PHY_DBG(phy, "BIT CLOCK = %d, tlpx_numer_ns=%d, treot_ns=%d\n",
clk_params.bitclk_mbps, clk_params.tlpx_numer_ns,
clk_params.treot_ns);
- rc = dsi_phy_cmn_calc_timing_params(phy, &clk_params, &desc);
+
+ if (phy_type == DSI_PHY_TYPE_CPHY)
+ rc = dsi_phy_cmn_calc_cphy_timing_params(phy, &clk_params,
+ &desc);
+ else
+ rc = dsi_phy_cmn_calc_timing_params(phy, &clk_params, &desc);
if (rc) {
DSI_PHY_ERR(phy, "Timing calc failed, rc=%d\n", rc);
goto error;
}
if (ops->update_timing_params) {
- ops->update_timing_params(timing, &desc);
+ ops->update_timing_params(timing, &desc, phy_type);
} else {
rc = -EINVAL;
goto error;
diff --git a/msm/dsi/dsi_phy_timing_calc.h b/msm/dsi/dsi_phy_timing_calc.h
index 2ed5e72f..536e7670 100644
--- a/msm/dsi/dsi_phy_timing_calc.h
+++ b/msm/dsi/dsi_phy_timing_calc.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
*/
#ifndef _DSI_PHY_TIMING_CALC_H_
@@ -81,7 +81,8 @@ struct phy_clk_params {
* Various Ops needed for auto-calculation of DSI PHY timing parameters.
*/
struct phy_timing_ops {
- void (*get_default_phy_params)(struct phy_clk_params *params);
+ void (*get_default_phy_params)(struct phy_clk_params *params,
+ u32 phy_type);
int32_t (*calc_clk_zero)(s64 rec_temp1, s64 mult);
@@ -96,14 +97,15 @@ struct phy_timing_ops {
struct phy_timing_desc *desc);
void (*update_timing_params)(struct dsi_phy_per_lane_cfgs *timing,
- struct phy_timing_desc *desc);
+ struct phy_timing_desc *desc, u32 phy_type);
};
#define roundup64(x, y) \
({ u64 _tmp = (x)+(y)-1; do_div(_tmp, y); _tmp * y; })
/* DSI PHY timing functions for 14nm */
-void dsi_phy_hw_v2_0_get_default_phy_params(struct phy_clk_params *params);
+void dsi_phy_hw_v2_0_get_default_phy_params(struct phy_clk_params *params,
+ u32 phy_type);
int32_t dsi_phy_hw_v2_0_calc_clk_zero(s64 rec_temp1, s64 mult);
@@ -118,10 +120,11 @@ void dsi_phy_hw_v2_0_calc_hs_trail(struct phy_clk_params *clk_params,
struct phy_timing_desc *desc);
void dsi_phy_hw_v2_0_update_timing_params(struct dsi_phy_per_lane_cfgs *timing,
- struct phy_timing_desc *desc);
+ struct phy_timing_desc *desc, u32 phy_type);
/* DSI PHY timing functions for 10nm */
-void dsi_phy_hw_v3_0_get_default_phy_params(struct phy_clk_params *params);
+void dsi_phy_hw_v3_0_get_default_phy_params(struct phy_clk_params *params,
+ u32 phy_type);
int32_t dsi_phy_hw_v3_0_calc_clk_zero(s64 rec_temp1, s64 mult);
@@ -136,10 +139,11 @@ void dsi_phy_hw_v3_0_calc_hs_trail(struct phy_clk_params *clk_params,
struct phy_timing_desc *desc);
void dsi_phy_hw_v3_0_update_timing_params(struct dsi_phy_per_lane_cfgs *timing,
- struct phy_timing_desc *desc);
+ struct phy_timing_desc *desc, u32 phy_type);
/* DSI PHY timing functions for 7nm */
-void dsi_phy_hw_v4_0_get_default_phy_params(struct phy_clk_params *params);
+void dsi_phy_hw_v4_0_get_default_phy_params(struct phy_clk_params *params,
+ u32 phy_type);
int32_t dsi_phy_hw_v4_0_calc_clk_zero(s64 rec_temp1, s64 mult);
@@ -154,6 +158,6 @@ void dsi_phy_hw_v4_0_calc_hs_trail(struct phy_clk_params *clk_params,
struct phy_timing_desc *desc);
void dsi_phy_hw_v4_0_update_timing_params(struct dsi_phy_per_lane_cfgs *timing,
- struct phy_timing_desc *desc);
+ struct phy_timing_desc *desc, u32 phy_type);
#endif /* _DSI_PHY_TIMING_CALC_H_ */
diff --git a/msm/dsi/dsi_phy_timing_v2_0.c b/msm/dsi/dsi_phy_timing_v2_0.c
index d3c1cb10..38399939 100644
--- a/msm/dsi/dsi_phy_timing_v2_0.c
+++ b/msm/dsi/dsi_phy_timing_v2_0.c
@@ -1,11 +1,12 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
*/
#include "dsi_phy_timing_calc.h"
-void dsi_phy_hw_v2_0_get_default_phy_params(struct phy_clk_params *params)
+void dsi_phy_hw_v2_0_get_default_phy_params(struct phy_clk_params *params,
+ u32 phy_type)
{
params->clk_prep_buf = 50;
params->clk_zero_buf = 2;
@@ -77,7 +78,7 @@ void dsi_phy_hw_v2_0_calc_hs_trail(struct phy_clk_params *clk_params,
void dsi_phy_hw_v2_0_update_timing_params(
struct dsi_phy_per_lane_cfgs *timing,
- struct phy_timing_desc *desc)
+ struct phy_timing_desc *desc, u32 phy_type)
{
int i = 0;
diff --git a/msm/dsi/dsi_phy_timing_v3_0.c b/msm/dsi/dsi_phy_timing_v3_0.c
index 562d2962..c57b6b5f 100644
--- a/msm/dsi/dsi_phy_timing_v3_0.c
+++ b/msm/dsi/dsi_phy_timing_v3_0.c
@@ -1,12 +1,12 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
*/
#include "dsi_phy_timing_calc.h"
void dsi_phy_hw_v3_0_get_default_phy_params(
- struct phy_clk_params *params)
+ struct phy_clk_params *params, u32 phy_type)
{
params->clk_prep_buf = 0;
params->clk_zero_buf = 0;
@@ -72,7 +72,7 @@ void dsi_phy_hw_v3_0_calc_hs_trail(struct phy_clk_params *clk_params,
void dsi_phy_hw_v3_0_update_timing_params(
struct dsi_phy_per_lane_cfgs *timing,
- struct phy_timing_desc *desc)
+ struct phy_timing_desc *desc, u32 phy_type)
{
timing->lane_v3[0] = 0x00;
timing->lane_v3[1] = desc->clk_zero.reg_value;
diff --git a/msm/dsi/dsi_phy_timing_v4_0.c b/msm/dsi/dsi_phy_timing_v4_0.c
index eb6a8f7c..11276289 100644
--- a/msm/dsi/dsi_phy_timing_v4_0.c
+++ b/msm/dsi/dsi_phy_timing_v4_0.c
@@ -1,24 +1,32 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
*/
#include "dsi_phy_timing_calc.h"
void dsi_phy_hw_v4_0_get_default_phy_params(
- struct phy_clk_params *params)
+ struct phy_clk_params *params, u32 phy_type)
{
- params->clk_prep_buf = 50;
- params->clk_zero_buf = 2;
- params->clk_trail_buf = 30;
- params->hs_prep_buf = 50;
- params->hs_zero_buf = 10;
- params->hs_trail_buf = 30;
- params->hs_rqst_buf = 0;
- params->hs_exit_buf = 10;
- /* 1.25 is used in code for precision */
- params->clk_pre_buf = 1;
- params->clk_post_buf = 5;
+ if (phy_type == DSI_PHY_TYPE_CPHY) {
+ params->clk_prep_buf = 50;
+ params->clk_pre_buf = 20;
+ params->clk_post_buf = 80;
+ params->hs_rqst_buf = 1;
+ params->hs_exit_buf = 10;
+ } else {
+ params->clk_prep_buf = 50;
+ params->clk_zero_buf = 2;
+ params->clk_trail_buf = 30;
+ params->hs_prep_buf = 50;
+ params->hs_zero_buf = 10;
+ params->hs_trail_buf = 30;
+ params->hs_rqst_buf = 0;
+ params->hs_exit_buf = 10;
+ /* 1.25 is used in code for precision */
+ params->clk_pre_buf = 1;
+ params->clk_post_buf = 5;
+ }
}
int32_t dsi_phy_hw_v4_0_calc_clk_zero(s64 rec_temp1, s64 mult)
@@ -75,22 +83,37 @@ void dsi_phy_hw_v4_0_calc_hs_trail(struct phy_clk_params *clk_params,
void dsi_phy_hw_v4_0_update_timing_params(
struct dsi_phy_per_lane_cfgs *timing,
- struct phy_timing_desc *desc)
+ struct phy_timing_desc *desc, u32 phy_type)
{
- timing->lane_v4[0] = 0x00;
- timing->lane_v4[1] = desc->clk_zero.reg_value;
- timing->lane_v4[2] = desc->clk_prepare.reg_value;
- timing->lane_v4[3] = desc->clk_trail.reg_value;
- timing->lane_v4[4] = desc->hs_exit.reg_value;
- timing->lane_v4[5] = desc->hs_zero.reg_value;
- timing->lane_v4[6] = desc->hs_prepare.reg_value;
- timing->lane_v4[7] = desc->hs_trail.reg_value;
- timing->lane_v4[8] = desc->hs_rqst.reg_value;
- timing->lane_v4[9] = 0x02;
- timing->lane_v4[10] = 0x04;
- timing->lane_v4[11] = 0x00;
- timing->lane_v4[12] = desc->clk_pre.reg_value;
- timing->lane_v4[13] = desc->clk_post.reg_value;
+ if (phy_type == DSI_PHY_TYPE_CPHY) {
+ timing->lane_v4[0] = 0x00;
+ timing->lane_v4[1] = 0x00;
+ timing->lane_v4[2] = 0x00;
+ timing->lane_v4[3] = 0x00;
+ timing->lane_v4[4] = desc->hs_exit.reg_value;
+ timing->lane_v4[5] = desc->clk_pre.reg_value;
+ timing->lane_v4[6] = desc->clk_prepare.reg_value;
+ timing->lane_v4[7] = desc->clk_post.reg_value;
+ timing->lane_v4[8] = desc->hs_rqst.reg_value;
+ timing->lane_v4[9] = 0x02;
+ timing->lane_v4[10] = 0x04;
+ timing->lane_v4[11] = 0x00;
+ } else {
+ timing->lane_v4[0] = 0x00;
+ timing->lane_v4[1] = desc->clk_zero.reg_value;
+ timing->lane_v4[2] = desc->clk_prepare.reg_value;
+ timing->lane_v4[3] = desc->clk_trail.reg_value;
+ timing->lane_v4[4] = desc->hs_exit.reg_value;
+ timing->lane_v4[5] = desc->hs_zero.reg_value;
+ timing->lane_v4[6] = desc->hs_prepare.reg_value;
+ timing->lane_v4[7] = desc->hs_trail.reg_value;
+ timing->lane_v4[8] = desc->hs_rqst.reg_value;
+ timing->lane_v4[9] = 0x02;
+ timing->lane_v4[10] = 0x04;
+ timing->lane_v4[11] = 0x00;
+ timing->lane_v4[12] = desc->clk_pre.reg_value;
+ timing->lane_v4[13] = desc->clk_post.reg_value;
+ }
DSI_DEBUG("[%d %d %d %d]\n", timing->lane_v4[0],
timing->lane_v4[1], timing->lane_v4[2], timing->lane_v4[3]);
diff --git a/msm/msm_atomic.c b/msm/msm_atomic.c
index 9eafad31..cd5bca16 100644
--- a/msm/msm_atomic.c
+++ b/msm/msm_atomic.c
@@ -76,13 +76,17 @@ int msm_drm_notifier_call_chain(unsigned long val, void *v)
v);
}
-static inline bool _msm_seamless_for_crtc(struct drm_atomic_state *state,
+static inline bool _msm_seamless_for_crtc(struct drm_device *dev,
+ struct drm_atomic_state *state,
struct drm_crtc_state *crtc_state, bool enable)
{
struct drm_connector *connector = NULL;
struct drm_connector_state *conn_state = NULL;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
int i = 0;
int conn_cnt = 0;
+ bool splash_en = false;
if (msm_is_mode_seamless(&crtc_state->mode) ||
msm_is_mode_seamless_vrr(&crtc_state->adjusted_mode) ||
@@ -101,7 +105,11 @@ static inline bool _msm_seamless_for_crtc(struct drm_atomic_state *state,
crtc_state->crtc))
conn_cnt++;
- if (MULTIPLE_CONN_DETECTED(conn_cnt))
+ if (kms && kms->funcs && kms->funcs->check_for_splash)
+ splash_en = kms->funcs->check_for_splash(kms,
+ crtc_state->crtc);
+
+ if (MULTIPLE_CONN_DETECTED(conn_cnt) && !splash_en)
return true;
}
}
@@ -257,7 +265,7 @@ msm_disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
if (!old_crtc_state->active)
continue;
- if (_msm_seamless_for_crtc(old_state, crtc->state, false))
+ if (_msm_seamless_for_crtc(dev, old_state, crtc->state, false))
continue;
funcs = crtc->helper_private;
@@ -405,7 +413,7 @@ static void msm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
if (!new_crtc_state->active)
continue;
- if (_msm_seamless_for_crtc(old_state, crtc->state, true))
+ if (_msm_seamless_for_crtc(dev, old_state, crtc->state, true))
continue;
funcs = crtc->helper_private;
@@ -609,7 +617,7 @@ static void msm_atomic_commit_dispatch(struct drm_device *dev,
struct msm_drm_private *priv = dev->dev_private;
struct drm_crtc *crtc = NULL;
struct drm_crtc_state *crtc_state = NULL;
- int ret = -EINVAL, i = 0, j = 0;
+ int ret = -ECANCELED, i = 0, j = 0;
bool nonblock;
/* cache since work will kfree commit in non-blocking case */
@@ -630,6 +638,7 @@ static void msm_atomic_commit_dispatch(struct drm_device *dev,
} else {
DRM_ERROR(" Error for crtc_id: %d\n",
priv->disp_thread[j].crtc_id);
+ ret = -EINVAL;
}
break;
}
@@ -645,13 +654,17 @@ static void msm_atomic_commit_dispatch(struct drm_device *dev,
}
if (ret) {
+ if (ret == -EINVAL)
+ DRM_ERROR("failed to dispatch commit to any CRTC\n");
+ else
+ DRM_DEBUG_DRIVER_RATELIMITED("empty crtc state\n");
+
/**
* this is not expected to happen, but at this point the state
* has been swapped, but we couldn't dispatch to a crtc thread.
* fallback now to a synchronous complete_commit to try and
* ensure that SW and HW state don't get out of sync.
*/
- DRM_ERROR("failed to dispatch commit to any CRTC\n");
complete_commit(commit);
} else if (!nonblock) {
kthread_flush_work(&commit->commit_work);
@@ -727,6 +740,16 @@ int msm_atomic_commit(struct drm_device *dev,
c->plane_mask |= (1 << drm_plane_index(plane));
}
+ /* Protection for prepare_fence callback */
+retry:
+ ret = drm_modeset_lock(&state->dev->mode_config.connection_mutex,
+ state->acquire_ctx);
+
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(state->acquire_ctx);
+ goto retry;
+ }
+
/*
* Wait for pending updates on any of the same crtc's and then
* mark our set of crtc's as busy:
diff --git a/msm/msm_drv.c b/msm/msm_drv.c
index 9b65de57..0ab8b1f0 100644
--- a/msm/msm_drv.c
+++ b/msm/msm_drv.c
@@ -1177,7 +1177,7 @@ static void msm_lastclose(struct drm_device *dev)
* if kms module is not yet initialized.
*/
if (!kms || (kms && kms->funcs && kms->funcs->check_for_splash
- && kms->funcs->check_for_splash(kms)))
+ && kms->funcs->check_for_splash(kms, NULL)))
return;
/*
@@ -1496,24 +1496,27 @@ static int msm_ioctl_register_event(struct drm_device *dev, void *data,
* calls add to client list and return.
*/
count = msm_event_client_count(dev, req_event, false);
- /* Add current client to list */
- spin_lock_irqsave(&dev->event_lock, flag);
- list_add_tail(&client->base.link, &priv->client_event_list);
- spin_unlock_irqrestore(&dev->event_lock, flag);
-
- if (count)
+ if (count) {
+ /* Add current client to list */
+ spin_lock_irqsave(&dev->event_lock, flag);
+ list_add_tail(&client->base.link, &priv->client_event_list);
+ spin_unlock_irqrestore(&dev->event_lock, flag);
return 0;
+ }
ret = msm_register_event(dev, req_event, file, true);
if (ret) {
DRM_ERROR("failed to enable event %x object %x object id %d\n",
req_event->event, req_event->object_type,
req_event->object_id);
+ kfree(client);
+ } else {
+ /* Add current client to list */
spin_lock_irqsave(&dev->event_lock, flag);
- list_del(&client->base.link);
+ list_add_tail(&client->base.link, &priv->client_event_list);
spin_unlock_irqrestore(&dev->event_lock, flag);
- kfree(client);
}
+
return ret;
}
@@ -1654,6 +1657,13 @@ static int msm_release(struct inode *inode, struct file *filp)
kfree(node);
}
+ /**
+ * Handle preclose operation here for removing fb's whose
+ * refcount > 1. This operation is not triggered from upstream
+ * drm as msm_driver does not support DRIVER_LEGACY feature.
+ */
+ msm_preclose(dev, file_priv);
+
return drm_release(inode, filp);
}
@@ -1814,7 +1824,6 @@ static struct drm_driver msm_driver = {
DRIVER_ATOMIC |
DRIVER_MODESET,
.open = msm_open,
- .preclose = msm_preclose,
.postclose = msm_postclose,
.lastclose = msm_lastclose,
.irq_handler = msm_irq,
diff --git a/msm/msm_drv.h b/msm/msm_drv.h
index 525473ce..34263b7a 100644
--- a/msm/msm_drv.h
+++ b/msm/msm_drv.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -512,6 +512,7 @@ struct msm_resource_caps_info {
* used instead of panel TE in cmd mode panels
* @roi_caps: Region of interest capability info
* @qsync_min_fps Minimum fps supported by Qsync feature
+ * @has_qsync_min_fps_list True if dsi-supported-qsync-min-fps-list exits
* @te_source vsync source pin information
*/
struct msm_display_info {
@@ -535,6 +536,8 @@ struct msm_display_info {
struct msm_roi_caps roi_caps;
uint32_t qsync_min_fps;
+ bool has_qsync_min_fps_list;
+
uint32_t te_source;
};
@@ -891,7 +894,8 @@ struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd);
struct drm_framebuffer * msm_alloc_stolen_fb(struct drm_device *dev,
int w, int h, int p, uint32_t format);
-
+int msm_fb_obj_get_attrs(struct drm_gem_object *obj, int *fb_ns,
+ int *fb_sec, int *fb_sec_dir, unsigned long *flags);
struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev);
void msm_fbdev_free(struct drm_device *dev);
diff --git a/msm/msm_fb.c b/msm/msm_fb.c
index f5ddd139..6b7b34ad 100644
--- a/msm/msm_fb.c
+++ b/msm/msm_fb.c
@@ -18,6 +18,7 @@
#include <linux/dma-mapping.h>
#include <linux/dma-buf.h>
+#include <linux/msm_ion.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
@@ -527,3 +528,33 @@ msm_alloc_stolen_fb(struct drm_device *dev, int w, int h, int p, uint32_t format
return fb;
}
+
+int msm_fb_obj_get_attrs(struct drm_gem_object *obj, int *fb_ns,
+ int *fb_sec, int *fb_sec_dir, unsigned long *flags)
+{
+
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ int ret = 0;
+
+ if (!obj->import_attach) {
+ DRM_DEBUG("NULL attachment in gem object flags: 0x%x\n",
+ msm_obj->flags);
+ return -EINVAL;
+ }
+
+ ret = dma_buf_get_flags(obj->import_attach->dmabuf, flags);
+ if (ret) {
+ DRM_ERROR("dma_buf_get_flags failure, err=%d\n", ret);
+ return ret;
+ }
+
+ if (!(*flags & ION_FLAG_SECURE))
+ *fb_ns = 1;
+ else if (*flags & ION_FLAG_CP_PIXEL)
+ *fb_sec = 1;
+ else if (*flags & (ION_FLAG_CP_SEC_DISPLAY |
+ ION_FLAG_CP_CAMERA_PREVIEW))
+ *fb_sec_dir = 1;
+
+ return ret;
+}
diff --git a/msm/msm_kms.h b/msm/msm_kms.h
index 8472f847..de34cb6d 100644
--- a/msm/msm_kms.h
+++ b/msm/msm_kms.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -123,7 +123,7 @@ struct msm_kms_funcs {
/* handle continuous splash */
int (*cont_splash_config)(struct msm_kms *kms);
/* check for continuous splash status */
- bool (*check_for_splash)(struct msm_kms *kms);
+ bool (*check_for_splash)(struct msm_kms *kms, struct drm_crtc *crtc);
/* topology information */
int (*get_mixer_count)(const struct msm_kms *kms,
const struct drm_display_mode *mode,
diff --git a/msm/msm_notifier.c b/msm/msm_notifier.c
index 94352688..ed528b0c 100644
--- a/msm/msm_notifier.c
+++ b/msm/msm_notifier.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
*/
#include <linux/kernel.h>
@@ -52,10 +52,11 @@ static int msm_notifier_fps_chg_callback(struct notifier_block *nb,
/*
* Get ceiling of fps from notifier data to pass to scheduler.
* Default will be FPS60 and sent to scheduler during suspend.
- * Currently scheduler expects FPS120 for any fps over 90.
*/
fps = notifier_data->refresh_rate;
- if (fps > FPS90)
+ if (fps > FPS120)
+ sched_fps = FPS144;
+ else if (fps > FPS90)
sched_fps = FPS120;
else if (fps > FPS60)
sched_fps = FPS90;
diff --git a/msm/sde/sde_connector.c b/msm/sde/sde_connector.c
index 29a84337..efb72e47 100644
--- a/msm/sde/sde_connector.c
+++ b/msm/sde/sde_connector.c
@@ -1124,12 +1124,6 @@ static int _sde_connector_set_ext_hdr_info(
connector = &c_conn->base;
- if (!connector->hdr_supported) {
- SDE_ERROR_CONN(c_conn, "sink doesn't support HDR\n");
- rc = -ENOTSUPP;
- goto end;
- }
-
memset(&c_state->hdr_meta, 0, sizeof(c_state->hdr_meta));
if (!usr_ptr) {
@@ -1137,6 +1131,12 @@ static int _sde_connector_set_ext_hdr_info(
goto end;
}
+ if (!connector->hdr_supported) {
+ SDE_ERROR_CONN(c_conn, "sink doesn't support HDR\n");
+ rc = -ENOTSUPP;
+ goto end;
+ }
+
if (copy_from_user(&c_state->hdr_meta,
(void __user *)usr_ptr,
sizeof(*hdr_meta))) {
@@ -1998,8 +1998,6 @@ static int sde_connector_atomic_check(struct drm_connector *connector,
struct drm_connector_state *new_conn_state)
{
struct sde_connector *c_conn;
- struct sde_connector_state *c_state;
- bool qsync_dirty = false, has_modeset = false;
if (!connector) {
SDE_ERROR("invalid connector\n");
@@ -2012,19 +2010,6 @@ static int sde_connector_atomic_check(struct drm_connector *connector,
}
c_conn = to_sde_connector(connector);
- c_state = to_sde_connector_state(new_conn_state);
-
- has_modeset = sde_crtc_atomic_check_has_modeset(new_conn_state->state,
- new_conn_state->crtc);
- qsync_dirty = msm_property_is_dirty(&c_conn->property_info,
- &c_state->property_state,
- CONNECTOR_PROP_QSYNC_MODE);
-
- SDE_DEBUG("has_modeset %d qsync_dirty %d\n", has_modeset, qsync_dirty);
- if (has_modeset && qsync_dirty) {
- SDE_ERROR("invalid qsync update during modeset\n");
- return -EINVAL;
- }
if (c_conn->ops.atomic_check)
return c_conn->ops.atomic_check(connector,
diff --git a/msm/sde/sde_connector.h b/msm/sde/sde_connector.h
index 9d173377..dba34391 100644
--- a/msm/sde/sde_connector.h
+++ b/msm/sde/sde_connector.h
@@ -336,6 +336,14 @@ struct sde_connector_ops {
* @is_idle: true if display is idle, false otherwise
*/
void (*set_idle_hint)(void *display, bool is_idle);
+
+ /**
+ * get_qsync_min_fps - Get qsync min fps from qsync-min-fps-list
+ * @display: Pointer to private display structure
+ * @mode_fps: Fps value in dfps list
+ * Returns: Qsync min fps value on success
+ */
+ int (*get_qsync_min_fps)(void *display, u32 mode_fps);
};
/**
diff --git a/msm/sde/sde_core_perf.c b/msm/sde/sde_core_perf.c
index 787244ea..3add93d9 100644
--- a/msm/sde/sde_core_perf.c
+++ b/msm/sde/sde_core_perf.c
@@ -105,12 +105,15 @@ static void _sde_core_perf_calc_doze_suspend(struct drm_crtc *crtc,
if (!old_perf)
return;
- if (!perf->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_LLCC] &&
- !perf->max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_LLCC] &&
- !perf->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_EBI] &&
- !perf->max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_EBI] &&
+ if (!perf->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_MNOC] &&
+ !perf->max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_MNOC] &&
state->plane_mask) {
+ perf->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_MNOC] =
+ old_perf->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_MNOC];
+ perf->max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_MNOC] =
+ old_perf->max_per_pipe_ib
+ [SDE_POWER_HANDLE_DBUS_ID_MNOC];
perf->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_LLCC] =
old_perf->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_LLCC];
perf->max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_LLCC] =
@@ -120,6 +123,9 @@ static void _sde_core_perf_calc_doze_suspend(struct drm_crtc *crtc,
perf->max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_EBI] =
old_perf->max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_EBI];
+ if (!old_perf->core_clk_rate)
+ perf->core_clk_rate = old_perf->core_clk_rate;
+
for (i = 0; i < new_cstate->num_connectors; i++) {
conn = new_cstate->connectors[i];
if (!conn)
@@ -130,13 +136,14 @@ static void _sde_core_perf_calc_doze_suspend(struct drm_crtc *crtc,
is_doze_suspend = true;
}
- if (!is_doze_suspend && conn && c_conn) {
- SDE_DEBUG("No BW, planes:%x dpms_mode:%d lpmode:%d\n",
+ if (!is_doze_suspend && conn && c_conn)
+ SDE_ERROR("No BW, planes:%x dpms_mode:%d lpmode:%d\n",
state->plane_mask, c_conn->dpms_mode,
sde_connector_get_lp(conn));
+ if (conn && c_conn)
SDE_EVT32(state->plane_mask, c_conn->dpms_mode,
- sde_connector_get_lp(conn), SDE_EVTLOG_ERROR);
- }
+ sde_connector_get_lp(conn), is_doze_suspend,
+ SDE_EVTLOG_ERROR);
}
}
diff --git a/msm/sde/sde_crtc.c b/msm/sde/sde_crtc.c
index c5b6224b..cb0dd377 100644
--- a/msm/sde/sde_crtc.c
+++ b/msm/sde/sde_crtc.c
@@ -420,16 +420,36 @@ static ssize_t vsync_event_show(struct device *device,
ktime_to_ns(sde_crtc->vblank_last_cb_time));
}
+static ssize_t retire_frame_event_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_crtc *crtc;
+ struct sde_crtc *sde_crtc;
+
+ if (!device || !buf) {
+ SDE_ERROR("invalid input param(s)\n");
+ return -EAGAIN;
+ }
+
+ crtc = dev_get_drvdata(device);
+ sde_crtc = to_sde_crtc(crtc);
+ SDE_EVT32(DRMID(&sde_crtc->base));
+ return scnprintf(buf, PAGE_SIZE, "RETIRE_FRAME_TIME=%llu\n",
+ ktime_to_ns(sde_crtc->retire_frame_event_time));
+}
+
static DEVICE_ATTR_RO(vsync_event);
static DEVICE_ATTR_RO(measured_fps);
static DEVICE_ATTR_RW(fps_periodicity_ms);
static DEVICE_ATTR_WO(early_wakeup);
+static DEVICE_ATTR_RO(retire_frame_event);
static struct attribute *sde_crtc_dev_attrs[] = {
&dev_attr_vsync_event.attr,
&dev_attr_measured_fps.attr,
&dev_attr_fps_periodicity_ms.attr,
&dev_attr_early_wakeup.attr,
+ &dev_attr_retire_frame_event.attr,
NULL
};
@@ -453,6 +473,8 @@ static void sde_crtc_destroy(struct drm_crtc *crtc)
if (sde_crtc->vsync_event_sf)
sysfs_put(sde_crtc->vsync_event_sf);
+ if (sde_crtc->retire_frame_event_sf)
+ sysfs_put(sde_crtc->retire_frame_event_sf);
if (sde_crtc->sysfs_dev)
device_unregister(sde_crtc->sysfs_dev);
@@ -1747,8 +1769,12 @@ int sde_crtc_state_find_plane_fb_modes(struct drm_crtc_state *state,
static void _sde_drm_fb_sec_dir_trans(
struct sde_kms_smmu_state_data *smmu_state, uint32_t secure_level,
- struct sde_mdss_cfg *catalog, bool old_valid_fb, int *ops)
+ struct sde_mdss_cfg *catalog, bool old_valid_fb, int *ops,
+ struct drm_crtc_state *old_crtc_state)
{
+ struct sde_crtc_state *old_cstate = to_sde_crtc_state(old_crtc_state);
+ int old_secure_session = old_cstate->secure_session;
+
/* secure display usecase */
if ((smmu_state->state == ATTACHED)
&& (secure_level == SDE_DRM_SEC_ONLY)) {
@@ -1769,6 +1795,10 @@ static void _sde_drm_fb_sec_dir_trans(
smmu_state->secure_level = secure_level;
smmu_state->transition_type = PRE_COMMIT;
*ops |= SDE_KMS_OPS_SECURE_STATE_CHANGE;
+ if (old_secure_session ==
+ SDE_SECURE_VIDEO_SESSION)
+ *ops |= (SDE_KMS_OPS_WAIT_FOR_TX_DONE |
+ SDE_KMS_OPS_CLEANUP_PLANE_FB);
}
}
@@ -1894,7 +1924,7 @@ int sde_crtc_get_secure_transition_ops(struct drm_crtc *crtc,
switch (translation_mode) {
case SDE_DRM_FB_SEC_DIR_TRANS:
_sde_drm_fb_sec_dir_trans(smmu_state, secure_level,
- catalog, old_valid_fb, &ops);
+ catalog, old_valid_fb, &ops, old_crtc_state);
if (clone_mode && (ops & SDE_KMS_OPS_SECURE_STATE_CHANGE))
ops |= SDE_KMS_OPS_WAIT_FOR_TX_DONE;
break;
@@ -2203,6 +2233,12 @@ static void sde_crtc_frame_event_cb(void *data, u32 event)
}
}
+ if ((event & SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE) &&
+ (sde_crtc && sde_crtc->retire_frame_event_sf)) {
+ sde_crtc->retire_frame_event_time = ktime_get();
+ sysfs_notify_dirent(sde_crtc->retire_frame_event_sf);
+ }
+
fevent->event = event;
fevent->crtc = crtc;
fevent->connector = cb_data->connector;
@@ -2488,17 +2524,16 @@ static void _sde_crtc_set_input_fence_timeout(struct sde_crtc_state *cstate)
cstate->input_fence_timeout_ns *= NSEC_PER_MSEC;
}
-/**
- * _sde_crtc_clear_dim_layers_v1 - clear all dim layer settings
- * @cstate: Pointer to sde crtc state
- */
-static void _sde_crtc_clear_dim_layers_v1(struct sde_crtc_state *cstate)
+void _sde_crtc_clear_dim_layers_v1(struct drm_crtc_state *state)
{
u32 i;
+ struct sde_crtc_state *cstate;
- if (!cstate)
+ if (!state)
return;
+ cstate = to_sde_crtc_state(state);
+
for (i = 0; i < cstate->num_dim_layers; i++)
memset(&cstate->dim_layer[i], 0, sizeof(cstate->dim_layer[i]));
@@ -2527,7 +2562,7 @@ static void _sde_crtc_set_dim_layer_v1(struct drm_crtc *crtc,
if (!usr_ptr) {
/* usr_ptr is null when setting the default property value */
- _sde_crtc_clear_dim_layers_v1(cstate);
+ _sde_crtc_clear_dim_layers_v1(&cstate->base);
SDE_DEBUG("dim_layer data removed\n");
return;
}
@@ -4247,6 +4282,9 @@ static int _sde_crtc_check_secure_blend_config(struct drm_crtc *crtc,
{
struct drm_plane *plane;
int i;
+ struct drm_crtc_state *old_state = crtc->state;
+ struct sde_crtc_state *old_cstate = to_sde_crtc_state(old_state);
+
if (secure == SDE_DRM_SEC_ONLY) {
/*
* validate planes - only fb_sec_dir is allowed during sec_crtc
@@ -4307,6 +4345,8 @@ static int _sde_crtc_check_secure_blend_config(struct drm_crtc *crtc,
* - fail empty commit
* - validate dim_layer or plane is staged in the supported
* blendstage
+ * - fail if previous commit has no planes staged and
+ * no dim layer at highest blendstage.
*/
if (sde_kms->catalog->sui_supported_blendstage) {
int sec_stage = cnt ? pstates[0].sde_pstate->stage :
@@ -4324,6 +4364,14 @@ static int _sde_crtc_check_secure_blend_config(struct drm_crtc *crtc,
cstate->num_dim_layers, sec_stage);
return -EINVAL;
}
+
+ if (!old_state->plane_mask &&
+ !old_cstate->num_dim_layers) {
+ SDE_ERROR(
+ "crtc%d: no dim layer in nonsecure to secure transition\n",
+ DRMID(crtc));
+ return -EINVAL;
+ }
}
}
@@ -4350,6 +4398,55 @@ static int _sde_crtc_check_secure_single_encoder(struct drm_crtc *crtc,
return 0;
}
+static int _sde_crtc_check_secure_transition(struct drm_crtc *crtc,
+ struct drm_crtc_state *state, bool is_video_mode)
+{
+ struct sde_crtc_state *old_cstate = to_sde_crtc_state(crtc->state);
+ struct sde_crtc_state *new_cstate = to_sde_crtc_state(state);
+ int old_secure_session = old_cstate->secure_session;
+ int new_secure_session = new_cstate->secure_session;
+ int ret = 0;
+
+ /*
+ * Direct transition from Secure Camera to Secure UI(&viceversa)
+ * is not allowed
+ */
+ if ((old_secure_session == SDE_SECURE_CAMERA_SESSION &&
+ new_secure_session == SDE_SECURE_UI_SESSION) ||
+ (old_secure_session == SDE_SECURE_UI_SESSION &&
+ new_secure_session == SDE_SECURE_CAMERA_SESSION)) {
+ SDE_EVT32(DRMID(crtc), old_secure_session,
+ new_secure_session, SDE_EVTLOG_ERROR);
+ ret = -EINVAL;
+ }
+
+ /*
+ * In video mode, null commit is required for transition between
+ * secure video & secure camera
+ */
+ if (is_video_mode &&
+ ((old_secure_session == SDE_SECURE_CAMERA_SESSION &&
+ new_secure_session == SDE_SECURE_VIDEO_SESSION) ||
+ (old_secure_session == SDE_SECURE_VIDEO_SESSION &&
+ new_secure_session == SDE_SECURE_CAMERA_SESSION))) {
+ SDE_EVT32(DRMID(crtc), old_secure_session,
+ new_secure_session, SDE_EVTLOG_ERROR);
+ ret = -EINVAL;
+ }
+
+ if (old_secure_session != new_secure_session)
+ SDE_EVT32(DRMID(crtc), old_secure_session,
+ new_secure_session);
+
+ SDE_DEBUG("old session: %d new session : %d\n",
+ old_secure_session, new_secure_session);
+ if (ret)
+ SDE_ERROR("invalid transition old:%d new:%d\n",
+ old_secure_session, new_secure_session);
+
+ return ret;
+}
+
static int _sde_crtc_check_secure_state_smmu_translation(struct drm_crtc *crtc,
struct drm_crtc_state *state, struct sde_kms *sde_kms, int secure,
int fb_ns, int fb_sec, int fb_sec_dir)
@@ -4364,19 +4461,8 @@ static int _sde_crtc_check_secure_state_smmu_translation(struct drm_crtc *crtc,
MSM_DISPLAY_VIDEO_MODE);
}
- /*
- * Secure display to secure camera needs without direct
- * transition is currently not allowed
- */
- if (fb_sec_dir && secure == SDE_DRM_SEC_NON_SEC &&
- smmu_state->state != ATTACHED &&
- smmu_state->secure_level == SDE_DRM_SEC_ONLY) {
-
- SDE_EVT32(DRMID(crtc), fb_ns, fb_sec_dir,
- smmu_state->state, smmu_state->secure_level,
- secure);
+ if (_sde_crtc_check_secure_transition(crtc, state, is_video_mode))
goto sec_err;
- }
/*
* In video mode check for null commit before transition
@@ -4442,6 +4528,33 @@ static int _sde_crtc_check_secure_conn(struct drm_crtc *crtc,
return 0;
}
+static int _sde_crtc_populate_secure_session(struct drm_crtc_state *state,
+ int secure, int fb_ns, int fb_sec, int fb_sec_dir)
+{
+ struct sde_crtc_state *cstate = to_sde_crtc_state(state);
+
+ if (secure == SDE_DRM_SEC_ONLY && fb_sec_dir && !fb_sec && !fb_ns)
+ cstate->secure_session = SDE_SECURE_UI_SESSION;
+ else if (secure == SDE_DRM_SEC_NON_SEC && fb_sec_dir && !fb_sec)
+ cstate->secure_session = SDE_SECURE_CAMERA_SESSION;
+ else if (secure == SDE_DRM_SEC_NON_SEC && !fb_sec_dir && fb_sec)
+ cstate->secure_session = SDE_SECURE_VIDEO_SESSION;
+ else if (secure == SDE_DRM_SEC_NON_SEC && !fb_sec_dir &&
+ !fb_sec && fb_ns)
+ cstate->secure_session = SDE_NON_SECURE_SESSION;
+ else if (!fb_sec_dir && !fb_sec && !fb_ns)
+ cstate->secure_session = SDE_NULL_SESSION;
+ else {
+ SDE_ERROR(
+ "invalid session sec:%d fb_sec_dir:%d fb_sec:%d fb_ns:%d\n",
+ cstate->secure_session, fb_sec_dir,
+ fb_sec, fb_ns);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int _sde_crtc_check_secure_state(struct drm_crtc *crtc,
struct drm_crtc_state *state, struct plane_state pstates[],
int cnt)
@@ -4472,6 +4585,11 @@ static int _sde_crtc_check_secure_state(struct drm_crtc *crtc,
if (rc)
return rc;
+ rc = _sde_crtc_populate_secure_session(state, secure,
+ fb_ns, fb_sec, fb_sec_dir);
+ if (rc)
+ return rc;
+
rc = _sde_crtc_check_secure_blend_config(crtc, state, pstates, cstate,
sde_kms, cnt, secure, fb_ns, fb_sec, fb_sec_dir);
if (rc)
@@ -5892,6 +6010,7 @@ static int _sde_debugfs_fence_status_show(struct seq_file *s, void *data)
pstate->stage);
fence = pstate->input_fence;
+ SDE_EVT32(DRMID(crtc), fence);
if (fence)
sde_fence_list_dump(fence, &s);
}
@@ -6325,6 +6444,12 @@ int sde_crtc_post_init(struct drm_device *dev, struct drm_crtc *crtc)
SDE_ERROR("crtc:%d vsync_event sysfs create failed\n",
crtc->base.id);
+ sde_crtc->retire_frame_event_sf = sysfs_get_dirent(
+ sde_crtc->sysfs_dev->kobj.sd, "retire_frame_event");
+ if (!sde_crtc->retire_frame_event_sf)
+ SDE_ERROR("crtc:%d retire frame event sysfs create failed\n",
+ crtc->base.id);
+
end:
return rc;
}
diff --git a/msm/sde/sde_crtc.h b/msm/sde/sde_crtc.h
index 9b18dea1..13e0ac8a 100644
--- a/msm/sde/sde_crtc.h
+++ b/msm/sde/sde_crtc.h
@@ -36,6 +36,22 @@
#define SDE_CRTC_FRAME_EVENT_SIZE (4 * 2)
/**
+ * enum sde_session_type: session type
+ * @SDE_SECURE_UI_SESSION: secure UI usecase
+ * @SDE_SECURE_CAMERA_SESSION: secure camera usecase
+ * @SDE_SECURE_VIDEO_SESSION: secure video usecase
+ * @SDE_NON_SECURE_SESSION: non secure usecase
+ * @SDE_NULL_SESSION: null commit usecase
+ */
+enum sde_session_type {
+ SDE_SECURE_UI_SESSION,
+ SDE_SECURE_CAMERA_SESSION,
+ SDE_SECURE_VIDEO_SESSION,
+ SDE_NON_SECURE_SESSION,
+ SDE_NULL_SESSION,
+};
+
+/**
* enum sde_crtc_client_type: crtc client type
* @RT_CLIENT: RealTime client like video/cmd mode display
* voting through apps rsc
@@ -221,11 +237,13 @@ struct sde_crtc_misr_info {
* @debugfs_root : Parent of debugfs node
* @priv_handle : Pointer to external private handle, if present
* @vblank_cb_count : count of vblank callback since last reset
+ * @retire_frame_event_time : ktime at last retire frame event
* @play_count : frame count between crtc enable and disable
* @vblank_cb_time : ktime at vblank count reset
* @vblank_last_cb_time : ktime at last vblank notification
* @sysfs_dev : sysfs device node for crtc
* @vsync_event_sf : vsync event notifier sysfs device
+ * @retire_frame_event_sf :retire frame event notifier sysfs device
* @enabled : whether the SDE CRTC is currently enabled. updated in the
* commit-thread, not state-swap time which is earlier, so
* safe to make decisions on during VBLANK on/off work
@@ -296,10 +314,12 @@ struct sde_crtc {
u32 vblank_cb_count;
u64 play_count;
ktime_t vblank_cb_time;
+ ktime_t retire_frame_event_time;
ktime_t vblank_last_cb_time;
struct sde_crtc_fps_info fps_info;
struct device *sysfs_dev;
struct kernfs_node *vsync_event_sf;
+ struct kernfs_node *retire_frame_event_sf;
bool enabled;
bool ds_reconfig;
@@ -382,6 +402,7 @@ struct sde_crtc {
* @ds_cfg: Destination scaler config
* @scl3_lut_cfg: QSEED3 lut config
* @new_perf: new performance state being requested
+ * @secure_session: Indicates the type of secure session
*/
struct sde_crtc_state {
struct drm_crtc_state base;
@@ -411,6 +432,7 @@ struct sde_crtc_state {
struct sde_hw_scaler3_lut_cfg scl3_lut_cfg;
struct sde_core_perf_params new_perf;
+ int secure_session;
};
enum sde_crtc_irq_state {
@@ -841,6 +863,12 @@ void sde_crtc_get_misr_info(struct drm_crtc *crtc,
int sde_crtc_get_num_datapath(struct drm_crtc *crtc,
struct drm_connector *connector);
+/**
+ * _sde_crtc_clear_dim_layers_v1 - clear all dim layer settings
+ * @cstate: Pointer to drm crtc state
+ */
+void _sde_crtc_clear_dim_layers_v1(struct drm_crtc_state *state);
+
/*
* sde_crtc_set_compression_ratio - set compression ratio src_bpp/target_bpp
* @msm_mode_info: Mode info
diff --git a/msm/sde/sde_encoder.c b/msm/sde/sde_encoder.c
index 5d97711c..3ad77c7e 100644
--- a/msm/sde/sde_encoder.c
+++ b/msm/sde/sde_encoder.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2021, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -931,6 +931,29 @@ bool sde_encoder_in_clone_mode(struct drm_encoder *drm_enc)
return false;
}
+bool sde_encoder_is_cwb_disabling(struct drm_encoder *drm_enc,
+ struct drm_crtc *crtc)
+{
+ struct sde_encoder_virt *sde_enc;
+ int i;
+
+ if (!drm_enc)
+ return false;
+
+ sde_enc = to_sde_encoder_virt(drm_enc);
+ if (sde_enc->disp_info.intf_type != DRM_MODE_CONNECTOR_VIRTUAL)
+ return false;
+
+ for (i = 0; i < sde_enc->num_phys_encs; i++) {
+ struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+ if (sde_encoder_phys_is_cwb_disabling(phys, crtc))
+ return true;
+ }
+
+ return false;
+}
+
static int _sde_encoder_atomic_check_phys_enc(struct sde_encoder_virt *sde_enc,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
@@ -1095,6 +1118,7 @@ static int sde_encoder_virt_atomic_check(
struct sde_crtc_state *sde_crtc_state = NULL;
enum sde_rm_topology_name old_top;
int ret = 0;
+ bool qsync_dirty = false, has_modeset = false;
if (!drm_enc || !crtc_state || !conn_state) {
SDE_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n",
@@ -1161,6 +1185,22 @@ static int sde_encoder_virt_atomic_check(
}
drm_mode_set_crtcinfo(adj_mode, 0);
+
+ has_modeset = sde_crtc_atomic_check_has_modeset(conn_state->state,
+ conn_state->crtc);
+ qsync_dirty = msm_property_is_dirty(&sde_conn->property_info,
+ &sde_conn_state->property_state,
+ CONNECTOR_PROP_QSYNC_MODE);
+
+ if (has_modeset && qsync_dirty &&
+ (msm_is_mode_seamless_poms(adj_mode) ||
+ msm_is_mode_seamless_dms(adj_mode) ||
+ msm_is_mode_seamless_dyn_clk(adj_mode))) {
+ SDE_ERROR("invalid qsync update during modeset priv flag:%x\n",
+ adj_mode->private_flags);
+ return -EINVAL;
+ }
+
SDE_EVT32(DRMID(drm_enc), adj_mode->flags, adj_mode->private_flags);
return ret;
@@ -1352,6 +1392,7 @@ static int _sde_encoder_dsc_n_lm_1_enc_1_intf(struct sde_encoder_virt *sde_enc)
struct msm_display_dsc_info *dsc = NULL;
struct sde_hw_ctl *hw_ctl;
struct sde_ctl_dsc_cfg cfg;
+ bool half_panel_partial_update;
if (hw_dsc == NULL || hw_pp == NULL || !enc_master) {
SDE_ERROR_ENC(sde_enc, "invalid params for DSC\n");
@@ -1370,15 +1411,19 @@ static int _sde_encoder_dsc_n_lm_1_enc_1_intf(struct sde_encoder_virt *sde_enc)
enc_ip_w = intf_ip_w;
_sde_encoder_dsc_initial_line_calc(dsc, enc_ip_w);
+ half_panel_partial_update = (sde_enc->cur_conn_roi.w <=
+ sde_enc->cur_master->cached_mode.hdisplay / 2);
- ich_res = _sde_encoder_dsc_ich_reset_override_needed(false, dsc);
+ ich_res = _sde_encoder_dsc_ich_reset_override_needed(
+ half_panel_partial_update, dsc);
if (enc_master->intf_mode == INTF_MODE_VIDEO)
dsc_common_mode = DSC_MODE_VIDEO;
- SDE_DEBUG_ENC(sde_enc, "pic_w: %d pic_h: %d mode:%d\n",
- roi->w, roi->h, dsc_common_mode);
- SDE_EVT32(DRMID(&sde_enc->base), roi->w, roi->h, dsc_common_mode);
+ SDE_DEBUG_ENC(sde_enc, "pic_w: %d pic_h: %d mode:%d ich_res:%d\n",
+ roi->w, roi->h, dsc_common_mode, ich_res);
+ SDE_EVT32(DRMID(&sde_enc->base), roi->w, roi->h,
+ dsc_common_mode, ich_res, half_panel_partial_update);
_sde_encoder_dsc_pipe_cfg(hw_dsc, hw_pp, dsc, dsc_common_mode,
ich_res, true, hw_dsc_pp, false);
@@ -2020,8 +2065,13 @@ static int _sde_encoder_update_rsc_client(
qsync_mode = sde_connector_get_qsync_mode(
sde_enc->cur_master->connector);
- if (sde_encoder_in_clone_mode(drm_enc) ||
- (disp_info->display_type != SDE_CONNECTOR_PRIMARY) ||
+ /* left primary encoder keep vote */
+ if (sde_encoder_in_clone_mode(drm_enc)) {
+ SDE_EVT32(rsc_state, SDE_EVTLOG_FUNC_CASE1);
+ return 0;
+ }
+
+ if ((disp_info->display_type != SDE_CONNECTOR_PRIMARY) ||
(disp_info->display_type && qsync_mode))
rsc_state = enable ? SDE_RSC_CLK_STATE : SDE_RSC_IDLE_STATE;
else if (sde_encoder_check_curr_mode(drm_enc, MSM_DISPLAY_CMD_MODE))
@@ -3447,7 +3497,8 @@ static void sde_encoder_virt_enable(struct drm_encoder *drm_enc)
_sde_encoder_input_handler_register(drm_enc);
- if ((drm_enc->crtc->state->connectors_changed &&
+ if ((drm_enc->crtc && drm_enc->crtc->state &&
+ drm_enc->crtc->state->connectors_changed &&
sde_encoder_in_clone_mode(drm_enc)) ||
!(msm_is_mode_seamless_vrr(cur_mode)
|| msm_is_mode_seamless_dms(cur_mode)
@@ -3511,6 +3562,33 @@ static void sde_encoder_virt_enable(struct drm_encoder *drm_enc)
_sde_encoder_virt_enable_helper(drm_enc);
}
+void sde_encoder_virt_reset(struct drm_encoder *drm_enc)
+{
+ struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
+ struct sde_kms *sde_kms = sde_encoder_get_kms(drm_enc);
+ int i = 0;
+
+ for (i = 0; i < sde_enc->num_phys_encs; i++) {
+ if (sde_enc->phys_encs[i]) {
+ sde_enc->phys_encs[i]->cont_splash_enabled = false;
+ sde_enc->phys_encs[i]->connector = NULL;
+ }
+ atomic_set(&sde_enc->frame_done_cnt[i], 0);
+ }
+
+ sde_enc->cur_master = NULL;
+ /*
+ * clear the cached crtc in sde_enc on use case finish, after all the
+ * outstanding events and timers have been completed
+ */
+ sde_enc->crtc = NULL;
+ memset(&sde_enc->mode_info, 0, sizeof(sde_enc->mode_info));
+
+ SDE_DEBUG_ENC(sde_enc, "encoder disabled\n");
+
+ sde_rm_release(&sde_kms->rm, drm_enc, false);
+}
+
static void sde_encoder_virt_disable(struct drm_encoder *drm_enc)
{
struct sde_encoder_virt *sde_enc = NULL;
@@ -3545,7 +3623,8 @@ static void sde_encoder_virt_disable(struct drm_encoder *drm_enc)
SDE_EVT32(DRMID(drm_enc));
/* wait for idle */
- sde_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);
+ if (!sde_encoder_in_clone_mode(drm_enc))
+ sde_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);
_sde_encoder_input_handler_unregister(drm_enc);
@@ -3588,25 +3667,8 @@ static void sde_encoder_virt_disable(struct drm_encoder *drm_enc)
sde_encoder_resource_control(drm_enc, SDE_ENC_RC_EVENT_STOP);
- for (i = 0; i < sde_enc->num_phys_encs; i++) {
- if (sde_enc->phys_encs[i]) {
- sde_enc->phys_encs[i]->cont_splash_enabled = false;
- sde_enc->phys_encs[i]->connector = NULL;
- }
- atomic_set(&sde_enc->frame_done_cnt[i], 0);
- }
-
- sde_enc->cur_master = NULL;
- /*
- * clear the cached crtc in sde_enc on use case finish, after all the
- * outstanding events and timers have been completed
- */
- sde_enc->crtc = NULL;
- memset(&sde_enc->mode_info, 0, sizeof(sde_enc->mode_info));
-
- SDE_DEBUG_ENC(sde_enc, "encoder disabled\n");
-
- sde_rm_release(&sde_kms->rm, drm_enc, false);
+ if (!sde_encoder_in_clone_mode(drm_enc))
+ sde_encoder_virt_reset(drm_enc);
}
void sde_encoder_helper_phys_disable(struct sde_encoder_phys *phys_enc,
@@ -3811,6 +3873,11 @@ void sde_encoder_register_vblank_callback(struct drm_encoder *drm_enc,
SDE_DEBUG_ENC(sde_enc, "\n");
SDE_EVT32(DRMID(drm_enc), enable);
+ if (sde_encoder_in_clone_mode(drm_enc)) {
+ SDE_EVT32(DRMID(drm_enc), SDE_EVTLOG_ERROR);
+ return;
+ }
+
spin_lock_irqsave(&sde_enc->enc_spinlock, lock_flags);
sde_enc->crtc_vblank_cb = vbl_cb;
sde_enc->crtc_vblank_cb_data = vbl_data;
@@ -3882,20 +3949,15 @@ static void sde_encoder_frame_done_callback(
if (sde_enc->phys_encs[i] == ready_phys) {
SDE_EVT32_VERBOSE(DRMID(drm_enc), i,
atomic_read(&sde_enc->frame_done_cnt[i]));
- if (!atomic_add_unless(
- &sde_enc->frame_done_cnt[i], 1, 1)) {
+ if (atomic_inc_return(
+ &sde_enc->frame_done_cnt[i]) > 1)
SDE_EVT32(DRMID(drm_enc), event,
ready_phys->intf_idx,
SDE_EVTLOG_ERROR);
- SDE_ERROR_ENC(sde_enc,
- "intf idx:%d, event:%d\n",
- ready_phys->intf_idx, event);
- return;
- }
}
if (topology != SDE_RM_TOPOLOGY_PPSPLIT &&
- atomic_read(&sde_enc->frame_done_cnt[i]) != 1)
+ !atomic_read(&sde_enc->frame_done_cnt[i]))
trigger = false;
}
@@ -3908,7 +3970,7 @@ static void sde_encoder_frame_done_callback(
&sde_enc->crtc_frame_event_cb_data,
event);
for (i = 0; i < sde_enc->num_phys_encs; i++)
- atomic_set(&sde_enc->frame_done_cnt[i], 0);
+ atomic_dec(&sde_enc->frame_done_cnt[i]);
}
} else if (sde_enc->crtc_frame_event_cb) {
if (!is_cmd_mode)
@@ -3922,10 +3984,12 @@ static void sde_encoder_frame_done_callback(
static void sde_encoder_get_qsync_fps_callback(
struct drm_encoder *drm_enc,
- u32 *qsync_fps)
+ u32 *qsync_fps, u32 vrr_fps)
{
struct msm_display_info *disp_info;
struct sde_encoder_virt *sde_enc;
+ int rc = 0;
+ struct sde_connector *sde_conn;
if (!qsync_fps)
return;
@@ -3939,6 +4003,31 @@ static void sde_encoder_get_qsync_fps_callback(
sde_enc = to_sde_encoder_virt(drm_enc);
disp_info = &sde_enc->disp_info;
*qsync_fps = disp_info->qsync_min_fps;
+
+ /**
+ * If "dsi-supported-qsync-min-fps-list" is defined, get
+ * the qsync min fps corresponding to the fps in dfps list
+ */
+ if (disp_info->has_qsync_min_fps_list) {
+
+ if (!sde_enc->cur_master ||
+ !(sde_enc->disp_info.capabilities &
+ MSM_DISPLAY_CAP_VID_MODE)) {
+ SDE_ERROR("invalid qsync settings %b\n",
+ !sde_enc->cur_master);
+ return;
+ }
+ sde_conn = to_sde_connector(sde_enc->cur_master->connector);
+
+ if (sde_conn->ops.get_qsync_min_fps)
+ rc = sde_conn->ops.get_qsync_min_fps(sde_conn->display,
+ vrr_fps);
+ if (rc <= 0) {
+ SDE_ERROR("invalid qsync min fps %d\n", rc);
+ return;
+ }
+ *qsync_fps = rc;
+ }
}
int sde_encoder_idle_request(struct drm_encoder *drm_enc)
@@ -4850,6 +4939,10 @@ int sde_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
sde_connector_is_qsync_updated(
sde_enc->cur_master->connector)) {
_helper_flush_qsync(phys);
+
+ if (is_cmd_mode)
+ _sde_encoder_update_rsc_client(drm_enc,
+ true);
}
}
}
diff --git a/msm/sde/sde_encoder.h b/msm/sde/sde_encoder.h
index 32c25d01..aef88918 100644
--- a/msm/sde/sde_encoder.h
+++ b/msm/sde/sde_encoder.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -313,6 +313,15 @@ void sde_encoder_recovery_events_handler(struct drm_encoder *encoder,
*/
bool sde_encoder_in_clone_mode(struct drm_encoder *enc);
+/*
+ * sde_encoder_is_cwb_disabling - check if cwb encoder disable is pending
+ * @drm_enc: Pointer to drm encoder structure
+ * @drm_crtc: Pointer to drm crtc structure
+ * @Return: true if cwb encoder disable is pending
+ */
+bool sde_encoder_is_cwb_disabling(struct drm_encoder *drm_enc,
+ struct drm_crtc *drm_crtc);
+
/**
* sde_encoder_is_primary_display - checks if underlying display is primary
* display or not.
@@ -362,4 +371,31 @@ void sde_encoder_uidle_enable(struct drm_encoder *drm_enc, bool enable);
*/
void sde_encoder_trigger_early_wakeup(struct drm_encoder *drm_enc);
+/**
+ * sde_encoder_virt_reset - delay encoder virt reset
+ * @drm_enc: Pointer to drm encoder structure
+ */
+void sde_encoder_virt_reset(struct drm_encoder *drm_enc);
+
+/**
+ * sde_encoder_get_kms - retrieve the kms from encoder
+ * @drm_enc: Pointer to drm encoder structure
+ */
+static inline struct sde_kms *sde_encoder_get_kms(struct drm_encoder *drm_enc)
+{
+ struct msm_drm_private *priv;
+
+ if (!drm_enc || !drm_enc->dev) {
+ SDE_ERROR("invalid encoder\n");
+ return NULL;
+ }
+ priv = drm_enc->dev->dev_private;
+ if (!priv || !priv->kms) {
+ SDE_ERROR("invalid kms\n");
+ return NULL;
+ }
+
+ return to_sde_kms(priv->kms);
+}
+
#endif /* __SDE_ENCODER_H__ */
diff --git a/msm/sde/sde_encoder_phys.h b/msm/sde/sde_encoder_phys.h
index 8f35a8fa..22355dc9 100644
--- a/msm/sde/sde_encoder_phys.h
+++ b/msm/sde/sde_encoder_phys.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
*/
#ifndef __SDE_ENCODER_PHYS_H__
@@ -82,7 +82,7 @@ struct sde_encoder_virt_ops {
void (*handle_frame_done)(struct drm_encoder *parent,
struct sde_encoder_phys *phys, u32 event);
void (*get_qsync_fps)(struct drm_encoder *parent,
- u32 *qsync_fps);
+ u32 *qsync_fps, u32 vrr_fps);
};
/**
@@ -611,6 +611,26 @@ static inline enum sde_3d_blend_mode sde_encoder_helper_get_3d_blend_mode(
}
/**
+ * sde_encoder_phys_is_cwb_disabling - Check if CWB encoder attached to this
+ * CRTC and it is in SDE_ENC_DISABLING state.
+ * @phys_enc: Pointer to physical encoder structure
+ * @crtc: drm crtc
+ * @Return: true if cwb encoder is in disabling state
+ */
+static inline bool sde_encoder_phys_is_cwb_disabling(
+ struct sde_encoder_phys *phys, struct drm_crtc *crtc)
+{
+ struct sde_encoder_phys_wb *wb_enc;
+
+ if (!phys || !phys->in_clone_mode ||
+ phys->enable_state != SDE_ENC_DISABLING)
+ return false;
+
+ wb_enc = container_of(phys, struct sde_encoder_phys_wb, base);
+ return (wb_enc->crtc == crtc) ? true : false;
+}
+
+/**
* sde_encoder_helper_split_config - split display configuration helper function
* This helper function may be used by physical encoders to configure
* the split display related registers.
diff --git a/msm/sde/sde_encoder_phys_cmd.c b/msm/sde/sde_encoder_phys_cmd.c
index 4f99a496..f9f3873e 100644
--- a/msm/sde/sde_encoder_phys_cmd.c
+++ b/msm/sde/sde_encoder_phys_cmd.c
@@ -949,7 +949,7 @@ static int _get_tearcheck_threshold(struct sde_encoder_phys *phys_enc,
if (phys_enc->parent_ops.get_qsync_fps)
phys_enc->parent_ops.get_qsync_fps(
- phys_enc->parent, &qsync_min_fps);
+ phys_enc->parent, &qsync_min_fps, 0);
if (!qsync_min_fps || !default_fps || !yres) {
SDE_ERROR_CMDENC(cmd_enc,
diff --git a/msm/sde/sde_encoder_phys_vid.c b/msm/sde/sde_encoder_phys_vid.c
index ce778f00..7708cf8f 100644
--- a/msm/sde/sde_encoder_phys_vid.c
+++ b/msm/sde/sde_encoder_phys_vid.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
@@ -270,9 +270,9 @@ static void programmable_fetch_config(struct sde_encoder_phys *phys_enc,
m = phys_enc->sde_kms->catalog;
vfp_fetch_lines = programmable_fetch_get_num_lines(vid_enc,
- timing, true);
+ timing, false);
if (vfp_fetch_lines) {
- vert_total = get_vertical_total(timing, true);
+ vert_total = get_vertical_total(timing, false);
horiz_total = get_horizontal_total(timing);
vfp_fetch_start_vsync_counter =
(vert_total - vfp_fetch_lines) * horiz_total + 1;
@@ -461,7 +461,7 @@ static void sde_encoder_phys_vid_setup_timing_engine(
exit:
if (phys_enc->parent_ops.get_qsync_fps)
phys_enc->parent_ops.get_qsync_fps(
- phys_enc->parent, &qsync_min_fps);
+ phys_enc->parent, &qsync_min_fps, mode.vrefresh);
/* only panels which support qsync will have a non-zero min fps */
if (qsync_min_fps) {
@@ -1125,13 +1125,21 @@ static void sde_encoder_phys_vid_handle_post_kickoff(
static void sde_encoder_phys_vid_prepare_for_commit(
struct sde_encoder_phys *phys_enc)
{
+ struct drm_crtc *crtc;
- if (!phys_enc) {
+ if (!phys_enc || !phys_enc->parent) {
SDE_ERROR("invalid encoder parameters\n");
return;
}
- if (sde_connector_is_qsync_updated(phys_enc->connector))
+ crtc = phys_enc->parent->crtc;
+ if (!crtc || !crtc->state) {
+ SDE_ERROR("invalid crtc or crtc state\n");
+ return;
+ }
+
+ if (!msm_is_mode_seamless_vrr(&crtc->state->adjusted_mode) &&
+ sde_connector_is_qsync_updated(phys_enc->connector))
_sde_encoder_phys_vid_avr_ctrl(phys_enc);
}
diff --git a/msm/sde/sde_encoder_phys_wb.c b/msm/sde/sde_encoder_phys_wb.c
index eace7d51..e044ed0f 100644
--- a/msm/sde/sde_encoder_phys_wb.c
+++ b/msm/sde/sde_encoder_phys_wb.c
@@ -548,6 +548,13 @@ static void sde_encoder_phys_wb_setup_cdp(struct sde_encoder_phys *phys_enc,
intf_cfg_v1->wb_count = num_wb;
intf_cfg_v1->wb[0] = hw_wb->idx;
if (SDE_FORMAT_IS_YUV(format)) {
+ if (!phys_enc->hw_cdm) {
+ SDE_ERROR("Format:YUV but no cdm allocated\n");
+ SDE_EVT32(DRMID(phys_enc->parent),
+ SDE_EVTLOG_ERROR);
+ return;
+ }
+
intf_cfg_v1->cdm_count = num_wb;
intf_cfg_v1->cdm[0] = hw_cdm->idx;
}
@@ -582,18 +589,16 @@ static void sde_encoder_phys_wb_setup_cdp(struct sde_encoder_phys *phys_enc,
}
-static void _sde_enc_phys_wb_detect_cwb(struct sde_encoder_phys *phys_enc,
+static bool _sde_enc_phys_wb_detect_cwb(struct sde_encoder_phys *phys_enc,
struct drm_crtc_state *crtc_state)
{
struct drm_encoder *encoder;
struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
const struct sde_wb_cfg *wb_cfg = wb_enc->hw_wb->caps;
- phys_enc->in_clone_mode = false;
-
/* Check if WB has CWB support */
if (!(wb_cfg->features & BIT(SDE_WB_HAS_CWB)))
- return;
+ return false;
/* if any other encoder is connected to same crtc enable clone mode*/
drm_for_each_encoder(encoder, crtc_state->crtc->dev) {
@@ -601,12 +606,11 @@ static void _sde_enc_phys_wb_detect_cwb(struct sde_encoder_phys *phys_enc,
continue;
if (phys_enc->parent != encoder) {
- phys_enc->in_clone_mode = true;
- break;
+ return true;
}
}
- SDE_DEBUG("detect CWB - status:%d\n", phys_enc->in_clone_mode);
+ return false;
}
static int _sde_enc_phys_wb_validate_cwb(struct sde_encoder_phys *phys_enc,
@@ -686,6 +690,7 @@ static int sde_encoder_phys_wb_atomic_check(
struct sde_rect wb_roi;
const struct drm_display_mode *mode = &crtc_state->mode;
int rc;
+ bool clone_mode_curr = false;
SDE_DEBUG("[atomic_check:%d,%d,\"%s\",%d,%d]\n",
hw_wb->idx - WB_0, mode->base.id, mode->name,
@@ -701,8 +706,20 @@ static int sde_encoder_phys_wb_atomic_check(
return -EINVAL;
}
- _sde_enc_phys_wb_detect_cwb(phys_enc, crtc_state);
+ clone_mode_curr = _sde_enc_phys_wb_detect_cwb(phys_enc, crtc_state);
+
+ /**
+ * Fail the WB commit when there is a CWB session enabled in HW.
+ * CWB session needs to be disabled since WB and CWB share the same
+ * writeback hardware block.
+ */
+ if (phys_enc->in_clone_mode && !clone_mode_curr) {
+ SDE_ERROR("WB commit before CWB disable\n");
+ return -EINVAL;
+ }
+ SDE_DEBUG("detect CWB - status:%d\n", clone_mode_curr);
+ phys_enc->in_clone_mode = clone_mode_curr;
memset(&wb_roi, 0, sizeof(struct sde_rect));
rc = sde_wb_connector_state_get_output_roi(conn_state, &wb_roi);
@@ -714,11 +731,10 @@ static int sde_encoder_phys_wb_atomic_check(
SDE_DEBUG("[roi:%u,%u,%u,%u]\n", wb_roi.x, wb_roi.y,
wb_roi.w, wb_roi.h);
- /* bypass check if commit with no framebuffer */
fb = sde_wb_connector_state_get_output_fb(conn_state);
if (!fb) {
- SDE_DEBUG("no output framebuffer\n");
- return 0;
+ SDE_ERROR("no output framebuffer\n");
+ return -EINVAL;
}
SDE_DEBUG("[fb_id:%u][fb:%u,%u]\n", fb->base.id,
@@ -1019,7 +1035,8 @@ static void _sde_encoder_phys_wb_frame_done_helper(void *arg, bool frame_error)
SDE_DEBUG("[wb:%d,%u]\n", hw_wb->idx - WB_0, wb_enc->frame_count);
/* don't notify upper layer for internal commit */
- if (phys_enc->enable_state == SDE_ENC_DISABLING)
+ if (phys_enc->enable_state == SDE_ENC_DISABLING &&
+ !phys_enc->in_clone_mode)
goto complete;
if (phys_enc->parent_ops.handle_frame_done &&
@@ -1199,6 +1216,32 @@ static int sde_encoder_phys_wb_frame_timeout(struct sde_encoder_phys *phys_enc)
return event;
}
+static void _sde_encoder_phys_wb_reset_state(
+ struct sde_encoder_phys *phys_enc)
+{
+ struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+
+ /*
+ * frame count and kickoff count are only used for debug purpose. Frame
+ * count can be more than kickoff count at the end of disable call due
+ * to extra frame_done wait. It does not cause any issue because
+ * frame_done wait is based on retire_fence count. Leaving these
+ * counters for debugging purpose.
+ */
+ if (wb_enc->frame_count != wb_enc->kickoff_count) {
+ SDE_EVT32(DRMID(phys_enc->parent), WBID(wb_enc),
+ wb_enc->kickoff_count, wb_enc->frame_count,
+ phys_enc->in_clone_mode);
+ wb_enc->frame_count = wb_enc->kickoff_count;
+ }
+
+ phys_enc->enable_state = SDE_ENC_DISABLED;
+ wb_enc->crtc = NULL;
+ phys_enc->hw_cdm = NULL;
+ phys_enc->hw_ctl = NULL;
+ phys_enc->in_clone_mode = false;
+}
+
static int _sde_encoder_phys_wb_wait_for_commit_done(
struct sde_encoder_phys *phys_enc, bool is_disable)
{
@@ -1282,7 +1325,18 @@ skip_wait:
static int sde_encoder_phys_wb_wait_for_commit_done(
struct sde_encoder_phys *phys_enc)
{
- return _sde_encoder_phys_wb_wait_for_commit_done(phys_enc, false);
+ int rc;
+
+ if (phys_enc->enable_state == SDE_ENC_DISABLING &&
+ phys_enc->in_clone_mode) {
+ rc = _sde_encoder_phys_wb_wait_for_commit_done(phys_enc, true);
+ _sde_encoder_phys_wb_reset_state(phys_enc);
+ sde_encoder_phys_wb_irq_ctrl(phys_enc, false);
+ } else {
+ rc = _sde_encoder_phys_wb_wait_for_commit_done(phys_enc, false);
+ }
+
+ return rc;
}
static int sde_encoder_phys_wb_wait_for_cwb_done(
@@ -1566,7 +1620,9 @@ static void sde_encoder_phys_wb_disable(struct sde_encoder_phys *phys_enc)
SDE_DEBUG("[wait_for_done: wb:%d, frame:%u, kickoff:%u]\n",
hw_wb->idx - WB_0, wb_enc->frame_count,
wb_enc->kickoff_count);
- _sde_encoder_phys_wb_wait_for_commit_done(phys_enc, true);
+
+ if (!phys_enc->in_clone_mode || !wb_enc->crtc->state->active)
+ _sde_encoder_phys_wb_wait_for_commit_done(phys_enc, true);
if (!phys_enc->hw_ctl || !phys_enc->parent ||
!phys_enc->sde_kms || !wb_enc->fb_disable) {
@@ -1574,11 +1630,16 @@ static void sde_encoder_phys_wb_disable(struct sde_encoder_phys *phys_enc)
goto exit;
}
- /* avoid reset frame for CWB */
if (phys_enc->in_clone_mode) {
_sde_encoder_phys_wb_setup_cwb(phys_enc, false);
_sde_encoder_phys_wb_update_cwb_flush(phys_enc, false);
- phys_enc->in_clone_mode = false;
+ phys_enc->enable_state = SDE_ENC_DISABLING;
+
+ if (wb_enc->crtc->state->active) {
+ sde_encoder_phys_wb_irq_ctrl(phys_enc, true);
+ return;
+ }
+
goto exit;
}
@@ -1611,24 +1672,7 @@ static void sde_encoder_phys_wb_disable(struct sde_encoder_phys *phys_enc)
sde_encoder_phys_wb_irq_ctrl(phys_enc, false);
exit:
- /*
- * frame count and kickoff count are only used for debug purpose. Frame
- * count can be more than kickoff count at the end of disable call due
- * to extra frame_done wait. It does not cause any issue because
- * frame_done wait is based on retire_fence count. Leaving these
- * counters for debugging purpose.
- */
- if (wb_enc->frame_count != wb_enc->kickoff_count) {
- SDE_EVT32(DRMID(phys_enc->parent), WBID(wb_enc),
- wb_enc->kickoff_count, wb_enc->frame_count,
- phys_enc->in_clone_mode);
- wb_enc->frame_count = wb_enc->kickoff_count;
- }
-
- phys_enc->enable_state = SDE_ENC_DISABLED;
- wb_enc->crtc = NULL;
- phys_enc->hw_cdm = NULL;
- phys_enc->hw_ctl = NULL;
+ _sde_encoder_phys_wb_reset_state(phys_enc);
}
/**
diff --git a/msm/sde/sde_hw_catalog.c b/msm/sde/sde_hw_catalog.c
index 8ebbcb48..2e8bb2af 100644
--- a/msm/sde/sde_hw_catalog.c
+++ b/msm/sde/sde_hw_catalog.c
@@ -3209,11 +3209,22 @@ static int _sde_parse_prop_check(struct sde_mdss_cfg *cfg,
of_fdt_get_ddrtype() == LP_DDR4_TYPE)
cfg->mdp[0].highest_bank_bit = 0x02;
+ cfg->mdp[0].ubwc_static = PROP_VALUE_ACCESS(prop_value, UBWC_STATIC, 0);
+ if (!prop_exists[UBWC_STATIC])
+ cfg->mdp[0].ubwc_static = DEFAULT_SDE_UBWC_STATIC;
+
if (IS_SDE_MAJOR_MINOR_SAME(cfg->hwversion, SDE_HW_VER_630)) {
ret = _sde_get_ubwc_hbb(prop_exists, prop_value);
- if (ret >= 0)
+ if (ret >= 0) {
+ u32 ubwc_static, hbb;
+
cfg->mdp[0].highest_bank_bit = ret;
+ ubwc_static = cfg->mdp[0].ubwc_static;
+ hbb = ((cfg->mdp[0].highest_bank_bit & 0x7) << 4);
+ ubwc_static = ((ubwc_static & 0xff8f) | hbb);
+ cfg->mdp[0].ubwc_static = ubwc_static;
+ }
}
cfg->macrotile_mode = PROP_VALUE_ACCESS(prop_value, MACROTILE_MODE, 0);
@@ -3223,10 +3234,6 @@ static int _sde_parse_prop_check(struct sde_mdss_cfg *cfg,
cfg->ubwc_bw_calc_version =
PROP_VALUE_ACCESS(prop_value, UBWC_BW_CALC_VERSION, 0);
- cfg->mdp[0].ubwc_static = PROP_VALUE_ACCESS(prop_value, UBWC_STATIC, 0);
- if (!prop_exists[UBWC_STATIC])
- cfg->mdp[0].ubwc_static = DEFAULT_SDE_UBWC_STATIC;
-
cfg->mdp[0].ubwc_swizzle = PROP_VALUE_ACCESS(prop_value,
UBWC_SWIZZLE, 0);
if (!prop_exists[UBWC_SWIZZLE])
@@ -4295,7 +4302,7 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
sde_cfg->has_cwb_support = true;
sde_cfg->has_wb_ubwc = true;
sde_cfg->has_qsync = true;
- sde_cfg->perf.min_prefill_lines = 24;
+ sde_cfg->perf.min_prefill_lines = 35;
sde_cfg->vbif_qos_nlvl = 8;
sde_cfg->ts_prefill_rev = 2;
sde_cfg->ctl_rev = SDE_CTL_CFG_VERSION_1_0_0;
@@ -4345,7 +4352,7 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
} else if (IS_LAGOON_TARGET(hw_rev)) {
sde_cfg->has_cwb_support = true;
sde_cfg->has_qsync = true;
- sde_cfg->perf.min_prefill_lines = 24;
+ sde_cfg->perf.min_prefill_lines = 35;
sde_cfg->vbif_qos_nlvl = 8;
sde_cfg->ts_prefill_rev = 2;
sde_cfg->ctl_rev = SDE_CTL_CFG_VERSION_1_0_0;
@@ -4359,6 +4366,7 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
sde_cfg->has_hdr = true;
sde_cfg->has_vig_p010 = true;
sde_cfg->true_inline_rot_rev = SDE_INLINE_ROT_VERSION_2_0_0;
+ sde_cfg->has_3d_merge_reset = true;
} else if (IS_SCUBA_TARGET(hw_rev)) {
sde_cfg->has_cwb_support = false;
sde_cfg->has_qsync = true;
@@ -4372,6 +4380,7 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
sde_cfg->sui_block_xin_mask = 0x1;
sde_cfg->has_hdr = false;
sde_cfg->has_sui_blendstage = true;
+ sde_cfg->allow_gdsc_toggle = true;
clear_bit(MDSS_INTR_AD4_0_INTR, sde_cfg->mdss_irqs);
clear_bit(MDSS_INTR_AD4_1_INTR, sde_cfg->mdss_irqs);
} else {
diff --git a/msm/sde/sde_hw_color_proc_v4.c b/msm/sde/sde_hw_color_proc_v4.c
index f266683a..19ccb254 100644
--- a/msm/sde/sde_hw_color_proc_v4.c
+++ b/msm/sde/sde_hw_color_proc_v4.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
*/
#include <drm/msm_drm_pp.h>
#include "sde_hw_color_proc_common_v4.h"
@@ -335,7 +335,11 @@ void sde_setup_dspp_ltm_hist_ctrlv1(struct sde_hw_dspp *ctx, void *cfg,
op_mode = SDE_REG_READ(&ctx->hw, offset);
if (!enable) {
- op_mode &= ~BIT(0);
+ if (op_mode & BIT(1))
+ op_mode &= ~BIT(0);
+ else
+ op_mode = 0;
+
SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->ltm.base + 0x4,
(op_mode & 0x1FFFFFF));
return;
diff --git a/msm/sde/sde_hw_reg_dma_v1_color_proc.c b/msm/sde/sde_hw_reg_dma_v1_color_proc.c
index ae789c2c..4d7a4d00 100644
--- a/msm/sde/sde_hw_reg_dma_v1_color_proc.c
+++ b/msm/sde/sde_hw_reg_dma_v1_color_proc.c
@@ -3586,16 +3586,10 @@ void reg_dmav1_setup_ltm_roiv1(struct sde_hw_dspp *ctx, void *cfg)
}
}
-static void ltm_vlutv1_disable(struct sde_hw_dspp *ctx, void *cfg,
- u32 num_mixers, enum sde_ltm *dspp_idx)
+static void ltm_vlutv1_disable(struct sde_hw_dspp *ctx)
{
- struct sde_hw_cp_cfg *hw_cfg = cfg;
- struct sde_hw_reg_dma_ops *dma_ops;
- struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
- struct sde_reg_dma_kickoff_cfg kick_off;
- int rc, i = 0;
enum sde_ltm idx = 0;
- u32 opmode = 0;
+ u32 opmode = 0, offset = 0;
idx = (enum sde_ltm)ctx->idx;
if (idx >= LTM_MAX) {
@@ -3603,40 +3597,15 @@ static void ltm_vlutv1_disable(struct sde_hw_dspp *ctx, void *cfg,
return;
}
- dma_ops = sde_reg_dma_get_ops();
- dma_ops->reset_reg_dma_buf(ltm_buf[LTM_VLUT][idx]);
- REG_DMA_INIT_OPS(dma_write_cfg, ltm_mapping[idx], LTM_VLUT,
- ltm_buf[LTM_VLUT][idx]);
-
- for (i = 0; i < num_mixers; i++) {
- dma_write_cfg.blk = ltm_mapping[dspp_idx[i]];
- REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0,
- 0, 0);
- rc = dma_ops->setup_payload(&dma_write_cfg);
- if (rc) {
- DRM_ERROR("write decode select failed ret %d\n", rc);
- return;
- }
-
- ltm_vlut_ops_mask[dspp_idx[i]] &= ~ltm_vlut;
+ offset = ctx->cap->sblk->ltm.base + 0x4;
+ ltm_vlut_ops_mask[ctx->idx] &= ~ltm_vlut;
+ opmode = SDE_REG_READ(&ctx->hw, offset);
+ if (opmode & BIT(0))
/* disable VLUT/INIT/ROI */
- REG_DMA_SETUP_OPS(dma_write_cfg, 0x04, &opmode, sizeof(opmode),
- REG_SINGLE_MODIFY, 0, 0,
- REG_DMA_LTM_VLUT_DISABLE_OP_MASK);
- rc = dma_ops->setup_payload(&dma_write_cfg);
- if (rc) {
- DRM_ERROR("opmode write failed ret %d\n", rc);
- return;
- }
- }
-
- REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, ltm_buf[LTM_VLUT][idx],
- REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
- rc = dma_ops->kick_off(&kick_off);
- if (rc) {
- DRM_ERROR("failed to kick off ret %d\n", rc);
- return;
- }
+ opmode &= REG_DMA_LTM_VLUT_DISABLE_OP_MASK;
+ else
+ opmode = 0;
+ SDE_REG_WRITE(&ctx->hw, offset, opmode);
}
void reg_dmav1_setup_ltm_vlutv1(struct sde_hw_dspp *ctx, void *cfg)
@@ -3656,6 +3625,13 @@ void reg_dmav1_setup_ltm_vlutv1(struct sde_hw_dspp *ctx, void *cfg)
if (rc)
return;
+ /* disable case */
+ if (!hw_cfg->payload) {
+ DRM_DEBUG_DRIVER("Disable LTM vlut feature\n");
+ ltm_vlutv1_disable(ctx);
+ return;
+ }
+
idx = (enum sde_ltm)ctx->idx;
num_mixers = hw_cfg->num_of_mixers;
rc = reg_dmav1_get_ltm_blk(hw_cfg, idx, &dspp_idx[0], &blk);
@@ -3665,13 +3641,6 @@ void reg_dmav1_setup_ltm_vlutv1(struct sde_hw_dspp *ctx, void *cfg)
return;
}
- /* disable case */
- if (!hw_cfg->payload) {
- DRM_DEBUG_DRIVER("Disable LTM vlut feature\n");
- ltm_vlutv1_disable(ctx, cfg, num_mixers, dspp_idx);
- return;
- }
-
if (hw_cfg->len != sizeof(struct drm_msm_ltm_data)) {
DRM_ERROR("invalid size of payload len %d exp %zd\n",
hw_cfg->len, sizeof(struct drm_msm_ltm_data));
diff --git a/msm/sde/sde_hw_util.c b/msm/sde/sde_hw_util.c
index ff4b5dfd..8b65855d 100644
--- a/msm/sde/sde_hw_util.c
+++ b/msm/sde/sde_hw_util.c
@@ -76,7 +76,10 @@ void sde_reg_write(struct sde_hw_blk_reg_map *c,
if (c->log_mask & sde_hw_util_log_mask)
SDE_DEBUG_DRIVER("[%s:0x%X] <= 0x%X\n",
name, c->blk_off + reg_off, val);
+ SDE_EVT32_REGWRITE(c->blk_off, reg_off, val);
writel_relaxed(val, c->base_off + c->blk_off + reg_off);
+ SDE_REG_LOG(c->log_mask ? ilog2(c->log_mask)+1 : 0,
+ val, c->blk_off + reg_off);
}
int sde_reg_read(struct sde_hw_blk_reg_map *c, u32 reg_off)
diff --git a/msm/sde/sde_kms.c b/msm/sde/sde_kms.c
index 29804065..150145ed 100644
--- a/msm/sde/sde_kms.c
+++ b/msm/sde/sde_kms.c
@@ -1141,10 +1141,12 @@ static void sde_kms_check_for_ext_vote(struct sde_kms *sde_kms,
* cases, allow the target to go through a gdsc toggle after
* crtc is disabled.
*/
- if (!crtc_enabled && phandle->is_ext_vote_en) {
+ if (!crtc_enabled && (phandle->is_ext_vote_en ||
+ !dev->dev->power.runtime_auto)) {
pm_runtime_put_sync(sde_kms->dev->dev);
- SDE_EVT32(phandle->is_ext_vote_en);
pm_runtime_get_sync(sde_kms->dev->dev);
+ SDE_EVT32(phandle->is_ext_vote_en,
+ dev->dev->power.runtime_auto);
}
mutex_unlock(&phandle->ext_client_lock);
@@ -1223,6 +1225,7 @@ static void sde_kms_wait_for_commit_done(struct msm_kms *kms,
struct drm_encoder *encoder;
struct drm_device *dev;
int ret;
+ bool cwb_disabling;
if (!kms || !crtc || !crtc->state) {
SDE_ERROR("invalid params\n");
@@ -1248,8 +1251,14 @@ static void sde_kms_wait_for_commit_done(struct msm_kms *kms,
SDE_ATRACE_BEGIN("sde_kms_wait_for_commit_done");
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->crtc != crtc)
- continue;
+ cwb_disabling = false;
+ if (encoder->crtc != crtc) {
+ cwb_disabling = sde_encoder_is_cwb_disabling(encoder,
+ crtc);
+ if (!cwb_disabling)
+ continue;
+ }
+
/*
* Wait for post-flush if necessary to delay before
* plane_cleanup. For example, wait for vsync in case of video
@@ -1264,6 +1273,9 @@ static void sde_kms_wait_for_commit_done(struct msm_kms *kms,
}
sde_crtc_complete_flip(crtc, NULL);
+
+ if (cwb_disabling)
+ sde_encoder_virt_reset(encoder);
}
SDE_ATRACE_END("sde_ksm_wait_for_commit_done");
@@ -1274,7 +1286,7 @@ static void sde_kms_prepare_fence(struct msm_kms *kms,
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
- int i, rc;
+ int i;
if (!kms || !old_state || !old_state->dev || !old_state->acquire_ctx) {
SDE_ERROR("invalid argument(s)\n");
@@ -1282,15 +1294,6 @@ static void sde_kms_prepare_fence(struct msm_kms *kms,
}
SDE_ATRACE_BEGIN("sde_kms_prepare_fence");
-retry:
- /* attempt to acquire ww mutex for connection */
- rc = drm_modeset_lock(&old_state->dev->mode_config.connection_mutex,
- old_state->acquire_ctx);
-
- if (rc == -EDEADLK) {
- drm_modeset_backoff(old_state->acquire_ctx);
- goto retry;
- }
/* old_state actually contains updated crtc pointers */
for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
@@ -1589,6 +1592,7 @@ static int _sde_kms_setup_displays(struct drm_device *dev,
.get_panel_vfp = dsi_display_get_panel_vfp,
.get_default_lms = dsi_display_get_default_lms,
.set_idle_hint = dsi_display_set_idle_hint,
+ .get_qsync_min_fps = dsi_display_get_qsync_min_fps,
};
static const struct sde_connector_ops wb_ops = {
.post_init = sde_wb_connector_post_init,
@@ -2188,6 +2192,48 @@ static void sde_kms_destroy(struct msm_kms *kms)
kfree(sde_kms);
}
+static int sde_kms_set_crtc_for_conn(struct drm_device *dev,
+ struct drm_encoder *enc, struct drm_atomic_state *state)
+{
+ struct drm_connector *conn = NULL;
+ struct drm_connector *tmp_conn = NULL;
+ struct drm_connector_list_iter conn_iter;
+ struct drm_crtc_state *crtc_state = NULL;
+ struct drm_connector_state *conn_state = NULL;
+ int ret = 0;
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(tmp_conn, &conn_iter) {
+ if (enc == tmp_conn->state->best_encoder) {
+ conn = tmp_conn;
+ break;
+ }
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ if (!conn) {
+ SDE_ERROR("error in finding conn for enc:%d\n", DRMID(enc));
+ return -EINVAL;
+ }
+
+ crtc_state = drm_atomic_get_crtc_state(state, enc->crtc);
+ conn_state = drm_atomic_get_connector_state(state, conn);
+ if (IS_ERR(conn_state)) {
+ SDE_ERROR("error %d getting connector %d state\n",
+ ret, DRMID(conn));
+ return -EINVAL;
+ }
+
+ crtc_state->active = true;
+ ret = drm_atomic_set_crtc_for_connector(conn_state, enc->crtc);
+ if (ret)
+ SDE_ERROR("error %d setting the crtc\n", ret);
+
+ _sde_crtc_clear_dim_layers_v1(crtc_state);
+
+ return 0;
+}
+
static void _sde_kms_plane_force_remove(struct drm_plane *plane,
struct drm_atomic_state *state)
{
@@ -2219,8 +2265,9 @@ static int _sde_kms_remove_fbs(struct sde_kms *sde_kms, struct drm_file *file,
struct drm_framebuffer *fb, *tfb;
struct list_head fbs;
struct drm_plane *plane;
+ struct drm_crtc *crtc = NULL;
+ unsigned int crtc_mask = 0;
int ret = 0;
- u32 plane_mask = 0;
INIT_LIST_HEAD(&fbs);
@@ -2229,9 +2276,11 @@ static int _sde_kms_remove_fbs(struct sde_kms *sde_kms, struct drm_file *file,
list_move_tail(&fb->filp_head, &fbs);
drm_for_each_plane(plane, dev) {
- if (plane->fb == fb) {
- plane_mask |=
- 1 << drm_plane_index(plane);
+ if (plane->state &&
+ plane->state->fb == fb) {
+ if (plane->state->crtc)
+ crtc_mask |= drm_crtc_mask(
+ plane->state->crtc);
_sde_kms_plane_force_remove(
plane, state);
}
@@ -2244,11 +2293,22 @@ static int _sde_kms_remove_fbs(struct sde_kms *sde_kms, struct drm_file *file,
if (list_empty(&fbs)) {
SDE_DEBUG("skip commit as no fb(s)\n");
- drm_atomic_state_put(state);
return 0;
}
- SDE_DEBUG("committing after removing all the pipes\n");
+ drm_for_each_crtc(crtc, dev) {
+ if ((crtc_mask & drm_crtc_mask(crtc)) && crtc->state->active) {
+ struct drm_encoder *drm_enc;
+
+ drm_for_each_encoder_mask(drm_enc, crtc->dev,
+ crtc->state->encoder_mask)
+ ret = sde_kms_set_crtc_for_conn(
+ dev, drm_enc, state);
+ }
+ }
+
+ SDE_EVT32(state, crtc_mask);
+ SDE_DEBUG("null commit after removing all the pipes\n");
ret = drm_atomic_commit(state);
if (ret) {
@@ -2828,9 +2888,10 @@ static int sde_kms_cont_splash_config(struct msm_kms *kms)
return rc;
}
-static bool sde_kms_check_for_splash(struct msm_kms *kms)
+static bool sde_kms_check_for_splash(struct msm_kms *kms, struct drm_crtc *crtc)
{
struct sde_kms *sde_kms;
+ struct drm_encoder *encoder;
if (!kms) {
SDE_ERROR("invalid kms\n");
@@ -2838,7 +2899,18 @@ static bool sde_kms_check_for_splash(struct msm_kms *kms)
}
sde_kms = to_sde_kms(kms);
- return sde_kms->splash_data.num_splash_displays;
+
+ if (!crtc || !sde_kms->splash_data.num_splash_displays)
+ return !!sde_kms->splash_data.num_splash_displays;
+
+ drm_for_each_encoder_mask(encoder, crtc->dev,
+ crtc->state->encoder_mask) {
+ if (sde_encoder_in_cont_splash(encoder))
+ return true;
+ }
+
+ return false;
+
}
static int sde_kms_get_mixer_count(const struct msm_kms *kms,
@@ -2891,12 +2963,7 @@ static void _sde_kms_null_commit(struct drm_device *dev,
struct drm_encoder *enc)
{
struct drm_modeset_acquire_ctx ctx;
- struct drm_connector *conn = NULL;
- struct drm_connector *tmp_conn = NULL;
- struct drm_connector_list_iter conn_iter;
struct drm_atomic_state *state = NULL;
- struct drm_crtc_state *crtc_state = NULL;
- struct drm_connector_state *conn_state = NULL;
int retry_cnt = 0;
int ret = 0;
@@ -2920,32 +2987,10 @@ retry:
}
state->acquire_ctx = &ctx;
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(tmp_conn, &conn_iter) {
- if (enc == tmp_conn->state->best_encoder) {
- conn = tmp_conn;
- break;
- }
- }
- drm_connector_list_iter_end(&conn_iter);
-
- if (!conn) {
- SDE_ERROR("error in finding conn for enc:%d\n", DRMID(enc));
- goto end;
- }
-
- crtc_state = drm_atomic_get_crtc_state(state, enc->crtc);
- conn_state = drm_atomic_get_connector_state(state, conn);
- if (IS_ERR(conn_state)) {
- SDE_ERROR("error %d getting connector %d state\n",
- ret, DRMID(conn));
- goto end;
- }
- crtc_state->active = true;
- ret = drm_atomic_set_crtc_for_connector(conn_state, enc->crtc);
+ ret = sde_kms_set_crtc_for_conn(dev, enc, state);
if (ret)
- SDE_ERROR("error %d setting the crtc\n", ret);
+ goto end;
ret = drm_atomic_commit(state);
if (ret)
@@ -3473,6 +3518,7 @@ static int sde_kms_pd_enable(struct generic_pm_domain *genpd)
static int sde_kms_pd_disable(struct generic_pm_domain *genpd)
{
struct sde_kms *sde_kms = genpd_to_sde_kms(genpd);
+ struct msm_drm_private *priv;
SDE_DEBUG("\n");
@@ -3480,6 +3526,9 @@ static int sde_kms_pd_disable(struct generic_pm_domain *genpd)
SDE_EVT32(genpd->device_count);
+ priv = sde_kms->dev->dev_private;
+ sde_kms_check_for_ext_vote(sde_kms, &priv->phandle);
+
return 0;
}
diff --git a/msm/sde/sde_plane.c b/msm/sde/sde_plane.c
index 3062b7a5..6b2341ba 100644
--- a/msm/sde/sde_plane.c
+++ b/msm/sde/sde_plane.c
@@ -248,6 +248,7 @@ static void _sde_plane_set_qos_lut(struct drm_plane *plane,
u32 frame_rate, qos_count, fps_index = 0, lut_index, index;
struct sde_perf_cfg *perf;
struct sde_plane_state *pstate;
+ struct sde_kms *kms;
if (!plane || !fb) {
SDE_ERROR("invalid arguments\n");
@@ -256,6 +257,11 @@ static void _sde_plane_set_qos_lut(struct drm_plane *plane,
psde = to_sde_plane(plane);
pstate = to_sde_plane_state(plane->state);
+ kms = _sde_plane_get_kms(plane);
+ if (!kms) {
+ SDE_ERROR("invalid kms\n");
+ return;
+ }
if (!psde->pipe_hw || !psde->pipe_sblk || !psde->catalog) {
SDE_ERROR("invalid arguments\n");
@@ -282,7 +288,12 @@ static void _sde_plane_set_qos_lut(struct drm_plane *plane,
fb->format->format,
fb->modifier);
- if (fmt && SDE_FORMAT_IS_LINEAR(fmt))
+ if (fmt && SDE_FORMAT_IS_LINEAR(fmt) &&
+ pstate->scaler3_cfg.enable &&
+ IS_SDE_MAJOR_MINOR_SAME(kms->catalog->hwversion,
+ SDE_HW_VER_640))
+ lut_index = SDE_QOS_LUT_USAGE_MACROTILE_QSEED;
+ else if (fmt && SDE_FORMAT_IS_LINEAR(fmt))
lut_index = SDE_QOS_LUT_USAGE_LINEAR;
else if (pstate->scaler3_cfg.enable)
lut_index = SDE_QOS_LUT_USAGE_MACROTILE_QSEED;
@@ -2551,6 +2562,39 @@ static int _sde_plane_validate_shared_crtc(struct sde_plane *psde,
}
+static int _sde_plane_validate_fb(struct sde_plane *psde,
+ struct drm_plane_state *state)
+{
+ struct sde_plane_state *pstate;
+ struct drm_framebuffer *fb;
+ uint32_t fb_ns = 0, fb_sec = 0, fb_sec_dir = 0;
+ unsigned long flags = 0;
+ int mode, ret = 0, n, i;
+
+ pstate = to_sde_plane_state(state);
+ mode = sde_plane_get_property(pstate,
+ PLANE_PROP_FB_TRANSLATION_MODE);
+
+ fb = state->fb;
+ n = fb->format->num_planes;
+ for (i = 0; i < n; i++) {
+ ret = msm_fb_obj_get_attrs(fb->obj[i], &fb_ns, &fb_sec,
+ &fb_sec_dir, &flags);
+
+ if (!ret && ((fb_ns && (mode != SDE_DRM_FB_NON_SEC)) ||
+ (fb_sec && (mode != SDE_DRM_FB_SEC)) ||
+ (fb_sec_dir && (mode != SDE_DRM_FB_SEC_DIR_TRANS)))) {
+ SDE_ERROR_PLANE(psde, "mode:%d fb:%d flag:0x%x rc:%d\n",
+ mode, fb->base.id, flags, ret);
+ SDE_EVT32(psde->base.base.id, fb->base.id, flags,
+ fb_ns, fb_sec, fb_sec_dir, ret, SDE_EVTLOG_ERROR);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int sde_plane_sspp_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
@@ -2664,6 +2708,11 @@ static int sde_plane_sspp_atomic_check(struct drm_plane *plane,
if (ret)
return ret;
+ ret = _sde_plane_validate_fb(psde, state);
+
+ if (ret)
+ return ret;
+
pstate->const_alpha_en = fmt->alpha_enable &&
(SDE_DRM_BLEND_OP_OPAQUE !=
sde_plane_get_property(pstate, PLANE_PROP_BLEND_OP)) &&
@@ -4118,6 +4167,7 @@ static void sde_plane_destroy_state(struct drm_plane *plane,
/* remove ref count for fence */
if (pstate->input_fence)
sde_sync_put(pstate->input_fence);
+ pstate->input_fence = 0;
/* destroy value helper */
msm_property_destroy_state(&psde->property_info, pstate,
@@ -4551,7 +4601,8 @@ struct drm_plane *sde_plane_init(struct drm_device *dev,
SDE_ERROR("[%u]SSPP init failed\n", pipe);
ret = PTR_ERR(psde->pipe_hw);
goto clean_plane;
- } else if (!psde->pipe_hw->cap || !psde->pipe_hw->cap->sblk) {
+ } else if (!psde->pipe_hw || !psde->pipe_hw->cap ||
+ !psde->pipe_hw->cap->sblk) {
SDE_ERROR("[%u]SSPP init returned invalid cfg\n", pipe);
goto clean_sspp;
}
diff --git a/msm/sde/sde_rm.c b/msm/sde/sde_rm.c
index 24c830c2..76e908b4 100644
--- a/msm/sde/sde_rm.c
+++ b/msm/sde/sde_rm.c
@@ -33,6 +33,9 @@
(t).num_intf == (r).num_intf)
#define IS_COMPATIBLE_PP_DSC(p, d) (p % 2 == d % 2)
+/* ~one vsync poll time for rsvp_nxt to be cleared by modeset from commit thread */
+#define RM_NXT_CLEAR_POLL_TIMEOUT_US 16600
+
/**
* toplogy information to be used when ctl path version does not
* support driving more than one interface per ctl_path
@@ -57,12 +60,12 @@ static const struct sde_rm_topology_def g_ctl_ver_1_top_table[] = {
{ SDE_RM_TOPOLOGY_NONE, 0, 0, 0, 0, false },
{ SDE_RM_TOPOLOGY_SINGLEPIPE, 1, 0, 1, 1, false },
{ SDE_RM_TOPOLOGY_SINGLEPIPE_DSC, 1, 1, 1, 1, false },
- { SDE_RM_TOPOLOGY_DUALPIPE, 2, 0, 2, 1, true },
- { SDE_RM_TOPOLOGY_DUALPIPE_DSC, 2, 2, 2, 1, true },
+ { SDE_RM_TOPOLOGY_DUALPIPE, 2, 0, 2, 1, false },
+ { SDE_RM_TOPOLOGY_DUALPIPE_DSC, 2, 2, 2, 1, false },
{ SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE, 2, 0, 1, 1, false },
{ SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE_DSC, 2, 1, 1, 1, false },
{ SDE_RM_TOPOLOGY_DUALPIPE_DSCMERGE, 2, 2, 1, 1, false },
- { SDE_RM_TOPOLOGY_PPSPLIT, 1, 0, 2, 1, true },
+ { SDE_RM_TOPOLOGY_PPSPLIT, 1, 0, 2, 1, false },
};
@@ -2103,6 +2106,30 @@ static int _sde_rm_commit_rsvp(
return ret;
}
+/* call this only after rm_mutex held */
+struct sde_rm_rsvp *_sde_rm_poll_get_rsvp_nxt_locked(struct sde_rm *rm,
+ struct drm_encoder *enc)
+{
+ int i;
+ u32 loop_count = 20;
+ struct sde_rm_rsvp *rsvp_nxt = NULL;
+ u32 sleep = RM_NXT_CLEAR_POLL_TIMEOUT_US / loop_count;
+
+ for (i = 0; i < loop_count; i++) {
+ rsvp_nxt = _sde_rm_get_rsvp_nxt(rm, enc);
+ if (!rsvp_nxt)
+ return rsvp_nxt;
+
+ mutex_unlock(&rm->rm_lock);
+ SDE_DEBUG("iteration i:%d sleep range:%uus to %uus\n",
+ i, sleep, sleep * 2);
+ usleep_range(sleep, sleep * 2);
+ mutex_lock(&rm->rm_lock);
+ }
+	/* make sure to get the latest rsvp_nxt to avoid use-after-free issues */
+ return _sde_rm_get_rsvp_nxt(rm, enc);
+}
+
int sde_rm_reserve(
struct sde_rm *rm,
struct drm_encoder *enc,
@@ -2154,16 +2181,19 @@ int sde_rm_reserve(
* commit rsvps. This rsvp_nxt can be cleared by a back to back
* check_only commit with modeset when its predecessor atomic
* commit is delayed / not committed the reservation yet.
- * Bail out in such cases so that check only commit
- * comes again after earlier commit gets processed.
+ * Poll for rsvp_nxt clear, allow the check_only commit if rsvp_nxt
+ * gets cleared and bailout if it does not get cleared before timeout.
*/
-
if (test_only && rsvp_cur && rsvp_nxt) {
- SDE_ERROR("cur %d nxt %d enc %d conn %d\n", rsvp_cur->seq,
- rsvp_nxt->seq, enc->base.id,
- conn_state->connector->base.id);
- ret = -EINVAL;
- goto end;
+ rsvp_nxt = _sde_rm_poll_get_rsvp_nxt_locked(rm, enc);
+ if (rsvp_nxt) {
+ SDE_ERROR("poll timeout cur %d nxt %d enc %d\n",
+ rsvp_cur->seq, rsvp_nxt->seq, enc->base.id);
+ SDE_EVT32(rsvp_cur->seq, rsvp_nxt->seq,
+ enc->base.id, SDE_EVTLOG_ERROR);
+ ret = -EINVAL;
+ goto end;
+ }
}
if (!test_only && rsvp_nxt)
diff --git a/msm/sde_dbg.c b/msm/sde_dbg.c
index 6178e1bf..2723d01c 100644
--- a/msm/sde_dbg.c
+++ b/msm/sde_dbg.c
@@ -197,6 +197,7 @@ struct sde_dbg_regbuf {
/**
* struct sde_dbg_base - global sde debug base structure
* @evtlog: event log instance
+ * @reglog: reg log instance
* @reg_base_list: list of register dumping regions
* @dev: device pointer
* @mutex: mutex to serialize access to serialze dumps, debugfs access
@@ -212,12 +213,15 @@ struct sde_dbg_regbuf {
* @dsi_dbg_bus: dump dsi debug bus register
* @regbuf: buffer data to track the register dumping in hw recovery
* @cur_evt_index: index used for tracking event logs dump in hw recovery
+ * @cur_reglog_index: index used for tracking register logs dump in hw recovery
* @dbgbus_dump_idx: index used for tracking dbg-bus dump in hw recovery
* @vbif_dbgbus_dump_idx: index for tracking vbif dumps in hw recovery
*/
static struct sde_dbg_base {
struct sde_dbg_evtlog *evtlog;
+ struct sde_dbg_reglog *reglog;
struct list_head reg_base_list;
+ void *reg_dump_addr;
struct device *dev;
struct mutex mutex;
@@ -238,6 +242,7 @@ static struct sde_dbg_base {
struct sde_dbg_regbuf regbuf;
u32 cur_evt_index;
+ u32 cur_reglog_index;
u32 dbgbus_dump_idx;
u32 vbif_dbgbus_dump_idx;
enum sde_dbg_dump_context dump_mode;
@@ -246,6 +251,9 @@ static struct sde_dbg_base {
/* sde_dbg_base_evtlog - global pointer to main sde event log for macro use */
struct sde_dbg_evtlog *sde_dbg_base_evtlog;
+/* sde_dbg_base_reglog - global pointer to main sde reg log for macro use */
+struct sde_dbg_reglog *sde_dbg_base_reglog;
+
static void _sde_debug_bus_xbar_dump(void __iomem *mem_base,
struct sde_debug_bus_entry *entry, u32 val)
{
@@ -2922,6 +2930,7 @@ static void _sde_dump_reg(const char *dump_name, u32 reg_dump_flag,
char *base_addr, char *addr, size_t len_bytes, u32 **dump_mem)
{
u32 in_log, in_mem, len_align, len_padded;
+ struct sde_dbg_base *dbg_base = &sde_dbg_base;
u32 *dump_addr = NULL;
char *end_addr;
int i;
@@ -2950,9 +2959,8 @@ static void _sde_dump_reg(const char *dump_name, u32 reg_dump_flag,
if (in_mem) {
if (dump_mem && !(*dump_mem)) {
- phys_addr_t phys = 0;
- *dump_mem = dma_alloc_coherent(sde_dbg_base.dev,
- len_padded, &phys, GFP_KERNEL);
+ *dump_mem = dbg_base->reg_dump_addr;
+ dbg_base->reg_dump_addr += len_padded;
}
if (dump_mem && *dump_mem) {
@@ -3026,6 +3034,49 @@ static u32 _sde_dbg_get_dump_range(struct sde_dbg_reg_offset *range_node,
return length;
}
+static u32 _sde_dbg_get_reg_blk_size(struct sde_dbg_reg_base *dbg)
+{
+ u32 len, len_align, len_padded;
+ u32 size = 0;
+ struct sde_dbg_reg_range *range_node;
+
+ if (!dbg || !dbg->base) {
+ pr_err("dbg base is null!\n");
+ return 0;
+ }
+
+ if (!list_empty(&dbg->sub_range_list)) {
+ list_for_each_entry(range_node, &dbg->sub_range_list, head) {
+ len = _sde_dbg_get_dump_range(&range_node->offset,
+ dbg->max_offset);
+ len_align = (len + REG_DUMP_ALIGN - 1) / REG_DUMP_ALIGN;
+ len_padded = len_align * REG_DUMP_ALIGN;
+ size += REG_BASE_NAME_LEN + RANGE_NAME_LEN + len_padded;
+ }
+ } else {
+ len = dbg->max_offset;
+ len_align = (len + REG_DUMP_ALIGN - 1) / REG_DUMP_ALIGN;
+ len_padded = len_align * REG_DUMP_ALIGN;
+ size += REG_BASE_NAME_LEN + RANGE_NAME_LEN + len_padded;
+ }
+ return size;
+}
+
+static u32 _sde_dbg_get_reg_dump_size(void)
+{
+ struct sde_dbg_base *dbg_base = &sde_dbg_base;
+ struct sde_dbg_reg_base *blk_base;
+ u32 size = 0;
+
+ if (!dbg_base)
+ return 0;
+
+ list_for_each_entry(blk_base, &dbg_base->reg_base_list, reg_base_head) {
+ size += _sde_dbg_get_reg_blk_size(blk_base);
+ }
+ return size;
+}
+
static int _sde_dump_reg_range_cmp(void *priv, struct list_head *a,
struct list_head *b)
{
@@ -3071,6 +3122,7 @@ static void _sde_dump_reg_by_ranges(struct sde_dbg_reg_base *dbg,
char *addr;
size_t len;
struct sde_dbg_reg_range *range_node;
+ struct sde_dbg_base *dbg_base = &sde_dbg_base;
if (!dbg || !(dbg->base || dbg->cb)) {
pr_err("dbg base is null!\n");
@@ -3100,6 +3152,12 @@ static void _sde_dump_reg_by_ranges(struct sde_dbg_reg_base *dbg,
addr, range_node->offset.start,
range_node->offset.end);
+		scnprintf(dbg_base->reg_dump_addr, REG_BASE_NAME_LEN,
+				"%s", dbg->name);
+		dbg_base->reg_dump_addr += REG_BASE_NAME_LEN;
+		scnprintf(dbg_base->reg_dump_addr, RANGE_NAME_LEN,
+				"%s", range_node->range_name);
+		dbg_base->reg_dump_addr += RANGE_NAME_LEN;
_sde_dump_reg(range_node->range_name, reg_dump_flag,
dbg->base, addr, len,
&range_node->reg_dump);
@@ -3112,6 +3170,10 @@ static void _sde_dump_reg_by_ranges(struct sde_dbg_reg_base *dbg,
dbg->max_offset);
addr = dbg->base;
len = dbg->max_offset;
+		scnprintf(dbg_base->reg_dump_addr, REG_BASE_NAME_LEN,
+				"%s", dbg->name);
+ dbg_base->reg_dump_addr += REG_BASE_NAME_LEN;
+ dbg_base->reg_dump_addr += RANGE_NAME_LEN;
_sde_dump_reg(dbg->name, reg_dump_flag, dbg->base, addr, len,
&dbg->reg_dump);
}
@@ -3478,9 +3540,16 @@ static void _sde_dump_array(struct sde_dbg_reg_base *blk_arr[],
bool dump_dbgbus_vbif_rt, bool dump_all, bool dump_secure)
{
int i;
+ u32 reg_dump_size;
+ struct sde_dbg_base *dbg_base = &sde_dbg_base;
+ phys_addr_t phys = 0;
mutex_lock(&sde_dbg_base.mutex);
+ reg_dump_size = _sde_dbg_get_reg_dump_size();
+ dbg_base->reg_dump_addr = dma_alloc_coherent(sde_dbg_base.dev,
+ reg_dump_size, &phys, GFP_KERNEL);
+
if (dump_all)
sde_evtlog_dump_all(sde_dbg_base.evtlog);
@@ -3658,7 +3727,7 @@ void sde_dbg_ctrl(const char *name, ...)
va_end(args);
}
-
+#ifdef CONFIG_DEBUG_FS
/*
* sde_dbg_debugfs_open - debugfs open handler for evtlog dump
* @inode: debugfs inode
@@ -4600,6 +4669,15 @@ int sde_dbg_debugfs_register(struct device *dev)
return 0;
}
+#else
+
+int sde_dbg_debugfs_register(struct device *dev)
+{
+ return 0;
+}
+
+#endif
+
static void _sde_dbg_debugfs_destroy(void)
{
}
@@ -4665,6 +4743,12 @@ int sde_dbg_init(struct device *dev)
sde_dbg_base_evtlog = sde_dbg_base.evtlog;
+ sde_dbg_base.reglog = sde_reglog_init();
+ if (IS_ERR_OR_NULL(sde_dbg_base.reglog))
+ return PTR_ERR(sde_dbg_base.reglog);
+
+ sde_dbg_base_reglog = sde_dbg_base.reglog;
+
INIT_WORK(&sde_dbg_base.dump_work, _sde_dump_work);
sde_dbg_base.work_panic = false;
sde_dbg_base.panic_on_err = DEFAULT_PANIC;
@@ -4709,6 +4793,8 @@ void sde_dbg_destroy(void)
sde_dbg_base_evtlog = NULL;
sde_evtlog_destroy(sde_dbg_base.evtlog);
sde_dbg_base.evtlog = NULL;
+ sde_reglog_destroy(sde_dbg_base.reglog);
+ sde_dbg_base.reglog = NULL;
sde_dbg_reg_base_destroy();
mutex_destroy(&sde_dbg_base.mutex);
}
diff --git a/msm/sde_dbg.h b/msm/sde_dbg.h
index e336f323..61764c04 100644
--- a/msm/sde_dbg.h
+++ b/msm/sde_dbg.h
@@ -35,6 +35,7 @@ enum sde_dbg_evtlog_flag {
SDE_EVTLOG_IRQ = BIT(1),
SDE_EVTLOG_VERBOSE = BIT(2),
SDE_EVTLOG_EXTERNAL = BIT(3),
+ SDE_EVTLOG_REGWRITE = BIT(4),
SDE_EVTLOG_ALWAYS = -1
};
@@ -49,6 +50,34 @@ enum sde_dbg_dump_context {
SDE_DBG_DUMP_CLK_ENABLED_CTX,
};
+/*
+ * Define blocks for register write logging.
+ */
+#define SDE_REG_LOG_DEFAULT 0
+#define SDE_REG_LOG_NONE 1
+#define SDE_REG_LOG_CDM 2
+#define SDE_REG_LOG_DSPP 3
+#define SDE_REG_LOG_INTF 4
+#define SDE_REG_LOG_LM 5
+#define SDE_REG_LOG_CTL 6
+#define SDE_REG_LOG_PINGPONG 7
+#define SDE_REG_LOG_SSPP 8
+#define SDE_REG_LOG_WB 9
+#define SDE_REG_LOG_TOP 10
+#define SDE_REG_LOG_VBIF 11
+#define SDE_REG_LOG_DSC 12
+#define SDE_REG_LOG_ROT 13
+#define SDE_REG_LOG_DS 14
+#define SDE_REG_LOG_REGDMA 15
+#define SDE_REG_LOG_UIDLE 16
+#define SDE_REG_LOG_SID 16
+#define SDE_REG_LOG_QDSS 17
+/*
+ * 0-32 are reserved for sde_reg_write due to log masks
+ * Additional blocks are assigned from 33 to avoid conflict
+ */
+#define SDE_REG_LOG_RSCC 33
+
#define SDE_EVTLOG_DEFAULT_ENABLE (SDE_EVTLOG_CRITICAL | SDE_EVTLOG_IRQ | \
SDE_EVTLOG_EXTERNAL)
@@ -103,6 +132,44 @@ struct sde_dbg_evtlog {
extern struct sde_dbg_evtlog *sde_dbg_base_evtlog;
+/*
+ * reglog keeps this number of entries in memory for debug purpose. This
+ * number must be greater than number of possible writes in at least one
+ * single commit.
+ */
+#define SDE_REGLOG_ENTRY 1024
+
+struct sde_dbg_reglog_log {
+ s64 time;
+ u32 pid;
+ u32 addr;
+ u32 val;
+ u8 blk_id;
+};
+
+/**
+ * @last_dump: Index of last entry to be output during reglog dumps
+ * @filter_list: Linked list of currently active filter strings
+ */
+struct sde_dbg_reglog {
+ struct sde_dbg_reglog_log logs[SDE_REGLOG_ENTRY];
+ u32 first;
+ u32 last;
+ u32 last_dump;
+ u32 curr;
+ u32 next;
+ u32 enable;
+ u32 enable_mask;
+ spinlock_t spin_lock;
+};
+
+extern struct sde_dbg_reglog *sde_dbg_base_reglog;
+
+/**
+ * SDE_REG_LOG - Write register write to the register log
+ */
+#define SDE_REG_LOG(blk_id, val, addr) sde_reglog_log(blk_id, val, addr)
+
/**
* SDE_EVT32 - Write a list of 32bit values to the event log, default area
* ... - variable arguments
@@ -134,6 +201,13 @@ extern struct sde_dbg_evtlog *sde_dbg_base_evtlog;
#define SDE_EVT32_EXTERNAL(...) sde_evtlog_log(sde_dbg_base_evtlog, __func__, \
__LINE__, SDE_EVTLOG_EXTERNAL, ##__VA_ARGS__, \
SDE_EVTLOG_DATA_LIMITER)
+/**
+ * SDE_EVT32_REGWRITE - Write a list of 32bit values for register writes logging
+ * ... - variable arguments
+ */
+#define SDE_EVT32_REGWRITE(...) sde_evtlog_log(sde_dbg_base_evtlog, __func__, \
+ __LINE__, SDE_EVTLOG_REGWRITE, ##__VA_ARGS__, \
+ SDE_EVTLOG_DATA_LIMITER)
/**
* SDE_DBG_DUMP - trigger dumping of all sde_dbg facilities
@@ -175,7 +249,6 @@ extern struct sde_dbg_evtlog *sde_dbg_base_evtlog;
#define SDE_DBG_CTRL(...) sde_dbg_ctrl(__func__, ##__VA_ARGS__, \
SDE_DBG_DUMP_DATA_LIMITER)
-#if defined(CONFIG_DEBUG_FS)
/**
* sde_evtlog_init - allocate a new event log object
@@ -184,6 +257,12 @@ extern struct sde_dbg_evtlog *sde_dbg_base_evtlog;
struct sde_dbg_evtlog *sde_evtlog_init(void);
/**
+ * sde_reglog_init - allocate a new reg log object
+ * Returns: reglog or -ERROR
+ */
+struct sde_dbg_reglog *sde_reglog_init(void);
+
+/**
* sde_evtlog_destroy - destroy previously allocated event log
* @evtlog: pointer to evtlog
* Returns: none
@@ -191,6 +270,13 @@ struct sde_dbg_evtlog *sde_evtlog_init(void);
void sde_evtlog_destroy(struct sde_dbg_evtlog *evtlog);
/**
+ * sde_reglog_destroy - destroy previously allocated reg log
+ * @reglog: pointer to reglog
+ * Returns: none
+ */
+void sde_reglog_destroy(struct sde_dbg_reglog *reglog);
+
+/**
* sde_evtlog_log - log an entry into the event log.
* log collection may be enabled/disabled entirely via debugfs
* log area collection may be filtered by user provided flags via debugfs.
@@ -204,6 +290,15 @@ void sde_evtlog_log(struct sde_dbg_evtlog *evtlog, const char *name, int line,
int flag, ...);
/**
+ * sde_reglog_log - log an entry into the reg log.
+ * log collection may be enabled/disabled entirely via debugfs
+ * log area collection may be filtered by user provided flags via debugfs.
+ * @blk_id: id of hw block being written; @val: value written; @addr: offset
+ * Returns: none
+ */
+void sde_reglog_log(u8 blk_id, u32 val, u32 addr);
+
+/**
* sde_evtlog_dump_all - print all entries in event log to kernel log
* @evtlog: pointer to evtlog
* Returns: none
@@ -371,101 +466,4 @@ void sde_rsc_debug_dump(u32 mux_sel);
*/
void dsi_ctrl_debug_dump(u32 *entries, u32 size);
-#else
-static inline struct sde_dbg_evtlog *sde_evtlog_init(void)
-{
- return NULL;
-}
-
-static inline void sde_evtlog_destroy(struct sde_dbg_evtlog *evtlog)
-{
-}
-
-static inline void sde_evtlog_log(struct sde_dbg_evtlog *evtlog,
- const char *name, int line, int flag, ...)
-{
-}
-
-static inline void sde_evtlog_dump_all(struct sde_dbg_evtlog *evtlog)
-{
-}
-
-static inline bool sde_evtlog_is_enabled(struct sde_dbg_evtlog *evtlog,
- u32 flag)
-{
- return false;
-}
-
-static inline ssize_t sde_evtlog_dump_to_buffer(struct sde_dbg_evtlog *evtlog,
- char *evtlog_buf, ssize_t evtlog_buf_size,
- bool update_last_entry)
-{
- return 0;
-}
-
-static inline void sde_dbg_init_dbg_buses(u32 hwversion)
-{
-}
-
-static inline int sde_dbg_init(struct device *dev)
-{
- return 0;
-}
-
-static inline int sde_dbg_debugfs_register(struct device *dev)
-{
- return 0;
-}
-
-static inline void sde_dbg_destroy(void)
-{
-}
-
-static inline void sde_dbg_dump(enum sde_dbg_dump_context mode,
- const char *name, ...)
-{
-}
-
-static inline void sde_dbg_ctrl(const char *name, ...)
-{
-}
-
-static inline int sde_dbg_reg_register_base(const char *name,
- void __iomem *base, size_t max_offset)
-{
- return 0;
-}
-
-static inline void sde_dbg_reg_register_dump_range(const char *base_name,
- const char *range_name, u32 offset_start, u32 offset_end,
- uint32_t xin_id)
-{
-}
-
-static inline void sde_dbg_set_sde_top_offset(u32 blk_off)
-{
-}
-
-static inline void sde_evtlog_set_filter(
- struct sde_dbg_evtlog *evtlog, char *filter)
-{
-}
-
-static inline int sde_evtlog_get_filter(struct sde_dbg_evtlog *evtlog,
- int index, char *buf, size_t bufsz)
-{
- return -EINVAL;
-}
-
-static inline void sde_rsc_debug_dump(u32 mux_sel)
-{
-}
-
-static inline void dsi_ctrl_debug_dump(u32 *entries, u32 size)
-{
-}
-
-#endif /* defined(CONFIG_DEBUG_FS) */
-
-
#endif /* SDE_DBG_H_ */
diff --git a/msm/sde_dbg_evtlog.c b/msm/sde_dbg_evtlog.c
index 71ec3283..ddb4c996 100644
--- a/msm/sde_dbg_evtlog.c
+++ b/msm/sde_dbg_evtlog.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "sde_dbg:[%s] " fmt, __func__
@@ -101,6 +101,31 @@ exit:
spin_unlock_irqrestore(&evtlog->spin_lock, flags);
}
+void sde_reglog_log(u8 blk_id, u32 val, u32 addr)
+{
+ unsigned long flags;
+ struct sde_dbg_reglog_log *log;
+ struct sde_dbg_reglog *reglog = sde_dbg_base_reglog;
+
+ if (!reglog)
+ return;
+
+ spin_lock_irqsave(&reglog->spin_lock, flags);
+
+ log = &reglog->logs[reglog->curr];
+
+ log->blk_id = blk_id;
+ log->val = val;
+ log->addr = addr;
+ log->time = local_clock();
+ log->pid = current->pid;
+
+ reglog->curr = (reglog->curr + 1) % SDE_REGLOG_ENTRY;
+ reglog->last++;
+
+ spin_unlock_irqrestore(&reglog->spin_lock, flags);
+}
+
/* always dump the last entries which are not dumped yet */
static bool _sde_evtlog_dump_calc_range(struct sde_dbg_evtlog *evtlog,
bool update_last_entry, bool full_dump)
@@ -211,6 +236,19 @@ struct sde_dbg_evtlog *sde_evtlog_init(void)
return evtlog;
}
+struct sde_dbg_reglog *sde_reglog_init(void)
+{
+ struct sde_dbg_reglog *reglog;
+
+ reglog = kzalloc(sizeof(*reglog), GFP_KERNEL);
+ if (!reglog)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_init(&reglog->spin_lock);
+
+ return reglog;
+}
+
int sde_evtlog_get_filter(struct sde_dbg_evtlog *evtlog, int index,
char *buf, size_t bufsz)
{
@@ -312,3 +350,11 @@ void sde_evtlog_destroy(struct sde_dbg_evtlog *evtlog)
}
kfree(evtlog);
}
+
+void sde_reglog_destroy(struct sde_dbg_reglog *reglog)
+{
+ if (!reglog)
+ return;
+
+ kfree(reglog);
+}
diff --git a/msm/sde_io_util.c b/msm/sde_io_util.c
index 09649c59..ad6e89f7 100644
--- a/msm/sde_io_util.c
+++ b/msm/sde_io_util.c
@@ -9,6 +9,7 @@
#include <linux/regulator/consumer.h>
#include <linux/delay.h>
#include <linux/sde_io_util.h>
+#include "sde_dbg.h"
#define MAX_I2C_CMDS 16
void dss_reg_w(struct dss_io_data *io, u32 offset, u32 value, u32 debug)
@@ -33,7 +34,9 @@ void dss_reg_w(struct dss_io_data *io, u32 offset, u32 value, u32 debug)
DEV_DBG("[%08x] => %08x [%08x]\n",
(u32)(unsigned long)(io->base + offset),
value, in_val);
+ SDE_EVT32_REGWRITE(io->base, offset, value, in_val);
}
+ SDE_REG_LOG(SDE_REG_LOG_RSCC, value, offset);
} /* dss_reg_w */
EXPORT_SYMBOL(dss_reg_w);
diff --git a/msm/sde_rsc_hw_v3.c b/msm/sde_rsc_hw_v3.c
index d3a589cc..5a5a692c 100644
--- a/msm/sde_rsc_hw_v3.c
+++ b/msm/sde_rsc_hw_v3.c
@@ -106,17 +106,17 @@ static int _rsc_hw_seq_memory_init_v3(struct sde_rsc_priv *rsc)
/* Mode - 2 sequence */
dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x18,
- 0xbdf9b9a0, rsc->debug_mode);
+ 0xf9b9baa0, rsc->debug_mode);
dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x1c,
- 0xa13899fe, rsc->debug_mode);
+ 0x999afebd, rsc->debug_mode);
dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x20,
- 0xe0ac81e1, rsc->debug_mode);
+ 0x81e1a138, rsc->debug_mode);
dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x24,
- 0x3982e2a2, rsc->debug_mode);
+ 0xe2a2e0ac, rsc->debug_mode);
dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x28,
- 0x208cfd9d, rsc->debug_mode);
+ 0xfd9d3982, rsc->debug_mode);
dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x2c,
- 0x20202020, rsc->debug_mode);
+ 0x2020208c, rsc->debug_mode);
dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x30,
0x20202020, rsc->debug_mode);
diff --git a/pll/dsi_pll_7nm.c b/pll/dsi_pll_7nm.c
index c3a8ff26..581fa3ba 100644
--- a/pll/dsi_pll_7nm.c
+++ b/pll/dsi_pll_7nm.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
@@ -426,6 +426,12 @@ static inline int pclk_mux_read_sel(void *context, unsigned int reg,
int rc = 0;
struct mdss_pll_resources *rsc = context;
+ /* Return cached cfg1 as its updated with cached cfg1 in pll_enable */
+ if (!rsc->handoff_resources) {
+ *val = (rsc->cached_cfg1) & 0x3;
+ return rc;
+ }
+
rc = mdss_pll_resource_enable(rsc, true);
if (rc)
pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
@@ -1130,7 +1136,8 @@ static void shadow_dsi_pll_dynamic_refresh_7nm(struct dsi_pll_7nm *pll,
MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL6,
(PLL_CMODE_1 + offset),
(PLL_CLOCK_INVERTERS_1 + offset),
- 0x10, reg->pll_clock_inverters);
+ pll->cphy_enabled ? 0x00 : 0x10,
+ reg->pll_clock_inverters);
upper_addr |=
(upper_8_bit(PLL_CMODE_1 + offset) << 12);
upper_addr |= (upper_8_bit(PLL_CLOCK_INVERTERS_1 + offset) << 13);
@@ -1610,6 +1617,11 @@ static unsigned long vco_7nm_recalc_rate(struct clk_hw *hw,
return 0;
}
+ if (!pll->priv) {
+ pr_err("pll priv is null\n");
+ return 0;
+ }
+
/*
* In the case when vco arte is set, the recalculation function should
* return the current rate as to avoid trying to set the vco rate
@@ -1629,6 +1641,7 @@ static unsigned long vco_7nm_recalc_rate(struct clk_hw *hw,
}
pll->handoff_resources = true;
+ dsi_pll_detect_phy_mode(pll->priv, pll);
if (dsi_pll_7nm_lock_status(pll)) {
pr_debug("PLL not enabled\n");
pll->handoff_resources = false;
@@ -1837,25 +1850,25 @@ static struct regmap_bus mdss_mux_regmap_bus = {
* | DIV(8) | | DIV(7) | | | DIV (2) | | | DIV(4) | | DIV(3.5) |
* +-----+-----+ +-----+-----+ | +------+------+ | +-----+------+ +------+-----+
* | | | | | | |
- *Shadow Path | CPHY Path | | | | +----v
- * + | | +------+ | | +---+ |
- * +---+ | +-----+ | | | | |
- * | | | +-v--v----v---v---+ +--------v--------+
- * +---v--v--------v---+ \ pclk_src_mux / \ cphy_pclk_src /
+ *Shadow DPHY | Shadow CPHY Path | | | | +----v
+ * Path | CPHY Path | +------+ | | +---+ |
+ * +---+ | | +-----+ | | | | |
+ * | | | | +-v--v----v---v---+ +--------v--------+
+ * +---v--v----v---v---+ \ pclk_src_mux / \ cphy_pclk_src /
* \ byteclk_mux / \ / \ mux /
* \ / +-----+-----+ +-----+-----+
- * +------+------+ | Shadow Path |
- * | | + |
+ * +------+------+ | Shadow |
+ * | | DPHY Path |
* v +-----v------+ | +------v------+
* dsi_byte_clk | pclk_src | | |cphy_pclk_src|
* | DIV(1..15) | | | DIV(1..15) |
* +-----+------+ | +------+------+
* | | |
* | | CPHY Path
- * | | |
- * +-------+ | +-------+
- * | | |
- * +---v---v----v------+
+ * | | | Shadow CPHY Path
+ * +-------+ | +-------+ |
+ * | | | |----------------
+ * +---v---v----v---v--+
* \ pclk_mux /
* +------+------+
* |
@@ -2084,6 +2097,17 @@ static struct clk_fixed_factor dsi0pll_post_vco_div3_5 = {
},
};
+static struct clk_fixed_factor dsi0pll_shadow_post_vco_div3_5 = {
+ .div = 7,
+ .mult = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi0pll_shadow_post_vco_div3_5",
+ .parent_names = (const char *[]){"dsi0pll_shadow_pll_out_div"},
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
static struct clk_fixed_factor dsi1pll_post_vco_div3_5 = {
.div = 7,
.mult = 2,
@@ -2095,6 +2119,17 @@ static struct clk_fixed_factor dsi1pll_post_vco_div3_5 = {
},
};
+static struct clk_fixed_factor dsi1pll_shadow_post_vco_div3_5 = {
+ .div = 7,
+ .mult = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi1pll_shadow_post_vco_div3_5",
+ .parent_names = (const char *[]){"dsi1pll_shadow_pll_out_div"},
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
static struct clk_fixed_factor dsi1pll_shadow_post_vco_div = {
.div = 4,
.mult = 1,
@@ -2154,6 +2189,18 @@ static struct clk_fixed_factor dsi0pll_cphy_byteclk_src = {
},
};
+static struct clk_fixed_factor dsi0pll_shadow_cphy_byteclk_src = {
+ .div = 7,
+ .mult = 1,
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi0pll_shadow_cphy_byteclk_src",
+ .parent_names = (const char *[]){"dsi0pll_shadow_bitclk_src"},
+ .num_parents = 1,
+ .flags = (CLK_SET_RATE_PARENT),
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
static struct clk_fixed_factor dsi1pll_cphy_byteclk_src = {
.div = 7,
.mult = 1,
@@ -2166,6 +2213,18 @@ static struct clk_fixed_factor dsi1pll_cphy_byteclk_src = {
},
};
+static struct clk_fixed_factor dsi1pll_shadow_cphy_byteclk_src = {
+ .div = 7,
+ .mult = 1,
+ .hw.init = &(struct clk_init_data){
+		.name = "dsi1pll_shadow_cphy_byteclk_src",
+ .parent_names = (const char *[]){"dsi1pll_shadow_bitclk_src"},
+ .num_parents = 1,
+ .flags = (CLK_SET_RATE_PARENT),
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
static struct clk_fixed_factor dsi1pll_shadow_byteclk_src = {
.div = 8,
.mult = 1,
@@ -2230,8 +2289,9 @@ static struct clk_regmap_mux dsi0pll_byteclk_mux = {
.name = "dsi0_phy_pll_out_byteclk",
.parent_names = (const char *[]){"dsi0pll_byteclk_src",
"dsi0pll_shadow_byteclk_src",
- "dsi0pll_cphy_byteclk_src"},
- .num_parents = 3,
+ "dsi0pll_cphy_byteclk_src",
+ "dsi0pll_shadow_cphy_byteclk_src"},
+ .num_parents = 4,
.flags = (CLK_SET_RATE_PARENT |
CLK_SET_RATE_NO_REPARENT),
.ops = &clk_regmap_mux_closest_ops,
@@ -2247,8 +2307,9 @@ static struct clk_regmap_mux dsi1pll_byteclk_mux = {
.name = "dsi1_phy_pll_out_byteclk",
.parent_names = (const char *[]){"dsi1pll_byteclk_src",
"dsi1pll_shadow_byteclk_src",
- "dsi1pll_cphy_byteclk_src"},
- .num_parents = 3,
+ "dsi1pll_cphy_byteclk_src",
+ "dsi1pll_shadow_cphy_byteclk_src"},
+ .num_parents = 4,
.flags = (CLK_SET_RATE_PARENT |
CLK_SET_RATE_NO_REPARENT),
.ops = &clk_regmap_mux_closest_ops,
@@ -2302,6 +2363,22 @@ static struct clk_regmap_mux dsi0pll_cphy_pclk_src_mux = {
},
};
+static struct clk_regmap_mux dsi0pll_shadow_cphy_pclk_src_mux = {
+ .reg = PHY_CMN_CLK_CFG1,
+ .shift = 0,
+ .width = 2,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi0pll_shadow_cphy_pclk_src_mux",
+ .parent_names =
+ (const char *[]){
+ "dsi0pll_shadow_post_vco_div3_5"},
+ .num_parents = 1,
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
static struct clk_regmap_mux dsi1pll_pclk_src_mux = {
.reg = PHY_CMN_CLK_CFG1,
.shift = 0,
@@ -2348,6 +2425,22 @@ static struct clk_regmap_mux dsi1pll_cphy_pclk_src_mux = {
},
};
+static struct clk_regmap_mux dsi1pll_shadow_cphy_pclk_src_mux = {
+ .reg = PHY_CMN_CLK_CFG1,
+ .shift = 0,
+ .width = 2,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi1pll_shadow_cphy_pclk_src_mux",
+ .parent_names =
+ (const char *[]){
+ "dsi1pll_shadow_post_vco_div3_5"},
+ .num_parents = 1,
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
static struct clk_regmap_div dsi0pll_pclk_src = {
.shift = 0,
.width = 4,
@@ -2396,6 +2489,22 @@ static struct clk_regmap_div dsi0pll_cphy_pclk_src = {
},
};
+static struct clk_regmap_div dsi0pll_shadow_cphy_pclk_src = {
+ .shift = 0,
+ .width = 4,
+ .flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi0pll_shadow_cphy_pclk_src",
+ .parent_names = (const char *[]){
+ "dsi0pll_shadow_cphy_pclk_src_mux"},
+ .num_parents = 1,
+ .flags = (CLK_SET_RATE_PARENT),
+ .ops = &clk_regmap_div_ops,
+ },
+ },
+};
+
static struct clk_regmap_div dsi1pll_pclk_src = {
.shift = 0,
.width = 4,
@@ -2444,6 +2553,22 @@ static struct clk_regmap_div dsi1pll_cphy_pclk_src = {
},
};
+static struct clk_regmap_div dsi1pll_shadow_cphy_pclk_src = {
+ .shift = 0,
+ .width = 4,
+ .flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi1pll_shadow_cphy_pclk_src",
+ .parent_names = (const char *[]){
+ "dsi1pll_shadow_cphy_pclk_src_mux"},
+ .num_parents = 1,
+ .flags = (CLK_SET_RATE_PARENT),
+ .ops = &clk_regmap_div_ops,
+ },
+ },
+};
+
static struct clk_regmap_mux dsi0pll_pclk_mux = {
.shift = 0,
.width = 1,
@@ -2452,8 +2577,9 @@ static struct clk_regmap_mux dsi0pll_pclk_mux = {
.name = "dsi0_phy_pll_out_dsiclk",
.parent_names = (const char *[]){"dsi0pll_pclk_src",
"dsi0pll_shadow_pclk_src",
- "dsi0pll_cphy_pclk_src"},
- .num_parents = 3,
+ "dsi0pll_cphy_pclk_src",
+ "dsi0pll_shadow_cphy_pclk_src"},
+ .num_parents = 4,
.flags = (CLK_SET_RATE_PARENT |
CLK_SET_RATE_NO_REPARENT),
.ops = &clk_regmap_mux_closest_ops,
@@ -2469,8 +2595,9 @@ static struct clk_regmap_mux dsi1pll_pclk_mux = {
.name = "dsi1_phy_pll_out_dsiclk",
.parent_names = (const char *[]){"dsi1pll_pclk_src",
"dsi1pll_shadow_pclk_src",
- "dsi1pll_cphy_pclk_src"},
- .num_parents = 3,
+ "dsi1pll_cphy_pclk_src",
+ "dsi1pll_shadow_cphy_pclk_src"},
+ .num_parents = 4,
.flags = (CLK_SET_RATE_PARENT |
CLK_SET_RATE_NO_REPARENT),
.ops = &clk_regmap_mux_closest_ops,
@@ -2497,10 +2624,15 @@ static struct clk_hw *mdss_dsi_pllcc_7nm[] = {
[SHADOW_PLL_OUT_DIV_0_CLK] = &dsi0pll_shadow_pll_out_div.clkr.hw,
[SHADOW_BITCLK_SRC_0_CLK] = &dsi0pll_shadow_bitclk_src.clkr.hw,
[SHADOW_BYTECLK_SRC_0_CLK] = &dsi0pll_shadow_byteclk_src.hw,
+ [SHADOW_CPHY_BYTECLK_SRC_0_CLK] = &dsi0pll_shadow_cphy_byteclk_src.hw,
[SHADOW_POST_BIT_DIV_0_CLK] = &dsi0pll_shadow_post_bit_div.hw,
[SHADOW_POST_VCO_DIV_0_CLK] = &dsi0pll_shadow_post_vco_div.hw,
+ [SHADOW_POST_VCO_DIV3_5_0_CLK] = &dsi0pll_shadow_post_vco_div3_5.hw,
[SHADOW_PCLK_SRC_MUX_0_CLK] = &dsi0pll_shadow_pclk_src_mux.clkr.hw,
[SHADOW_PCLK_SRC_0_CLK] = &dsi0pll_shadow_pclk_src.clkr.hw,
+ [SHADOW_CPHY_PCLK_SRC_MUX_0_CLK] =
+ &dsi0pll_shadow_cphy_pclk_src_mux.clkr.hw,
+ [SHADOW_CPHY_PCLK_SRC_0_CLK] = &dsi0pll_shadow_cphy_pclk_src.clkr.hw,
[VCO_CLK_1] = &dsi1pll_vco_clk.hw,
[PLL_OUT_DIV_1_CLK] = &dsi1pll_pll_out_div.clkr.hw,
[BITCLK_SRC_1_CLK] = &dsi1pll_bitclk_src.clkr.hw,
@@ -2519,10 +2651,15 @@ static struct clk_hw *mdss_dsi_pllcc_7nm[] = {
[SHADOW_PLL_OUT_DIV_1_CLK] = &dsi1pll_shadow_pll_out_div.clkr.hw,
[SHADOW_BITCLK_SRC_1_CLK] = &dsi1pll_shadow_bitclk_src.clkr.hw,
[SHADOW_BYTECLK_SRC_1_CLK] = &dsi1pll_shadow_byteclk_src.hw,
+ [SHADOW_CPHY_BYTECLK_SRC_1_CLK] = &dsi1pll_shadow_cphy_byteclk_src.hw,
[SHADOW_POST_BIT_DIV_1_CLK] = &dsi1pll_shadow_post_bit_div.hw,
[SHADOW_POST_VCO_DIV_1_CLK] = &dsi1pll_shadow_post_vco_div.hw,
+ [SHADOW_POST_VCO_DIV3_5_1_CLK] = &dsi1pll_shadow_post_vco_div3_5.hw,
[SHADOW_PCLK_SRC_MUX_1_CLK] = &dsi1pll_shadow_pclk_src_mux.clkr.hw,
[SHADOW_PCLK_SRC_1_CLK] = &dsi1pll_shadow_pclk_src.clkr.hw,
+ [SHADOW_CPHY_PCLK_SRC_MUX_1_CLK] =
+ &dsi1pll_shadow_cphy_pclk_src_mux.clkr.hw,
+ [SHADOW_CPHY_PCLK_SRC_1_CLK] = &dsi1pll_shadow_cphy_pclk_src.clkr.hw,
};
int dsi_pll_clock_register_7nm(struct platform_device *pdev,
@@ -2581,6 +2718,7 @@ int dsi_pll_clock_register_7nm(struct platform_device *pdev,
dsi0pll_pclk_src.clkr.regmap = rmap;
dsi0pll_cphy_pclk_src.clkr.regmap = rmap;
dsi0pll_shadow_pclk_src.clkr.regmap = rmap;
+ dsi0pll_shadow_cphy_pclk_src.clkr.regmap = rmap;
rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
pll_res, &dsi_pll_7nm_config);
@@ -2594,6 +2732,7 @@ int dsi_pll_clock_register_7nm(struct platform_device *pdev,
&cphy_pclk_src_mux_regmap_bus,
pll_res, &dsi_pll_7nm_config);
dsi0pll_cphy_pclk_src_mux.clkr.regmap = rmap;
+ dsi0pll_shadow_cphy_pclk_src_mux.clkr.regmap = rmap;
rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
pll_res, &dsi_pll_7nm_config);
@@ -2609,7 +2748,7 @@ int dsi_pll_clock_register_7nm(struct platform_device *pdev,
dsi0pll_shadow_vco_clk.max_rate = 5000000000;
}
- for (i = VCO_CLK_0; i <= CPHY_PCLK_SRC_0_CLK; i++) {
+ for (i = VCO_CLK_0; i <= SHADOW_CPHY_PCLK_SRC_0_CLK; i++) {
clk = devm_clk_register(&pdev->dev,
mdss_dsi_pllcc_7nm[i]);
if (IS_ERR(clk)) {
@@ -2640,6 +2779,7 @@ int dsi_pll_clock_register_7nm(struct platform_device *pdev,
dsi1pll_pclk_src.clkr.regmap = rmap;
dsi1pll_cphy_pclk_src.clkr.regmap = rmap;
dsi1pll_shadow_pclk_src.clkr.regmap = rmap;
+ dsi1pll_shadow_cphy_pclk_src.clkr.regmap = rmap;
rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
pll_res, &dsi_pll_7nm_config);
@@ -2653,6 +2793,7 @@ int dsi_pll_clock_register_7nm(struct platform_device *pdev,
&cphy_pclk_src_mux_regmap_bus,
pll_res, &dsi_pll_7nm_config);
dsi1pll_cphy_pclk_src_mux.clkr.regmap = rmap;
+ dsi1pll_shadow_cphy_pclk_src_mux.clkr.regmap = rmap;
rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
pll_res, &dsi_pll_7nm_config);
@@ -2667,7 +2808,7 @@ int dsi_pll_clock_register_7nm(struct platform_device *pdev,
dsi1pll_shadow_vco_clk.max_rate = 5000000000;
}
- for (i = VCO_CLK_1; i <= CPHY_PCLK_SRC_1_CLK; i++) {
+ for (i = VCO_CLK_1; i <= SHADOW_CPHY_PCLK_SRC_1_CLK; i++) {
clk = devm_clk_register(&pdev->dev,
mdss_dsi_pllcc_7nm[i]);
if (IS_ERR(clk)) {
diff --git a/rotator/sde_rotator_core.c b/rotator/sde_rotator_core.c
index 0febbd60..e7f72fd6 100644
--- a/rotator/sde_rotator_core.c
+++ b/rotator/sde_rotator_core.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "%s:%d: " fmt, __func__, __LINE__
@@ -79,6 +79,10 @@
#define BUS_VOTE_19_MHZ 153600000
+#define ROT_HAS_UBWC(caps) (test_bit(SDE_CAPS_UBWC_2, caps) ||\
+ test_bit(SDE_CAPS_UBWC_3, caps) ||\
+ test_bit(SDE_CAPS_UBWC_4, caps))
+
/* forward prototype */
static int sde_rotator_update_perf(struct sde_rot_mgr *mgr);
@@ -610,7 +614,7 @@ static int sde_rotator_secure_session_ctrl(bool enable)
sid_info = (uint32_t *) shm.vaddr;
desc.args[1] = shm.paddr;
- desc.args[2] = shm.size;
+ desc.args[2] = sizeof(uint32_t);
} else {
sid_info = kzalloc(sizeof(uint32_t), GFP_KERNEL);
if (!sid_info)
@@ -2033,14 +2037,25 @@ static int sde_rotator_validate_img_roi(struct sde_rotation_item *item)
static int sde_rotator_validate_fmt_and_item_flags(
struct sde_rotation_config *config, struct sde_rotation_item *item)
{
- struct sde_mdp_format_params *fmt;
+ struct sde_mdp_format_params *in_fmt, *out_fmt;
+ struct sde_rot_data_type *mdata = sde_rot_get_mdata();
+ bool has_ubwc;
- fmt = sde_get_format_params(item->input.format);
+ in_fmt = sde_get_format_params(item->input.format);
+ out_fmt = sde_get_format_params(item->output.format);
if ((item->flags & SDE_ROTATION_DEINTERLACE) &&
- sde_mdp_is_ubwc_format(fmt)) {
+ sde_mdp_is_ubwc_format(in_fmt)) {
SDEROT_DBG("cannot perform deinterlace on tiled formats\n");
return -EINVAL;
}
+
+ has_ubwc = ROT_HAS_UBWC(mdata->sde_caps_map);
+ if (!has_ubwc && (sde_mdp_is_ubwc_format(in_fmt) ||
+ sde_mdp_is_ubwc_format(out_fmt))) {
+ SDEROT_ERR("ubwc format is not supported\n");
+ return -EINVAL;
+ }
+
return 0;
}