Commit ec0bd60a authored by Dave Airlie

Merge tag 'drm-msm-fixes-2020-02-16' of https://gitlab.freedesktop.org/drm/msm into drm-fixes



+ fix UBWC on GPU and display side for sc7180
+ fix DSI suspend/resume issue encountered on sc7180
+ fix some breakage on so-called "linux-android" devices
  (fallout from sc7180/a618 support, not seen earlier
  due to bootloader/firmware differences)
+ couple other misc fixes

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rob Clark <robdclark@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/CAF6AEGshz5K3tJd=NsBSHq6HGT-ZRa67qt+iN=U2ZFO2oD8kuw@mail.gmail.com
parents 99edb18b 8fc7036e
drivers/gpu/drm/msm/adreno/a6xx_gmu.c  +31 −6
@@ -796,12 +796,41 @@ bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
 	return true;
 }
 
+#define GBIF_CLIENT_HALT_MASK             BIT(0)
+#define GBIF_ARB_HALT_MASK                BIT(1)
+
+static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
+{
+	struct msm_gpu *gpu = &adreno_gpu->base;
+
+	if (!a6xx_has_gbif(adreno_gpu)) {
+		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
+		spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) &
+								0xf) == 0xf);
+		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
+
+		return;
+	}
+
+	/* Halt new client requests on GBIF */
+	gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
+	spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
+			(GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK);
+
+	/* Halt all AXI requests on GBIF */
+	gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK);
+	spin_until((gpu_read(gpu,  REG_A6XX_GBIF_HALT_ACK) &
+			(GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK);
+
+	/* The GBIF halt needs to be explicitly cleared */
+	gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
+}
 
 /* Gracefully try to shut down the GMU and by extension the GPU */
 static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
 {
 	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
 	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
 	struct msm_gpu *gpu = &adreno_gpu->base;
 	u32 val;
 
 	/*
@@ -819,11 +848,7 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
 			return;
 		}
 
-		/* Clear the VBIF pipe before shutting down */
-		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
-		spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) & 0xf)
-			== 0xf);
-		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
+		a6xx_bus_clear_pending_transactions(adreno_gpu);
 
 		/* tell the GMU we want to slumber */
 		a6xx_gmu_notify_slumber(gmu);
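
Note on the halt sequence above: both the VBIF and the GBIF paths write a halt request and then busy-wait on an ack register via spin_until() (the driver's real helper, defined in the adreno driver headers). A minimal sketch of what such a poll-with-timeout helper looks like; the macro name and the jiffies-based deadline here are illustrative, not the driver's exact definition:

	/* Illustrative poll-until-ack helper: re-evaluate cond until it holds
	 * or the deadline passes; returns 0 on success, -ETIMEDOUT otherwise.
	 */
	#define poll_until_timeout(cond, timeout) ({			\
		unsigned long __deadline = jiffies + (timeout);		\
		int __ret = -ETIMEDOUT;					\
		do {							\
			if (cond) {					\
				__ret = 0;				\
				break;					\
			}						\
		} while (time_before(jiffies, __deadline));		\
		__ret;							\
	})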
drivers/gpu/drm/msm/adreno/a6xx_gpu.c  +6 −59
@@ -378,18 +378,6 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
 	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
 	int ret;
 
-	/*
-	 * During a previous slumber, GBIF halt is asserted to ensure
-	 * no further transaction can go through GPU before GPU
-	 * headswitch is turned off.
-	 *
-	 * This halt is deasserted once headswitch goes off but
-	 * incase headswitch doesn't goes off clear GBIF halt
-	 * here to ensure GPU wake-up doesn't fail because of
-	 * halted GPU transactions.
-	 */
-	gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
-
 	/* Make sure the GMU keeps the GPU on while we set it up */
 	a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
 
@@ -470,10 +458,12 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
 	/* Select CP0 to always count cycles */
 	gpu_write(gpu, REG_A6XX_CP_PERFCTR_CP_SEL_0, PERF_CP_ALWAYS_COUNT);
 
+	if (adreno_is_a630(adreno_gpu)) {
 		gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL, 2 << 1);
 		gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, 2 << 1);
 		gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, 2 << 1);
 		gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, 2 << 21);
+	}
 
 	/* Enable fault detection */
 	gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL,
@@ -748,39 +738,6 @@ static const u32 a6xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
 	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A6XX_CP_RB_CNTL),
 };
 
-#define GBIF_CLIENT_HALT_MASK             BIT(0)
-#define GBIF_ARB_HALT_MASK                BIT(1)
-
-static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
-{
-	struct msm_gpu *gpu = &adreno_gpu->base;
-
-	if(!a6xx_has_gbif(adreno_gpu)){
-		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
-		spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) &
-								0xf) == 0xf);
-		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
-
-		return;
-	}
-
-	/* Halt new client requests on GBIF */
-	gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
-	spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
-			(GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK);
-
-	/* Halt all AXI requests on GBIF */
-	gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK);
-	spin_until((gpu_read(gpu,  REG_A6XX_GBIF_HALT_ACK) &
-			(GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK);
-
-	/*
-	 * GMU needs DDR access in slumber path. Deassert GBIF halt now
-	 * to allow for GMU to access system memory.
-	 */
-	gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
-}
-
 static int a6xx_pm_resume(struct msm_gpu *gpu)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -805,16 +762,6 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)

 	devfreq_suspend_device(gpu->devfreq.devfreq);
 
-	/*
-	 * Make sure the GMU is idle before continuing (because some transitions
-	 * may use VBIF
-	 */
-	a6xx_gmu_wait_for_idle(&a6xx_gpu->gmu);
-
-	/* Clear the VBIF pipe before shutting down */
-	/* FIXME: This accesses the GPU - do we need to make sure it is on? */
-	a6xx_bus_clear_pending_transactions(adreno_gpu);
 
 	return a6xx_gmu_stop(a6xx_gpu);
 }
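
With both hunks applied, the bus drain is gone from the suspend path entirely; reconstructed from the context lines above, a6xx_pm_suspend() reduces to roughly the following (the two local declarations are assumed from the surrounding code, they are not shown in the hunk):

	static int a6xx_pm_suspend(struct msm_gpu *gpu)
	{
		struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
		struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);

		devfreq_suspend_device(gpu->devfreq.devfreq);

		return a6xx_gmu_stop(a6xx_gpu);
	}

The VBIF/GBIF drain now happens in a6xx_gmu_shutdown() instead, where the GMU is known to be awake, which addresses the FIXME the removed code carried.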

drivers/gpu/drm/msm/adreno/a6xx_hfi.c  +60 −25
@@ -7,6 +7,7 @@

#include "a6xx_gmu.h"
#include "a6xx_gmu.xml.h"
#include "a6xx_gpu.h"

#define HFI_MSG_ID(val) [val] = #val

@@ -216,48 +217,82 @@ static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu)
 		NULL, 0);
 }
 
-static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
+static void a618_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
 {
-	struct a6xx_hfi_msg_bw_table msg = { 0 };
+	/* Send a single "off" entry since the 618 GMU doesn't do bus scaling */
+	msg->bw_level_num = 1;
 
+	msg->ddr_cmds_num = 3;
+	msg->ddr_wait_bitmask = 0x01;
+
+	msg->ddr_cmds_addrs[0] = 0x50000;
+	msg->ddr_cmds_addrs[1] = 0x5003c;
+	msg->ddr_cmds_addrs[2] = 0x5000c;
+
+	msg->ddr_cmds_data[0][0] =  0x40000000;
+	msg->ddr_cmds_data[0][1] =  0x40000000;
+	msg->ddr_cmds_data[0][2] =  0x40000000;
+
 	/*
-	 * The sdm845 GMU doesn't do bus frequency scaling on its own but it
-	 * does need at least one entry in the list because it might be accessed
-	 * when the GMU is shutting down. Send a single "off" entry.
+	 * These are the CX (CNOC) votes - these are used by the GMU but the
+	 * votes are known and fixed for the target
 	 */
+	msg->cnoc_cmds_num = 1;
+	msg->cnoc_wait_bitmask = 0x01;
+
+	msg->cnoc_cmds_addrs[0] = 0x5007c;
+	msg->cnoc_cmds_data[0][0] =  0x40000000;
+	msg->cnoc_cmds_data[1][0] =  0x60000001;
+}
 
-	msg.bw_level_num = 1;
+static void a6xx_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
+{
+	/* Send a single "off" entry since the 630 GMU doesn't do bus scaling */
+	msg->bw_level_num = 1;
 
-	msg.ddr_cmds_num = 3;
-	msg.ddr_wait_bitmask = 0x07;
+	msg->ddr_cmds_num = 3;
+	msg->ddr_wait_bitmask = 0x07;
 
-	msg.ddr_cmds_addrs[0] = 0x50000;
-	msg.ddr_cmds_addrs[1] = 0x5005c;
-	msg.ddr_cmds_addrs[2] = 0x5000c;
+	msg->ddr_cmds_addrs[0] = 0x50000;
+	msg->ddr_cmds_addrs[1] = 0x5005c;
+	msg->ddr_cmds_addrs[2] = 0x5000c;
 
-	msg.ddr_cmds_data[0][0] =  0x40000000;
-	msg.ddr_cmds_data[0][1] =  0x40000000;
-	msg.ddr_cmds_data[0][2] =  0x40000000;
+	msg->ddr_cmds_data[0][0] =  0x40000000;
+	msg->ddr_cmds_data[0][1] =  0x40000000;
+	msg->ddr_cmds_data[0][2] =  0x40000000;
 
 	/*
 	 * These are the CX (CNOC) votes.  This is used but the values for the
 	 * sdm845 GMU are known and fixed so we can hard code them.
 	 */
 
-	msg.cnoc_cmds_num = 3;
-	msg.cnoc_wait_bitmask = 0x05;
+	msg->cnoc_cmds_num = 3;
+	msg->cnoc_wait_bitmask = 0x05;
 
-	msg.cnoc_cmds_addrs[0] = 0x50034;
-	msg.cnoc_cmds_addrs[1] = 0x5007c;
-	msg.cnoc_cmds_addrs[2] = 0x5004c;
+	msg->cnoc_cmds_addrs[0] = 0x50034;
+	msg->cnoc_cmds_addrs[1] = 0x5007c;
+	msg->cnoc_cmds_addrs[2] = 0x5004c;
 
-	msg.cnoc_cmds_data[0][0] =  0x40000000;
-	msg.cnoc_cmds_data[0][1] =  0x00000000;
-	msg.cnoc_cmds_data[0][2] =  0x40000000;
+	msg->cnoc_cmds_data[0][0] =  0x40000000;
+	msg->cnoc_cmds_data[0][1] =  0x00000000;
+	msg->cnoc_cmds_data[0][2] =  0x40000000;
 
+	msg->cnoc_cmds_data[1][0] =  0x60000001;
+	msg->cnoc_cmds_data[1][1] =  0x20000001;
+	msg->cnoc_cmds_data[1][2] =  0x60000001;
+}
+
+
+static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
+{
+	struct a6xx_hfi_msg_bw_table msg = { 0 };
+	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+
-	msg.cnoc_cmds_data[1][0] =  0x60000001;
-	msg.cnoc_cmds_data[1][1] =  0x20000001;
-	msg.cnoc_cmds_data[1][2] =  0x60000001;
+	if (adreno_is_a618(adreno_gpu))
+		a618_build_bw_table(&msg);
+	else
+		a6xx_build_bw_table(&msg);
 
 	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_BW_TABLE, &msg, sizeof(msg),
 		NULL, 0);
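
The refactor above makes the GMU bandwidth table a per-target choice: a618 keeps three DDR command addresses but votes with wait bitmask 0x01 and a single CNOC command, while the default table keeps the sdm845/a630 values. Adding another target later would mean one more build function and one more branch; a hypothetical sketch (adreno_is_a640() and its helper are illustrative, not part of this commit):

	if (adreno_is_a618(adreno_gpu))
		a618_build_bw_table(&msg);
	else if (adreno_is_a640(adreno_gpu))	/* hypothetical target check */
		a640_build_bw_table(&msg);	/* hypothetical helper */
	else
		a6xx_build_bw_table(&msg);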
drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c  +2 −2
@@ -255,13 +255,13 @@ static const struct dpu_format dpu_format_map[] = {

 	INTERLEAVED_RGB_FMT(RGB565,
 		0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
-		C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
 		false, 2, 0,
 		DPU_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(BGR565,
 		0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
-		C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
 		false, 2, 0,
 		DPU_FETCH_LINEAR, 1),
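
This swap resolves an RGB565/BGR565 mix-up: the two entries had each other's component order, so 565 framebuffers scanned out with red and blue exchanged. In drm_fourcc.h, DRM_FORMAT_RGB565 is defined as [15:0] R:G:B 5:6:5 little endian (red in the top five bits), and BGR565 is the reverse; the swapped component lists align the DPU's fetch order with those layouts. A standalone packing helper for reference (illustrative, not driver code):

	#include <stdint.h>

	/* Pack 8-bit channels into DRM_FORMAT_RGB565: R[15:11] G[10:5] B[4:0] */
	static inline uint16_t rgb565_pack(uint8_t r, uint8_t g, uint8_t b)
	{
		return ((uint16_t)(r >> 3) << 11) |
		       ((uint16_t)(g >> 2) << 5) |
		       (uint16_t)(b >> 3);
	}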

drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c  +57 −1
@@ -12,6 +12,7 @@

 #define to_dpu_mdss(x) container_of(x, struct dpu_mdss, base)
 
+#define HW_REV				0x0
 #define HW_INTR_STATUS			0x0010
 
 /* Max BW defined in KBps */
@@ -22,6 +23,17 @@ struct dpu_irq_controller {
 	struct irq_domain *domain;
 };
 
+struct dpu_hw_cfg {
+	u32 val;
+	u32 offset;
+};
+
+struct dpu_mdss_hw_init_handler {
+	u32 hw_rev;
+	u32 hw_reg_count;
+	struct dpu_hw_cfg* hw_cfg;
+};
+
 struct dpu_mdss {
 	struct msm_mdss base;
 	void __iomem *mmio;
@@ -32,6 +44,44 @@ struct dpu_mdss {
 	u32 num_paths;
 };
 
+static struct dpu_hw_cfg hw_cfg[] = {
+    {
+	/* UBWC global settings */
+	.val = 0x1E,
+	.offset = 0x144,
+    }
+};
+
+static struct dpu_mdss_hw_init_handler cfg_handler[] = {
+    { .hw_rev = DPU_HW_VER_620,
+      .hw_reg_count = ARRAY_SIZE(hw_cfg),
+      .hw_cfg = hw_cfg
+    },
+};
+
+static void dpu_mdss_hw_init(struct dpu_mdss *dpu_mdss, u32 hw_rev)
+{
+	int i;
+	u32 count = 0;
+	struct dpu_hw_cfg *hw_cfg = NULL;
+
+	for (i = 0; i < ARRAY_SIZE(cfg_handler); i++) {
+		if (cfg_handler[i].hw_rev == hw_rev) {
+			hw_cfg = cfg_handler[i].hw_cfg;
+			count = cfg_handler[i].hw_reg_count;
+			break;
+	    }
+	}
+
+	for (i = 0; i < count; i++ ) {
+		writel_relaxed(hw_cfg->val,
+			dpu_mdss->mmio + hw_cfg->offset);
+		hw_cfg++;
+	}
+
+    return;
+}
+
 static int dpu_mdss_parse_data_bus_icc_path(struct drm_device *dev,
 						struct dpu_mdss *dpu_mdss)
 {
@@ -174,12 +224,18 @@ static int dpu_mdss_enable(struct msm_mdss *mdss)
 	struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
 	struct dss_module_power *mp = &dpu_mdss->mp;
 	int ret;
+	u32 mdss_rev;
 
 	dpu_mdss_icc_request_bw(mdss);
 
 	ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
-	if (ret)
+	if (ret) {
 		DPU_ERROR("clock enable failed, ret:%d\n", ret);
+		return ret;
+	}
+
+	mdss_rev = readl_relaxed(dpu_mdss->mmio + HW_REV);
+	dpu_mdss_hw_init(dpu_mdss, mdss_rev);
 
 	return ret;
 }
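
dpu_mdss_enable() now reads the MDSS hardware revision from the HW_REV register and applies any register list that cfg_handler[] maps to that revision (here, the UBWC global setting for DPU_HW_VER_620 / sc7180). Extending the same setup to another major revision would be a data-only change; a hypothetical extra row (DPU_HW_VER_630 is purely illustrative):

	static struct dpu_mdss_hw_init_handler cfg_handler[] = {
	    { .hw_rev = DPU_HW_VER_620,
	      .hw_reg_count = ARRAY_SIZE(hw_cfg),
	      .hw_cfg = hw_cfg
	    },
	    { .hw_rev = DPU_HW_VER_630,	/* hypothetical entry */
	      .hw_reg_count = ARRAY_SIZE(hw_cfg),
	      .hw_cfg = hw_cfg
	    },
	};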