author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-27 10:05:51 +0000
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-27 10:05:51 +0000
commit | 5d1646d90e1f2cceb9f0828f4b28318cd0ec7744 (patch)
tree | a94efe259b9009378be6d90eb30d2b019d95c194 /drivers/gpu/drm/arm
parent | Initial commit. (diff)
Adding upstream version 5.10.209.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/gpu/drm/arm')
44 files changed, 15054 insertions, 0 deletions
diff --git a/drivers/gpu/drm/arm/Kconfig b/drivers/gpu/drm/arm/Kconfig
new file mode 100644
index 000000000..3a9e966e0
--- /dev/null
+++ b/drivers/gpu/drm/arm/Kconfig
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: GPL-2.0
+menu "ARM devices"
+
+config DRM_HDLCD
+	tristate "ARM HDLCD"
+	depends on DRM && OF && (ARM || ARM64 || COMPILE_TEST)
+	depends on COMMON_CLK
+	select DRM_KMS_HELPER
+	select DRM_KMS_CMA_HELPER
+	help
+	  Choose this option if you have an ARM High Definition Colour LCD
+	  controller.
+
+	  If M is selected the module will be called hdlcd.
+
+config DRM_HDLCD_SHOW_UNDERRUN
+	bool "Show underrun conditions"
+	depends on DRM_HDLCD
+	default n
+	help
+	  Enable this option to show in red colour the pixels that the
+	  HDLCD device did not fetch from framebuffer due to underrun
+	  conditions.
+
+config DRM_MALI_DISPLAY
+	tristate "ARM Mali Display Processor"
+	depends on DRM && OF && (ARM || ARM64 || COMPILE_TEST)
+	depends on COMMON_CLK
+	select DRM_KMS_HELPER
+	select DRM_KMS_CMA_HELPER
+	select DRM_GEM_CMA_HELPER
+	select VIDEOMODE_HELPERS
+	help
+	  Choose this option if you want to compile the ARM Mali Display
+	  Processor driver. It supports the DP500, DP550 and DP650 variants
+	  of the hardware.
+
+	  If compiled as a module it will be called mali-dp.
+
+source "drivers/gpu/drm/arm/display/Kconfig"
+
+endmenu
diff --git a/drivers/gpu/drm/arm/Makefile b/drivers/gpu/drm/arm/Makefile
new file mode 100644
index 000000000..3ced6fc9e
--- /dev/null
+++ b/drivers/gpu/drm/arm/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+hdlcd-y := hdlcd_drv.o hdlcd_crtc.o
+obj-$(CONFIG_DRM_HDLCD) += hdlcd.o
+mali-dp-y := malidp_drv.o malidp_hw.o malidp_planes.o malidp_crtc.o
+mali-dp-y += malidp_mw.o
+obj-$(CONFIG_DRM_MALI_DISPLAY) += mali-dp.o
+obj-$(CONFIG_DRM_KOMEDA) += display/
diff --git a/drivers/gpu/drm/arm/display/Kbuild b/drivers/gpu/drm/arm/display/Kbuild
new file mode 100644
index 000000000..382f1ca83
--- /dev/null
+++ b/drivers/gpu/drm/arm/display/Kbuild
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_DRM_KOMEDA) += komeda/
diff --git a/drivers/gpu/drm/arm/display/Kconfig b/drivers/gpu/drm/arm/display/Kconfig
new file mode 100644
index 000000000..cec0639e3
--- /dev/null
+++ b/drivers/gpu/drm/arm/display/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+config DRM_KOMEDA
+	tristate "ARM Komeda display driver"
+	depends on DRM && OF
+	depends on COMMON_CLK
+	select DRM_KMS_HELPER
+	select DRM_KMS_CMA_HELPER
+	select DRM_GEM_CMA_HELPER
+	select VIDEOMODE_HELPERS
+	help
+	  Choose this option if you want to compile the ARM Komeda Display
+	  Processor driver. It supports the D71 variants of the hardware.
+
+	  If compiled as a module it will be called komeda.
diff --git a/drivers/gpu/drm/arm/display/include/malidp_io.h b/drivers/gpu/drm/arm/display/include/malidp_io.h
new file mode 100644
index 000000000..9440dff94
--- /dev/null
+++ b/drivers/gpu/drm/arm/display/include/malidp_io.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ * Author: James.Qian.Wang <james.qian.wang@arm.com>
+ *
+ */
+#ifndef _MALIDP_IO_H_
+#define _MALIDP_IO_H_
+
+#include <linux/io.h>
+
+static inline u32
+malidp_read32(u32 __iomem *base, u32 offset)
+{
+	return readl((base + (offset >> 2)));
+}
+
+static inline void
+malidp_write32(u32 __iomem *base, u32 offset, u32 v)
+{
+	writel(v, (base + (offset >> 2)));
+}
+
+static inline void
+malidp_write64(u32 __iomem *base, u32 offset, u64 v)
+{
+	writel(lower_32_bits(v), (base + (offset >> 2)));
+	writel(upper_32_bits(v), (base + (offset >> 2) + 1));
+}
+
+static inline void
+malidp_write32_mask(u32 __iomem *base, u32 offset, u32 m, u32 v)
+{
+	u32 tmp = malidp_read32(base, offset);
+
+	tmp &= (~m);
+	malidp_write32(base, offset, v | tmp);
+}
+
+static inline void
+malidp_write_group(u32 __iomem *base, u32 offset, int num, const u32 *values)
+{
+	int i;
+
+	for (i = 0; i < num; i++)
+		malidp_write32(base, offset + i * 4, values[i]);
+}
+
+#endif /*_MALIDP_IO_H_*/
diff --git a/drivers/gpu/drm/arm/display/include/malidp_product.h b/drivers/gpu/drm/arm/display/include/malidp_product.h
new file mode 100644
index 000000000..16a8a2c22
--- /dev/null
+++ b/drivers/gpu/drm/arm/display/include/malidp_product.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ * Author: James.Qian.Wang <james.qian.wang@arm.com>
+ *
+ */
+#ifndef _MALIDP_PRODUCT_H_
+#define _MALIDP_PRODUCT_H_
+
+/* Product identification */
+#define MALIDP_CORE_ID(__product, __major, __minor, __status) \
+	((((__product) & 0xFFFF) << 16) | (((__major) & 0xF) << 12) | \
+	 (((__minor) & 0xF) << 8) | ((__status) & 0xFF))
+
+#define MALIDP_CORE_ID_PRODUCT_ID(__core_id) ((__u32)(__core_id) >> 16)
+#define MALIDP_CORE_ID_MAJOR(__core_id)      (((__u32)(__core_id) >> 12) & 0xF)
+#define MALIDP_CORE_ID_MINOR(__core_id)      (((__u32)(__core_id) >> 8) & 0xF)
+#define MALIDP_CORE_ID_STATUS(__core_id)     (((__u32)(__core_id)) & 0xFF)
+
+/* Mali-display product IDs */
+#define MALIDP_D71_PRODUCT_ID	0x0071
+#define MALIDP_D32_PRODUCT_ID	0x0032
+
+union komeda_config_id {
+	struct {
+		__u32	max_line_sz:16,
+			n_pipelines:2,
+			n_scalers:2,	/* number of scalers per pipeline */
+			n_layers:3,	/* number of layers per pipeline */
+			n_richs:3,	/* number of rich layers per pipeline */
+			reserved_bits:6;
+	};
+	__u32 value;
+};
+
+#endif /* _MALIDP_PRODUCT_H_ */
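
As a quick sanity check on the bit packing above, a self-contained userspace snippet (with a local copy of the macro, since the header itself is kernel-only) can round-trip a core id through the same shifts the accessor macros use:

#include <assert.h>
#include <stdint.h>

#define MALIDP_CORE_ID(p, mj, mn, st) \
	((((p) & 0xFFFF) << 16) | (((mj) & 0xF) << 12) | \
	 (((mn) & 0xF) << 8) | ((st) & 0xFF))

int main(void)
{
	uint32_t id = MALIDP_CORE_ID(0x0071, 0, 1, 0x12); /* 0x00710112 */

	assert((id >> 16) == 0x0071);    /* MALIDP_CORE_ID_PRODUCT_ID */
	assert(((id >> 12) & 0xF) == 0); /* MALIDP_CORE_ID_MAJOR      */
	assert(((id >> 8) & 0xF) == 1);  /* MALIDP_CORE_ID_MINOR      */
	assert((id & 0xFF) == 0x12);     /* MALIDP_CORE_ID_STATUS     */
	return 0;
}
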
diff --git a/drivers/gpu/drm/arm/display/include/malidp_utils.h b/drivers/gpu/drm/arm/display/include/malidp_utils.h
new file mode 100644
index 000000000..49a1d7f35
--- /dev/null
+++ b/drivers/gpu/drm/arm/display/include/malidp_utils.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ * Author: James.Qian.Wang <james.qian.wang@arm.com>
+ *
+ */
+#ifndef _MALIDP_UTILS_
+#define _MALIDP_UTILS_
+
+#include <linux/delay.h>
+#include <linux/errno.h>
+
+#define has_bit(nr, mask)	(BIT(nr) & (mask))
+#define has_bits(bits, mask)	(((bits) & (mask)) == (bits))
+
+#define dp_wait_cond(__cond, __tries, __min_range, __max_range)	\
+({								\
+	int num_tries = __tries;				\
+	while (!__cond && (num_tries > 0)) {			\
+		usleep_range(__min_range, __max_range);		\
+		num_tries--;					\
+	}							\
+	(__cond) ? 0 : -ETIMEDOUT;				\
+})
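
dp_wait_cond() is a bounded poll: retry the condition up to __tries times with a sleep in between, then re-test once more to decide between 0 and -ETIMEDOUT. A minimal userspace model of that expansion (usleep() standing in for usleep_range(), and reg_ready() a made-up predicate, both just for illustration):

#include <errno.h>
#include <stdbool.h>
#include <unistd.h>

static volatile int hw_state;

static bool reg_ready(void) { return hw_state == 1; }

static int wait_cond(bool (*cond)(void), int tries, useconds_t delay_us)
{
	while (!cond() && tries > 0) {
		usleep(delay_us); /* the macro uses usleep_range(min, max) */
		tries--;
	}
	/* like the macro, re-evaluate the condition one last time */
	return cond() ? 0 : -ETIMEDOUT;
}

int main(void)
{
	hw_state = 1; /* pretend the hardware is already ready */
	return wait_cond(reg_ready, 10, 1000) == 0 ? 0 : 1;
}
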
seq_printf(sf, "PIPELINE_INFO:\t\t0x%X\n", hdr.pipeline_info); + + n_output = PIPELINE_INFO_N_OUTPUTS(hdr.pipeline_info); + n_input = PIPELINE_INFO_N_VALID_INPUTS(hdr.pipeline_info); + + for (i = 0; i < n_input; i++) + seq_printf(sf, "VALID_INPUT_ID%u:\t0x%X\n", + i, hdr.input_ids[i]); + + for (i = 0; i < n_output; i++) + seq_printf(sf, "OUTPUT_ID%u:\t\t0x%X\n", + i, hdr.output_ids[i]); +} + +/* On D71, we are using the global line size. From D32, every component have + * a line size register to indicate the fifo size. + */ +static u32 __get_blk_line_size(struct d71_dev *d71, u32 __iomem *reg, + u32 max_default) +{ + if (!d71->periph_addr) + max_default = malidp_read32(reg, BLK_MAX_LINE_SIZE); + + return max_default; +} + +static u32 get_blk_line_size(struct d71_dev *d71, u32 __iomem *reg) +{ + return __get_blk_line_size(d71, reg, d71->max_line_size); +} + +static u32 to_rot_ctrl(u32 rot) +{ + u32 lr_ctrl = 0; + + switch (rot & DRM_MODE_ROTATE_MASK) { + case DRM_MODE_ROTATE_0: + lr_ctrl |= L_ROT(L_ROT_R0); + break; + case DRM_MODE_ROTATE_90: + lr_ctrl |= L_ROT(L_ROT_R90); + break; + case DRM_MODE_ROTATE_180: + lr_ctrl |= L_ROT(L_ROT_R180); + break; + case DRM_MODE_ROTATE_270: + lr_ctrl |= L_ROT(L_ROT_R270); + break; + } + + if (rot & DRM_MODE_REFLECT_X) + lr_ctrl |= L_HFLIP; + if (rot & DRM_MODE_REFLECT_Y) + lr_ctrl |= L_VFLIP; + + return lr_ctrl; +} + +static u32 to_ad_ctrl(u64 modifier) +{ + u32 afbc_ctrl = AD_AEN; + + if (!modifier) + return 0; + + if ((modifier & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK) == + AFBC_FORMAT_MOD_BLOCK_SIZE_32x8) + afbc_ctrl |= AD_WB; + + if (modifier & AFBC_FORMAT_MOD_YTR) + afbc_ctrl |= AD_YT; + if (modifier & AFBC_FORMAT_MOD_SPLIT) + afbc_ctrl |= AD_BS; + if (modifier & AFBC_FORMAT_MOD_TILED) + afbc_ctrl |= AD_TH; + + return afbc_ctrl; +} + +static inline u32 to_d71_input_id(struct komeda_component_state *st, int idx) +{ + struct komeda_component_output *input = &st->inputs[idx]; + + /* if input is not active, set hw input_id(0) to disable it */ + if (has_bit(idx, st->active_inputs)) + return input->component->hw_id + input->output_port; + else + return 0; +} + +static void d71_layer_update_fb(struct komeda_component *c, + struct komeda_fb *kfb, + dma_addr_t *addr) +{ + struct drm_framebuffer *fb = &kfb->base; + const struct drm_format_info *info = fb->format; + u32 __iomem *reg = c->reg; + int block_h; + + if (info->num_planes > 2) + malidp_write64(reg, BLK_P2_PTR_LOW, addr[2]); + + if (info->num_planes > 1) { + block_h = drm_format_info_block_height(info, 1); + malidp_write32(reg, BLK_P1_STRIDE, fb->pitches[1] * block_h); + malidp_write64(reg, BLK_P1_PTR_LOW, addr[1]); + } + + block_h = drm_format_info_block_height(info, 0); + malidp_write32(reg, BLK_P0_STRIDE, fb->pitches[0] * block_h); + malidp_write64(reg, BLK_P0_PTR_LOW, addr[0]); + malidp_write32(reg, LAYER_FMT, kfb->format_caps->hw_id); +} + +static void d71_layer_disable(struct komeda_component *c) +{ + malidp_write32_mask(c->reg, BLK_CONTROL, L_EN, 0); +} + +static void d71_layer_update(struct komeda_component *c, + struct komeda_component_state *state) +{ + struct komeda_layer_state *st = to_layer_st(state); + struct drm_plane_state *plane_st = state->plane->state; + struct drm_framebuffer *fb = plane_st->fb; + struct komeda_fb *kfb = to_kfb(fb); + u32 __iomem *reg = c->reg; + u32 ctrl_mask = L_EN | L_ROT(L_ROT_R270) | L_HFLIP | L_VFLIP | L_TBU_EN; + u32 ctrl = L_EN | to_rot_ctrl(st->rot); + + d71_layer_update_fb(c, kfb, st->addr); + + malidp_write32(reg, AD_CONTROL, to_ad_ctrl(fb->modifier)); + if 
+
+static inline u32 to_d71_input_id(struct komeda_component_state *st, int idx)
+{
+	struct komeda_component_output *input = &st->inputs[idx];
+
+	/* if input is not active, set hw input_id(0) to disable it */
+	if (has_bit(idx, st->active_inputs))
+		return input->component->hw_id + input->output_port;
+	else
+		return 0;
+}
+
+static void d71_layer_update_fb(struct komeda_component *c,
+				struct komeda_fb *kfb,
+				dma_addr_t *addr)
+{
+	struct drm_framebuffer *fb = &kfb->base;
+	const struct drm_format_info *info = fb->format;
+	u32 __iomem *reg = c->reg;
+	int block_h;
+
+	if (info->num_planes > 2)
+		malidp_write64(reg, BLK_P2_PTR_LOW, addr[2]);
+
+	if (info->num_planes > 1) {
+		block_h = drm_format_info_block_height(info, 1);
+		malidp_write32(reg, BLK_P1_STRIDE, fb->pitches[1] * block_h);
+		malidp_write64(reg, BLK_P1_PTR_LOW, addr[1]);
+	}
+
+	block_h = drm_format_info_block_height(info, 0);
+	malidp_write32(reg, BLK_P0_STRIDE, fb->pitches[0] * block_h);
+	malidp_write64(reg, BLK_P0_PTR_LOW, addr[0]);
+	malidp_write32(reg, LAYER_FMT, kfb->format_caps->hw_id);
+}
+
+static void d71_layer_disable(struct komeda_component *c)
+{
+	malidp_write32_mask(c->reg, BLK_CONTROL, L_EN, 0);
+}
+
+static void d71_layer_update(struct komeda_component *c,
+			     struct komeda_component_state *state)
+{
+	struct komeda_layer_state *st = to_layer_st(state);
+	struct drm_plane_state *plane_st = state->plane->state;
+	struct drm_framebuffer *fb = plane_st->fb;
+	struct komeda_fb *kfb = to_kfb(fb);
+	u32 __iomem *reg = c->reg;
+	u32 ctrl_mask = L_EN | L_ROT(L_ROT_R270) | L_HFLIP | L_VFLIP | L_TBU_EN;
+	u32 ctrl = L_EN | to_rot_ctrl(st->rot);
+
+	d71_layer_update_fb(c, kfb, st->addr);
+
+	malidp_write32(reg, AD_CONTROL, to_ad_ctrl(fb->modifier));
+	if (fb->modifier) {
+		u64 addr;
+
+		malidp_write32(reg, LAYER_AD_H_CROP, HV_CROP(st->afbc_crop_l,
+							     st->afbc_crop_r));
+		malidp_write32(reg, LAYER_AD_V_CROP, HV_CROP(st->afbc_crop_t,
+							     st->afbc_crop_b));
+		/* afbc 1.2 wants payload, afbc 1.0/1.1 wants end_addr */
+		if (fb->modifier & AFBC_FORMAT_MOD_TILED)
+			addr = st->addr[0] + kfb->offset_payload;
+		else
+			addr = st->addr[0] + kfb->afbc_size - 1;
+
+		malidp_write32(reg, BLK_P1_PTR_LOW, lower_32_bits(addr));
+		malidp_write32(reg, BLK_P1_PTR_HIGH, upper_32_bits(addr));
+	}
+
+	if (fb->format->is_yuv) {
+		u32 upsampling = 0;
+
+		switch (kfb->format_caps->fourcc) {
+		case DRM_FORMAT_YUYV:
+			upsampling = fb->modifier ? LR_CHI422_BILINEAR :
+				     LR_CHI422_REPLICATION;
+			break;
+		case DRM_FORMAT_UYVY:
+			upsampling = LR_CHI422_REPLICATION;
+			break;
+		case DRM_FORMAT_NV12:
+		case DRM_FORMAT_YUV420_8BIT:
+		case DRM_FORMAT_YUV420_10BIT:
+		case DRM_FORMAT_YUV420:
+		case DRM_FORMAT_P010:
+			/* these formats support both MPEG/JPEG, prefer JPEG here */
+			upsampling = LR_CHI420_JPEG;
+			break;
+		case DRM_FORMAT_X0L2:
+			upsampling = LR_CHI420_JPEG;
+			break;
+		default:
+			break;
+		}
+
+		malidp_write32(reg, LAYER_R_CONTROL, upsampling);
+		malidp_write_group(reg, LAYER_YUV_RGB_COEFF0,
+				   KOMEDA_N_YUV2RGB_COEFFS,
+				   komeda_select_yuv2rgb_coeffs(
+					plane_st->color_encoding,
+					plane_st->color_range));
+	}
+
+	malidp_write32(reg, BLK_IN_SIZE, HV_SIZE(st->hsize, st->vsize));
+
+	if (kfb->is_va)
+		ctrl |= L_TBU_EN;
+	malidp_write32_mask(reg, BLK_CONTROL, ctrl_mask, ctrl);
+}
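
A worked example of the AFBC pointer selection above, with invented addresses: AFBC 1.2 (tiled headers) programs the payload offset, while AFBC 1.0/1.1 programs the last byte of the buffer.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t base           = 0x80000000ULL; /* st->addr[0]        */
	uint64_t afbc_size      = 0x10000;       /* kfb->afbc_size     */
	uint64_t offset_payload = 0x1000;        /* kfb->offset_payload */
	int tiled_header        = 1;             /* AFBC_FORMAT_MOD_TILED? */

	uint64_t addr = tiled_header ? base + offset_payload
				     : base + afbc_size - 1;

	/* split exactly as lower_32_bits()/upper_32_bits() would */
	printf("P1 pointer: low=0x%08" PRIx32 " high=0x%08" PRIx32 "\n",
	       (uint32_t)addr, (uint32_t)(addr >> 32));
	return 0;
}
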
"%sAD_V_CROP:\t\t0x%X\n", prefix, v[2]); +} + +static int d71_layer_validate(struct komeda_component *c, + struct komeda_component_state *state) +{ + struct komeda_layer_state *st = to_layer_st(state); + struct komeda_layer *layer = to_layer(c); + struct drm_plane_state *plane_st; + struct drm_framebuffer *fb; + u32 fourcc, line_sz, max_line_sz; + + plane_st = drm_atomic_get_new_plane_state(state->obj.state, + state->plane); + fb = plane_st->fb; + fourcc = fb->format->format; + + if (drm_rotation_90_or_270(st->rot)) + line_sz = st->vsize - st->afbc_crop_t - st->afbc_crop_b; + else + line_sz = st->hsize - st->afbc_crop_l - st->afbc_crop_r; + + if (fb->modifier) { + if ((fb->modifier & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK) == + AFBC_FORMAT_MOD_BLOCK_SIZE_32x8) + max_line_sz = layer->line_sz; + else + max_line_sz = layer->line_sz / 2; + + if (line_sz > max_line_sz) { + DRM_DEBUG_ATOMIC("afbc request line_sz: %d exceed the max afbc line_sz: %d.\n", + line_sz, max_line_sz); + return -EINVAL; + } + } + + if (fourcc == DRM_FORMAT_YUV420_10BIT && line_sz > 2046 && (st->afbc_crop_l % 4)) { + DRM_DEBUG_ATOMIC("YUV420_10BIT input_hsize: %d exceed the max size 2046.\n", + line_sz); + return -EINVAL; + } + + if (fourcc == DRM_FORMAT_X0L2 && line_sz > 2046 && (st->addr[0] % 16)) { + DRM_DEBUG_ATOMIC("X0L2 input_hsize: %d exceed the max size 2046.\n", + line_sz); + return -EINVAL; + } + + return 0; +} + +static const struct komeda_component_funcs d71_layer_funcs = { + .validate = d71_layer_validate, + .update = d71_layer_update, + .disable = d71_layer_disable, + .dump_register = d71_layer_dump, +}; + +static int d71_layer_init(struct d71_dev *d71, + struct block_header *blk, u32 __iomem *reg) +{ + struct komeda_component *c; + struct komeda_layer *layer; + u32 pipe_id, layer_id, layer_info; + + get_resources_id(blk->block_info, &pipe_id, &layer_id); + c = komeda_component_add(&d71->pipes[pipe_id]->base, sizeof(*layer), + layer_id, + BLOCK_INFO_INPUT_ID(blk->block_info), + &d71_layer_funcs, 0, + get_valid_inputs(blk), + 1, reg, "LPU%d_LAYER%d", pipe_id, layer_id); + if (IS_ERR(c)) { + DRM_ERROR("Failed to add layer component\n"); + return PTR_ERR(c); + } + + layer = to_layer(c); + layer_info = malidp_read32(reg, LAYER_INFO); + + if (layer_info & L_INFO_RF) + layer->layer_type = KOMEDA_FMT_RICH_LAYER; + else + layer->layer_type = KOMEDA_FMT_SIMPLE_LAYER; + + if (!d71->periph_addr) { + /* D32 or newer product */ + layer->line_sz = malidp_read32(reg, BLK_MAX_LINE_SIZE); + layer->yuv_line_sz = L_INFO_YUV_MAX_LINESZ(layer_info); + } else if (d71->max_line_size > 2048) { + /* D71 4K */ + layer->line_sz = d71->max_line_size; + layer->yuv_line_sz = layer->line_sz / 2; + } else { + /* D71 2K */ + if (layer->layer_type == KOMEDA_FMT_RICH_LAYER) { + /* rich layer is 4K configuration */ + layer->line_sz = d71->max_line_size * 2; + layer->yuv_line_sz = layer->line_sz / 2; + } else { + layer->line_sz = d71->max_line_size; + layer->yuv_line_sz = 0; + } + } + + set_range(&layer->hsize_in, 4, layer->line_sz); + + set_range(&layer->vsize_in, 4, d71->max_vsize); + + malidp_write32(reg, LAYER_PALPHA, D71_PALPHA_DEF_MAP); + + layer->supported_rots = DRM_MODE_ROTATE_MASK | DRM_MODE_REFLECT_MASK; + + return 0; +} + +static void d71_wb_layer_update(struct komeda_component *c, + struct komeda_component_state *state) +{ + struct komeda_layer_state *st = to_layer_st(state); + struct drm_connector_state *conn_st = state->wb_conn->state; + struct komeda_fb *kfb = to_kfb(conn_st->writeback_job->fb); + u32 ctrl = L_EN | LW_OFM, mask = L_EN 
+
+static void d71_wb_layer_update(struct komeda_component *c,
+				struct komeda_component_state *state)
+{
+	struct komeda_layer_state *st = to_layer_st(state);
+	struct drm_connector_state *conn_st = state->wb_conn->state;
+	struct komeda_fb *kfb = to_kfb(conn_st->writeback_job->fb);
+	u32 ctrl = L_EN | LW_OFM, mask = L_EN | LW_OFM | LW_TBU_EN;
+	u32 __iomem *reg = c->reg;
+
+	d71_layer_update_fb(c, kfb, st->addr);
+
+	if (kfb->is_va)
+		ctrl |= LW_TBU_EN;
+
+	malidp_write32(reg, BLK_IN_SIZE, HV_SIZE(st->hsize, st->vsize));
+	malidp_write32(reg, BLK_INPUT_ID0, to_d71_input_id(state, 0));
+	malidp_write32_mask(reg, BLK_CONTROL, mask, ctrl);
+}
+
+static void d71_wb_layer_dump(struct komeda_component *c, struct seq_file *sf)
+{
+	u32 v[12], i;
+
+	dump_block_header(sf, c->reg);
+
+	get_values_from_reg(c->reg, 0x80, 1, v);
+	seq_printf(sf, "LW_INPUT_ID0:\t\t0x%X\n", v[0]);
+
+	get_values_from_reg(c->reg, 0xD0, 3, v);
+	seq_printf(sf, "LW_CONTROL:\t\t0x%X\n", v[0]);
+	seq_printf(sf, "LW_PROG_LINE:\t\t0x%X\n", v[1]);
+	seq_printf(sf, "LW_FORMAT:\t\t0x%X\n", v[2]);
+
+	get_values_from_reg(c->reg, 0xE0, 1, v);
+	seq_printf(sf, "LW_IN_SIZE:\t\t0x%X\n", v[0]);
+
+	for (i = 0; i < 2; i++) {
+		get_values_from_reg(c->reg, 0x100 + i * 0x10, 3, v);
+		seq_printf(sf, "LW_P%u_PTR_LOW:\t\t0x%X\n", i, v[0]);
+		seq_printf(sf, "LW_P%u_PTR_HIGH:\t\t0x%X\n", i, v[1]);
+		seq_printf(sf, "LW_P%u_STRIDE:\t\t0x%X\n", i, v[2]);
+	}
+
+	get_values_from_reg(c->reg, 0x130, 12, v);
+	for (i = 0; i < 12; i++)
+		seq_printf(sf, "LW_RGB_YUV_COEFF%u:\t0x%X\n", i, v[i]);
+}
+
+static void d71_wb_layer_disable(struct komeda_component *c)
+{
+	malidp_write32(c->reg, BLK_INPUT_ID0, 0);
+	malidp_write32_mask(c->reg, BLK_CONTROL, L_EN, 0);
+}
+
+static const struct komeda_component_funcs d71_wb_layer_funcs = {
+	.update		= d71_wb_layer_update,
+	.disable	= d71_wb_layer_disable,
+	.dump_register	= d71_wb_layer_dump,
+};
+
+static int d71_wb_layer_init(struct d71_dev *d71,
+			     struct block_header *blk, u32 __iomem *reg)
+{
+	struct komeda_component *c;
+	struct komeda_layer *wb_layer;
+	u32 pipe_id, layer_id;
+
+	get_resources_id(blk->block_info, &pipe_id, &layer_id);
+
+	c = komeda_component_add(&d71->pipes[pipe_id]->base, sizeof(*wb_layer),
+				 layer_id, BLOCK_INFO_INPUT_ID(blk->block_info),
+				 &d71_wb_layer_funcs,
+				 1, get_valid_inputs(blk), 0, reg,
+				 "LPU%d_LAYER_WR", pipe_id);
+	if (IS_ERR(c)) {
+		DRM_ERROR("Failed to add wb_layer component\n");
+		return PTR_ERR(c);
+	}
+
+	wb_layer = to_layer(c);
+	wb_layer->layer_type = KOMEDA_FMT_WB_LAYER;
+	wb_layer->line_sz = get_blk_line_size(d71, reg);
+	wb_layer->yuv_line_sz = wb_layer->line_sz;
+
+	set_range(&wb_layer->hsize_in, 64, wb_layer->line_sz);
+	set_range(&wb_layer->vsize_in, 64, d71->max_vsize);
+
+	return 0;
+}
+
+static void d71_component_disable(struct komeda_component *c)
+{
+	u32 __iomem *reg = c->reg;
+	u32 i;
+
+	malidp_write32(reg, BLK_CONTROL, 0);
+
+	for (i = 0; i < c->max_active_inputs; i++) {
+		malidp_write32(reg, BLK_INPUT_ID0 + (i << 2), 0);
+
+		/* Besides clearing the input ID to zero, D71 compiz also has
+		 * an input enable bit in CU_INPUTx_CONTROL which needs to be
+		 * cleared.
+		 */
+		if (has_bit(c->id, KOMEDA_PIPELINE_COMPIZS))
+			malidp_write32(reg, CU_INPUT0_CONTROL +
+				       i * CU_PER_INPUT_REGS * 4,
+				       CU_INPUT_CTRL_ALPHA(0xFF));
+	}
+}
+
+static void compiz_enable_input(u32 __iomem *id_reg,
+				u32 __iomem *cfg_reg,
+				u32 input_hw_id,
+				struct komeda_compiz_input_cfg *cin)
+{
+	u32 ctrl = CU_INPUT_CTRL_EN;
+	u8 blend = cin->pixel_blend_mode;
+
+	if (blend == DRM_MODE_BLEND_PIXEL_NONE)
+		ctrl |= CU_INPUT_CTRL_PAD;
+	else if (blend == DRM_MODE_BLEND_PREMULTI)
+		ctrl |= CU_INPUT_CTRL_PMUL;
+
+	ctrl |= CU_INPUT_CTRL_ALPHA(cin->layer_alpha);
+
+	malidp_write32(id_reg, BLK_INPUT_ID0, input_hw_id);
+
+	malidp_write32(cfg_reg, CU_INPUT0_SIZE,
+		       HV_SIZE(cin->hsize, cin->vsize));
+	malidp_write32(cfg_reg, CU_INPUT0_OFFSET,
+		       HV_OFFSET(cin->hoffset, cin->voffset));
+	malidp_write32(cfg_reg, CU_INPUT0_CONTROL, ctrl);
+}
+
+static void d71_compiz_update(struct komeda_component *c,
+			      struct komeda_component_state *state)
+{
+	struct komeda_compiz_state *st = to_compiz_st(state);
+	u32 __iomem *reg = c->reg;
+	u32 __iomem *id_reg, *cfg_reg;
+	u32 index;
+
+	for_each_changed_input(state, index) {
+		id_reg = reg + index;
+		cfg_reg = reg + index * CU_PER_INPUT_REGS;
+		if (state->active_inputs & BIT(index)) {
+			compiz_enable_input(id_reg, cfg_reg,
+					    to_d71_input_id(state, index),
+					    &st->cins[index]);
+		} else {
+			malidp_write32(id_reg, BLK_INPUT_ID0, 0);
+			malidp_write32(cfg_reg, CU_INPUT0_CONTROL, 0);
+		}
+	}
+
+	malidp_write32(reg, BLK_SIZE, HV_SIZE(st->hsize, st->vsize));
+}
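
compiz_enable_input() maps the DRM per-plane blend mode onto two control bits: PIXEL_NONE pads (per-pixel alpha is ignored), PREMULTI marks the colour as already multiplied, and coverage blending sets neither. A stand-alone model of that mapping (bit positions are illustrative, not the hardware's):

#include <assert.h>
#include <stdint.h>

enum blend { BLEND_PIXEL_NONE, BLEND_PREMULTI, BLEND_COVERAGE };

#define CTRL_EN       (1u << 0)
#define CTRL_PAD      (1u << 1)
#define CTRL_PMUL     (1u << 2)
#define CTRL_ALPHA(a) (((a) & 0xFF) << 8)

static uint32_t input_ctrl(enum blend mode, uint8_t layer_alpha)
{
	uint32_t ctrl = CTRL_EN;

	if (mode == BLEND_PIXEL_NONE)
		ctrl |= CTRL_PAD;   /* ignore per-pixel alpha */
	else if (mode == BLEND_PREMULTI)
		ctrl |= CTRL_PMUL;  /* colour already multiplied by alpha */
	/* BLEND_COVERAGE: neither bit, HW multiplies while blending */

	return ctrl | CTRL_ALPHA(layer_alpha);
}

int main(void)
{
	assert(input_ctrl(BLEND_PREMULTI, 0xFF) ==
	       (CTRL_EN | CTRL_PMUL | CTRL_ALPHA(0xFF)));
	return 0;
}
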
+
+static void d71_compiz_dump(struct komeda_component *c, struct seq_file *sf)
+{
+	u32 v[8], i;
+
+	dump_block_header(sf, c->reg);
+
+	get_values_from_reg(c->reg, 0x80, 5, v);
+	for (i = 0; i < 5; i++)
+		seq_printf(sf, "CU_INPUT_ID%u:\t\t0x%X\n", i, v[i]);
+
+	get_values_from_reg(c->reg, 0xA0, 5, v);
+	seq_printf(sf, "CU_IRQ_RAW_STATUS:\t0x%X\n", v[0]);
+	seq_printf(sf, "CU_IRQ_CLEAR:\t\t0x%X\n", v[1]);
+	seq_printf(sf, "CU_IRQ_MASK:\t\t0x%X\n", v[2]);
+	seq_printf(sf, "CU_IRQ_STATUS:\t\t0x%X\n", v[3]);
+	seq_printf(sf, "CU_STATUS:\t\t0x%X\n", v[4]);
+
+	get_values_from_reg(c->reg, 0xD0, 2, v);
+	seq_printf(sf, "CU_CONTROL:\t\t0x%X\n", v[0]);
+	seq_printf(sf, "CU_SIZE:\t\t0x%X\n", v[1]);
+
+	get_values_from_reg(c->reg, 0xDC, 1, v);
+	seq_printf(sf, "CU_BG_COLOR:\t\t0x%X\n", v[0]);
+
+	for (i = 0, v[4] = 0xE0; i < 5; i++, v[4] += 0x10) {
+		get_values_from_reg(c->reg, v[4], 3, v);
+		seq_printf(sf, "CU_INPUT%u_SIZE:\t\t0x%X\n", i, v[0]);
+		seq_printf(sf, "CU_INPUT%u_OFFSET:\t0x%X\n", i, v[1]);
+		seq_printf(sf, "CU_INPUT%u_CONTROL:\t0x%X\n", i, v[2]);
+	}
+
+	get_values_from_reg(c->reg, 0x130, 2, v);
+	seq_printf(sf, "CU_USER_LOW:\t\t0x%X\n", v[0]);
+	seq_printf(sf, "CU_USER_HIGH:\t\t0x%X\n", v[1]);
+}
+
+static const struct komeda_component_funcs d71_compiz_funcs = {
+	.update		= d71_compiz_update,
+	.disable	= d71_component_disable,
+	.dump_register	= d71_compiz_dump,
+};
+
+static int d71_compiz_init(struct d71_dev *d71,
+			   struct block_header *blk, u32 __iomem *reg)
+{
+	struct komeda_component *c;
+	struct komeda_compiz *compiz;
+	u32 pipe_id, comp_id;
+
+	get_resources_id(blk->block_info, &pipe_id, &comp_id);
+
+	c = komeda_component_add(&d71->pipes[pipe_id]->base, sizeof(*compiz),
+				 comp_id,
+				 BLOCK_INFO_INPUT_ID(blk->block_info),
+				 &d71_compiz_funcs,
+				 CU_NUM_INPUT_IDS, get_valid_inputs(blk),
+				 CU_NUM_OUTPUT_IDS, reg,
+				 "CU%d", pipe_id);
+	if (IS_ERR(c))
+		return PTR_ERR(c);
+
+	compiz = to_compiz(c);
+
+	set_range(&compiz->hsize, 64, get_blk_line_size(d71, reg));
+	set_range(&compiz->vsize, 64, d71->max_vsize);
+
+	return 0;
+}
+
+static void d71_scaler_update_filter_lut(u32 __iomem *reg, u32 hsize_in,
+					 u32 vsize_in, u32 hsize_out,
+					 u32 vsize_out)
+{
+	u32 val = 0;
+
+	if (hsize_in <= hsize_out)
+		val  |= 0x62;
+	else if (hsize_in <= (hsize_out + hsize_out / 2))
+		val |= 0x63;
+	else if (hsize_in <= hsize_out * 2)
+		val |= 0x64;
+	else if (hsize_in <= hsize_out * 2 + (hsize_out * 3) / 4)
+		val |= 0x65;
+	else
+		val |= 0x66;
+
+	if (vsize_in <= vsize_out)
+		val  |= SC_VTSEL(0x6A);
+	else if (vsize_in <= (vsize_out + vsize_out / 2))
+		val |= SC_VTSEL(0x6B);
+	else if (vsize_in <= vsize_out * 2)
+		val |= SC_VTSEL(0x6C);
+	else if (vsize_in <= vsize_out * 2 + vsize_out * 3 / 4)
+		val |= SC_VTSEL(0x6D);
+	else
+		val |= SC_VTSEL(0x6E);
+
+	malidp_write32(reg, SC_COEFFTAB, val);
+}
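
d71_scaler_update_filter_lut() picks the coefficient table from the input/output ratio using integer comparisons only, with break points at 1x, 1.5x, 2x and 2.75x. The horizontal half of that selection, extracted into a testable function:

#include <assert.h>

static unsigned int h_coefftab(unsigned int in, unsigned int out)
{
	if (in <= out)                     return 0x62; /* upscale / 1:1 */
	if (in <= out + out / 2)           return 0x63; /* <= 1.5x down  */
	if (in <= out * 2)                 return 0x64; /* <= 2x down    */
	if (in <= out * 2 + (out * 3) / 4) return 0x65; /* <= 2.75x down */
	return 0x66;                                    /* harder still  */
}

int main(void)
{
	assert(h_coefftab(1920, 1920) == 0x62); /* 1:1           */
	assert(h_coefftab(1920, 1280) == 0x63); /* exactly 1.5x  */
	assert(h_coefftab(3840, 1920) == 0x64); /* exactly 2x    */
	return 0;
}
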
+
+static void d71_scaler_update(struct komeda_component *c,
+			      struct komeda_component_state *state)
+{
+	struct komeda_scaler_state *st = to_scaler_st(state);
+	u32 __iomem *reg = c->reg;
+	u32 init_ph, delta_ph, ctrl;
+
+	d71_scaler_update_filter_lut(reg, st->hsize_in, st->vsize_in,
+				     st->hsize_out, st->vsize_out);
+
+	malidp_write32(reg, BLK_IN_SIZE, HV_SIZE(st->hsize_in, st->vsize_in));
+	malidp_write32(reg, SC_OUT_SIZE, HV_SIZE(st->hsize_out, st->vsize_out));
+	malidp_write32(reg, SC_H_CROP, HV_CROP(st->left_crop, st->right_crop));
+
+	/* for the right part, HW only samples the valid pixels, which means
+	 * the pixels in left_crop will be skipped, and the first sampled
+	 * pixel is:
+	 *
+	 * dst_a = st->total_hsize_out - st->hsize_out + st->left_crop + 0.5;
+	 *
+	 * Then the corresponding texel in src is:
+	 *
+	 * h_delta_phase = st->total_hsize_in / st->total_hsize_out;
+	 * src_a = dst_a * h_delta_phase;
+	 *
+	 * and h_init_phase is src_a minus the real source start src_S;
+	 *
+	 * src_S = st->total_hsize_in - st->hsize_in;
+	 * h_init_phase = src_a - src_S;
+	 *
+	 * And HW precision for the initial/delta_phase is 16:16 fixed point,
+	 * the following is the simplified formula
+	 */
+	if (st->right_part) {
+		u32 dst_a = st->total_hsize_out - st->hsize_out + st->left_crop;
+
+		if (st->en_img_enhancement)
+			dst_a -= 1;
+
+		init_ph = ((st->total_hsize_in * (2 * dst_a + 1) -
+			    2 * st->total_hsize_out * (st->total_hsize_in -
+			    st->hsize_in)) << 15) / st->total_hsize_out;
+	} else {
+		init_ph = (st->total_hsize_in << 15) / st->total_hsize_out;
+	}
+
+	malidp_write32(reg, SC_H_INIT_PH, init_ph);
+
+	delta_ph = (st->total_hsize_in << 16) / st->total_hsize_out;
+	malidp_write32(reg, SC_H_DELTA_PH, delta_ph);
+
+	init_ph = (st->total_vsize_in << 15) / st->vsize_out;
+	malidp_write32(reg, SC_V_INIT_PH, init_ph);
+
+	delta_ph = (st->total_vsize_in << 16) / st->vsize_out;
+	malidp_write32(reg, SC_V_DELTA_PH, delta_ph);
+
+	ctrl = 0;
+	ctrl |= st->en_scaling ? SC_CTRL_SCL : 0;
+	ctrl |= st->en_alpha ? SC_CTRL_AP : 0;
+	ctrl |= st->en_img_enhancement ? SC_CTRL_IENH : 0;
+	/* If we use the hardware splitter we shouldn't set SC_CTRL_LS */
+	if (st->en_split &&
+	    state->inputs[0].component->id != KOMEDA_COMPONENT_SPLITTER)
+		ctrl |= SC_CTRL_LS;
+
+	malidp_write32(reg, BLK_CONTROL, ctrl);
+	malidp_write32(reg, BLK_INPUT_ID0, to_d71_input_id(state, 0));
+}
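
The phase setup above is 16.16 fixed point: delta_ph is the source step per output pixel, and for the non-split (left) case init_ph is half a step, hence the << 15. A numeric check for a plain 2:1 horizontal downscale:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t in = 3840, out = 1920;

	/* source step per output pixel, 16.16 fixed point */
	uint32_t delta_ph = ((uint64_t)in << 16) / out;
	/* first sample phase for the left part: half a step */
	uint32_t init_ph  = ((uint64_t)in << 15) / out;

	assert(delta_ph == 0x20000); /* 2.0 source pixels per output pixel */
	assert(init_ph  == 0x10000); /* centred: exactly delta_ph / 2 */
	return 0;
}
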
+
+static void d71_scaler_dump(struct komeda_component *c, struct seq_file *sf)
+{
+	u32 v[10];
+
+	dump_block_header(sf, c->reg);
+
+	get_values_from_reg(c->reg, 0x80, 1, v);
+	seq_printf(sf, "SC_INPUT_ID0:\t\t0x%X\n", v[0]);
+
+	get_values_from_reg(c->reg, 0xD0, 1, v);
+	seq_printf(sf, "SC_CONTROL:\t\t0x%X\n", v[0]);
+
+	get_values_from_reg(c->reg, 0xDC, 9, v);
+	seq_printf(sf, "SC_COEFFTAB:\t\t0x%X\n", v[0]);
+	seq_printf(sf, "SC_IN_SIZE:\t\t0x%X\n", v[1]);
+	seq_printf(sf, "SC_OUT_SIZE:\t\t0x%X\n", v[2]);
+	seq_printf(sf, "SC_H_CROP:\t\t0x%X\n", v[3]);
+	seq_printf(sf, "SC_V_CROP:\t\t0x%X\n", v[4]);
+	seq_printf(sf, "SC_H_INIT_PH:\t\t0x%X\n", v[5]);
+	seq_printf(sf, "SC_H_DELTA_PH:\t\t0x%X\n", v[6]);
+	seq_printf(sf, "SC_V_INIT_PH:\t\t0x%X\n", v[7]);
+	seq_printf(sf, "SC_V_DELTA_PH:\t\t0x%X\n", v[8]);
+
+	get_values_from_reg(c->reg, 0x130, 10, v);
+	seq_printf(sf, "SC_ENH_LIMITS:\t\t0x%X\n", v[0]);
+	seq_printf(sf, "SC_ENH_COEFF0:\t\t0x%X\n", v[1]);
+	seq_printf(sf, "SC_ENH_COEFF1:\t\t0x%X\n", v[2]);
+	seq_printf(sf, "SC_ENH_COEFF2:\t\t0x%X\n", v[3]);
+	seq_printf(sf, "SC_ENH_COEFF3:\t\t0x%X\n", v[4]);
+	seq_printf(sf, "SC_ENH_COEFF4:\t\t0x%X\n", v[5]);
+	seq_printf(sf, "SC_ENH_COEFF5:\t\t0x%X\n", v[6]);
+	seq_printf(sf, "SC_ENH_COEFF6:\t\t0x%X\n", v[7]);
+	seq_printf(sf, "SC_ENH_COEFF7:\t\t0x%X\n", v[8]);
+	seq_printf(sf, "SC_ENH_COEFF8:\t\t0x%X\n", v[9]);
+}
+
+static const struct komeda_component_funcs d71_scaler_funcs = {
+	.update		= d71_scaler_update,
+	.disable	= d71_component_disable,
+	.dump_register	= d71_scaler_dump,
+};
+
+static int d71_scaler_init(struct d71_dev *d71,
+			   struct block_header *blk, u32 __iomem *reg)
+{
+	struct komeda_component *c;
+	struct komeda_scaler *scaler;
+	u32 pipe_id, comp_id;
+
+	get_resources_id(blk->block_info, &pipe_id, &comp_id);
+
+	c = komeda_component_add(&d71->pipes[pipe_id]->base, sizeof(*scaler),
+				 comp_id, BLOCK_INFO_INPUT_ID(blk->block_info),
+				 &d71_scaler_funcs,
+				 1, get_valid_inputs(blk), 1, reg,
+				 "CU%d_SCALER%d",
+				 pipe_id, BLOCK_INFO_BLK_ID(blk->block_info));
+
+	if (IS_ERR(c)) {
+		DRM_ERROR("Failed to initialize scaler");
+		return PTR_ERR(c);
+	}
+
+	scaler = to_scaler(c);
+	set_range(&scaler->hsize, 4, __get_blk_line_size(d71, reg, 2048));
+	set_range(&scaler->vsize, 4, 4096);
+	scaler->max_downscaling = 6;
+	scaler->max_upscaling = 64;
+	scaler->scaling_split_overlap = 8;
+	scaler->enh_split_overlap = 1;
+
+	malidp_write32(c->reg, BLK_CONTROL, 0);
+
+	return 0;
+}
+
+static int d71_downscaling_clk_check(struct komeda_pipeline *pipe,
+				     struct drm_display_mode *mode,
+				     unsigned long aclk_rate,
+				     struct komeda_data_flow_cfg *dflow)
+{
+	u32 h_in = dflow->in_w;
+	u32 v_in = dflow->in_h;
+	u32 v_out = dflow->out_h;
+	u64 fraction, denominator;
+
+	/* D71 downscaling must satisfy the following equation
+	 *
+	 *    ACLK          h_in * v_in
+	 *  ------- >= ---------------------------------------------
+	 *   PXLCLK     (h_total - (1 + 2 * v_in / v_out)) * v_out
+	 *
+	 * In a horizontal-downscaling-only situation, the right side should
+	 * be multiplied by (h_total - 3) / (h_active - 3), then the equation
+	 * becomes
+	 *
+	 *    ACLK          h_in
+	 *  ------- >= ----------------
+	 *   PXLCLK     (h_active - 3)
+	 *
+	 * To avoid precision loss, equation 1 is converted to:
+	 *
+	 *    ACLK          h_in * v_in
+	 *  ------- >= -----------------------------------
+	 *   PXLCLK     (h_total - 1) * v_out - 2 * v_in
+	 */
+	if (v_in == v_out) {
+		fraction = h_in;
+		denominator = mode->hdisplay - 3;
+	} else {
+		fraction = h_in * v_in;
+		denominator = (mode->htotal - 1) * v_out - 2 * v_in;
+	}
+
+	return aclk_rate * denominator >= mode->crtc_clock * 1000 * fraction ?
+	       0 : -EINVAL;
+}
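
Plugging numbers into the horizontal-only branch of the check above: downscaling 3840 source pixels into a 1920-wide 1080p60 mode (148.5 MHz pixel clock) needs ACLK >= 148.5 MHz * 3840 / (1920 - 3), roughly 297.5 MHz. A sketch with invented clock rates (note crtc_clock is in kHz, hence the * 1000 in the driver):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t aclk     = 400000000ULL;     /* 400 MHz, assumed rate */
	uint64_t pxlclk   = 148500ULL * 1000; /* 148.5 MHz pixel clock */
	uint64_t h_in     = 3840;
	uint64_t h_active = 1920;

	/* same cross-multiplied comparison as the driver's return */
	assert(aclk * (h_active - 3) >= pxlclk * h_in); /* ~297.5 MHz needed */
	return 0;
}
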
+
+static void d71_splitter_update(struct komeda_component *c,
+				struct komeda_component_state *state)
+{
+	struct komeda_splitter_state *st = to_splitter_st(state);
+	u32 __iomem *reg = c->reg;
+
+	malidp_write32(reg, BLK_INPUT_ID0, to_d71_input_id(state, 0));
+	malidp_write32(reg, BLK_SIZE, HV_SIZE(st->hsize, st->vsize));
+	malidp_write32(reg, SP_OVERLAP_SIZE, st->overlap & 0x1FFF);
+	malidp_write32(reg, BLK_CONTROL, BLK_CTRL_EN);
+}
+
+static void d71_splitter_dump(struct komeda_component *c, struct seq_file *sf)
+{
+	u32 v[3];
+
+	dump_block_header(sf, c->reg);
+
+	get_values_from_reg(c->reg, BLK_INPUT_ID0, 1, v);
+	seq_printf(sf, "SP_INPUT_ID0:\t\t0x%X\n", v[0]);
+
+	get_values_from_reg(c->reg, BLK_CONTROL, 3, v);
+	seq_printf(sf, "SP_CONTROL:\t\t0x%X\n", v[0]);
+	seq_printf(sf, "SP_SIZE:\t\t0x%X\n", v[1]);
+	seq_printf(sf, "SP_OVERLAP_SIZE:\t0x%X\n", v[2]);
+}
+
+static const struct komeda_component_funcs d71_splitter_funcs = {
+	.update		= d71_splitter_update,
+	.disable	= d71_component_disable,
+	.dump_register	= d71_splitter_dump,
+};
+
+static int d71_splitter_init(struct d71_dev *d71,
+			     struct block_header *blk, u32 __iomem *reg)
+{
+	struct komeda_component *c;
+	struct komeda_splitter *splitter;
+	u32 pipe_id, comp_id;
+
+	get_resources_id(blk->block_info, &pipe_id, &comp_id);
+
+	c = komeda_component_add(&d71->pipes[pipe_id]->base, sizeof(*splitter),
+				 comp_id,
+				 BLOCK_INFO_INPUT_ID(blk->block_info),
+				 &d71_splitter_funcs,
+				 1, get_valid_inputs(blk), 2, reg,
+				 "CU%d_SPLITTER", pipe_id);
+
+	if (IS_ERR(c)) {
+		DRM_ERROR("Failed to initialize splitter");
+		return -1;
+	}
+
+	splitter = to_splitter(c);
+
+	set_range(&splitter->hsize, 4, get_blk_line_size(d71, reg));
+	set_range(&splitter->vsize, 4, d71->max_vsize);
+
+	return 0;
+}
+
+static void d71_merger_update(struct komeda_component *c,
+			      struct komeda_component_state *state)
+{
+	struct komeda_merger_state *st = to_merger_st(state);
+	u32 __iomem *reg = c->reg;
+	u32 index;
+
+	for_each_changed_input(state, index)
+		malidp_write32(reg, MG_INPUT_ID0 + index * 4,
+			       to_d71_input_id(state, index));
+
+	malidp_write32(reg, MG_SIZE, HV_SIZE(st->hsize_merged,
+					     st->vsize_merged));
+	malidp_write32(reg, BLK_CONTROL, BLK_CTRL_EN);
+}
+
+static void d71_merger_dump(struct komeda_component *c, struct seq_file *sf)
+{
+	u32 v;
+
+	dump_block_header(sf, c->reg);
+
+	get_values_from_reg(c->reg, MG_INPUT_ID0, 1, &v);
+	seq_printf(sf, "MG_INPUT_ID0:\t\t0x%X\n", v);
+
+	get_values_from_reg(c->reg, MG_INPUT_ID1, 1, &v);
+	seq_printf(sf, "MG_INPUT_ID1:\t\t0x%X\n", v);
+
+	get_values_from_reg(c->reg, BLK_CONTROL, 1, &v);
+	seq_printf(sf, "MG_CONTROL:\t\t0x%X\n", v);
+
+	get_values_from_reg(c->reg, MG_SIZE, 1, &v);
+	seq_printf(sf, "MG_SIZE:\t\t0x%X\n", v);
+}
+
+static const struct komeda_component_funcs d71_merger_funcs = {
+	.update		= d71_merger_update,
+	.disable	= d71_component_disable,
+	.dump_register	= d71_merger_dump,
+};
+
+static int d71_merger_init(struct d71_dev *d71,
+			   struct block_header *blk, u32 __iomem *reg)
+{
+	struct komeda_component *c;
+	struct komeda_merger *merger;
+	u32 pipe_id, comp_id;
+
+	get_resources_id(blk->block_info, &pipe_id, &comp_id);
+
+	c = komeda_component_add(&d71->pipes[pipe_id]->base, sizeof(*merger),
+				 comp_id,
+				 BLOCK_INFO_INPUT_ID(blk->block_info),
+				 &d71_merger_funcs,
+				 MG_NUM_INPUTS_IDS, get_valid_inputs(blk),
+				 MG_NUM_OUTPUTS_IDS, reg,
+				 "CU%d_MERGER", pipe_id);
+
+	if (IS_ERR(c)) {
+		DRM_ERROR("Failed to initialize merger.\n");
+		return PTR_ERR(c);
+	}
+
+	merger = to_merger(c);
+
+	set_range(&merger->hsize_merged, 4,
+		  __get_blk_line_size(d71, reg, 4032));
+	set_range(&merger->vsize_merged, 4, 4096);
+
+	return 0;
+}
+
+static void d71_improc_update(struct komeda_component *c,
+			      struct komeda_component_state *state)
+{
+	struct drm_crtc_state *crtc_st = state->crtc->state;
+	struct komeda_improc_state *st = to_improc_st(state);
+	struct d71_pipeline *pipe = to_d71_pipeline(c->pipeline);
+	u32 __iomem *reg = c->reg;
+	u32 index, mask = 0, ctrl = 0;
+
+	for_each_changed_input(state, index)
+		malidp_write32(reg, BLK_INPUT_ID0 + index * 4,
+			       to_d71_input_id(state, index));
+
+	malidp_write32(reg, BLK_SIZE, HV_SIZE(st->hsize, st->vsize));
+	malidp_write32(reg, IPS_DEPTH, st->color_depth);
+
+	if (crtc_st->color_mgmt_changed) {
+		mask |= IPS_CTRL_FT | IPS_CTRL_RGB;
+
+		if (crtc_st->gamma_lut) {
+			malidp_write_group(pipe->dou_ft_coeff_addr, FT_COEFF0,
+					   KOMEDA_N_GAMMA_COEFFS,
+					   st->fgamma_coeffs);
+			ctrl |= IPS_CTRL_FT; /* enable gamma */
+		}
+
+		if (crtc_st->ctm) {
+			malidp_write_group(reg, IPS_RGB_RGB_COEFF0,
+					   KOMEDA_N_CTM_COEFFS,
+					   st->ctm_coeffs);
+			ctrl |= IPS_CTRL_RGB; /* enable gamut */
+		}
+	}
+
+	mask |= IPS_CTRL_YUV | IPS_CTRL_CHD422 | IPS_CTRL_CHD420;
+
+	/* config color format */
+	if (st->color_format == DRM_COLOR_FORMAT_YCRCB420)
+		ctrl |= IPS_CTRL_YUV | IPS_CTRL_CHD422 | IPS_CTRL_CHD420;
+	else if (st->color_format == DRM_COLOR_FORMAT_YCRCB422)
+		ctrl |= IPS_CTRL_YUV | IPS_CTRL_CHD422;
+	else if (st->color_format == DRM_COLOR_FORMAT_YCRCB444)
+		ctrl |= IPS_CTRL_YUV;
+
+	malidp_write32_mask(reg, BLK_CONTROL, mask, ctrl);
+}
+
+static void d71_improc_dump(struct komeda_component *c, struct seq_file *sf)
+{
+	u32 v[12], i;
+
+	dump_block_header(sf, c->reg);
+
+	get_values_from_reg(c->reg, 0x80, 2, v);
+	seq_printf(sf, "IPS_INPUT_ID0:\t\t0x%X\n", v[0]);
+	seq_printf(sf, "IPS_INPUT_ID1:\t\t0x%X\n", v[1]);
+
+	get_values_from_reg(c->reg, 0xC0, 1, v);
+	seq_printf(sf, "IPS_INFO:\t\t0x%X\n", v[0]);
+
+	get_values_from_reg(c->reg, 0xD0, 3, v);
+	seq_printf(sf, "IPS_CONTROL:\t\t0x%X\n", v[0]);
+	seq_printf(sf, "IPS_SIZE:\t\t0x%X\n", v[1]);
+	seq_printf(sf, "IPS_DEPTH:\t\t0x%X\n", v[2]);
+
+	get_values_from_reg(c->reg, 0x130, 12, v);
+	for (i = 0; i < 12; i++)
+		seq_printf(sf, "IPS_RGB_RGB_COEFF%u:\t0x%X\n", i, v[i]);
+
+	get_values_from_reg(c->reg, 0x170, 12, v);
+	for (i = 0; i < 12; i++)
+		seq_printf(sf, "IPS_RGB_YUV_COEFF%u:\t0x%X\n", i, v[i]);
+}
+
+static const struct komeda_component_funcs d71_improc_funcs = {
+	.update		= d71_improc_update,
+	.disable	= d71_component_disable,
+	.dump_register	= d71_improc_dump,
+};
+
+static int d71_improc_init(struct d71_dev *d71,
+			   struct block_header *blk, u32 __iomem *reg)
+{
+	struct komeda_component *c;
+	struct komeda_improc *improc;
+	u32 pipe_id, comp_id, value;
+
+	get_resources_id(blk->block_info, &pipe_id, &comp_id);
+
+	c = komeda_component_add(&d71->pipes[pipe_id]->base, sizeof(*improc),
+				 comp_id,
+				 BLOCK_INFO_INPUT_ID(blk->block_info),
+				 &d71_improc_funcs, IPS_NUM_INPUT_IDS,
+				 get_valid_inputs(blk),
+				 IPS_NUM_OUTPUT_IDS, reg, "DOU%d_IPS", pipe_id);
+	if (IS_ERR(c)) {
+		DRM_ERROR("Failed to add improc component\n");
+		return PTR_ERR(c);
+	}
+
+	improc = to_improc(c);
+	improc->supported_color_depths = BIT(8) | BIT(10);
+	improc->supported_color_formats = DRM_COLOR_FORMAT_RGB444 |
+					  DRM_COLOR_FORMAT_YCRCB444 |
+					  DRM_COLOR_FORMAT_YCRCB422;
+	value = malidp_read32(reg, BLK_INFO);
+	if (value & IPS_INFO_CHD420)
+		improc->supported_color_formats |= DRM_COLOR_FORMAT_YCRCB420;
+
+	improc->supports_csc = true;
+	improc->supports_gamma = true;
+
+	return 0;
+}
+
+static void d71_timing_ctrlr_disable(struct komeda_component *c)
+{
+	malidp_write32_mask(c->reg, BLK_CONTROL, BS_CTRL_EN, 0);
+}
+
+static void d71_timing_ctrlr_update(struct komeda_component *c,
+				    struct komeda_component_state *state)
+{
+	struct drm_crtc_state *crtc_st = state->crtc->state;
+	struct drm_display_mode *mode = &crtc_st->adjusted_mode;
+	u32 __iomem *reg = c->reg;
+	u32 hactive, hfront_porch, hback_porch, hsync_len;
+	u32 vactive, vfront_porch, vback_porch, vsync_len;
+	u32 value;
+
+	hactive = mode->crtc_hdisplay;
+	hfront_porch = mode->crtc_hsync_start - mode->crtc_hdisplay;
+	hsync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
+	hback_porch = mode->crtc_htotal - mode->crtc_hsync_end;
+
+	vactive = mode->crtc_vdisplay;
+	vfront_porch = mode->crtc_vsync_start - mode->crtc_vdisplay;
+	vsync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
+	vback_porch = mode->crtc_vtotal - mode->crtc_vsync_end;
+
+	malidp_write32(reg, BS_ACTIVESIZE, HV_SIZE(hactive, vactive));
+	malidp_write32(reg, BS_HINTERVALS, BS_H_INTVALS(hfront_porch,
+							hback_porch));
+	malidp_write32(reg, BS_VINTERVALS, BS_V_INTVALS(vfront_porch,
+							vback_porch));
+
+	value = BS_SYNC_VSW(vsync_len) | BS_SYNC_HSW(hsync_len);
+	value |= mode->flags & DRM_MODE_FLAG_PVSYNC ? BS_SYNC_VSP : 0;
+	value |= mode->flags & DRM_MODE_FLAG_PHSYNC ? BS_SYNC_HSP : 0;
+	malidp_write32(reg, BS_SYNC, value);
+
+	malidp_write32(reg, BS_PROG_LINE, D71_DEFAULT_PREPRETCH_LINE - 1);
+	malidp_write32(reg, BS_PREFETCH_LINE, D71_DEFAULT_PREPRETCH_LINE);
+
+	/* configure bs control register */
+	value = BS_CTRL_EN | BS_CTRL_VM;
+	if (c->pipeline->dual_link) {
+		malidp_write32(reg, BS_DRIFT_TO, hfront_porch + 16);
+		value |= BS_CTRL_DL;
+	}
+
+	malidp_write32(reg, BLK_CONTROL, value);
+}
+
+static void d71_timing_ctrlr_dump(struct komeda_component *c,
+				  struct seq_file *sf)
+{
+	u32 v[8], i;
+
+	dump_block_header(sf, c->reg);
+
+	get_values_from_reg(c->reg, 0xC0, 1, v);
+	seq_printf(sf, "BS_INFO:\t\t0x%X\n", v[0]);
+
+	get_values_from_reg(c->reg, 0xD0, 8, v);
+	seq_printf(sf, "BS_CONTROL:\t\t0x%X\n", v[0]);
+	seq_printf(sf, "BS_PROG_LINE:\t\t0x%X\n", v[1]);
+	seq_printf(sf, "BS_PREFETCH_LINE:\t0x%X\n", v[2]);
+	seq_printf(sf, "BS_BG_COLOR:\t\t0x%X\n", v[3]);
+	seq_printf(sf, "BS_ACTIVESIZE:\t\t0x%X\n", v[4]);
+	seq_printf(sf, "BS_HINTERVALS:\t\t0x%X\n", v[5]);
+	seq_printf(sf, "BS_VINTERVALS:\t\t0x%X\n", v[6]);
+	seq_printf(sf, "BS_SYNC:\t\t0x%X\n", v[7]);
+
+	get_values_from_reg(c->reg, 0x100, 3, v);
+	seq_printf(sf, "BS_DRIFT_TO:\t\t0x%X\n", v[0]);
+	seq_printf(sf, "BS_FRAME_TO:\t\t0x%X\n", v[1]);
+	seq_printf(sf, "BS_TE_TO:\t\t0x%X\n", v[2]);
+
+	get_values_from_reg(c->reg, 0x110, 3, v);
+	for (i = 0; i < 3; i++)
+		seq_printf(sf, "BS_T%u_INTERVAL:\t\t0x%X\n", i, v[i]);
+
+	get_values_from_reg(c->reg, 0x120, 5, v);
+	for (i = 0; i < 2; i++) {
+		seq_printf(sf, "BS_CRC%u_LOW:\t\t0x%X\n", i, v[i << 1]);
+		seq_printf(sf, "BS_CRC%u_HIGH:\t\t0x%X\n", i, v[(i << 1) + 1]);
+	}
+	seq_printf(sf, "BS_USER:\t\t0x%X\n", v[4]);
+}
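
The porch/sync arithmetic in d71_timing_ctrlr_update() is plain subtraction on the crtc_* timings. Evaluated for standard CEA 1920x1080p60 numbers:

#include <assert.h>

int main(void)
{
	int hdisplay = 1920, hsync_start = 2008, hsync_end = 2052,
	    htotal = 2200;

	int hfront_porch = hsync_start - hdisplay;   /* 88  */
	int hsync_len    = hsync_end - hsync_start;  /* 44  */
	int hback_porch  = htotal - hsync_end;       /* 148 */

	assert(hfront_porch == 88 && hsync_len == 44 && hback_porch == 148);
	/* the vertical side is computed the same way from crtc_v* fields */
	return 0;
}
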
+
+static const struct komeda_component_funcs d71_timing_ctrlr_funcs = {
+	.update		= d71_timing_ctrlr_update,
+	.disable	= d71_timing_ctrlr_disable,
+	.dump_register	= d71_timing_ctrlr_dump,
+};
+
+static int d71_timing_ctrlr_init(struct d71_dev *d71,
+				 struct block_header *blk, u32 __iomem *reg)
+{
+	struct komeda_component *c;
+	struct komeda_timing_ctrlr *ctrlr;
+	u32 pipe_id, comp_id;
+
+	get_resources_id(blk->block_info, &pipe_id, &comp_id);
+
+	c = komeda_component_add(&d71->pipes[pipe_id]->base, sizeof(*ctrlr),
+				 KOMEDA_COMPONENT_TIMING_CTRLR,
+				 BLOCK_INFO_INPUT_ID(blk->block_info),
+				 &d71_timing_ctrlr_funcs,
+				 1, BIT(KOMEDA_COMPONENT_IPS0 + pipe_id),
+				 BS_NUM_OUTPUT_IDS, reg, "DOU%d_BS", pipe_id);
+	if (IS_ERR(c)) {
+		DRM_ERROR("Failed to add display_ctrl component\n");
+		return PTR_ERR(c);
+	}
+
+	ctrlr = to_ctrlr(c);
+
+	ctrlr->supports_dual_link = d71->supports_dual_link;
+
+	return 0;
+}
+
+int d71_probe_block(struct d71_dev *d71,
+		    struct block_header *blk, u32 __iomem *reg)
+{
+	struct d71_pipeline *pipe;
+	int blk_id = BLOCK_INFO_BLK_ID(blk->block_info);
+
+	int err = 0;
+
+	switch (BLOCK_INFO_BLK_TYPE(blk->block_info)) {
+	case D71_BLK_TYPE_GCU:
+		break;
+
+	case D71_BLK_TYPE_LPU:
+		pipe = d71->pipes[blk_id];
+		pipe->lpu_addr = reg;
+		break;
+
+	case D71_BLK_TYPE_LPU_LAYER:
+		err = d71_layer_init(d71, blk, reg);
+		break;
+
+	case D71_BLK_TYPE_LPU_WB_LAYER:
+		err = d71_wb_layer_init(d71, blk, reg);
+		break;
+
+	case D71_BLK_TYPE_CU:
+		pipe = d71->pipes[blk_id];
+		pipe->cu_addr = reg;
+		err = d71_compiz_init(d71, blk, reg);
+		break;
+
+	case D71_BLK_TYPE_CU_SCALER:
+		err = d71_scaler_init(d71, blk, reg);
+		break;
+
+	case D71_BLK_TYPE_CU_SPLITTER:
+		err = d71_splitter_init(d71, blk, reg);
+		break;
+
+	case D71_BLK_TYPE_CU_MERGER:
+		err = d71_merger_init(d71, blk, reg);
+		break;
+
+	case D71_BLK_TYPE_DOU:
+		pipe = d71->pipes[blk_id];
+		pipe->dou_addr = reg;
+		break;
+
+	case D71_BLK_TYPE_DOU_IPS:
+		err = d71_improc_init(d71, blk, reg);
+		break;
+
+	case D71_BLK_TYPE_DOU_FT_COEFF:
+		pipe = d71->pipes[blk_id];
+		pipe->dou_ft_coeff_addr = reg;
+		break;
+
+	case D71_BLK_TYPE_DOU_BS:
+		err = d71_timing_ctrlr_init(d71, blk, reg);
+		break;
+
+	case D71_BLK_TYPE_GLB_LT_COEFF:
+		break;
+
+	case D71_BLK_TYPE_GLB_SCL_COEFF:
+		d71->glb_scl_coeff_addr[blk_id] = reg;
+		break;
+
+	default:
+		DRM_ERROR("Unknown block (block_info: 0x%x) is found\n",
+			  blk->block_info);
+		err = -EINVAL;
+		break;
+	}
+
+	return err;
+}
+
+static void d71_gcu_dump(struct d71_dev *d71, struct seq_file *sf)
+{
+	u32 v[5];
+
+	seq_puts(sf, "\n------ GCU ------\n");
+
+	get_values_from_reg(d71->gcu_addr, 0, 3, v);
+	seq_printf(sf, "GLB_ARCH_ID:\t\t0x%X\n", v[0]);
+	seq_printf(sf, "GLB_CORE_ID:\t\t0x%X\n", v[1]);
+	seq_printf(sf, "GLB_CORE_INFO:\t\t0x%X\n", v[2]);
+
+	get_values_from_reg(d71->gcu_addr, 0x10, 1, v);
+	seq_printf(sf, "GLB_IRQ_STATUS:\t\t0x%X\n", v[0]);
+
+	get_values_from_reg(d71->gcu_addr, 0xA0, 5, v);
+	seq_printf(sf, "GCU_IRQ_RAW_STATUS:\t0x%X\n", v[0]);
+	seq_printf(sf, "GCU_IRQ_CLEAR:\t\t0x%X\n", v[1]);
+	seq_printf(sf, "GCU_IRQ_MASK:\t\t0x%X\n", v[2]);
+	seq_printf(sf, "GCU_IRQ_STATUS:\t\t0x%X\n", v[3]);
+	seq_printf(sf, "GCU_STATUS:\t\t0x%X\n", v[4]);
+
+	get_values_from_reg(d71->gcu_addr, 0xD0, 3, v);
+	seq_printf(sf, "GCU_CONTROL:\t\t0x%X\n", v[0]);
+	seq_printf(sf, "GCU_CONFIG_VALID0:\t0x%X\n", v[1]);
+	seq_printf(sf, "GCU_CONFIG_VALID1:\t0x%X\n", v[2]);
+}
+
+static void d71_lpu_dump(struct d71_pipeline *pipe, struct seq_file *sf)
+{
+	u32 v[6];
+
+	seq_printf(sf, "\n------ LPU%d ------\n", pipe->base.id);
+
+	dump_block_header(sf, pipe->lpu_addr);
+
+	get_values_from_reg(pipe->lpu_addr, 0xA0, 6, v);
+	seq_printf(sf, "LPU_IRQ_RAW_STATUS:\t0x%X\n", v[0]);
+	seq_printf(sf, "LPU_IRQ_CLEAR:\t\t0x%X\n", v[1]);
+	seq_printf(sf, "LPU_IRQ_MASK:\t\t0x%X\n", v[2]);
+	seq_printf(sf, "LPU_IRQ_STATUS:\t\t0x%X\n", v[3]);
+	seq_printf(sf, "LPU_STATUS:\t\t0x%X\n", v[4]);
+	seq_printf(sf, "LPU_TBU_STATUS:\t\t0x%X\n", v[5]);
+
+	get_values_from_reg(pipe->lpu_addr, 0xC0, 1, v);
+	seq_printf(sf, "LPU_INFO:\t\t0x%X\n", v[0]);
+
+	get_values_from_reg(pipe->lpu_addr, 0xD0, 3, v);
+	seq_printf(sf, "LPU_RAXI_CONTROL:\t0x%X\n", v[0]);
+	seq_printf(sf, "LPU_WAXI_CONTROL:\t0x%X\n", v[1]);
+	seq_printf(sf, "LPU_TBU_CONTROL:\t0x%X\n", v[2]);
+}
+
+static void d71_dou_dump(struct d71_pipeline *pipe, struct seq_file *sf)
+{
+	u32 v[5];
+
+	seq_printf(sf, "\n------ DOU%d ------\n", pipe->base.id);
+
+	dump_block_header(sf, pipe->dou_addr);
+
+	get_values_from_reg(pipe->dou_addr, 0xA0, 5, v);
+	seq_printf(sf, "DOU_IRQ_RAW_STATUS:\t0x%X\n", v[0]);
+	seq_printf(sf, "DOU_IRQ_CLEAR:\t\t0x%X\n", v[1]);
+	seq_printf(sf, "DOU_IRQ_MASK:\t\t0x%X\n", v[2]);
+	seq_printf(sf, "DOU_IRQ_STATUS:\t\t0x%X\n", v[3]);
+	seq_printf(sf, "DOU_STATUS:\t\t0x%X\n", v[4]);
+}
+
+static void d71_pipeline_dump(struct komeda_pipeline *pipe, struct seq_file *sf)
+{
+	struct d71_pipeline *d71_pipe = to_d71_pipeline(pipe);
+
+	d71_lpu_dump(d71_pipe, sf);
+	d71_dou_dump(d71_pipe, sf);
+}
+
+const struct komeda_pipeline_funcs d71_pipeline_funcs = {
+	.downscaling_clk_check	= d71_downscaling_clk_check,
+	.dump_register		= d71_pipeline_dump,
+};
+
+void d71_dump(struct komeda_dev *mdev, struct seq_file *sf)
+{
+	struct d71_dev *d71 = mdev->chip_data;
+
+	d71_gcu_dump(d71, sf);
+}
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
new file mode 100644
index 000000000..00fa56c29
--- /dev/null
+++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
@@ -0,0 +1,643 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ * Author: James.Qian.Wang <james.qian.wang@arm.com>
+ *
+ */
+
+#include <drm/drm_print.h>
+#include "d71_dev.h"
+#include "malidp_io.h"
+
+static u64 get_lpu_event(struct d71_pipeline *d71_pipeline)
+{
+	u32 __iomem *reg = d71_pipeline->lpu_addr;
+	u32 status, raw_status;
+	u64 evts = 0ULL;
+
+	raw_status = malidp_read32(reg, BLK_IRQ_RAW_STATUS);
+	if (raw_status & LPU_IRQ_IBSY)
+		evts |= KOMEDA_EVENT_IBSY;
+	if (raw_status & LPU_IRQ_EOW)
+		evts |= KOMEDA_EVENT_EOW;
+	if (raw_status & LPU_IRQ_OVR)
+		evts |= KOMEDA_EVENT_OVR;
+
+	if (raw_status & (LPU_IRQ_ERR | LPU_IRQ_IBSY | LPU_IRQ_OVR)) {
+		u32 restore = 0, tbu_status;
+		/* Check error of LPU status */
+		status = malidp_read32(reg, BLK_STATUS);
+		if (status & LPU_STATUS_AXIE) {
+			restore |= LPU_STATUS_AXIE;
+			evts |= KOMEDA_ERR_AXIE;
+		}
+		if (status & LPU_STATUS_ACE0) {
+			restore |= LPU_STATUS_ACE0;
+			evts |= KOMEDA_ERR_ACE0;
+		}
+		if (status & LPU_STATUS_ACE1) {
+			restore |= LPU_STATUS_ACE1;
+			evts |= KOMEDA_ERR_ACE1;
+		}
+		if (status & LPU_STATUS_ACE2) {
+			restore |= LPU_STATUS_ACE2;
+			evts |= KOMEDA_ERR_ACE2;
+		}
+		if (status & LPU_STATUS_ACE3) {
+			restore |= LPU_STATUS_ACE3;
+			evts |= KOMEDA_ERR_ACE3;
+		}
+		if (status & LPU_STATUS_FEMPTY) {
+			restore |= LPU_STATUS_FEMPTY;
+			evts |= KOMEDA_EVENT_EMPTY;
+		}
+		if (status & LPU_STATUS_FFULL) {
+			restore |= LPU_STATUS_FFULL;
+			evts |= KOMEDA_EVENT_FULL;
+		}
+
+		if (restore != 0)
+			malidp_write32_mask(reg, BLK_STATUS, restore, 0);
+
+		restore = 0;
+		/* Check errors of TBU status */
+		tbu_status = malidp_read32(reg, LPU_TBU_STATUS);
+		if (tbu_status & LPU_TBU_STATUS_TCF) {
+			restore |= LPU_TBU_STATUS_TCF;
+			evts |= KOMEDA_ERR_TCF;
+		}
+		if (tbu_status & LPU_TBU_STATUS_TTNG) {
+			restore |= LPU_TBU_STATUS_TTNG;
+			evts |= KOMEDA_ERR_TTNG;
+		}
+		if (tbu_status & LPU_TBU_STATUS_TITR) {
+			restore |= LPU_TBU_STATUS_TITR;
+			evts |= KOMEDA_ERR_TITR;
+		}
+		if (tbu_status & LPU_TBU_STATUS_TEMR) {
+			restore |= LPU_TBU_STATUS_TEMR;
+			evts |= KOMEDA_ERR_TEMR;
+		}
+		if (tbu_status & LPU_TBU_STATUS_TTF) {
+			restore |= LPU_TBU_STATUS_TTF;
+			evts |= KOMEDA_ERR_TTF;
+		}
+		if (restore != 0)
+			malidp_write32_mask(reg, LPU_TBU_STATUS, restore, 0);
+	}
+
+	malidp_write32(reg, BLK_IRQ_CLEAR, raw_status);
+	return evts;
+}
+
+static u64 get_cu_event(struct d71_pipeline *d71_pipeline)
+{
+	u32 __iomem *reg = d71_pipeline->cu_addr;
+	u32 status, raw_status;
+	u64 evts = 0ULL;
+
+	raw_status = malidp_read32(reg, BLK_IRQ_RAW_STATUS);
+	if (raw_status & CU_IRQ_OVR)
+		evts |= KOMEDA_EVENT_OVR;
+
+	if (raw_status & (CU_IRQ_ERR | CU_IRQ_OVR)) {
+		status = malidp_read32(reg, BLK_STATUS) & 0x7FFFFFFF;
+		if (status & CU_STATUS_CPE)
+			evts |= KOMEDA_ERR_CPE;
+		if (status & CU_STATUS_ZME)
+			evts |= KOMEDA_ERR_ZME;
+		if (status & CU_STATUS_CFGE)
+			evts |= KOMEDA_ERR_CFGE;
+		if (status)
+			malidp_write32_mask(reg, BLK_STATUS, status, 0);
+	}
+
+	malidp_write32(reg, BLK_IRQ_CLEAR, raw_status);
+
+	return evts;
+}
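
Both helpers above follow the same shape: read the raw IRQ status, translate hardware bits into driver event bits, remember which sticky status bits were consumed, and write those back to clear them. The pattern, distilled into a table-driven sketch (bit values invented, not the hardware's):

#include <stdint.h>

struct bit_map { uint32_t hw_bit; uint64_t event; };

static uint64_t decode(uint32_t status, const struct bit_map *map, int n,
		       uint32_t *restore)
{
	uint64_t evts = 0;
	int i;

	for (i = 0; i < n; i++)
		if (status & map[i].hw_bit) {
			*restore |= map[i].hw_bit; /* clear it afterwards */
			evts |= map[i].event;
		}
	return evts;
}

int main(void)
{
	static const struct bit_map lpu_map[] = {
		{ 1u << 0, 1ull << 8 }, /* e.g. FEMPTY -> EVENT_EMPTY */
		{ 1u << 1, 1ull << 9 }, /* e.g. FFULL  -> EVENT_FULL  */
	};
	uint32_t restore = 0;
	uint64_t evts = decode(0x3, lpu_map, 2, &restore);

	/* the driver would now write `restore` back with a masked write */
	return (evts == 0x300 && restore == 0x3) ? 0 : 1;
}
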
+
+static u64 get_dou_event(struct d71_pipeline *d71_pipeline)
+{
+	u32 __iomem *reg = d71_pipeline->dou_addr;
+	u32 status, raw_status;
+	u64 evts = 0ULL;
+
+	raw_status = malidp_read32(reg, BLK_IRQ_RAW_STATUS);
+	if (raw_status & DOU_IRQ_PL0)
+		evts |= KOMEDA_EVENT_VSYNC;
+	if (raw_status & DOU_IRQ_UND)
+		evts |= KOMEDA_EVENT_URUN;
+
+	if (raw_status & (DOU_IRQ_ERR | DOU_IRQ_UND)) {
+		u32 restore = 0;
+
+		status = malidp_read32(reg, BLK_STATUS);
+		if (status & DOU_STATUS_DRIFTTO) {
+			restore |= DOU_STATUS_DRIFTTO;
+			evts |= KOMEDA_ERR_DRIFTTO;
+		}
+		if (status & DOU_STATUS_FRAMETO) {
+			restore |= DOU_STATUS_FRAMETO;
+			evts |= KOMEDA_ERR_FRAMETO;
+		}
+		if (status & DOU_STATUS_TETO) {
+			restore |= DOU_STATUS_TETO;
+			evts |= KOMEDA_ERR_TETO;
+		}
+		if (status & DOU_STATUS_CSCE) {
+			restore |= DOU_STATUS_CSCE;
+			evts |= KOMEDA_ERR_CSCE;
+		}
+
+		if (restore != 0)
+			malidp_write32_mask(reg, BLK_STATUS, restore, 0);
+	}
+
+	malidp_write32(reg, BLK_IRQ_CLEAR, raw_status);
+	return evts;
+}
+
+static u64 get_pipeline_event(struct d71_pipeline *d71_pipeline, u32 gcu_status)
+{
+	u64 evts = 0ULL;
+
+	if (gcu_status & (GLB_IRQ_STATUS_LPU0 | GLB_IRQ_STATUS_LPU1))
+		evts |= get_lpu_event(d71_pipeline);
+
+	if (gcu_status & (GLB_IRQ_STATUS_CU0 | GLB_IRQ_STATUS_CU1))
+		evts |= get_cu_event(d71_pipeline);
+
+	if (gcu_status & (GLB_IRQ_STATUS_DOU0 | GLB_IRQ_STATUS_DOU1))
+		evts |= get_dou_event(d71_pipeline);
+
+	return evts;
+}
+
+static irqreturn_t
+d71_irq_handler(struct komeda_dev *mdev, struct komeda_events *evts)
+{
+	struct d71_dev *d71 = mdev->chip_data;
+	u32 status, gcu_status, raw_status;
+
+	gcu_status = malidp_read32(d71->gcu_addr, GLB_IRQ_STATUS);
+
+	if (gcu_status & GLB_IRQ_STATUS_GCU) {
+		raw_status = malidp_read32(d71->gcu_addr, BLK_IRQ_RAW_STATUS);
+		if (raw_status & GCU_IRQ_CVAL0)
+			evts->pipes[0] |= KOMEDA_EVENT_FLIP;
+		if (raw_status & GCU_IRQ_CVAL1)
+			evts->pipes[1] |= KOMEDA_EVENT_FLIP;
+		if (raw_status & GCU_IRQ_ERR) {
+			status = malidp_read32(d71->gcu_addr, BLK_STATUS);
+			if (status & GCU_STATUS_MERR) {
+				evts->global |= KOMEDA_ERR_MERR;
+				malidp_write32_mask(d71->gcu_addr, BLK_STATUS,
+						    GCU_STATUS_MERR, 0);
+			}
+		}
+
+		malidp_write32(d71->gcu_addr, BLK_IRQ_CLEAR, raw_status);
+	}
+
+	if (gcu_status & GLB_IRQ_STATUS_PIPE0)
+		evts->pipes[0] |= get_pipeline_event(d71->pipes[0], gcu_status);
+
+	if (gcu_status & GLB_IRQ_STATUS_PIPE1)
+		evts->pipes[1] |= get_pipeline_event(d71->pipes[1], gcu_status);
+
+	return IRQ_RETVAL(gcu_status);
+}
+
+#define ENABLED_GCU_IRQS	(GCU_IRQ_CVAL0 | GCU_IRQ_CVAL1 | \
+				 GCU_IRQ_MODE | GCU_IRQ_ERR)
+#define ENABLED_LPU_IRQS	(LPU_IRQ_IBSY | LPU_IRQ_ERR | LPU_IRQ_EOW)
+#define ENABLED_CU_IRQS		(CU_IRQ_OVR | CU_IRQ_ERR)
+#define ENABLED_DOU_IRQS	(DOU_IRQ_UND | DOU_IRQ_ERR)
+
+static int d71_enable_irq(struct komeda_dev *mdev)
+{
+	struct d71_dev *d71 = mdev->chip_data;
+	struct d71_pipeline *pipe;
+	u32 i;
+
+	malidp_write32_mask(d71->gcu_addr, BLK_IRQ_MASK,
+			    ENABLED_GCU_IRQS, ENABLED_GCU_IRQS);
+	for (i = 0; i < d71->num_pipelines; i++) {
+		pipe = d71->pipes[i];
+		malidp_write32_mask(pipe->cu_addr, BLK_IRQ_MASK,
+				    ENABLED_CU_IRQS, ENABLED_CU_IRQS);
+		malidp_write32_mask(pipe->lpu_addr, BLK_IRQ_MASK,
+				    ENABLED_LPU_IRQS, ENABLED_LPU_IRQS);
+		malidp_write32_mask(pipe->dou_addr, BLK_IRQ_MASK,
+				    ENABLED_DOU_IRQS, ENABLED_DOU_IRQS);
+	}
+	return 0;
+}
+
+static int d71_disable_irq(struct komeda_dev *mdev)
+{
+	struct d71_dev *d71 = mdev->chip_data;
+	struct d71_pipeline *pipe;
+	u32 i;
+
+	malidp_write32_mask(d71->gcu_addr, BLK_IRQ_MASK, ENABLED_GCU_IRQS, 0);
+	for (i = 0; i < d71->num_pipelines; i++) {
+		pipe = d71->pipes[i];
+		malidp_write32_mask(pipe->cu_addr, BLK_IRQ_MASK,
+				    ENABLED_CU_IRQS, 0);
+		malidp_write32_mask(pipe->lpu_addr, BLK_IRQ_MASK,
+				    ENABLED_LPU_IRQS, 0);
+		malidp_write32_mask(pipe->dou_addr, BLK_IRQ_MASK,
+				    ENABLED_DOU_IRQS, 0);
+	}
+	return 0;
+}
DOU_IRQ_PL0 : 0); +} + +static int to_d71_opmode(int core_mode) +{ + switch (core_mode) { + case KOMEDA_MODE_DISP0: + return DO0_ACTIVE_MODE; + case KOMEDA_MODE_DISP1: + return DO1_ACTIVE_MODE; + case KOMEDA_MODE_DUAL_DISP: + return DO01_ACTIVE_MODE; + case KOMEDA_MODE_INACTIVE: + return INACTIVE_MODE; + default: + WARN(1, "Unknown operation mode"); + return INACTIVE_MODE; + } +} + +static int d71_change_opmode(struct komeda_dev *mdev, int new_mode) +{ + struct d71_dev *d71 = mdev->chip_data; + u32 opmode = to_d71_opmode(new_mode); + int ret; + + malidp_write32_mask(d71->gcu_addr, BLK_CONTROL, 0x7, opmode); + + ret = dp_wait_cond(((malidp_read32(d71->gcu_addr, BLK_CONTROL) & 0x7) == opmode), + 100, 1000, 10000); + + return ret; +} + +static void d71_flush(struct komeda_dev *mdev, + int master_pipe, u32 active_pipes) +{ + struct d71_dev *d71 = mdev->chip_data; + u32 reg_offset = (master_pipe == 0) ? + GCU_CONFIG_VALID0 : GCU_CONFIG_VALID1; + + malidp_write32(d71->gcu_addr, reg_offset, GCU_CONFIG_CVAL); +} + +static int d71_reset(struct d71_dev *d71) +{ + u32 __iomem *gcu = d71->gcu_addr; + int ret; + + malidp_write32_mask(gcu, BLK_CONTROL, + GCU_CONTROL_SRST, GCU_CONTROL_SRST); + + ret = dp_wait_cond(!(malidp_read32(gcu, BLK_CONTROL) & GCU_CONTROL_SRST), + 100, 1000, 10000); + + return ret; +} + +void d71_read_block_header(u32 __iomem *reg, struct block_header *blk) +{ + int i; + + blk->block_info = malidp_read32(reg, BLK_BLOCK_INFO); + if (BLOCK_INFO_BLK_TYPE(blk->block_info) == D71_BLK_TYPE_RESERVED) + return; + + blk->pipeline_info = malidp_read32(reg, BLK_PIPELINE_INFO); + + /* get valid input and output ids */ + for (i = 0; i < PIPELINE_INFO_N_VALID_INPUTS(blk->pipeline_info); i++) + blk->input_ids[i] = malidp_read32(reg + i, BLK_VALID_INPUT_ID0); + for (i = 0; i < PIPELINE_INFO_N_OUTPUTS(blk->pipeline_info); i++) + blk->output_ids[i] = malidp_read32(reg + i, BLK_OUTPUT_ID0); +} + +static void d71_cleanup(struct komeda_dev *mdev) +{ + struct d71_dev *d71 = mdev->chip_data; + + if (!d71) + return; + + devm_kfree(mdev->dev, d71); + mdev->chip_data = NULL; +} + +static int d71_enum_resources(struct komeda_dev *mdev) +{ + struct d71_dev *d71; + struct komeda_pipeline *pipe; + struct block_header blk; + u32 __iomem *blk_base; + u32 i, value, offset; + int err; + + d71 = devm_kzalloc(mdev->dev, sizeof(*d71), GFP_KERNEL); + if (!d71) + return -ENOMEM; + + mdev->chip_data = d71; + d71->mdev = mdev; + d71->gcu_addr = mdev->reg_base; + d71->periph_addr = mdev->reg_base + (D71_BLOCK_OFFSET_PERIPH >> 2); + + err = d71_reset(d71); + if (err) { + DRM_ERROR("Fail to reset d71 device.\n"); + goto err_cleanup; + } + + /* probe GCU */ + value = malidp_read32(d71->gcu_addr, GLB_CORE_INFO); + d71->num_blocks = value & 0xFF; + d71->num_pipelines = (value >> 8) & 0x7; + + if (d71->num_pipelines > D71_MAX_PIPELINE) { + DRM_ERROR("d71 supports %d pipelines, but got: %d.\n", + D71_MAX_PIPELINE, d71->num_pipelines); + err = -EINVAL; + goto err_cleanup; + } + + /* Only the legacy HW has the periph block, the newer merges the periph + * into GCU + */ + value = malidp_read32(d71->periph_addr, BLK_BLOCK_INFO); + if (BLOCK_INFO_BLK_TYPE(value) != D71_BLK_TYPE_PERIPH) + d71->periph_addr = NULL; + + if (d71->periph_addr) { + /* probe PERIPHERAL in legacy HW */ + value = malidp_read32(d71->periph_addr, PERIPH_CONFIGURATION_ID); + + d71->max_line_size = value & PERIPH_MAX_LINE_SIZE ? 4096 : 2048; + d71->max_vsize = 4096; + d71->num_rich_layers = value & PERIPH_NUM_RICH_LAYERS ? 
2 : 1; + d71->supports_dual_link = !!(value & PERIPH_SPLIT_EN); + d71->integrates_tbu = !!(value & PERIPH_TBU_EN); + } else { + value = malidp_read32(d71->gcu_addr, GCU_CONFIGURATION_ID0); + d71->max_line_size = GCU_MAX_LINE_SIZE(value); + d71->max_vsize = GCU_MAX_NUM_LINES(value); + + value = malidp_read32(d71->gcu_addr, GCU_CONFIGURATION_ID1); + d71->num_rich_layers = GCU_NUM_RICH_LAYERS(value); + d71->supports_dual_link = GCU_DISPLAY_SPLIT_EN(value); + d71->integrates_tbu = GCU_DISPLAY_TBU_EN(value); + } + + for (i = 0; i < d71->num_pipelines; i++) { + pipe = komeda_pipeline_add(mdev, sizeof(struct d71_pipeline), + &d71_pipeline_funcs); + if (IS_ERR(pipe)) { + err = PTR_ERR(pipe); + goto err_cleanup; + } + + /* The D71 HW doesn't update shadow registers while the display output + * is being turned off, so if all pipeline components are disabled + * together with the display output in a single flush, the register + * updates of that disable operation are never flushed to or made + * valid in the HW, which can cause problems. + * To work around this, introduce a two-phase disable: + * Phase 1: disable the components while the display output is still + * on, so the disable can be flushed to the HW. + * Phase 2: only then turn off the display output. + */ + value = KOMEDA_PIPELINE_IMPROCS | + BIT(KOMEDA_COMPONENT_TIMING_CTRLR); + + pipe->standalone_disabled_comps = value; + + d71->pipes[i] = to_d71_pipeline(pipe); + } + + /* Loop over the register blocks and probe them. + * NOTE: d71->num_blocks includes reserved blocks. + * d71->num_blocks = GCU + valid blocks + reserved blocks + */ + i = 1; /* exclude GCU */ + offset = D71_BLOCK_SIZE; /* skip GCU */ + while (i < d71->num_blocks) { + blk_base = mdev->reg_base + (offset >> 2); + + d71_read_block_header(blk_base, &blk); + if (BLOCK_INFO_BLK_TYPE(blk.block_info) != D71_BLK_TYPE_RESERVED) { + err = d71_probe_block(d71, &blk, blk_base); + if (err) + goto err_cleanup; + } + + i++; + offset += D71_BLOCK_SIZE; + } + + DRM_DEBUG("found %d (out of %d) blocks.\n", + i, d71->num_blocks); + + return 0; + +err_cleanup: + d71_cleanup(mdev); + return err; +} + +#define __HW_ID(__group, __format) \ + ((((__group) & 0x7) << 3) | ((__format) & 0x7)) + +#define RICH KOMEDA_FMT_RICH_LAYER +#define SIMPLE KOMEDA_FMT_SIMPLE_LAYER +#define RICH_SIMPLE (KOMEDA_FMT_RICH_LAYER | KOMEDA_FMT_SIMPLE_LAYER) +#define RICH_WB (KOMEDA_FMT_RICH_LAYER | KOMEDA_FMT_WB_LAYER) +#define RICH_SIMPLE_WB (RICH_SIMPLE | KOMEDA_FMT_WB_LAYER) + +#define Rot_0 DRM_MODE_ROTATE_0 +#define Flip_H_V (DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y | Rot_0) +#define Rot_ALL_H_V (DRM_MODE_ROTATE_MASK | Flip_H_V) + +#define LYT_NM BIT(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16) +#define LYT_WB BIT(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8) +#define LYT_NM_WB (LYT_NM | LYT_WB) + +#define AFB_TH AFBC(_TILED | _SPARSE) +#define AFB_TH_SC_YTR AFBC(_TILED | _SC | _SPARSE | _YTR) +#define AFB_TH_SC_YTR_BS AFBC(_TILED | _SC | _SPARSE | _YTR | _SPLIT) + +static struct komeda_format_caps d71_format_caps_table[] = { + /* HW_ID | fourcc | layer_types | rots | afbc_layouts | afbc_features */ + /* ABGR_2101010 */ + {__HW_ID(0, 0), DRM_FORMAT_ARGB2101010, RICH_SIMPLE_WB, Flip_H_V, 0, 0}, + {__HW_ID(0, 1), DRM_FORMAT_ABGR2101010, RICH_SIMPLE_WB, Flip_H_V, 0, 0}, + {__HW_ID(0, 1), DRM_FORMAT_ABGR2101010, RICH_SIMPLE, Rot_ALL_H_V, LYT_NM_WB, AFB_TH_SC_YTR_BS}, /* afbc */ + {__HW_ID(0, 2), DRM_FORMAT_RGBA1010102, RICH_SIMPLE_WB, Flip_H_V, 0, 0}, + {__HW_ID(0, 3), DRM_FORMAT_BGRA1010102, RICH_SIMPLE_WB, Flip_H_V, 0, 0}, + /* ABGR_8888 */ + {__HW_ID(1, 0), DRM_FORMAT_ARGB8888, 
RICH_SIMPLE_WB, Flip_H_V, 0, 0}, + {__HW_ID(1, 1), DRM_FORMAT_ABGR8888, RICH_SIMPLE_WB, Flip_H_V, 0, 0}, + {__HW_ID(1, 1), DRM_FORMAT_ABGR8888, RICH_SIMPLE, Rot_ALL_H_V, LYT_NM_WB, AFB_TH_SC_YTR_BS}, /* afbc */ + {__HW_ID(1, 2), DRM_FORMAT_RGBA8888, RICH_SIMPLE_WB, Flip_H_V, 0, 0}, + {__HW_ID(1, 3), DRM_FORMAT_BGRA8888, RICH_SIMPLE_WB, Flip_H_V, 0, 0}, + /* XBGB_8888 */ + {__HW_ID(2, 0), DRM_FORMAT_XRGB8888, RICH_SIMPLE_WB, Flip_H_V, 0, 0}, + {__HW_ID(2, 1), DRM_FORMAT_XBGR8888, RICH_SIMPLE_WB, Flip_H_V, 0, 0}, + {__HW_ID(2, 2), DRM_FORMAT_RGBX8888, RICH_SIMPLE_WB, Flip_H_V, 0, 0}, + {__HW_ID(2, 3), DRM_FORMAT_BGRX8888, RICH_SIMPLE_WB, Flip_H_V, 0, 0}, + /* BGR_888 */ /* none-afbc RGB888 doesn't support rotation and flip */ + {__HW_ID(3, 0), DRM_FORMAT_RGB888, RICH_SIMPLE_WB, Rot_0, 0, 0}, + {__HW_ID(3, 1), DRM_FORMAT_BGR888, RICH_SIMPLE_WB, Rot_0, 0, 0}, + {__HW_ID(3, 1), DRM_FORMAT_BGR888, RICH_SIMPLE, Rot_ALL_H_V, LYT_NM_WB, AFB_TH_SC_YTR_BS}, /* afbc */ + /* BGR 16bpp */ + {__HW_ID(4, 0), DRM_FORMAT_RGBA5551, RICH_SIMPLE, Flip_H_V, 0, 0}, + {__HW_ID(4, 1), DRM_FORMAT_ABGR1555, RICH_SIMPLE, Flip_H_V, 0, 0}, + {__HW_ID(4, 1), DRM_FORMAT_ABGR1555, RICH_SIMPLE, Rot_ALL_H_V, LYT_NM_WB, AFB_TH_SC_YTR}, /* afbc */ + {__HW_ID(4, 2), DRM_FORMAT_RGB565, RICH_SIMPLE, Flip_H_V, 0, 0}, + {__HW_ID(4, 3), DRM_FORMAT_BGR565, RICH_SIMPLE, Flip_H_V, 0, 0}, + {__HW_ID(4, 3), DRM_FORMAT_BGR565, RICH_SIMPLE, Rot_ALL_H_V, LYT_NM_WB, AFB_TH_SC_YTR}, /* afbc */ + {__HW_ID(4, 4), DRM_FORMAT_R8, SIMPLE, Rot_0, 0, 0}, + /* YUV 444/422/420 8bit */ + {__HW_ID(5, 1), DRM_FORMAT_YUYV, RICH, Rot_ALL_H_V, LYT_NM, AFB_TH}, /* afbc */ + {__HW_ID(5, 2), DRM_FORMAT_YUYV, RICH, Flip_H_V, 0, 0}, + {__HW_ID(5, 3), DRM_FORMAT_UYVY, RICH, Flip_H_V, 0, 0}, + {__HW_ID(5, 6), DRM_FORMAT_NV12, RICH, Flip_H_V, 0, 0}, + {__HW_ID(5, 6), DRM_FORMAT_YUV420_8BIT, RICH, Rot_ALL_H_V, LYT_NM, AFB_TH}, /* afbc */ + {__HW_ID(5, 7), DRM_FORMAT_YUV420, RICH, Flip_H_V, 0, 0}, + /* YUV 10bit*/ + {__HW_ID(6, 6), DRM_FORMAT_X0L2, RICH, Flip_H_V, 0, 0}, + {__HW_ID(6, 7), DRM_FORMAT_P010, RICH, Flip_H_V, 0, 0}, + {__HW_ID(6, 7), DRM_FORMAT_YUV420_10BIT, RICH, Rot_ALL_H_V, LYT_NM, AFB_TH}, +}; + +static bool d71_format_mod_supported(const struct komeda_format_caps *caps, + u32 layer_type, u64 modifier, u32 rot) +{ + uint64_t layout = modifier & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK; + + if ((layout == AFBC_FORMAT_MOD_BLOCK_SIZE_32x8) && + drm_rotation_90_or_270(rot)) { + DRM_DEBUG_ATOMIC("D71 doesn't support ROT90 for WB-AFBC.\n"); + return false; + } + + return true; +} + +static void d71_init_fmt_tbl(struct komeda_dev *mdev) +{ + struct komeda_format_caps_table *table = &mdev->fmt_tbl; + + table->format_caps = d71_format_caps_table; + table->format_mod_supported = d71_format_mod_supported; + table->n_formats = ARRAY_SIZE(d71_format_caps_table); +} + +static int d71_connect_iommu(struct komeda_dev *mdev) +{ + struct d71_dev *d71 = mdev->chip_data; + u32 __iomem *reg = d71->gcu_addr; + u32 check_bits = (d71->num_pipelines == 2) ? 
+ GCU_STATUS_TCS0 | GCU_STATUS_TCS1 : GCU_STATUS_TCS0; + int i, ret; + + if (!d71->integrates_tbu) + return -1; + + malidp_write32_mask(reg, BLK_CONTROL, 0x7, TBU_CONNECT_MODE); + + ret = dp_wait_cond(has_bits(check_bits, malidp_read32(reg, BLK_STATUS)), + 100, 1000, 1000); + if (ret < 0) { + DRM_ERROR("timed out connecting to TCU!\n"); + malidp_write32_mask(reg, BLK_CONTROL, 0x7, INACTIVE_MODE); + return ret; + } + + for (i = 0; i < d71->num_pipelines; i++) + malidp_write32_mask(d71->pipes[i]->lpu_addr, LPU_TBU_CONTROL, + LPU_TBU_CTRL_TLBPEN, LPU_TBU_CTRL_TLBPEN); + return 0; +} + +static int d71_disconnect_iommu(struct komeda_dev *mdev) +{ + struct d71_dev *d71 = mdev->chip_data; + u32 __iomem *reg = d71->gcu_addr; + u32 check_bits = (d71->num_pipelines == 2) ? + GCU_STATUS_TCS0 | GCU_STATUS_TCS1 : GCU_STATUS_TCS0; + int ret; + + malidp_write32_mask(reg, BLK_CONTROL, 0x7, TBU_DISCONNECT_MODE); + + ret = dp_wait_cond(((malidp_read32(reg, BLK_STATUS) & check_bits) == 0), + 100, 1000, 1000); + if (ret < 0) { + DRM_ERROR("timed out disconnecting from TCU!\n"); + malidp_write32_mask(reg, BLK_CONTROL, 0x7, INACTIVE_MODE); + } + + return ret; +} + +static const struct komeda_dev_funcs d71_chip_funcs = { + .init_format_table = d71_init_fmt_tbl, + .enum_resources = d71_enum_resources, + .cleanup = d71_cleanup, + .irq_handler = d71_irq_handler, + .enable_irq = d71_enable_irq, + .disable_irq = d71_disable_irq, + .on_off_vblank = d71_on_off_vblank, + .change_opmode = d71_change_opmode, + .flush = d71_flush, + .connect_iommu = d71_connect_iommu, + .disconnect_iommu = d71_disconnect_iommu, + .dump_register = d71_dump, +}; + +const struct komeda_dev_funcs * +d71_identify(u32 __iomem *reg_base, struct komeda_chip_info *chip) +{ + const struct komeda_dev_funcs *funcs; + u32 product_id; + + chip->core_id = malidp_read32(reg_base, GLB_CORE_ID); + + product_id = MALIDP_CORE_ID_PRODUCT_ID(chip->core_id); + + switch (product_id) { + case MALIDP_D71_PRODUCT_ID: + case MALIDP_D32_PRODUCT_ID: + funcs = &d71_chip_funcs; + break; + default: + DRM_ERROR("Unsupported product: 0x%x\n", product_id); + return NULL; + } + + chip->arch_id = malidp_read32(reg_base, GLB_ARCH_ID); + chip->core_info = malidp_read32(reg_base, GLB_CORE_INFO); + chip->bus_width = D71_BUS_WIDTH_16_BYTES; + + return funcs; +} diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.h b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.h new file mode 100644 index 000000000..c7357c2b9 --- /dev/null +++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * (C) COPYRIGHT 2018 ARM Limited. All rights reserved. 
+ * Author: James.Qian.Wang <james.qian.wang@arm.com> + * + */ +#ifndef _D71_DEV_H_ +#define _D71_DEV_H_ + +#include "komeda_dev.h" +#include "komeda_pipeline.h" +#include "d71_regs.h" + +struct d71_pipeline { + struct komeda_pipeline base; + + /* d71 private pipeline blocks */ + u32 __iomem *lpu_addr; + u32 __iomem *cu_addr; + u32 __iomem *dou_addr; + u32 __iomem *dou_ft_coeff_addr; /* forward transform coeffs table */ +}; + +struct d71_dev { + struct komeda_dev *mdev; + + int num_blocks; + int num_pipelines; + int num_rich_layers; + u32 max_line_size; + u32 max_vsize; + u32 supports_dual_link : 1; + u32 integrates_tbu : 1; + + /* global register blocks */ + u32 __iomem *gcu_addr; + /* scaling coeffs table */ + u32 __iomem *glb_scl_coeff_addr[D71_MAX_GLB_SCL_COEFF]; + u32 __iomem *periph_addr; + + struct d71_pipeline *pipes[D71_MAX_PIPELINE]; +}; + +#define to_d71_pipeline(x) container_of(x, struct d71_pipeline, base) + +extern const struct komeda_pipeline_funcs d71_pipeline_funcs; + +int d71_probe_block(struct d71_dev *d71, + struct block_header *blk, u32 __iomem *reg); +void d71_read_block_header(u32 __iomem *reg, struct block_header *blk); + +void d71_dump(struct komeda_dev *mdev, struct seq_file *sf); + +#endif /* !_D71_DEV_H_ */ diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h b/drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h new file mode 100644 index 000000000..e80172a0b --- /dev/null +++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h @@ -0,0 +1,541 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * (C) COPYRIGHT 2018 ARM Limited. All rights reserved. + * Author: James.Qian.Wang <james.qian.wang@arm.com> + * + */ +#ifndef _D71_REG_H_ +#define _D71_REG_H_ + +/* Common block registers offset */ +#define BLK_BLOCK_INFO 0x000 +#define BLK_PIPELINE_INFO 0x004 +#define BLK_MAX_LINE_SIZE 0x008 +#define BLK_VALID_INPUT_ID0 0x020 +#define BLK_OUTPUT_ID0 0x060 +#define BLK_INPUT_ID0 0x080 +#define BLK_IRQ_RAW_STATUS 0x0A0 +#define BLK_IRQ_CLEAR 0x0A4 +#define BLK_IRQ_MASK 0x0A8 +#define BLK_IRQ_STATUS 0x0AC +#define BLK_STATUS 0x0B0 +#define BLK_INFO 0x0C0 +#define BLK_CONTROL 0x0D0 +#define BLK_SIZE 0x0D4 +#define BLK_IN_SIZE 0x0E0 + +#define BLK_P0_PTR_LOW 0x100 +#define BLK_P0_PTR_HIGH 0x104 +#define BLK_P0_STRIDE 0x108 +#define BLK_P1_PTR_LOW 0x110 +#define BLK_P1_PTR_HIGH 0x114 +#define BLK_P1_STRIDE 0x118 +#define BLK_P2_PTR_LOW 0x120 +#define BLK_P2_PTR_HIGH 0x124 + +#define BLOCK_INFO_N_SUBBLKS(x) ((x) & 0x000F) +#define BLOCK_INFO_BLK_ID(x) (((x) & 0x00F0) >> 4) +#define BLOCK_INFO_BLK_TYPE(x) (((x) & 0xFF00) >> 8) +#define BLOCK_INFO_INPUT_ID(x) ((x) & 0xFFF0) +#define BLOCK_INFO_TYPE_ID(x) (((x) & 0x0FF0) >> 4) + +#define PIPELINE_INFO_N_OUTPUTS(x) ((x) & 0x000F) +#define PIPELINE_INFO_N_VALID_INPUTS(x) (((x) & 0x0F00) >> 8) + +/* Common block control register bits */ +#define BLK_CTRL_EN BIT(0) +/* Common size macro */ +#define HV_SIZE(h, v) (((h) & 0x1FFF) + (((v) & 0x1FFF) << 16)) +#define HV_OFFSET(h, v) (((h) & 0xFFF) + (((v) & 0xFFF) << 16)) +#define HV_CROP(h, v) (((h) & 0xFFF) + (((v) & 0xFFF) << 16)) + +/* AD_CONTROL register */ +#define AD_CONTROL 0x160 + +/* AD_CONTROL register bits */ +#define AD_AEN BIT(0) +#define AD_YT BIT(1) +#define AD_BS BIT(2) +#define AD_WB BIT(3) +#define AD_TH BIT(4) + +/* Global Control Unit */ +#define GLB_ARCH_ID 0x000 +#define GLB_CORE_ID 0x004 +#define GLB_CORE_INFO 0x008 +#define GLB_IRQ_STATUS 0x010 + +#define GCU_CONFIG_VALID0 0x0D4 +#define GCU_CONFIG_VALID1 0x0D8 + +/* GCU_CONTROL_BITS */ +#define 
GCU_CONTROL_MODE(x) ((x) & 0x7) +#define GCU_CONTROL_SRST BIT(16) + +/* GCU_CONFIGURATION registers */ +#define GCU_CONFIGURATION_ID0 0x100 +#define GCU_CONFIGURATION_ID1 0x104 + +/* GCU configuration */ +#define GCU_MAX_LINE_SIZE(x) ((x) & 0xFFFF) +#define GCU_MAX_NUM_LINES(x) ((x) >> 16) +#define GCU_NUM_RICH_LAYERS(x) ((x) & 0x7) +#define GCU_NUM_PIPELINES(x) (((x) >> 3) & 0x7) +#define GCU_NUM_SCALERS(x) (((x) >> 6) & 0x7) +#define GCU_DISPLAY_SPLIT_EN(x) (((x) >> 16) & 0x1) +#define GCU_DISPLAY_TBU_EN(x) (((x) >> 17) & 0x1) + +/* GCU opmode */ +#define INACTIVE_MODE 0 +#define TBU_CONNECT_MODE 1 +#define TBU_DISCONNECT_MODE 2 +#define DO0_ACTIVE_MODE 3 +#define DO1_ACTIVE_MODE 4 +#define DO01_ACTIVE_MODE 5 + +/* GLB_IRQ_STATUS bits */ +#define GLB_IRQ_STATUS_GCU BIT(0) +#define GLB_IRQ_STATUS_LPU0 BIT(8) +#define GLB_IRQ_STATUS_LPU1 BIT(9) +#define GLB_IRQ_STATUS_ATU0 BIT(10) +#define GLB_IRQ_STATUS_ATU1 BIT(11) +#define GLB_IRQ_STATUS_ATU2 BIT(12) +#define GLB_IRQ_STATUS_ATU3 BIT(13) +#define GLB_IRQ_STATUS_CU0 BIT(16) +#define GLB_IRQ_STATUS_CU1 BIT(17) +#define GLB_IRQ_STATUS_DOU0 BIT(24) +#define GLB_IRQ_STATUS_DOU1 BIT(25) + +#define GLB_IRQ_STATUS_PIPE0 (GLB_IRQ_STATUS_LPU0 |\ + GLB_IRQ_STATUS_ATU0 |\ + GLB_IRQ_STATUS_ATU1 |\ + GLB_IRQ_STATUS_CU0 |\ + GLB_IRQ_STATUS_DOU0) + +#define GLB_IRQ_STATUS_PIPE1 (GLB_IRQ_STATUS_LPU1 |\ + GLB_IRQ_STATUS_ATU2 |\ + GLB_IRQ_STATUS_ATU3 |\ + GLB_IRQ_STATUS_CU1 |\ + GLB_IRQ_STATUS_DOU1) + +#define GLB_IRQ_STATUS_ATU (GLB_IRQ_STATUS_ATU0 |\ + GLB_IRQ_STATUS_ATU1 |\ + GLB_IRQ_STATUS_ATU2 |\ + GLB_IRQ_STATUS_ATU3) + +/* GCU_IRQ_BITS */ +#define GCU_IRQ_CVAL0 BIT(0) +#define GCU_IRQ_CVAL1 BIT(1) +#define GCU_IRQ_MODE BIT(4) +#define GCU_IRQ_ERR BIT(11) + +/* GCU_STATUS_BITS */ +#define GCU_STATUS_MODE(x) ((x) & 0x7) +#define GCU_STATUS_MERR BIT(4) +#define GCU_STATUS_TCS0 BIT(8) +#define GCU_STATUS_TCS1 BIT(9) +#define GCU_STATUS_ACTIVE BIT(31) + +/* GCU_CONFIG_VALIDx BITS */ +#define GCU_CONFIG_CVAL BIT(0) + +/* PERIPHERAL registers */ +#define PERIPH_MAX_LINE_SIZE BIT(0) +#define PERIPH_NUM_RICH_LAYERS BIT(4) +#define PERIPH_SPLIT_EN BIT(8) +#define PERIPH_TBU_EN BIT(12) +#define PERIPH_AFBC_DMA_EN BIT(16) +#define PERIPH_CONFIGURATION_ID 0x1D4 + +/* LPU register */ +#define LPU_TBU_STATUS 0x0B4 +#define LPU_RAXI_CONTROL 0x0D0 +#define LPU_WAXI_CONTROL 0x0D4 +#define LPU_TBU_CONTROL 0x0D8 + +/* LPU_xAXI_CONTROL_BITS */ +#define TO_RAXI_AOUTSTDCAPB(x) (x) +#define TO_RAXI_BOUTSTDCAPB(x) ((x) << 8) +#define TO_RAXI_BEN(x) ((x) << 15) +#define TO_xAXI_BURSTLEN(x) ((x) << 16) +#define TO_xAXI_AxQOS(x) ((x) << 24) +#define TO_xAXI_ORD(x) ((x) << 31) +#define TO_WAXI_OUTSTDCAPB(x) (x) + +#define RAXI_AOUTSTDCAPB_MASK 0x7F +#define RAXI_BOUTSTDCAPB_MASK 0x7F00 +#define RAXI_BEN_MASK BIT(15) +#define xAXI_BURSTLEN_MASK 0x3F0000 +#define xAXI_AxQOS_MASK 0xF000000 +#define xAXI_ORD_MASK BIT(31) +#define WAXI_OUTSTDCAPB_MASK 0x3F + +/* LPU_TBU_CONTROL BITS */ +#define TO_TBU_DOUTSTDCAPB(x) (x) +#define TBU_DOUTSTDCAPB_MASK 0x3F + +/* LPU_IRQ_BITS */ +#define LPU_IRQ_OVR BIT(9) +#define LPU_IRQ_IBSY BIT(10) +#define LPU_IRQ_ERR BIT(11) +#define LPU_IRQ_EOW BIT(12) +#define LPU_IRQ_PL0 BIT(13) + +/* LPU_STATUS_BITS */ +#define LPU_STATUS_AXIED(x) ((x) & 0xF) +#define LPU_STATUS_AXIE BIT(4) +#define LPU_STATUS_AXIRP BIT(5) +#define LPU_STATUS_AXIWP BIT(6) +#define LPU_STATUS_FEMPTY BIT(11) +#define LPU_STATUS_FFULL BIT(14) +#define LPU_STATUS_ACE0 BIT(16) +#define LPU_STATUS_ACE1 BIT(17) +#define LPU_STATUS_ACE2 BIT(18) +#define LPU_STATUS_ACE3 BIT(19) 
+#define LPU_STATUS_ACTIVE BIT(31) + +#define AXIEID_MASK 0xF +#define AXIE_MASK LPU_STATUS_AXIE +#define AXIRP_MASK LPU_STATUS_AXIRP +#define AXIWP_MASK LPU_STATUS_AXIWP + +#define FROM_AXIEID(reg) ((reg) & AXIEID_MASK) +#define TO_AXIE(x) ((x) << 4) +#define FROM_AXIRP(reg) (((reg) & AXIRP_MASK) >> 5) +#define FROM_AXIWP(reg) (((reg) & AXIWP_MASK) >> 6) + +/* LPU_TBU_STATUS_BITS */ +#define LPU_TBU_STATUS_TCF BIT(1) +#define LPU_TBU_STATUS_TTNG BIT(2) +#define LPU_TBU_STATUS_TITR BIT(8) +#define LPU_TBU_STATUS_TEMR BIT(16) +#define LPU_TBU_STATUS_TTF BIT(31) + +/* LPU_TBU_CONTROL BITS */ +#define LPU_TBU_CTRL_TLBPEN BIT(16) + +/* CROSSBAR CONTROL BITS */ +#define CBU_INPUT_CTRL_EN BIT(0) +#define CBU_NUM_INPUT_IDS 5 +#define CBU_NUM_OUTPUT_IDS 5 + +/* CU register */ +#define CU_BG_COLOR 0x0DC +#define CU_INPUT0_SIZE 0x0E0 +#define CU_INPUT0_OFFSET 0x0E4 +#define CU_INPUT0_CONTROL 0x0E8 +#define CU_INPUT1_SIZE 0x0F0 +#define CU_INPUT1_OFFSET 0x0F4 +#define CU_INPUT1_CONTROL 0x0F8 +#define CU_INPUT2_SIZE 0x100 +#define CU_INPUT2_OFFSET 0x104 +#define CU_INPUT2_CONTROL 0x108 +#define CU_INPUT3_SIZE 0x110 +#define CU_INPUT3_OFFSET 0x114 +#define CU_INPUT3_CONTROL 0x118 +#define CU_INPUT4_SIZE 0x120 +#define CU_INPUT4_OFFSET 0x124 +#define CU_INPUT4_CONTROL 0x128 + +#define CU_PER_INPUT_REGS 4 + +#define CU_NUM_INPUT_IDS 5 +#define CU_NUM_OUTPUT_IDS 1 + +/* CU control register bits */ +#define CU_CTRL_COPROC BIT(0) + +/* CU_IRQ_BITS */ +#define CU_IRQ_OVR BIT(9) +#define CU_IRQ_ERR BIT(11) + +/* CU_STATUS_BITS */ +#define CU_STATUS_CPE BIT(0) +#define CU_STATUS_ZME BIT(1) +#define CU_STATUS_CFGE BIT(2) +#define CU_STATUS_ACTIVE BIT(31) + +/* CU input control register bits */ +#define CU_INPUT_CTRL_EN BIT(0) +#define CU_INPUT_CTRL_PAD BIT(1) +#define CU_INPUT_CTRL_PMUL BIT(2) +#define CU_INPUT_CTRL_ALPHA(x) (((x) & 0xFF) << 8) + +/* DOU register */ + +/* DOU_IRQ_BITS */ +#define DOU_IRQ_UND BIT(8) +#define DOU_IRQ_ERR BIT(11) +#define DOU_IRQ_PL0 BIT(13) +#define DOU_IRQ_PL1 BIT(14) + +/* DOU_STATUS_BITS */ +#define DOU_STATUS_DRIFTTO BIT(0) +#define DOU_STATUS_FRAMETO BIT(1) +#define DOU_STATUS_TETO BIT(2) +#define DOU_STATUS_CSCE BIT(8) +#define DOU_STATUS_ACTIVE BIT(31) + +/* Layer registers */ +#define LAYER_INFO 0x0C0 +#define LAYER_R_CONTROL 0x0D4 +#define LAYER_FMT 0x0D8 +#define LAYER_LT_COEFFTAB 0x0DC +#define LAYER_PALPHA 0x0E4 + +#define LAYER_YUV_RGB_COEFF0 0x130 + +#define LAYER_AD_H_CROP 0x164 +#define LAYER_AD_V_CROP 0x168 + +#define LAYER_RGB_RGB_COEFF0 0x170 + +/* L_CONTROL_BITS */ +#define L_EN BIT(0) +#define L_IT BIT(4) +#define L_R2R BIT(5) +#define L_FT BIT(6) +#define L_ROT(x) (((x) & 3) << 8) +#define L_HFLIP BIT(10) +#define L_VFLIP BIT(11) +#define L_TBU_EN BIT(16) +#define L_A_RCACHE(x) (((x) & 0xF) << 28) +#define L_ROT_R0 0 +#define L_ROT_R90 1 +#define L_ROT_R180 2 +#define L_ROT_R270 3 + +/* LAYER_R_CONTROL BITS */ +#define LR_CHI422_BILINEAR 0 +#define LR_CHI422_REPLICATION 1 +#define LR_CHI420_JPEG (0 << 2) +#define LR_CHI420_MPEG (1 << 2) + +#define L_ITSEL(x) ((x) & 0xFFF) +#define L_FTSEL(x) (((x) & 0xFFF) << 16) + +#define LAYER_PER_PLANE_REGS 4 + +/* Layer_WR registers */ +#define LAYER_WR_PROG_LINE 0x0D4 +#define LAYER_WR_FORMAT 0x0D8 + +/* Layer_WR control bits */ +#define LW_OFM BIT(4) +#define LW_LALPHA(x) (((x) & 0xFF) << 8) +#define LW_A_WCACHE(x) (((x) & 0xF) << 28) +#define LW_TBU_EN BIT(16) + +#define AxCACHE_MASK 0xF0000000 + +/* Layer AXI R/W cache setting */ +#define AxCACHE_B BIT(0) /* Bufferable */ +#define AxCACHE_M BIT(1) /* Modifiable */ 
+#define AxCACHE_RA BIT(2) /* Read-Allocate */ +#define AxCACHE_WA BIT(3) /* Write-Allocate */ + +/* Layer info bits */ +#define L_INFO_RF BIT(0) +#define L_INFO_CM BIT(1) +#define L_INFO_ABUF_SIZE(x) (((x) >> 4) & 0x7) +#define L_INFO_YUV_MAX_LINESZ(x) (((x) >> 16) & 0xFFFF) + +/* Scaler registers */ +#define SC_COEFFTAB 0x0DC +#define SC_OUT_SIZE 0x0E4 +#define SC_H_CROP 0x0E8 +#define SC_V_CROP 0x0EC +#define SC_H_INIT_PH 0x0F0 +#define SC_H_DELTA_PH 0x0F4 +#define SC_V_INIT_PH 0x0F8 +#define SC_V_DELTA_PH 0x0FC +#define SC_ENH_LIMITS 0x130 +#define SC_ENH_COEFF0 0x134 + +#define SC_MAX_ENH_COEFF 9 + +/* SC_CTRL_BITS */ +#define SC_CTRL_SCL BIT(0) +#define SC_CTRL_LS BIT(1) +#define SC_CTRL_AP BIT(4) +#define SC_CTRL_IENH BIT(8) +#define SC_CTRL_RGBSM BIT(16) +#define SC_CTRL_ASM BIT(17) + +#define SC_VTSEL(vtal) ((vtal) << 16) + +#define SC_NUM_INPUTS_IDS 1 +#define SC_NUM_OUTPUTS_IDS 1 + +#define MG_NUM_INPUTS_IDS 2 +#define MG_NUM_OUTPUTS_IDS 1 + +/* Merger registers */ +#define MG_INPUT_ID0 BLK_INPUT_ID0 +#define MG_INPUT_ID1 (MG_INPUT_ID0 + 4) +#define MG_SIZE BLK_SIZE + +/* Splitter registers */ +#define SP_OVERLAP_SIZE 0xD8 + +/* Backend registers */ +#define BS_INFO 0x0C0 +#define BS_PROG_LINE 0x0D4 +#define BS_PREFETCH_LINE 0x0D8 +#define BS_BG_COLOR 0x0DC +#define BS_ACTIVESIZE 0x0E0 +#define BS_HINTERVALS 0x0E4 +#define BS_VINTERVALS 0x0E8 +#define BS_SYNC 0x0EC +#define BS_DRIFT_TO 0x100 +#define BS_FRAME_TO 0x104 +#define BS_TE_TO 0x108 +#define BS_T0_INTERVAL 0x110 +#define BS_T1_INTERVAL 0x114 +#define BS_T2_INTERVAL 0x118 +#define BS_CRC0_LOW 0x120 +#define BS_CRC0_HIGH 0x124 +#define BS_CRC1_LOW 0x128 +#define BS_CRC1_HIGH 0x12C +#define BS_USER 0x130 + +/* BS control register bits */ +#define BS_CTRL_EN BIT(0) +#define BS_CTRL_VM BIT(1) +#define BS_CTRL_BM BIT(2) +#define BS_CTRL_HMASK BIT(4) +#define BS_CTRL_VD BIT(5) +#define BS_CTRL_TE BIT(8) +#define BS_CTRL_TS BIT(9) +#define BS_CTRL_TM BIT(12) +#define BS_CTRL_DL BIT(16) +#define BS_CTRL_SBS BIT(17) +#define BS_CTRL_CRC BIT(18) +#define BS_CTRL_PM BIT(20) + +/* BS active size/intervals */ +#define BS_H_INTVALS(hfp, hbp) (((hfp) & 0xFFF) + (((hbp) & 0x3FF) << 16)) +#define BS_V_INTVALS(vfp, vbp) (((vfp) & 0x3FFF) + (((vbp) & 0xFF) << 16)) + +/* BS_SYNC bits */ +#define BS_SYNC_HSW(x) ((x) & 0x3FF) +#define BS_SYNC_HSP BIT(12) +#define BS_SYNC_VSW(x) (((x) & 0xFF) << 16) +#define BS_SYNC_VSP BIT(28) + +#define BS_NUM_INPUT_IDS 0 +#define BS_NUM_OUTPUT_IDS 0 + +/* Image process registers */ +#define IPS_DEPTH 0x0D8 +#define IPS_RGB_RGB_COEFF0 0x130 +#define IPS_RGB_YUV_COEFF0 0x170 + +#define IPS_DEPTH_MARK 0xF + +/* IPS control register bits */ +#define IPS_CTRL_RGB BIT(0) +#define IPS_CTRL_FT BIT(4) +#define IPS_CTRL_YUV BIT(8) +#define IPS_CTRL_CHD422 BIT(9) +#define IPS_CTRL_CHD420 BIT(10) +#define IPS_CTRL_LPF BIT(11) +#define IPS_CTRL_DITH BIT(12) +#define IPS_CTRL_CLAMP BIT(16) +#define IPS_CTRL_SBS BIT(17) + +/* IPS info register bits */ +#define IPS_INFO_CHD420 BIT(10) + +#define IPS_NUM_INPUT_IDS 2 +#define IPS_NUM_OUTPUT_IDS 1 + +/* FT_COEFF block registers */ +#define FT_COEFF0 0x80 +#define GLB_IT_COEFF 0x80 + +/* GLB_SC_COEFF registers */ +#define GLB_SC_COEFF_ADDR 0x0080 +#define GLB_SC_COEFF_DATA 0x0084 +#define GLB_LT_COEFF_DATA 0x0080 + +#define GLB_SC_COEFF_MAX_NUM 1024 +#define GLB_LT_COEFF_NUM 65 +/* GLB_SC_ADDR */ +#define SC_COEFF_R_ADDR BIT(18) +#define SC_COEFF_G_ADDR BIT(17) +#define SC_COEFF_B_ADDR BIT(16) + +#define SC_COEFF_DATA(x, y) (((y) & 0xFFFF) | (((x) & 0xFFFF) << 16)) + +enum 
d71_blk_type { + D71_BLK_TYPE_GCU = 0x00, + D71_BLK_TYPE_LPU = 0x01, + D71_BLK_TYPE_CU = 0x02, + D71_BLK_TYPE_DOU = 0x03, + D71_BLK_TYPE_AEU = 0x04, + D71_BLK_TYPE_GLB_LT_COEFF = 0x05, + D71_BLK_TYPE_GLB_SCL_COEFF = 0x06, /* SH/SV scaler coeff */ + D71_BLK_TYPE_GLB_SC_COEFF = 0x07, + D71_BLK_TYPE_PERIPH = 0x08, + D71_BLK_TYPE_LPU_TRUSTED = 0x09, + D71_BLK_TYPE_AEU_TRUSTED = 0x0A, + D71_BLK_TYPE_LPU_LAYER = 0x10, + D71_BLK_TYPE_LPU_WB_LAYER = 0x11, + D71_BLK_TYPE_CU_SPLITTER = 0x20, + D71_BLK_TYPE_CU_SCALER = 0x21, + D71_BLK_TYPE_CU_MERGER = 0x22, + D71_BLK_TYPE_DOU_IPS = 0x30, + D71_BLK_TYPE_DOU_BS = 0x31, + D71_BLK_TYPE_DOU_FT_COEFF = 0x32, + D71_BLK_TYPE_AEU_DS = 0x40, + D71_BLK_TYPE_AEU_AES = 0x41, + D71_BLK_TYPE_RESERVED = 0xFF +}; + +/* Constant of components */ +#define D71_MAX_PIPELINE 2 +#define D71_PIPELINE_MAX_SCALERS 2 +#define D71_PIPELINE_MAX_LAYERS 4 + +#define D71_MAX_GLB_IT_COEFF 3 +#define D71_MAX_GLB_SCL_COEFF 4 + +#define D71_MAX_LAYERS_PER_LPU 4 +#define D71_BLOCK_MAX_INPUT 9 +#define D71_BLOCK_MAX_OUTPUT 5 +#define D71_MAX_SC_PER_CU 2 + +#define D71_BLOCK_OFFSET_PERIPH 0xFE00 +#define D71_BLOCK_SIZE 0x0200 + +#define D71_DEFAULT_PREPRETCH_LINE 5 +#define D71_BUS_WIDTH_16_BYTES 16 + +#define D71_SC_MAX_UPSCALING 64 +#define D71_SC_MAX_DOWNSCALING 6 +#define D71_SC_SPLIT_OVERLAP 8 +#define D71_SC_ENH_SPLIT_OVERLAP 1 + +#define D71_MG_MIN_MERGED_SIZE 4 +#define D71_MG_MAX_MERGED_HSIZE 4032 +#define D71_MG_MAX_MERGED_VSIZE 4096 + +#define D71_PALPHA_DEF_MAP 0xFFAA5500 +#define D71_LAYER_CONTROL_DEFAULT 0x30000000 +#define D71_WB_LAYER_CONTROL_DEFAULT 0x3000FF00 +#define D71_BS_CONTROL_DEFAULT 0x00000002 + +struct block_header { + u32 block_info; + u32 pipeline_info; + u32 input_ids[D71_BLOCK_MAX_INPUT]; + u32 output_ids[D71_BLOCK_MAX_OUTPUT]; +}; + +static inline u32 get_block_type(struct block_header *blk) +{ + return BLOCK_INFO_BLK_TYPE(blk->block_info); +} + +#endif /* !_D71_REG_H_ */ diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.c b/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.c new file mode 100644 index 000000000..d8e449e6e --- /dev/null +++ b/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.c @@ -0,0 +1,133 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * (C) COPYRIGHT 2019 ARM Limited. All rights reserved. + * Author: James.Qian.Wang <james.qian.wang@arm.com> + * + */ + +#include "komeda_color_mgmt.h" + +/* 10bit precision YUV2RGB matrix */ +static const s32 yuv2rgb_bt601_narrow[KOMEDA_N_YUV2RGB_COEFFS] = { + 1192, 0, 1634, + 1192, -401, -832, + 1192, 2066, 0, + 64, 512, 512 +}; + +static const s32 yuv2rgb_bt601_wide[KOMEDA_N_YUV2RGB_COEFFS] = { + 1024, 0, 1436, + 1024, -352, -731, + 1024, 1815, 0, + 0, 512, 512 +}; + +static const s32 yuv2rgb_bt709_narrow[KOMEDA_N_YUV2RGB_COEFFS] = { + 1192, 0, 1836, + 1192, -218, -546, + 1192, 2163, 0, + 64, 512, 512 +}; + +static const s32 yuv2rgb_bt709_wide[KOMEDA_N_YUV2RGB_COEFFS] = { + 1024, 0, 1613, + 1024, -192, -479, + 1024, 1900, 0, + 0, 512, 512 +}; + +static const s32 yuv2rgb_bt2020[KOMEDA_N_YUV2RGB_COEFFS] = { + 1024, 0, 1476, + 1024, -165, -572, + 1024, 1884, 0, + 0, 512, 512 +}; + +const s32 *komeda_select_yuv2rgb_coeffs(u32 color_encoding, u32 color_range) +{ + bool narrow = color_range == DRM_COLOR_YCBCR_LIMITED_RANGE; + const s32 *coeffs; + + switch (color_encoding) { + case DRM_COLOR_YCBCR_BT709: + coeffs = narrow ? yuv2rgb_bt709_narrow : yuv2rgb_bt709_wide; + break; + case DRM_COLOR_YCBCR_BT601: + coeffs = narrow ? 
yuv2rgb_bt601_narrow : yuv2rgb_bt601_wide; + break; + case DRM_COLOR_YCBCR_BT2020: + coeffs = yuv2rgb_bt2020; + break; + default: + coeffs = NULL; + break; + } + + return coeffs; +} + +struct gamma_curve_sector { + u32 boundary_start; + u32 num_of_segments; + u32 segment_width; +}; + +struct gamma_curve_segment { + u32 start; + u32 end; +}; + +static struct gamma_curve_sector sector_tbl[] = { + { 0, 4, 4 }, + { 16, 4, 4 }, + { 32, 4, 8 }, + { 64, 4, 16 }, + { 128, 4, 32 }, + { 256, 4, 64 }, + { 512, 16, 32 }, + { 1024, 24, 128 }, +}; + +static void +drm_lut_to_coeffs(struct drm_property_blob *lut_blob, u32 *coeffs, + struct gamma_curve_sector *sector_tbl, u32 num_sectors) +{ + struct drm_color_lut *lut; + u32 i, j, in, num = 0; + + if (!lut_blob) + return; + + lut = lut_blob->data; + + for (i = 0; i < num_sectors; i++) { + for (j = 0; j < sector_tbl[i].num_of_segments; j++) { + in = sector_tbl[i].boundary_start + + j * sector_tbl[i].segment_width; + + coeffs[num++] = drm_color_lut_extract(lut[in].red, + KOMEDA_COLOR_PRECISION); + } + } + + coeffs[num] = BIT(KOMEDA_COLOR_PRECISION); +} + +void drm_lut_to_fgamma_coeffs(struct drm_property_blob *lut_blob, u32 *coeffs) +{ + drm_lut_to_coeffs(lut_blob, coeffs, sector_tbl, ARRAY_SIZE(sector_tbl)); +} + +void drm_ctm_to_coeffs(struct drm_property_blob *ctm_blob, u32 *coeffs) +{ + struct drm_color_ctm *ctm; + u32 i; + + if (!ctm_blob) + return; + + ctm = ctm_blob->data; + + for (i = 0; i < KOMEDA_N_CTM_COEFFS; i++) + coeffs[i] = drm_color_ctm_s31_32_to_qm_n(ctm->matrix[i], 3, 12); +} diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.h b/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.h new file mode 100644 index 000000000..2f4668466 --- /dev/null +++ b/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * (C) COPYRIGHT 2019 ARM Limited. All rights reserved. + * Author: James.Qian.Wang <james.qian.wang@arm.com> + * + */ + +#ifndef _KOMEDA_COLOR_MGMT_H_ +#define _KOMEDA_COLOR_MGMT_H_ + +#include <drm/drm_color_mgmt.h> + +#define KOMEDA_N_YUV2RGB_COEFFS 12 +#define KOMEDA_N_RGB2YUV_COEFFS 12 +#define KOMEDA_COLOR_PRECISION 12 +#define KOMEDA_N_GAMMA_COEFFS 65 +#define KOMEDA_COLOR_LUT_SIZE BIT(KOMEDA_COLOR_PRECISION) +#define KOMEDA_N_CTM_COEFFS 9 + +void drm_lut_to_fgamma_coeffs(struct drm_property_blob *lut_blob, u32 *coeffs); +void drm_ctm_to_coeffs(struct drm_property_blob *ctm_blob, u32 *coeffs); + +const s32 *komeda_select_yuv2rgb_coeffs(u32 color_encoding, u32 color_range); + +#endif /*_KOMEDA_COLOR_MGMT_H_*/ diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c new file mode 100644 index 000000000..f33418d6e --- /dev/null +++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c @@ -0,0 +1,636 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * (C) COPYRIGHT 2018 ARM Limited. All rights reserved. 
+ * Author: James.Qian.Wang <james.qian.wang@arm.com> + * + */ +#include <linux/clk.h> +#include <linux/pm_runtime.h> +#include <linux/spinlock.h> + +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_plane_helper.h> +#include <drm/drm_print.h> +#include <drm/drm_vblank.h> + +#include "komeda_dev.h" +#include "komeda_kms.h" + +void komeda_crtc_get_color_config(struct drm_crtc_state *crtc_st, + u32 *color_depths, u32 *color_formats) +{ + struct drm_connector *conn; + struct drm_connector_state *conn_st; + u32 conn_color_formats = ~0u; + int i, min_bpc = 31, conn_bpc = 0; + + for_each_new_connector_in_state(crtc_st->state, conn, conn_st, i) { + if (conn_st->crtc != crtc_st->crtc) + continue; + + conn_bpc = conn->display_info.bpc ? conn->display_info.bpc : 8; + conn_color_formats &= conn->display_info.color_formats; + + if (conn_bpc < min_bpc) + min_bpc = conn_bpc; + } + + /* if no connector configures a color_format, use RGB444 as the default */ + if (!conn_color_formats) + conn_color_formats = DRM_COLOR_FORMAT_RGB444; + + *color_depths = GENMASK(min_bpc, 0); + *color_formats = conn_color_formats; +} + +static void komeda_crtc_update_clock_ratio(struct komeda_crtc_state *kcrtc_st) +{ + u64 pxlclk, aclk; + + if (!kcrtc_st->base.active) { + kcrtc_st->clock_ratio = 0; + return; + } + + pxlclk = kcrtc_st->base.adjusted_mode.crtc_clock * 1000ULL; + aclk = komeda_crtc_get_aclk(kcrtc_st); + + kcrtc_st->clock_ratio = div64_u64(aclk << 32, pxlclk); +} + +/** + * komeda_crtc_atomic_check - build display output data flow + * @crtc: DRM crtc + * @state: the crtc state object + * + * crtc_atomic_check is the final check stage: besides building a display data + * pipeline according to the crtc_state, it also needs to release or disable + * any unclaimed pipeline resources. + * + * RETURNS: + * Zero for success or -errno + */ +static int +komeda_crtc_atomic_check(struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ + struct komeda_crtc *kcrtc = to_kcrtc(crtc); + struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(state); + int err; + + if (drm_atomic_crtc_needs_modeset(state)) + komeda_crtc_update_clock_ratio(kcrtc_st); + + if (state->active) { + err = komeda_build_display_data_flow(kcrtc, kcrtc_st); + if (err) + return err; + } + + /* release unclaimed pipeline resources */ + err = komeda_release_unclaimed_resources(kcrtc->slave, kcrtc_st); + if (err) + return err; + + err = komeda_release_unclaimed_resources(kcrtc->master, kcrtc_st); + if (err) + return err; + + return 0; +} + +/* Activating a CRTC needs two parts of preparation: + * 1. adjust the display operation mode. + * 2. 
enable the needed clocks. + */ +static int +komeda_crtc_prepare(struct komeda_crtc *kcrtc) +{ + struct komeda_dev *mdev = kcrtc->base.dev->dev_private; + struct komeda_pipeline *master = kcrtc->master; + struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(kcrtc->base.state); + struct drm_display_mode *mode = &kcrtc_st->base.adjusted_mode; + u32 new_mode; + int err; + + mutex_lock(&mdev->lock); + + new_mode = mdev->dpmode | BIT(master->id); + if (WARN_ON(new_mode == mdev->dpmode)) { + err = 0; + goto unlock; + } + + err = mdev->funcs->change_opmode(mdev, new_mode); + if (err) { + DRM_ERROR("failed to change opmode: 0x%x -> 0x%x.\n", + mdev->dpmode, new_mode); + goto unlock; + } + + mdev->dpmode = new_mode; + /* aclk only needs to be enabled when entering a single display mode. + * Dual display mode is always entered from a single display mode, so + * by that point aclk is already enabled and doesn't need to be + * enabled again. + */ + if (new_mode != KOMEDA_MODE_DUAL_DISP) { + err = clk_set_rate(mdev->aclk, komeda_crtc_get_aclk(kcrtc_st)); + if (err) + DRM_ERROR("failed to set aclk.\n"); + err = clk_prepare_enable(mdev->aclk); + if (err) + DRM_ERROR("failed to enable aclk.\n"); + } + + err = clk_set_rate(master->pxlclk, mode->crtc_clock * 1000); + if (err) + DRM_ERROR("failed to set pxlclk for pipe%d\n", master->id); + err = clk_prepare_enable(master->pxlclk); + if (err) + DRM_ERROR("failed to enable pxl clk for pipe%d.\n", master->id); + +unlock: + mutex_unlock(&mdev->lock); + + return err; +} + +static int +komeda_crtc_unprepare(struct komeda_crtc *kcrtc) +{ + struct komeda_dev *mdev = kcrtc->base.dev->dev_private; + struct komeda_pipeline *master = kcrtc->master; + u32 new_mode; + int err; + + mutex_lock(&mdev->lock); + + new_mode = mdev->dpmode & (~BIT(master->id)); + + if (WARN_ON(new_mode == mdev->dpmode)) { + err = 0; + goto unlock; + } + + err = mdev->funcs->change_opmode(mdev, new_mode); + if (err) { + DRM_ERROR("failed to change opmode: 0x%x -> 0x%x.\n", + mdev->dpmode, new_mode); + goto unlock; + } + + mdev->dpmode = new_mode; + + clk_disable_unprepare(master->pxlclk); + if (new_mode == KOMEDA_MODE_INACTIVE) + clk_disable_unprepare(mdev->aclk); + +unlock: + mutex_unlock(&mdev->lock); + + return err; +} + +void komeda_crtc_handle_event(struct komeda_crtc *kcrtc, + struct komeda_events *evts) +{ + struct drm_crtc *crtc = &kcrtc->base; + u32 events = evts->pipes[kcrtc->master->id]; + + if (events & KOMEDA_EVENT_VSYNC) + drm_crtc_handle_vblank(crtc); + + if (events & KOMEDA_EVENT_EOW) { + struct komeda_wb_connector *wb_conn = kcrtc->wb_conn; + + if (wb_conn) + drm_writeback_signal_completion(&wb_conn->base, 0); + else + DRM_WARN("CRTC[%d]: EOW happened but no wb_connector.\n", + drm_crtc_index(&kcrtc->base)); + } + /* to be handled together with the writeback support */ + if (events & KOMEDA_EVENT_EOW) + DRM_DEBUG("EOW.\n"); + + if (events & KOMEDA_EVENT_FLIP) { + unsigned long flags; + struct drm_pending_vblank_event *event; + + spin_lock_irqsave(&crtc->dev->event_lock, flags); + if (kcrtc->disable_done) { + complete_all(kcrtc->disable_done); + kcrtc->disable_done = NULL; + } else if (crtc->state->event) { + event = crtc->state->event; + /* + * Consume the event before notifying the drm core that the + * flip happened. 
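+ * Clearing crtc->state->event first (still under event_lock) + * ensures the event is completed exactly once and cannot be + * picked up again by a later commit-cleanup path. 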
+ */ + crtc->state->event = NULL; + drm_crtc_send_vblank_event(crtc, event); + } else { + DRM_WARN("CRTC[%d]: FLIP happened but no pending commit.\n", + drm_crtc_index(&kcrtc->base)); + } + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + } +} + +static void +komeda_crtc_do_flush(struct drm_crtc *crtc, + struct drm_crtc_state *old) +{ + struct komeda_crtc *kcrtc = to_kcrtc(crtc); + struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(crtc->state); + struct komeda_dev *mdev = kcrtc->base.dev->dev_private; + struct komeda_pipeline *master = kcrtc->master; + struct komeda_pipeline *slave = kcrtc->slave; + struct komeda_wb_connector *wb_conn = kcrtc->wb_conn; + struct drm_connector_state *conn_st; + + DRM_DEBUG_ATOMIC("CRTC%d_FLUSH: active_pipes: 0x%x, affected: 0x%x.\n", + drm_crtc_index(crtc), + kcrtc_st->active_pipes, kcrtc_st->affected_pipes); + + /* step 1: update the pipeline/component state to HW */ + if (has_bit(master->id, kcrtc_st->affected_pipes)) + komeda_pipeline_update(master, old->state); + + if (slave && has_bit(slave->id, kcrtc_st->affected_pipes)) + komeda_pipeline_update(slave, old->state); + + conn_st = wb_conn ? wb_conn->base.base.state : NULL; + if (conn_st && conn_st->writeback_job) + drm_writeback_queue_job(&wb_conn->base, conn_st); + + /* step 2: notify the HW to kick off the update */ + mdev->funcs->flush(mdev, master->id, kcrtc_st->active_pipes); +} + +static void +komeda_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_crtc_state *old) +{ + pm_runtime_get_sync(crtc->dev->dev); + komeda_crtc_prepare(to_kcrtc(crtc)); + drm_crtc_vblank_on(crtc); + WARN_ON(drm_crtc_vblank_get(crtc)); + komeda_crtc_do_flush(crtc, old); +} + +static void +komeda_crtc_flush_and_wait_for_flip_done(struct komeda_crtc *kcrtc, + struct completion *input_flip_done) +{ + struct drm_device *drm = kcrtc->base.dev; + struct komeda_dev *mdev = kcrtc->master->mdev; + struct completion *flip_done; + struct completion temp; + int timeout; + + /* if the caller doesn't send a flip_done, use a private flip_done */ + if (input_flip_done) { + flip_done = input_flip_done; + } else { + init_completion(&temp); + kcrtc->disable_done = &temp; + flip_done = &temp; + } + + mdev->funcs->flush(mdev, kcrtc->master->id, 0); + + /* wait for the flip to take effect */ + timeout = wait_for_completion_timeout(flip_done, HZ); + if (timeout == 0) { + DRM_ERROR("wait pipe%d flip done timeout\n", kcrtc->master->id); + if (!input_flip_done) { + unsigned long flags; + + spin_lock_irqsave(&drm->event_lock, flags); + kcrtc->disable_done = NULL; + spin_unlock_irqrestore(&drm->event_lock, flags); + } + } +} + +static void +komeda_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_crtc_state *old) +{ + struct komeda_crtc *kcrtc = to_kcrtc(crtc); + struct komeda_crtc_state *old_st = to_kcrtc_st(old); + struct komeda_pipeline *master = kcrtc->master; + struct komeda_pipeline *slave = kcrtc->slave; + struct completion *disable_done; + bool needs_phase2 = false; + + DRM_DEBUG_ATOMIC("CRTC%d_DISABLE: active_pipes: 0x%x, affected: 0x%x\n", + drm_crtc_index(crtc), + old_st->active_pipes, old_st->affected_pipes); + + if (slave && has_bit(slave->id, old_st->active_pipes)) + komeda_pipeline_disable(slave, old->state); + + if (has_bit(master->id, old_st->active_pipes)) + needs_phase2 = komeda_pipeline_disable(master, old->state); + + /* crtc_disable has two scenarios, depending on the state->active switch. + * 1. active -> inactive + * This commit is a pure disable commit, which is finished once the + * disable operation is done; in this case crtc->state->event can be + * used directly to track the HW disable operation. + * 2. active -> active + * The commit is not a disable but a modeset on an active crtc. Such + * a commit is actually completed by three DRM operations: + * crtc_disable, update_planes(crtc_flush), crtc_enable + * so the crtc->commit covers the whole process and cannot be used to + * trace the disable alone; a temporary flip_done is needed to trace + * the disable, while crtc->state->event tracks the crtc_enable. + * This is also the reason why modeset commits are skipped in + * komeda_crtc_atomic_flush(). + */ + disable_done = (needs_phase2 || crtc->state->active) ? + NULL : &crtc->state->commit->flip_done; + + /* wait for the phase 1 disable to be done */ + komeda_crtc_flush_and_wait_for_flip_done(kcrtc, disable_done); + + /* phase 2 */ + if (needs_phase2) { + komeda_pipeline_disable(kcrtc->master, old->state); + + disable_done = crtc->state->active ? + NULL : &crtc->state->commit->flip_done; + + komeda_crtc_flush_and_wait_for_flip_done(kcrtc, disable_done); + } + + drm_crtc_vblank_put(crtc); + drm_crtc_vblank_off(crtc); + komeda_crtc_unprepare(kcrtc); + pm_runtime_put(crtc->dev->dev); +} + +static void +komeda_crtc_atomic_flush(struct drm_crtc *crtc, + struct drm_crtc_state *old) +{ + /* commits with a modeset are handled in enable/disable */ + if (drm_atomic_crtc_needs_modeset(crtc->state)) + return; + + komeda_crtc_do_flush(crtc, old); +} + +/* Returns the minimum frequency of the aclk rate (main engine clock) in Hz */ +static unsigned long +komeda_calc_min_aclk_rate(struct komeda_crtc *kcrtc, + unsigned long pxlclk) +{ + /* With dual-link, one display pipeline drives two display outputs and + * the aclk needs to run at double the pxlclk rate. + */ + if (kcrtc->master->dual_link) + return pxlclk * 2; + else + return pxlclk; +} + +/* Get the aclk rate specified by the state */ +unsigned long komeda_crtc_get_aclk(struct komeda_crtc_state *kcrtc_st) +{ + struct drm_crtc *crtc = kcrtc_st->base.crtc; + struct komeda_dev *mdev = crtc->dev->dev_private; + unsigned long pxlclk = kcrtc_st->base.adjusted_mode.crtc_clock * 1000; + unsigned long min_aclk; + + min_aclk = komeda_calc_min_aclk_rate(to_kcrtc(crtc), pxlclk); + + return clk_round_rate(mdev->aclk, min_aclk); +} + +static enum drm_mode_status +komeda_crtc_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *m) +{ + struct komeda_dev *mdev = crtc->dev->dev_private; + struct komeda_crtc *kcrtc = to_kcrtc(crtc); + struct komeda_pipeline *master = kcrtc->master; + unsigned long min_pxlclk, min_aclk; + + if (m->flags & DRM_MODE_FLAG_INTERLACE) + return MODE_NO_INTERLACE; + + min_pxlclk = m->clock * 1000; + if (master->dual_link) + min_pxlclk /= 2; + + if (min_pxlclk != clk_round_rate(master->pxlclk, min_pxlclk)) { + DRM_DEBUG_ATOMIC("pxlclk doesn't support %lu Hz\n", min_pxlclk); + + return MODE_NOCLOCK; + } + + min_aclk = komeda_calc_min_aclk_rate(to_kcrtc(crtc), min_pxlclk); + if (clk_round_rate(mdev->aclk, min_aclk) < min_aclk) { + DRM_DEBUG_ATOMIC("engine clk can't satisfy the requirement of %s-clk: %lu.\n", + m->name, min_pxlclk); + + return MODE_CLOCK_HIGH; + } + + return MODE_OK; +} + +static bool komeda_crtc_mode_fixup(struct drm_crtc *crtc, + const struct drm_display_mode *m, + struct drm_display_mode *adjusted_mode) +{ + struct komeda_crtc *kcrtc = to_kcrtc(crtc); + unsigned long clk_rate; + + drm_mode_set_crtcinfo(adjusted_mode, 0); + /* In dual-link mode, halve the horizontal settings */ + if 
(kcrtc->master->dual_link) { + adjusted_mode->crtc_clock /= 2; + adjusted_mode->crtc_hdisplay /= 2; + adjusted_mode->crtc_hsync_start /= 2; + adjusted_mode->crtc_hsync_end /= 2; + adjusted_mode->crtc_htotal /= 2; + } + + clk_rate = adjusted_mode->crtc_clock * 1000; + /* crtc_clock will be used as the komeda output pixel clock */ + adjusted_mode->crtc_clock = clk_round_rate(kcrtc->master->pxlclk, + clk_rate) / 1000; + + return true; +} + +static const struct drm_crtc_helper_funcs komeda_crtc_helper_funcs = { + .atomic_check = komeda_crtc_atomic_check, + .atomic_flush = komeda_crtc_atomic_flush, + .atomic_enable = komeda_crtc_atomic_enable, + .atomic_disable = komeda_crtc_atomic_disable, + .mode_valid = komeda_crtc_mode_valid, + .mode_fixup = komeda_crtc_mode_fixup, +}; + +static void komeda_crtc_reset(struct drm_crtc *crtc) +{ + struct komeda_crtc_state *state; + + if (crtc->state) + __drm_atomic_helper_crtc_destroy_state(crtc->state); + + kfree(to_kcrtc_st(crtc->state)); + crtc->state = NULL; + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (state) + __drm_atomic_helper_crtc_reset(crtc, &state->base); +} + +static struct drm_crtc_state * +komeda_crtc_atomic_duplicate_state(struct drm_crtc *crtc) +{ + struct komeda_crtc_state *old = to_kcrtc_st(crtc->state); + struct komeda_crtc_state *new; + + new = kzalloc(sizeof(*new), GFP_KERNEL); + if (!new) + return NULL; + + __drm_atomic_helper_crtc_duplicate_state(crtc, &new->base); + + new->affected_pipes = old->active_pipes; + new->clock_ratio = old->clock_ratio; + new->max_slave_zorder = old->max_slave_zorder; + + return &new->base; +} + +static void komeda_crtc_atomic_destroy_state(struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ + __drm_atomic_helper_crtc_destroy_state(state); + kfree(to_kcrtc_st(state)); +} + +static int komeda_crtc_vblank_enable(struct drm_crtc *crtc) +{ + struct komeda_dev *mdev = crtc->dev->dev_private; + struct komeda_crtc *kcrtc = to_kcrtc(crtc); + + mdev->funcs->on_off_vblank(mdev, kcrtc->master->id, true); + return 0; +} + +static void komeda_crtc_vblank_disable(struct drm_crtc *crtc) +{ + struct komeda_dev *mdev = crtc->dev->dev_private; + struct komeda_crtc *kcrtc = to_kcrtc(crtc); + + mdev->funcs->on_off_vblank(mdev, kcrtc->master->id, false); +} + +static const struct drm_crtc_funcs komeda_crtc_funcs = { + .gamma_set = drm_atomic_helper_legacy_gamma_set, + .destroy = drm_crtc_cleanup, + .set_config = drm_atomic_helper_set_config, + .page_flip = drm_atomic_helper_page_flip, + .reset = komeda_crtc_reset, + .atomic_duplicate_state = komeda_crtc_atomic_duplicate_state, + .atomic_destroy_state = komeda_crtc_atomic_destroy_state, + .enable_vblank = komeda_crtc_vblank_enable, + .disable_vblank = komeda_crtc_vblank_disable, +}; + +int komeda_kms_setup_crtcs(struct komeda_kms_dev *kms, + struct komeda_dev *mdev) +{ + struct komeda_crtc *crtc; + struct komeda_pipeline *master; + char str[16]; + int i; + + kms->n_crtcs = 0; + + for (i = 0; i < mdev->n_pipelines; i++) { + crtc = &kms->crtcs[kms->n_crtcs]; + master = mdev->pipelines[i]; + + crtc->master = master; + crtc->slave = komeda_pipeline_get_slave(master); + + if (crtc->slave) + sprintf(str, "pipe-%d", crtc->slave->id); + else + sprintf(str, "None"); + + DRM_INFO("CRTC-%d: master(pipe-%d) slave(%s).\n", + kms->n_crtcs, master->id, str); + + kms->n_crtcs++; + } + + return 0; +} + +static struct drm_plane * +get_crtc_primary(struct komeda_kms_dev *kms, struct komeda_crtc *crtc) +{ + struct komeda_plane *kplane; + struct drm_plane *plane; + + 
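+ /* Walk every plane registered on the device and pick the primary + * plane that is backed by a layer on this CRTC's master pipeline. + */ 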
drm_for_each_plane(plane, &kms->base) { + if (plane->type != DRM_PLANE_TYPE_PRIMARY) + continue; + + kplane = to_kplane(plane); + /* only master can be primary */ + if (kplane->layer->base.pipeline == crtc->master) + return plane; + } + + return NULL; +} + +static int komeda_crtc_add(struct komeda_kms_dev *kms, + struct komeda_crtc *kcrtc) +{ + struct drm_crtc *crtc = &kcrtc->base; + int err; + + err = drm_crtc_init_with_planes(&kms->base, crtc, + get_crtc_primary(kms, kcrtc), NULL, + &komeda_crtc_funcs, NULL); + if (err) + return err; + + drm_crtc_helper_add(crtc, &komeda_crtc_helper_funcs); + + crtc->port = kcrtc->master->of_output_port; + + drm_crtc_enable_color_mgmt(crtc, 0, true, KOMEDA_COLOR_LUT_SIZE); + + return err; +} + +int komeda_kms_add_crtcs(struct komeda_kms_dev *kms, struct komeda_dev *mdev) +{ + int i, err; + + for (i = 0; i < kms->n_crtcs; i++) { + err = komeda_crtc_add(kms, &kms->crtcs[i]); + if (err) + return err; + } + + return 0; +} diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c new file mode 100644 index 000000000..1d767473b --- /dev/null +++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c @@ -0,0 +1,359 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * (C) COPYRIGHT 2018 ARM Limited. All rights reserved. + * Author: James.Qian.Wang <james.qian.wang@arm.com> + * + */ +#include <linux/io.h> +#include <linux/iommu.h> +#include <linux/of_device.h> +#include <linux/of_graph.h> +#include <linux/of_reserved_mem.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/dma-mapping.h> +#ifdef CONFIG_DEBUG_FS +#include <linux/debugfs.h> +#include <linux/seq_file.h> +#endif + +#include <drm/drm_print.h> + +#include "komeda_dev.h" + +static int komeda_register_show(struct seq_file *sf, void *x) +{ + struct komeda_dev *mdev = sf->private; + int i; + + seq_puts(sf, "\n====== Komeda register dump =========\n"); + + pm_runtime_get_sync(mdev->dev); + + if (mdev->funcs->dump_register) + mdev->funcs->dump_register(mdev, sf); + + for (i = 0; i < mdev->n_pipelines; i++) + komeda_pipeline_dump_register(mdev->pipelines[i], sf); + + pm_runtime_put(mdev->dev); + + return 0; +} + +static int komeda_register_open(struct inode *inode, struct file *filp) +{ + return single_open(filp, komeda_register_show, inode->i_private); +} + +static const struct file_operations komeda_register_fops = { + .owner = THIS_MODULE, + .open = komeda_register_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +#ifdef CONFIG_DEBUG_FS +static void komeda_debugfs_init(struct komeda_dev *mdev) +{ + if (!debugfs_initialized()) + return; + + mdev->debugfs_root = debugfs_create_dir("komeda", NULL); + debugfs_create_file("register", 0444, mdev->debugfs_root, + mdev, &komeda_register_fops); + debugfs_create_x16("err_verbosity", 0664, mdev->debugfs_root, + &mdev->err_verbosity); +} +#endif + +static ssize_t +core_id_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct komeda_dev *mdev = dev_to_mdev(dev); + + return snprintf(buf, PAGE_SIZE, "0x%08x\n", mdev->chip.core_id); +} +static DEVICE_ATTR_RO(core_id); + +static ssize_t +config_id_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct komeda_dev *mdev = dev_to_mdev(dev); + struct komeda_pipeline *pipe = mdev->pipelines[0]; + union komeda_config_id config_id; + int i; + + memset(&config_id, 0, sizeof(config_id)); + + config_id.max_line_sz = pipe->layers[0]->hsize_in.end; + config_id.n_pipelines = 
mdev->n_pipelines; + config_id.n_scalers = pipe->n_scalers; + config_id.n_layers = pipe->n_layers; + config_id.n_richs = 0; + for (i = 0; i < pipe->n_layers; i++) { + if (pipe->layers[i]->layer_type == KOMEDA_FMT_RICH_LAYER) + config_id.n_richs++; + } + return snprintf(buf, PAGE_SIZE, "0x%08x\n", config_id.value); +} +static DEVICE_ATTR_RO(config_id); + +static ssize_t +aclk_hz_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct komeda_dev *mdev = dev_to_mdev(dev); + + return snprintf(buf, PAGE_SIZE, "%lu\n", clk_get_rate(mdev->aclk)); +} +static DEVICE_ATTR_RO(aclk_hz); + +static struct attribute *komeda_sysfs_entries[] = { + &dev_attr_core_id.attr, + &dev_attr_config_id.attr, + &dev_attr_aclk_hz.attr, + NULL, +}; + +static struct attribute_group komeda_sysfs_attr_group = { + .attrs = komeda_sysfs_entries, +}; + +static int komeda_parse_pipe_dt(struct komeda_pipeline *pipe) +{ + struct device_node *np = pipe->of_node; + struct clk *clk; + + clk = of_clk_get_by_name(np, "pxclk"); + if (IS_ERR(clk)) { + DRM_ERROR("get pxclk for pipeline %d failed!\n", pipe->id); + return PTR_ERR(clk); + } + pipe->pxlclk = clk; + + /* enum ports */ + pipe->of_output_links[0] = + of_graph_get_remote_node(np, KOMEDA_OF_PORT_OUTPUT, 0); + pipe->of_output_links[1] = + of_graph_get_remote_node(np, KOMEDA_OF_PORT_OUTPUT, 1); + pipe->of_output_port = + of_graph_get_port_by_id(np, KOMEDA_OF_PORT_OUTPUT); + + pipe->dual_link = pipe->of_output_links[0] && pipe->of_output_links[1]; + + return 0; +} + +static int komeda_parse_dt(struct device *dev, struct komeda_dev *mdev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct device_node *child, *np = dev->of_node; + struct komeda_pipeline *pipe; + u32 pipe_id = U32_MAX; + int ret = -1; + + mdev->irq = platform_get_irq(pdev, 0); + if (mdev->irq < 0) { + DRM_ERROR("could not get IRQ number.\n"); + return mdev->irq; + } + + /* Get the optional framebuffer memory resource */ + ret = of_reserved_mem_device_init(dev); + if (ret && ret != -ENODEV) + return ret; + ret = 0; + + for_each_available_child_of_node(np, child) { + if (of_node_name_eq(child, "pipeline")) { + of_property_read_u32(child, "reg", &pipe_id); + if (pipe_id >= mdev->n_pipelines) { + DRM_WARN("Skip the redundant DT node: pipeline-%u.\n", + pipe_id); + continue; + } + mdev->pipelines[pipe_id]->of_node = of_node_get(child); + } + } + + for (pipe_id = 0; pipe_id < mdev->n_pipelines; pipe_id++) { + pipe = mdev->pipelines[pipe_id]; + + if (!pipe->of_node) { + DRM_ERROR("Pipeline-%d doesn't have a DT node.\n", + pipe->id); + return -EINVAL; + } + ret = komeda_parse_pipe_dt(pipe); + if (ret) + return ret; + } + + return 0; +} + +struct komeda_dev *komeda_dev_create(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + komeda_identify_func komeda_identify; + struct komeda_dev *mdev; + int err = 0; + + komeda_identify = of_device_get_match_data(dev); + if (!komeda_identify) + return ERR_PTR(-ENODEV); + + mdev = devm_kzalloc(dev, sizeof(*mdev), GFP_KERNEL); + if (!mdev) + return ERR_PTR(-ENOMEM); + + mutex_init(&mdev->lock); + + mdev->dev = dev; + mdev->reg_base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(mdev->reg_base)) { + DRM_ERROR("Map register space failed.\n"); + err = PTR_ERR(mdev->reg_base); + mdev->reg_base = NULL; + goto err_cleanup; + } + + mdev->aclk = devm_clk_get(dev, "aclk"); + if (IS_ERR(mdev->aclk)) { + DRM_ERROR("Get engine clk failed.\n"); + err = PTR_ERR(mdev->aclk); + mdev->aclk = NULL; + goto err_cleanup; + } + + 
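+ /* Enable aclk so the register accesses issued below while identifying + * the HW and enumerating its resources can reach the device; the + * clock is dropped again once probing is done. + */ 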
clk_prepare_enable(mdev->aclk); + + mdev->funcs = komeda_identify(mdev->reg_base, &mdev->chip); + if (!mdev->funcs) { + DRM_ERROR("Failed to identify the HW.\n"); + err = -ENODEV; + goto disable_clk; + } + + DRM_INFO("Found ARM Mali-D%x version r%dp%d\n", + MALIDP_CORE_ID_PRODUCT_ID(mdev->chip.core_id), + MALIDP_CORE_ID_MAJOR(mdev->chip.core_id), + MALIDP_CORE_ID_MINOR(mdev->chip.core_id)); + + mdev->funcs->init_format_table(mdev); + + err = mdev->funcs->enum_resources(mdev); + if (err) { + DRM_ERROR("enumerate display resource failed.\n"); + goto disable_clk; + } + + err = komeda_parse_dt(dev, mdev); + if (err) { + DRM_ERROR("parse device tree failed.\n"); + goto disable_clk; + } + + err = komeda_assemble_pipelines(mdev); + if (err) { + DRM_ERROR("assemble display pipelines failed.\n"); + goto disable_clk; + } + + dev->dma_parms = &mdev->dma_parms; + dma_set_max_seg_size(dev, DMA_BIT_MASK(32)); + + mdev->iommu = iommu_get_domain_for_dev(mdev->dev); + if (!mdev->iommu) + DRM_INFO("continue without IOMMU support!\n"); + + clk_disable_unprepare(mdev->aclk); + + err = sysfs_create_group(&dev->kobj, &komeda_sysfs_attr_group); + if (err) { + DRM_ERROR("create sysfs group failed.\n"); + goto err_cleanup; + } + + mdev->err_verbosity = KOMEDA_DEV_PRINT_ERR_EVENTS; + +#ifdef CONFIG_DEBUG_FS + komeda_debugfs_init(mdev); +#endif + + return mdev; + +disable_clk: + clk_disable_unprepare(mdev->aclk); +err_cleanup: + komeda_dev_destroy(mdev); + return ERR_PTR(err); +} + +void komeda_dev_destroy(struct komeda_dev *mdev) +{ + struct device *dev = mdev->dev; + const struct komeda_dev_funcs *funcs = mdev->funcs; + int i; + + sysfs_remove_group(&dev->kobj, &komeda_sysfs_attr_group); + +#ifdef CONFIG_DEBUG_FS + debugfs_remove_recursive(mdev->debugfs_root); +#endif + + if (mdev->aclk) + clk_prepare_enable(mdev->aclk); + + for (i = 0; i < mdev->n_pipelines; i++) { + komeda_pipeline_destroy(mdev, mdev->pipelines[i]); + mdev->pipelines[i] = NULL; + } + + mdev->n_pipelines = 0; + + of_reserved_mem_device_release(dev); + + if (funcs && funcs->cleanup) + funcs->cleanup(mdev); + + if (mdev->reg_base) { + devm_iounmap(dev, mdev->reg_base); + mdev->reg_base = NULL; + } + + if (mdev->aclk) { + clk_disable_unprepare(mdev->aclk); + devm_clk_put(dev, mdev->aclk); + mdev->aclk = NULL; + } + + devm_kfree(dev, mdev); +} + +int komeda_dev_resume(struct komeda_dev *mdev) +{ + clk_prepare_enable(mdev->aclk); + + mdev->funcs->enable_irq(mdev); + + if (mdev->iommu && mdev->funcs->connect_iommu) + if (mdev->funcs->connect_iommu(mdev)) + DRM_ERROR("connect iommu failed.\n"); + + return 0; +} + +int komeda_dev_suspend(struct komeda_dev *mdev) +{ + if (mdev->iommu && mdev->funcs->disconnect_iommu) + if (mdev->funcs->disconnect_iommu(mdev)) + DRM_ERROR("disconnect iommu failed.\n"); + + mdev->funcs->disable_irq(mdev); + + clk_disable_unprepare(mdev->aclk); + + return 0; +} diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.h b/drivers/gpu/drm/arm/display/komeda/komeda_dev.h new file mode 100644 index 000000000..ce27f2f27 --- /dev/null +++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.h @@ -0,0 +1,245 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * (C) COPYRIGHT 2018 ARM Limited. All rights reserved. 
+ * Author: James.Qian.Wang <james.qian.wang@arm.com>
+ *
+ */
+#ifndef _KOMEDA_DEV_H_
+#define _KOMEDA_DEV_H_
+
+#include <linux/device.h>
+#include <linux/clk.h>
+#include "komeda_pipeline.h"
+#include "malidp_product.h"
+#include "komeda_format_caps.h"
+
+#define KOMEDA_EVENT_VSYNC		BIT_ULL(0)
+#define KOMEDA_EVENT_FLIP		BIT_ULL(1)
+#define KOMEDA_EVENT_URUN		BIT_ULL(2)
+#define KOMEDA_EVENT_IBSY		BIT_ULL(3)
+#define KOMEDA_EVENT_OVR		BIT_ULL(4)
+#define KOMEDA_EVENT_EOW		BIT_ULL(5)
+#define KOMEDA_EVENT_MODE		BIT_ULL(6)
+#define KOMEDA_EVENT_FULL		BIT_ULL(7)
+#define KOMEDA_EVENT_EMPTY		BIT_ULL(8)
+
+#define KOMEDA_ERR_TETO			BIT_ULL(14)
+#define KOMEDA_ERR_TEMR			BIT_ULL(15)
+#define KOMEDA_ERR_TITR			BIT_ULL(16)
+#define KOMEDA_ERR_CPE			BIT_ULL(17)
+#define KOMEDA_ERR_CFGE			BIT_ULL(18)
+#define KOMEDA_ERR_AXIE			BIT_ULL(19)
+#define KOMEDA_ERR_ACE0			BIT_ULL(20)
+#define KOMEDA_ERR_ACE1			BIT_ULL(21)
+#define KOMEDA_ERR_ACE2			BIT_ULL(22)
+#define KOMEDA_ERR_ACE3			BIT_ULL(23)
+#define KOMEDA_ERR_DRIFTTO		BIT_ULL(24)
+#define KOMEDA_ERR_FRAMETO		BIT_ULL(25)
+#define KOMEDA_ERR_CSCE			BIT_ULL(26)
+#define KOMEDA_ERR_ZME			BIT_ULL(27)
+#define KOMEDA_ERR_MERR			BIT_ULL(28)
+#define KOMEDA_ERR_TCF			BIT_ULL(29)
+#define KOMEDA_ERR_TTNG			BIT_ULL(30)
+#define KOMEDA_ERR_TTF			BIT_ULL(31)
+
+#define KOMEDA_ERR_EVENTS	\
+	(KOMEDA_EVENT_URUN | KOMEDA_EVENT_IBSY | KOMEDA_EVENT_OVR |\
+	KOMEDA_ERR_TETO | KOMEDA_ERR_TEMR | KOMEDA_ERR_TITR |\
+	KOMEDA_ERR_CPE | KOMEDA_ERR_CFGE | KOMEDA_ERR_AXIE |\
+	KOMEDA_ERR_ACE0 | KOMEDA_ERR_ACE1 | KOMEDA_ERR_ACE2 |\
+	KOMEDA_ERR_ACE3 | KOMEDA_ERR_DRIFTTO | KOMEDA_ERR_FRAMETO |\
+	KOMEDA_ERR_ZME | KOMEDA_ERR_MERR | KOMEDA_ERR_TCF |\
+	KOMEDA_ERR_TTNG | KOMEDA_ERR_TTF)
+
+#define KOMEDA_WARN_EVENTS	\
+	(KOMEDA_ERR_CSCE | KOMEDA_EVENT_FULL | KOMEDA_EVENT_EMPTY)
+
+#define KOMEDA_INFO_EVENTS (0 \
+			    | KOMEDA_EVENT_VSYNC \
+			    | KOMEDA_EVENT_FLIP \
+			    | KOMEDA_EVENT_EOW \
+			    | KOMEDA_EVENT_MODE \
+			    )
+
+/* pipeline DT ports */
+enum {
+	KOMEDA_OF_PORT_OUTPUT		= 0,
+	KOMEDA_OF_PORT_COPROC		= 1,
+};
+
+struct komeda_chip_info {
+	u32 arch_id;
+	u32 core_id;
+	u32 core_info;
+	u32 bus_width;
+};
+
+struct komeda_dev;
+
+struct komeda_events {
+	u64 global;
+	u64 pipes[KOMEDA_MAX_PIPELINES];
+};
+
+/**
+ * struct komeda_dev_funcs
+ *
+ * Supplied by the chip level and returned by the chip entry function
+ * xxx_identify().
+ */
+struct komeda_dev_funcs {
+	/**
+	 * @init_format_table:
+	 *
+	 * Initialize &komeda_dev->fmt_tbl; this function must be called
+	 * before @enum_resources.
+	 */
+	void (*init_format_table)(struct komeda_dev *mdev);
+	/**
+	 * @enum_resources:
+	 *
+	 * For the CHIP to report or add pipeline and component resources to
+	 * the CORE.
+	 */
+	int (*enum_resources)(struct komeda_dev *mdev);
+	/** @cleanup: called to let the chip clean up &komeda_dev.chip_data */
+	void (*cleanup)(struct komeda_dev *mdev);
+	/** @connect_iommu: Optional, connect to the external iommu */
+	int (*connect_iommu)(struct komeda_dev *mdev);
+	/** @disconnect_iommu: Optional, disconnect from the external iommu */
+	int (*disconnect_iommu)(struct komeda_dev *mdev);
+	/**
+	 * @irq_handler:
+	 *
+	 * For the CORE to get the HW events from the CHIP when an interrupt
+	 * occurs.
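+	 *
+	 * The CHIP is expected to translate its raw status registers into
+	 * the canonical KOMEDA_EVENT_xxx / KOMEDA_ERR_xxx bits of
+	 * &komeda_events, so the CORE stays HW agnostic. A minimal sketch of
+	 * the expected pattern (the HW bit name below is purely
+	 * illustrative, not a real D71 register bit):
+	 *
+	 *	if (raw_status & HW_IRQ_VSYNC_BIT)
+	 *		events->pipes[pipe_id] |= KOMEDA_EVENT_VSYNC;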
+	 */
+	irqreturn_t (*irq_handler)(struct komeda_dev *mdev,
+				   struct komeda_events *events);
+	/** @enable_irq: enable irq */
+	int (*enable_irq)(struct komeda_dev *mdev);
+	/** @disable_irq: disable irq */
+	int (*disable_irq)(struct komeda_dev *mdev);
+	/** @on_off_vblank: notify HW to turn vblank on/off */
+	void (*on_off_vblank)(struct komeda_dev *mdev,
+			      int master_pipe, bool on);
+
+	/** @dump_register: Optional, dump registers to seq_file */
+	void (*dump_register)(struct komeda_dev *mdev, struct seq_file *seq);
+	/**
+	 * @change_opmode:
+	 *
+	 * Notify HW to switch to a new display operation mode.
+	 */
+	int (*change_opmode)(struct komeda_dev *mdev, int new_mode);
+	/** @flush: Notify the HW to flush or kick off the update */
+	void (*flush)(struct komeda_dev *mdev,
+		      int master_pipe, u32 active_pipes);
+};
+
+/*
+ * DISPLAY_MODE describes how many displays are enabled; it is passed to the
+ * CHIP by &komeda_dev_funcs->change_opmode(), so the CHIP can assign the
+ * pipeline resources according to this usage hint.
+ * - KOMEDA_MODE_DISP0: Only one display enabled, pipeline-0 works as master.
+ * - KOMEDA_MODE_DISP1: Only one display enabled, pipeline-1 works as master.
+ * - KOMEDA_MODE_DUAL_DISP: Dual display mode, both displays are enabled.
+ * And D71 supports assigning two pipelines to one single display on mode
+ * KOMEDA_MODE_DISP0/DISP1.
+ */
+enum {
+	KOMEDA_MODE_INACTIVE	= 0,
+	KOMEDA_MODE_DISP0	= BIT(0),
+	KOMEDA_MODE_DISP1	= BIT(1),
+	KOMEDA_MODE_DUAL_DISP	= KOMEDA_MODE_DISP0 | KOMEDA_MODE_DISP1,
+};
+
+/**
+ * struct komeda_dev
+ *
+ * Pipelines and components are used to describe how to handle the pixel data.
+ * komeda_dev describes the whole view of the device and its control
+ * abilities.
+ */
+struct komeda_dev {
+	/** @dev: the base device structure */
+	struct device *dev;
+	/** @reg_base: the base address of the komeda io space */
+	u32 __iomem *reg_base;
+	/** @dma_parms: the dma parameters of komeda */
+	struct device_dma_parameters dma_parms;
+
+	/** @chip: the basic chip information */
+	struct komeda_chip_info chip;
+	/** @fmt_tbl: initialized by &komeda_dev_funcs->init_format_table */
+	struct komeda_format_caps_table fmt_tbl;
+	/** @aclk: HW main engine clk */
+	struct clk *aclk;
+
+	/** @irq: irq number */
+	int irq;
+
+	/** @lock: used to protect dpmode */
+	struct mutex lock;
+	/** @dpmode: current display mode */
+	u32 dpmode;
+
+	/** @n_pipelines: the number of pipelines in @pipelines */
+	int n_pipelines;
+	/** @pipelines: the komeda pipelines */
+	struct komeda_pipeline *pipelines[KOMEDA_MAX_PIPELINES];
+
+	/** @funcs: chip funcs for accessing the HW */
+	const struct komeda_dev_funcs *funcs;
+	/**
+	 * @chip_data:
+	 *
+	 * chip data will be added by &komeda_dev_funcs.enum_resources() and
+	 * destroyed by &komeda_dev_funcs.cleanup()
+	 */
+	void *chip_data;
+
+	/** @iommu: iommu domain */
+	struct iommu_domain *iommu;
+
+	/** @debugfs_root: root directory of komeda debugfs */
+	struct dentry *debugfs_root;
+	/**
+	 * @err_verbosity: bitmask for how much extra info to print on error
+	 *
+	 * See KOMEDA_DEV_* macros for details. The low byte contains the
+	 * debug level categories, the high byte contains extra debug options.
+	 */
+	u16 err_verbosity;
+	/* Print a single line per error per frame with error events. */
+#define KOMEDA_DEV_PRINT_ERR_EVENTS BIT(0)
+	/* Print a single line per warning per frame with error events. */
+#define KOMEDA_DEV_PRINT_WARN_EVENTS BIT(1)
+	/* Print a single line per info event per frame with error events.
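+	 * These category bits may be combined, e.g. an err_verbosity of
+	 * (KOMEDA_DEV_PRINT_ERR_EVENTS | KOMEDA_DEV_PRINT_DUMP_STATE_ON_EVENT)
+	 * prints one line per error and also dumps the DRM state with it.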
*/ +#define KOMEDA_DEV_PRINT_INFO_EVENTS BIT(2) + /* Dump DRM state on an error or warning event. */ +#define KOMEDA_DEV_PRINT_DUMP_STATE_ON_EVENT BIT(8) + /* Disable rate limiting of event prints (normally one per commit) */ +#define KOMEDA_DEV_PRINT_DISABLE_RATELIMIT BIT(12) +}; + +static inline bool +komeda_product_match(struct komeda_dev *mdev, u32 target) +{ + return MALIDP_CORE_ID_PRODUCT_ID(mdev->chip.core_id) == target; +} + +typedef const struct komeda_dev_funcs * +(*komeda_identify_func)(u32 __iomem *reg, struct komeda_chip_info *chip); + +const struct komeda_dev_funcs * +d71_identify(u32 __iomem *reg, struct komeda_chip_info *chip); + +struct komeda_dev *komeda_dev_create(struct device *dev); +void komeda_dev_destroy(struct komeda_dev *mdev); + +struct komeda_dev *dev_to_mdev(struct device *dev); + +void komeda_print_events(struct komeda_events *evts, struct drm_device *dev); + +int komeda_dev_resume(struct komeda_dev *mdev); +int komeda_dev_suspend(struct komeda_dev *mdev); + +#endif /*_KOMEDA_DEV_H_*/ diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c new file mode 100644 index 000000000..e7933930a --- /dev/null +++ b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c @@ -0,0 +1,205 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * (C) COPYRIGHT 2018 ARM Limited. All rights reserved. + * Author: James.Qian.Wang <james.qian.wang@arm.com> + * + */ +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/platform_device.h> +#include <linux/component.h> +#include <linux/pm_runtime.h> +#include <drm/drm_of.h> +#include "komeda_dev.h" +#include "komeda_kms.h" + +struct komeda_drv { + struct komeda_dev *mdev; + struct komeda_kms_dev *kms; +}; + +struct komeda_dev *dev_to_mdev(struct device *dev) +{ + struct komeda_drv *mdrv = dev_get_drvdata(dev); + + return mdrv ? 
mdrv->mdev : NULL; +} + +static void komeda_unbind(struct device *dev) +{ + struct komeda_drv *mdrv = dev_get_drvdata(dev); + + if (!mdrv) + return; + + komeda_kms_detach(mdrv->kms); + + if (pm_runtime_enabled(dev)) + pm_runtime_disable(dev); + else + komeda_dev_suspend(mdrv->mdev); + + komeda_dev_destroy(mdrv->mdev); + + dev_set_drvdata(dev, NULL); + devm_kfree(dev, mdrv); +} + +static int komeda_bind(struct device *dev) +{ + struct komeda_drv *mdrv; + int err; + + mdrv = devm_kzalloc(dev, sizeof(*mdrv), GFP_KERNEL); + if (!mdrv) + return -ENOMEM; + + mdrv->mdev = komeda_dev_create(dev); + if (IS_ERR(mdrv->mdev)) { + err = PTR_ERR(mdrv->mdev); + goto free_mdrv; + } + + pm_runtime_enable(dev); + if (!pm_runtime_enabled(dev)) + komeda_dev_resume(mdrv->mdev); + + mdrv->kms = komeda_kms_attach(mdrv->mdev); + if (IS_ERR(mdrv->kms)) { + err = PTR_ERR(mdrv->kms); + goto destroy_mdev; + } + + dev_set_drvdata(dev, mdrv); + + return 0; + +destroy_mdev: + if (pm_runtime_enabled(dev)) + pm_runtime_disable(dev); + else + komeda_dev_suspend(mdrv->mdev); + + komeda_dev_destroy(mdrv->mdev); + +free_mdrv: + devm_kfree(dev, mdrv); + return err; +} + +static const struct component_master_ops komeda_master_ops = { + .bind = komeda_bind, + .unbind = komeda_unbind, +}; + +static int compare_of(struct device *dev, void *data) +{ + return dev->of_node == data; +} + +static void komeda_add_slave(struct device *master, + struct component_match **match, + struct device_node *np, + u32 port, u32 endpoint) +{ + struct device_node *remote; + + remote = of_graph_get_remote_node(np, port, endpoint); + if (remote) { + drm_of_component_match_add(master, match, compare_of, remote); + of_node_put(remote); + } +} + +static int komeda_platform_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct component_match *match = NULL; + struct device_node *child; + + if (!dev->of_node) + return -ENODEV; + + for_each_available_child_of_node(dev->of_node, child) { + if (of_node_cmp(child->name, "pipeline") != 0) + continue; + + /* add connector */ + komeda_add_slave(dev, &match, child, KOMEDA_OF_PORT_OUTPUT, 0); + komeda_add_slave(dev, &match, child, KOMEDA_OF_PORT_OUTPUT, 1); + } + + return component_master_add_with_match(dev, &komeda_master_ops, match); +} + +static int komeda_platform_remove(struct platform_device *pdev) +{ + component_master_del(&pdev->dev, &komeda_master_ops); + return 0; +} + +static const struct of_device_id komeda_of_match[] = { + { .compatible = "arm,mali-d71", .data = d71_identify, }, + { .compatible = "arm,mali-d32", .data = d71_identify, }, + {}, +}; + +MODULE_DEVICE_TABLE(of, komeda_of_match); + +static int __maybe_unused komeda_rt_pm_suspend(struct device *dev) +{ + struct komeda_drv *mdrv = dev_get_drvdata(dev); + + return komeda_dev_suspend(mdrv->mdev); +} + +static int __maybe_unused komeda_rt_pm_resume(struct device *dev) +{ + struct komeda_drv *mdrv = dev_get_drvdata(dev); + + return komeda_dev_resume(mdrv->mdev); +} + +static int __maybe_unused komeda_pm_suspend(struct device *dev) +{ + struct komeda_drv *mdrv = dev_get_drvdata(dev); + int res; + + res = drm_mode_config_helper_suspend(&mdrv->kms->base); + + if (!pm_runtime_status_suspended(dev)) + komeda_dev_suspend(mdrv->mdev); + + return res; +} + +static int __maybe_unused komeda_pm_resume(struct device *dev) +{ + struct komeda_drv *mdrv = dev_get_drvdata(dev); + + if (!pm_runtime_status_suspended(dev)) + komeda_dev_resume(mdrv->mdev); + + return drm_mode_config_helper_resume(&mdrv->kms->base); +} + +static const 
struct dev_pm_ops komeda_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(komeda_pm_suspend, komeda_pm_resume) + SET_RUNTIME_PM_OPS(komeda_rt_pm_suspend, komeda_rt_pm_resume, NULL) +}; + +static struct platform_driver komeda_platform_driver = { + .probe = komeda_platform_probe, + .remove = komeda_platform_remove, + .driver = { + .name = "komeda", + .of_match_table = komeda_of_match, + .pm = &komeda_pm_ops, + }, +}; + +module_platform_driver(komeda_platform_driver); + +MODULE_AUTHOR("James.Qian.Wang <james.qian.wang@arm.com>"); +MODULE_DESCRIPTION("Komeda KMS driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_event.c b/drivers/gpu/drm/arm/display/komeda/komeda_event.c new file mode 100644 index 000000000..53f944e66 --- /dev/null +++ b/drivers/gpu/drm/arm/display/komeda/komeda_event.c @@ -0,0 +1,158 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * (C) COPYRIGHT 2019 ARM Limited. All rights reserved. + * Author: James.Qian.Wang <james.qian.wang@arm.com> + * + */ +#include <drm/drm_atomic.h> +#include <drm/drm_print.h> + +#include "komeda_dev.h" + +struct komeda_str { + char *str; + u32 sz; + u32 len; +}; + +/* return 0 on success, < 0 on no space. + */ +__printf(2, 3) +static int komeda_sprintf(struct komeda_str *str, const char *fmt, ...) +{ + va_list args; + int num, free_sz; + int err; + + free_sz = str->sz - str->len - 1; + if (free_sz <= 0) + return -ENOSPC; + + va_start(args, fmt); + + num = vsnprintf(str->str + str->len, free_sz, fmt, args); + + va_end(args); + + if (num < free_sz) { + str->len += num; + err = 0; + } else { + str->len = str->sz - 1; + err = -ENOSPC; + } + + return err; +} + +static void evt_sprintf(struct komeda_str *str, u64 evt, const char *msg) +{ + if (evt) + komeda_sprintf(str, msg); +} + +static void evt_str(struct komeda_str *str, u64 events) +{ + if (events == 0ULL) { + komeda_sprintf(str, "None"); + return; + } + + evt_sprintf(str, events & KOMEDA_EVENT_VSYNC, "VSYNC|"); + evt_sprintf(str, events & KOMEDA_EVENT_FLIP, "FLIP|"); + evt_sprintf(str, events & KOMEDA_EVENT_EOW, "EOW|"); + evt_sprintf(str, events & KOMEDA_EVENT_MODE, "OP-MODE|"); + + evt_sprintf(str, events & KOMEDA_EVENT_URUN, "UNDERRUN|"); + evt_sprintf(str, events & KOMEDA_EVENT_OVR, "OVERRUN|"); + + /* GLB error */ + evt_sprintf(str, events & KOMEDA_ERR_MERR, "MERR|"); + evt_sprintf(str, events & KOMEDA_ERR_FRAMETO, "FRAMETO|"); + + /* DOU error */ + evt_sprintf(str, events & KOMEDA_ERR_DRIFTTO, "DRIFTTO|"); + evt_sprintf(str, events & KOMEDA_ERR_FRAMETO, "FRAMETO|"); + evt_sprintf(str, events & KOMEDA_ERR_TETO, "TETO|"); + evt_sprintf(str, events & KOMEDA_ERR_CSCE, "CSCE|"); + + /* LPU errors or events */ + evt_sprintf(str, events & KOMEDA_EVENT_IBSY, "IBSY|"); + evt_sprintf(str, events & KOMEDA_EVENT_EMPTY, "EMPTY|"); + evt_sprintf(str, events & KOMEDA_EVENT_FULL, "FULL|"); + evt_sprintf(str, events & KOMEDA_ERR_AXIE, "AXIE|"); + evt_sprintf(str, events & KOMEDA_ERR_ACE0, "ACE0|"); + evt_sprintf(str, events & KOMEDA_ERR_ACE1, "ACE1|"); + evt_sprintf(str, events & KOMEDA_ERR_ACE2, "ACE2|"); + evt_sprintf(str, events & KOMEDA_ERR_ACE3, "ACE3|"); + + /* LPU TBU errors*/ + evt_sprintf(str, events & KOMEDA_ERR_TCF, "TCF|"); + evt_sprintf(str, events & KOMEDA_ERR_TTNG, "TTNG|"); + evt_sprintf(str, events & KOMEDA_ERR_TITR, "TITR|"); + evt_sprintf(str, events & KOMEDA_ERR_TEMR, "TEMR|"); + evt_sprintf(str, events & KOMEDA_ERR_TTF, "TTF|"); + + /* CU errors*/ + evt_sprintf(str, events & KOMEDA_ERR_CPE, "COPROC|"); + evt_sprintf(str, events & KOMEDA_ERR_ZME, "ZME|"); + 
evt_sprintf(str, events & KOMEDA_ERR_CFGE, "CFGE|"); + evt_sprintf(str, events & KOMEDA_ERR_TEMR, "TEMR|"); + + if (str->len > 0 && (str->str[str->len - 1] == '|')) { + str->str[str->len - 1] = 0; + str->len--; + } +} + +static bool is_new_frame(struct komeda_events *a) +{ + return (a->pipes[0] | a->pipes[1]) & + (KOMEDA_EVENT_FLIP | KOMEDA_EVENT_EOW); +} + +void komeda_print_events(struct komeda_events *evts, struct drm_device *dev) +{ + u64 print_evts = 0; + static bool en_print = true; + struct komeda_dev *mdev = dev->dev_private; + u16 const err_verbosity = mdev->err_verbosity; + u64 evts_mask = evts->global | evts->pipes[0] | evts->pipes[1]; + + /* reduce the same msg print, only print the first evt for one frame */ + if (evts->global || is_new_frame(evts)) + en_print = true; + if (!(err_verbosity & KOMEDA_DEV_PRINT_DISABLE_RATELIMIT) && !en_print) + return; + + if (err_verbosity & KOMEDA_DEV_PRINT_ERR_EVENTS) + print_evts |= KOMEDA_ERR_EVENTS; + if (err_verbosity & KOMEDA_DEV_PRINT_WARN_EVENTS) + print_evts |= KOMEDA_WARN_EVENTS; + if (err_verbosity & KOMEDA_DEV_PRINT_INFO_EVENTS) + print_evts |= KOMEDA_INFO_EVENTS; + + if (evts_mask & print_evts) { + char msg[256]; + struct komeda_str str; + struct drm_printer p = drm_info_printer(dev->dev); + + str.str = msg; + str.sz = sizeof(msg); + str.len = 0; + + komeda_sprintf(&str, "gcu: "); + evt_str(&str, evts->global); + komeda_sprintf(&str, ", pipes[0]: "); + evt_str(&str, evts->pipes[0]); + komeda_sprintf(&str, ", pipes[1]: "); + evt_str(&str, evts->pipes[1]); + + DRM_ERROR("err detect: %s\n", msg); + if ((err_verbosity & KOMEDA_DEV_PRINT_DUMP_STATE_ON_EVENT) && + (evts_mask & (KOMEDA_ERR_EVENTS | KOMEDA_WARN_EVENTS))) + drm_state_dump(dev, &p); + + en_print = false; + } +} diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c b/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c new file mode 100644 index 000000000..c9a1edb9a --- /dev/null +++ b/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c @@ -0,0 +1,152 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * (C) COPYRIGHT 2018 ARM Limited. All rights reserved. + * Author: James.Qian.Wang <james.qian.wang@arm.com> + * + */ + +#include <linux/slab.h> +#include "komeda_format_caps.h" +#include "malidp_utils.h" + +const struct komeda_format_caps * +komeda_get_format_caps(struct komeda_format_caps_table *table, + u32 fourcc, u64 modifier) +{ + const struct komeda_format_caps *caps; + u64 afbc_features = modifier & ~(AFBC_FORMAT_MOD_BLOCK_SIZE_MASK); + u32 afbc_layout = modifier & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK; + int id; + + for (id = 0; id < table->n_formats; id++) { + caps = &table->format_caps[id]; + + if (fourcc != caps->fourcc) + continue; + + if ((modifier == 0ULL) && (caps->supported_afbc_layouts == 0)) + return caps; + + if (has_bits(afbc_features, caps->supported_afbc_features) && + has_bit(afbc_layout, caps->supported_afbc_layouts)) + return caps; + } + + return NULL; +} + +u32 komeda_get_afbc_format_bpp(const struct drm_format_info *info, u64 modifier) +{ + u32 bpp; + + switch (info->format) { + case DRM_FORMAT_YUV420_8BIT: + bpp = 12; + break; + case DRM_FORMAT_YUV420_10BIT: + bpp = 15; + break; + default: + bpp = info->cpp[0] * 8; + break; + } + + return bpp; +} + +/* Two assumptions + * 1. RGB always has YTR + * 2. 
Tiled RGB always has SC + */ +u64 komeda_supported_modifiers[] = { + /* AFBC_16x16 + features: YUV+RGB both */ + AFBC_16x16(0), + /* SPARSE */ + AFBC_16x16(_SPARSE), + /* YTR + (SPARSE) */ + AFBC_16x16(_YTR | _SPARSE), + AFBC_16x16(_YTR), + /* SPLIT + SPARSE + YTR RGB only */ + /* split mode is only allowed for sparse mode */ + AFBC_16x16(_SPLIT | _SPARSE | _YTR), + /* TILED + (SPARSE) */ + /* TILED YUV format only */ + AFBC_16x16(_TILED | _SPARSE), + AFBC_16x16(_TILED), + /* TILED + SC + (SPLIT+SPARSE | SPARSE) + (YTR) */ + AFBC_16x16(_TILED | _SC | _SPLIT | _SPARSE | _YTR), + AFBC_16x16(_TILED | _SC | _SPARSE | _YTR), + AFBC_16x16(_TILED | _SC | _YTR), + /* AFBC_32x8 + features: which are RGB formats only */ + /* YTR + (SPARSE) */ + AFBC_32x8(_YTR | _SPARSE), + AFBC_32x8(_YTR), + /* SPLIT + SPARSE + (YTR) */ + /* split mode is only allowed for sparse mode */ + AFBC_32x8(_SPLIT | _SPARSE | _YTR), + /* TILED + SC + (SPLIT+SPARSE | SPARSE) + YTR */ + AFBC_32x8(_TILED | _SC | _SPLIT | _SPARSE | _YTR), + AFBC_32x8(_TILED | _SC | _SPARSE | _YTR), + AFBC_32x8(_TILED | _SC | _YTR), + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_INVALID +}; + +bool komeda_format_mod_supported(struct komeda_format_caps_table *table, + u32 layer_type, u32 fourcc, u64 modifier, + u32 rot) +{ + const struct komeda_format_caps *caps; + + caps = komeda_get_format_caps(table, fourcc, modifier); + if (!caps) + return false; + + if (!(caps->supported_layer_types & layer_type)) + return false; + + if (table->format_mod_supported) + return table->format_mod_supported(caps, layer_type, modifier, + rot); + + return true; +} + +u32 *komeda_get_layer_fourcc_list(struct komeda_format_caps_table *table, + u32 layer_type, u32 *n_fmts) +{ + const struct komeda_format_caps *cap; + u32 *fmts; + int i, j, n = 0; + + fmts = kcalloc(table->n_formats, sizeof(u32), GFP_KERNEL); + if (!fmts) + return NULL; + + for (i = 0; i < table->n_formats; i++) { + cap = &table->format_caps[i]; + if (!(layer_type & cap->supported_layer_types) || + (cap->fourcc == 0)) + continue; + + /* one fourcc may has two caps items in table (afbc/none-afbc), + * so check the existing list to avoid adding a duplicated one. + */ + for (j = n - 1; j >= 0; j--) + if (fmts[j] == cap->fourcc) + break; + + if (j < 0) + fmts[n++] = cap->fourcc; + } + + if (n_fmts) + *n_fmts = n; + + return fmts; +} + +void komeda_put_fourcc_list(u32 *fourcc_list) +{ + kfree(fourcc_list); +} diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h b/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h new file mode 100644 index 000000000..32273cf18 --- /dev/null +++ b/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h @@ -0,0 +1,112 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * (C) COPYRIGHT 2018 ARM Limited. All rights reserved. 
+ * Author: James.Qian.Wang <james.qian.wang@arm.com>
+ *
+ */
+
+#ifndef _KOMEDA_FORMAT_CAPS_H_
+#define _KOMEDA_FORMAT_CAPS_H_
+
+#include <linux/types.h>
+#include <uapi/drm/drm_fourcc.h>
+#include <drm/drm_fourcc.h>
+
+#define AFBC(x)	DRM_FORMAT_MOD_ARM_AFBC(x)
+
+/* afbc layout */
+#define AFBC_16x16(x)	AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 | (x))
+#define AFBC_32x8(x)	AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 | (x))
+
+/* afbc features */
+#define _YTR	AFBC_FORMAT_MOD_YTR
+#define _SPLIT	AFBC_FORMAT_MOD_SPLIT
+#define _SPARSE	AFBC_FORMAT_MOD_SPARSE
+#define _CBR	AFBC_FORMAT_MOD_CBR
+#define _TILED	AFBC_FORMAT_MOD_TILED
+#define _SC	AFBC_FORMAT_MOD_SC
+
+/* layer_type */
+#define KOMEDA_FMT_RICH_LAYER		BIT(0)
+#define KOMEDA_FMT_SIMPLE_LAYER		BIT(1)
+#define KOMEDA_FMT_WB_LAYER		BIT(2)
+
+#define AFBC_TH_LAYOUT_ALIGNMENT	8
+#define AFBC_HEADER_SIZE		16
+#define AFBC_SUPERBLK_ALIGNMENT		128
+#define AFBC_SUPERBLK_PIXELS		256
+#define AFBC_BODY_START_ALIGNMENT	1024
+#define AFBC_TH_BODY_START_ALIGNMENT	4096
+
+/**
+ * struct komeda_format_caps
+ *
+ * komeda_format_caps describes the ARM display specific features and
+ * limitations of a specific format, and format_caps will be linked into
+ * &komeda_framebuffer as an extension of &drm_format_info.
+ *
+ * NOTE: one fourcc may have two different format_caps items, one for the
+ * plain fourcc and one for fourcc+modifier.
+ *
+ * @hw_id: hw format id, hw specific value.
+ * @fourcc: drm fourcc format.
+ * @supported_layer_types: indicates which layers support this format
+ * @supported_rots: allowed rotations for this format
+ * @supported_afbc_layouts: supported afbc layouts
+ * @supported_afbc_features: supported afbc features
+ */
+struct komeda_format_caps {
+	u32 hw_id;
+	u32 fourcc;
+	u32 supported_layer_types;
+	u32 supported_rots;
+	u32 supported_afbc_layouts;
+	u64 supported_afbc_features;
+};
+
+/**
+ * struct komeda_format_caps_table - format_caps manager
+ *
+ * @n_formats: the size of the format_caps list.
+ * @format_caps: format_caps list.
+ * @format_mod_supported: Optional. Some HW may have special requirements or
+ * limitations which cannot be described by format_caps; this function gives
+ * the HW the ability to do further HW-specific checks.
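+ *
+ * A minimal sketch of such a hook, purely illustrative and not the real
+ * D71 rule set (the restriction shown is hypothetical):
+ *
+ *	static bool
+ *	xxx_format_mod_supported(const struct komeda_format_caps *caps,
+ *				 u32 layer_type, u64 modifier, u32 rot)
+ *	{
+ *		// hypothetical rule: block-split AFBC cannot be rotated
+ *		if ((modifier & AFBC_FORMAT_MOD_SPLIT) &&
+ *		    ((rot & DRM_MODE_ROTATE_MASK) != DRM_MODE_ROTATE_0))
+ *			return false;
+ *		return true;
+ *	}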
+ */ +struct komeda_format_caps_table { + u32 n_formats; + const struct komeda_format_caps *format_caps; + bool (*format_mod_supported)(const struct komeda_format_caps *caps, + u32 layer_type, u64 modifier, u32 rot); +}; + +extern u64 komeda_supported_modifiers[]; + +static inline const char *komeda_get_format_name(u32 fourcc, u64 modifier) +{ + struct drm_format_name_buf buf; + static char name[64]; + + snprintf(name, sizeof(name), "%s with modifier: 0x%llx.", + drm_get_format_name(fourcc, &buf), modifier); + + return name; +} + +const struct komeda_format_caps * +komeda_get_format_caps(struct komeda_format_caps_table *table, + u32 fourcc, u64 modifier); + +u32 komeda_get_afbc_format_bpp(const struct drm_format_info *info, + u64 modifier); + +u32 *komeda_get_layer_fourcc_list(struct komeda_format_caps_table *table, + u32 layer_type, u32 *n_fmts); + +void komeda_put_fourcc_list(u32 *fourcc_list); + +bool komeda_format_mod_supported(struct komeda_format_caps_table *table, + u32 layer_type, u32 fourcc, u64 modifier, + u32 rot); + +#endif diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c new file mode 100644 index 000000000..170f9dc8e --- /dev/null +++ b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c @@ -0,0 +1,283 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * (C) COPYRIGHT 2018 ARM Limited. All rights reserved. + * Author: James.Qian.Wang <james.qian.wang@arm.com> + * + */ +#include <drm/drm_device.h> +#include <drm/drm_fb_cma_helper.h> +#include <drm/drm_gem.h> +#include <drm/drm_gem_cma_helper.h> +#include <drm/drm_gem_framebuffer_helper.h> + +#include "komeda_framebuffer.h" +#include "komeda_dev.h" + +static void komeda_fb_destroy(struct drm_framebuffer *fb) +{ + struct komeda_fb *kfb = to_kfb(fb); + u32 i; + + for (i = 0; i < fb->format->num_planes; i++) + drm_gem_object_put(fb->obj[i]); + + drm_framebuffer_cleanup(fb); + kfree(kfb); +} + +static int komeda_fb_create_handle(struct drm_framebuffer *fb, + struct drm_file *file, u32 *handle) +{ + return drm_gem_handle_create(file, fb->obj[0], handle); +} + +static const struct drm_framebuffer_funcs komeda_fb_funcs = { + .destroy = komeda_fb_destroy, + .create_handle = komeda_fb_create_handle, +}; + +static int +komeda_fb_afbc_size_check(struct komeda_fb *kfb, struct drm_file *file, + const struct drm_mode_fb_cmd2 *mode_cmd) +{ + struct drm_framebuffer *fb = &kfb->base; + const struct drm_format_info *info = fb->format; + struct drm_gem_object *obj; + u32 alignment_w = 0, alignment_h = 0, alignment_header, n_blocks, bpp; + u64 min_size; + + obj = drm_gem_object_lookup(file, mode_cmd->handles[0]); + if (!obj) { + DRM_DEBUG_KMS("Failed to lookup GEM object\n"); + return -ENOENT; + } + + switch (fb->modifier & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK) { + case AFBC_FORMAT_MOD_BLOCK_SIZE_32x8: + alignment_w = 32; + alignment_h = 8; + break; + case AFBC_FORMAT_MOD_BLOCK_SIZE_16x16: + alignment_w = 16; + alignment_h = 16; + break; + default: + WARN(1, "Invalid AFBC_FORMAT_MOD_BLOCK_SIZE: %lld.\n", + fb->modifier & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK); + break; + } + + /* tiled header afbc */ + if (fb->modifier & AFBC_FORMAT_MOD_TILED) { + alignment_w *= AFBC_TH_LAYOUT_ALIGNMENT; + alignment_h *= AFBC_TH_LAYOUT_ALIGNMENT; + alignment_header = AFBC_TH_BODY_START_ALIGNMENT; + } else { + alignment_header = AFBC_BODY_START_ALIGNMENT; + } + + kfb->aligned_w = ALIGN(fb->width, alignment_w); + kfb->aligned_h = ALIGN(fb->height, alignment_h); + + if (fb->offsets[0] % 
alignment_header) { + DRM_DEBUG_KMS("afbc offset alignment check failed.\n"); + goto check_failed; + } + + n_blocks = (kfb->aligned_w * kfb->aligned_h) / AFBC_SUPERBLK_PIXELS; + kfb->offset_payload = ALIGN(n_blocks * AFBC_HEADER_SIZE, + alignment_header); + + bpp = komeda_get_afbc_format_bpp(info, fb->modifier); + kfb->afbc_size = kfb->offset_payload + n_blocks * + ALIGN(bpp * AFBC_SUPERBLK_PIXELS / 8, + AFBC_SUPERBLK_ALIGNMENT); + min_size = kfb->afbc_size + fb->offsets[0]; + if (min_size > obj->size) { + DRM_DEBUG_KMS("afbc size check failed, obj_size: 0x%zx. min_size 0x%llx.\n", + obj->size, min_size); + goto check_failed; + } + + fb->obj[0] = obj; + return 0; + +check_failed: + drm_gem_object_put(obj); + return -EINVAL; +} + +static int +komeda_fb_none_afbc_size_check(struct komeda_dev *mdev, struct komeda_fb *kfb, + struct drm_file *file, + const struct drm_mode_fb_cmd2 *mode_cmd) +{ + struct drm_framebuffer *fb = &kfb->base; + const struct drm_format_info *info = fb->format; + struct drm_gem_object *obj; + u32 i, block_h; + u64 min_size; + + if (komeda_fb_check_src_coords(kfb, 0, 0, fb->width, fb->height)) + return -EINVAL; + + for (i = 0; i < info->num_planes; i++) { + obj = drm_gem_object_lookup(file, mode_cmd->handles[i]); + if (!obj) { + DRM_DEBUG_KMS("Failed to lookup GEM object\n"); + return -ENOENT; + } + fb->obj[i] = obj; + + block_h = drm_format_info_block_height(info, i); + if ((fb->pitches[i] * block_h) % mdev->chip.bus_width) { + DRM_DEBUG_KMS("Pitch[%d]: 0x%x doesn't align to 0x%x\n", + i, fb->pitches[i], mdev->chip.bus_width); + return -EINVAL; + } + + min_size = komeda_fb_get_pixel_addr(kfb, 0, fb->height, i) + - to_drm_gem_cma_obj(obj)->paddr; + if (obj->size < min_size) { + DRM_DEBUG_KMS("The fb->obj[%d] size: 0x%zx lower than the minimum requirement: 0x%llx.\n", + i, obj->size, min_size); + return -EINVAL; + } + } + + if (fb->format->num_planes == 3) { + if (fb->pitches[1] != fb->pitches[2]) { + DRM_DEBUG_KMS("The pitch[1] and [2] are not same\n"); + return -EINVAL; + } + } + + return 0; +} + +struct drm_framebuffer * +komeda_fb_create(struct drm_device *dev, struct drm_file *file, + const struct drm_mode_fb_cmd2 *mode_cmd) +{ + struct komeda_dev *mdev = dev->dev_private; + struct komeda_fb *kfb; + int ret = 0, i; + + kfb = kzalloc(sizeof(*kfb), GFP_KERNEL); + if (!kfb) + return ERR_PTR(-ENOMEM); + + kfb->format_caps = komeda_get_format_caps(&mdev->fmt_tbl, + mode_cmd->pixel_format, + mode_cmd->modifier[0]); + if (!kfb->format_caps) { + DRM_DEBUG_KMS("FMT %x is not supported.\n", + mode_cmd->pixel_format); + kfree(kfb); + return ERR_PTR(-EINVAL); + } + + drm_helper_mode_fill_fb_struct(dev, &kfb->base, mode_cmd); + + if (kfb->base.modifier) + ret = komeda_fb_afbc_size_check(kfb, file, mode_cmd); + else + ret = komeda_fb_none_afbc_size_check(mdev, kfb, file, mode_cmd); + if (ret < 0) + goto err_cleanup; + + ret = drm_framebuffer_init(dev, &kfb->base, &komeda_fb_funcs); + if (ret < 0) { + DRM_DEBUG_KMS("failed to initialize fb\n"); + + goto err_cleanup; + } + + kfb->is_va = mdev->iommu ? 
true : false; + + return &kfb->base; + +err_cleanup: + for (i = 0; i < kfb->base.format->num_planes; i++) + drm_gem_object_put(kfb->base.obj[i]); + + kfree(kfb); + return ERR_PTR(ret); +} + +int komeda_fb_check_src_coords(const struct komeda_fb *kfb, + u32 src_x, u32 src_y, u32 src_w, u32 src_h) +{ + const struct drm_framebuffer *fb = &kfb->base; + const struct drm_format_info *info = fb->format; + u32 block_w = drm_format_info_block_width(fb->format, 0); + u32 block_h = drm_format_info_block_height(fb->format, 0); + + if ((src_x + src_w > fb->width) || (src_y + src_h > fb->height)) { + DRM_DEBUG_ATOMIC("Invalid source coordinate.\n"); + return -EINVAL; + } + + if ((src_x % info->hsub) || (src_w % info->hsub) || + (src_y % info->vsub) || (src_h % info->vsub)) { + DRM_DEBUG_ATOMIC("Wrong subsampling dimension x:%d, y:%d, w:%d, h:%d for format: %x.\n", + src_x, src_y, src_w, src_h, info->format); + return -EINVAL; + } + + if ((src_x % block_w) || (src_w % block_w) || + (src_y % block_h) || (src_h % block_h)) { + DRM_DEBUG_ATOMIC("x:%d, y:%d, w:%d, h:%d should be multiple of block_w/h for format: %x.\n", + src_x, src_y, src_w, src_h, info->format); + return -EINVAL; + } + + return 0; +} + +dma_addr_t +komeda_fb_get_pixel_addr(struct komeda_fb *kfb, int x, int y, int plane) +{ + struct drm_framebuffer *fb = &kfb->base; + const struct drm_gem_cma_object *obj; + u32 offset, plane_x, plane_y, block_w, block_sz; + + if (plane >= fb->format->num_planes) { + DRM_DEBUG_KMS("Out of max plane num.\n"); + return -EINVAL; + } + + obj = drm_fb_cma_get_gem_obj(fb, plane); + + offset = fb->offsets[plane]; + if (!fb->modifier) { + block_w = drm_format_info_block_width(fb->format, plane); + block_sz = fb->format->char_per_block[plane]; + plane_x = x / (plane ? fb->format->hsub : 1); + plane_y = y / (plane ? fb->format->vsub : 1); + + offset += (plane_x / block_w) * block_sz + + plane_y * fb->pitches[plane]; + } + + return obj->paddr + offset; +} + +/* if the fb can be supported by a specific layer */ +bool komeda_fb_is_layer_supported(struct komeda_fb *kfb, u32 layer_type, + u32 rot) +{ + struct drm_framebuffer *fb = &kfb->base; + struct komeda_dev *mdev = fb->dev->dev_private; + u32 fourcc = fb->format->format; + u64 modifier = fb->modifier; + bool supported; + + supported = komeda_format_mod_supported(&mdev->fmt_tbl, layer_type, + fourcc, modifier, rot); + if (!supported) + DRM_DEBUG_ATOMIC("Layer TYPE: %d doesn't support fb FMT: %s.\n", + layer_type, komeda_get_format_name(fourcc, modifier)); + + return supported; +} diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.h b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.h new file mode 100644 index 000000000..c61ca98a3 --- /dev/null +++ b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * (C) COPYRIGHT 2018 ARM Limited. All rights reserved. 
+ * Author: James.Qian.Wang <james.qian.wang@arm.com> + * + */ +#ifndef _KOMEDA_FRAMEBUFFER_H_ +#define _KOMEDA_FRAMEBUFFER_H_ + +#include <drm/drm_framebuffer.h> +#include "komeda_format_caps.h" + +/** + * struct komeda_fb - Entending drm_framebuffer with komeda attribute + */ +struct komeda_fb { + /** @base: &drm_framebuffer */ + struct drm_framebuffer base; + /** + * @format_caps: + * extends drm_format_info for komeda specific information + */ + const struct komeda_format_caps *format_caps; + /** @is_va: if smmu is enabled, it will be true */ + bool is_va; + /** @aligned_w: aligned frame buffer width */ + u32 aligned_w; + /** @aligned_h: aligned frame buffer height */ + u32 aligned_h; + /** @afbc_size: minimum size of afbc */ + u32 afbc_size; + /** @offset_payload: start of afbc body buffer */ + u32 offset_payload; +}; + +#define to_kfb(dfb) container_of(dfb, struct komeda_fb, base) + +struct drm_framebuffer * +komeda_fb_create(struct drm_device *dev, struct drm_file *file, + const struct drm_mode_fb_cmd2 *mode_cmd); +int komeda_fb_check_src_coords(const struct komeda_fb *kfb, + u32 src_x, u32 src_y, u32 src_w, u32 src_h); +dma_addr_t +komeda_fb_get_pixel_addr(struct komeda_fb *kfb, int x, int y, int plane); +bool komeda_fb_is_layer_supported(struct komeda_fb *kfb, u32 layer_type, + u32 rot); + +#endif diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c new file mode 100644 index 000000000..1f6682032 --- /dev/null +++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c @@ -0,0 +1,337 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * (C) COPYRIGHT 2018 ARM Limited. All rights reserved. + * Author: James.Qian.Wang <james.qian.wang@arm.com> + * + */ +#include <linux/component.h> +#include <linux/interrupt.h> + +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_drv.h> +#include <drm/drm_fb_helper.h> +#include <drm/drm_gem_cma_helper.h> +#include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_irq.h> +#include <drm/drm_managed.h> +#include <drm/drm_probe_helper.h> +#include <drm/drm_vblank.h> + +#include "komeda_dev.h" +#include "komeda_framebuffer.h" +#include "komeda_kms.h" + +DEFINE_DRM_GEM_CMA_FOPS(komeda_cma_fops); + +static int komeda_gem_cma_dumb_create(struct drm_file *file, + struct drm_device *dev, + struct drm_mode_create_dumb *args) +{ + struct komeda_dev *mdev = dev->dev_private; + u32 pitch = DIV_ROUND_UP(args->width * args->bpp, 8); + + args->pitch = ALIGN(pitch, mdev->chip.bus_width); + + return drm_gem_cma_dumb_create_internal(file, dev, args); +} + +static irqreturn_t komeda_kms_irq_handler(int irq, void *data) +{ + struct drm_device *drm = data; + struct komeda_dev *mdev = drm->dev_private; + struct komeda_kms_dev *kms = to_kdev(drm); + struct komeda_events evts; + irqreturn_t status; + u32 i; + + /* Call into the CHIP to recognize events */ + memset(&evts, 0, sizeof(evts)); + status = mdev->funcs->irq_handler(mdev, &evts); + + komeda_print_events(&evts, drm); + + /* Notify the crtc to handle the events */ + for (i = 0; i < kms->n_crtcs; i++) + komeda_crtc_handle_event(&kms->crtcs[i], &evts); + + return status; +} + +static struct drm_driver komeda_kms_driver = { + .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, + .lastclose = drm_fb_helper_lastclose, + DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(komeda_gem_cma_dumb_create), + .fops = &komeda_cma_fops, + .name = "komeda", + .desc = "Arm Komeda Display Processor driver", + .date = "20181101", + .major = 0, + .minor = 
1,
+};
+
+static void komeda_kms_commit_tail(struct drm_atomic_state *old_state)
+{
+	struct drm_device *dev = old_state->dev;
+
+	drm_atomic_helper_commit_modeset_disables(dev, old_state);
+
+	drm_atomic_helper_commit_planes(dev, old_state,
+					DRM_PLANE_COMMIT_ACTIVE_ONLY);
+
+	drm_atomic_helper_commit_modeset_enables(dev, old_state);
+
+	drm_atomic_helper_wait_for_flip_done(dev, old_state);
+
+	drm_atomic_helper_commit_hw_done(old_state);
+
+	drm_atomic_helper_cleanup_planes(dev, old_state);
+}
+
+static const struct drm_mode_config_helper_funcs komeda_mode_config_helpers = {
+	.atomic_commit_tail = komeda_kms_commit_tail,
+};
+
+static int komeda_plane_state_list_add(struct drm_plane_state *plane_st,
+				       struct list_head *zorder_list)
+{
+	struct komeda_plane_state *new = to_kplane_st(plane_st);
+	struct komeda_plane_state *node, *last;
+
+	last = list_empty(zorder_list) ?
+	       NULL : list_last_entry(zorder_list, typeof(*last), zlist_node);
+
+	/* The list is kept in ascending zpos order, so if the list is empty
+	 * or the zpos of the new node is bigger than that of the last node,
+	 * there is no need to loop: just insert the new one at the tail.
+	 */
+	if (!last || (new->base.zpos > last->base.zpos)) {
+		list_add_tail(&new->zlist_node, zorder_list);
+		return 0;
+	}
+
+	/* Build the list by increasing zpos */
+	list_for_each_entry(node, zorder_list, zlist_node) {
+		if (new->base.zpos < node->base.zpos) {
+			list_add_tail(&new->zlist_node, &node->zlist_node);
+			break;
+		} else if (node->base.zpos == new->base.zpos) {
+			struct drm_plane *a = node->base.plane;
+			struct drm_plane *b = new->base.plane;
+
+			/* Komeda doesn't support two planes sharing the same
+			 * zpos.
+			 */
+			DRM_DEBUG_ATOMIC("PLANE: %s and PLANE: %s are configured with the same zpos: %d.\n",
+					 a->name, b->name, node->base.zpos);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int komeda_crtc_normalize_zpos(struct drm_crtc *crtc,
+				      struct drm_crtc_state *crtc_st)
+{
+	struct drm_atomic_state *state = crtc_st->state;
+	struct komeda_crtc *kcrtc = to_kcrtc(crtc);
+	struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(crtc_st);
+	struct komeda_plane_state *kplane_st;
+	struct drm_plane_state *plane_st;
+	struct drm_plane *plane;
+	struct list_head zorder_list;
+	int order = 0, err;
+
+	DRM_DEBUG_ATOMIC("[CRTC:%d:%s] calculating normalized zpos values\n",
+			 crtc->base.id, crtc->name);
+
+	INIT_LIST_HEAD(&zorder_list);
+
+	/* This loop also adds all affected planes to the new state */
+	drm_for_each_plane_mask(plane, crtc->dev, crtc_st->plane_mask) {
+		plane_st = drm_atomic_get_plane_state(state, plane);
+		if (IS_ERR(plane_st))
+			return PTR_ERR(plane_st);
+
+		/* Build a list by increasing zpos */
+		err = komeda_plane_state_list_add(plane_st, &zorder_list);
+		if (err)
+			return err;
+	}
+
+	kcrtc_st->max_slave_zorder = 0;
+
+	list_for_each_entry(kplane_st, &zorder_list, zlist_node) {
+		plane_st = &kplane_st->base;
+		plane = plane_st->plane;
+
+		plane_st->normalized_zpos = order++;
+		/* When layer_split has been enabled, one plane is handled
+		 * by two separate komeda layers (left/right), which may need
+		 * two zorders.
+		 * - zorder: for the left layer for the left display part.
+		 * - zorder + 1: reserved for the right layer.
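+		 * For example, planes A (layer_split on) and B with zpos 0/1
+		 * end up with normalized zpos A-left:0, A-right:1 (reserved),
+		 * B:2.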
+ */ + if (to_kplane_st(plane_st)->layer_split) + order++; + + DRM_DEBUG_ATOMIC("[PLANE:%d:%s] zpos:%d, normalized zpos: %d\n", + plane->base.id, plane->name, + plane_st->zpos, plane_st->normalized_zpos); + + /* calculate max slave zorder */ + if (has_bit(drm_plane_index(plane), kcrtc->slave_planes)) + kcrtc_st->max_slave_zorder = + max(plane_st->normalized_zpos, + kcrtc_st->max_slave_zorder); + } + + crtc_st->zpos_changed = true; + + return 0; +} + +static int komeda_kms_check(struct drm_device *dev, + struct drm_atomic_state *state) +{ + struct drm_crtc *crtc; + struct drm_crtc_state *new_crtc_st; + int i, err; + + err = drm_atomic_helper_check_modeset(dev, state); + if (err) + return err; + + /* Komeda need to re-calculate resource assumption in every commit + * so need to add all affected_planes (even unchanged) to + * drm_atomic_state. + */ + for_each_new_crtc_in_state(state, crtc, new_crtc_st, i) { + err = drm_atomic_add_affected_planes(state, crtc); + if (err) + return err; + + err = komeda_crtc_normalize_zpos(crtc, new_crtc_st); + if (err) + return err; + } + + err = drm_atomic_helper_check_planes(dev, state); + if (err) + return err; + + return 0; +} + +static const struct drm_mode_config_funcs komeda_mode_config_funcs = { + .fb_create = komeda_fb_create, + .atomic_check = komeda_kms_check, + .atomic_commit = drm_atomic_helper_commit, +}; + +static void komeda_kms_mode_config_init(struct komeda_kms_dev *kms, + struct komeda_dev *mdev) +{ + struct drm_mode_config *config = &kms->base.mode_config; + + drm_mode_config_init(&kms->base); + + komeda_kms_setup_crtcs(kms, mdev); + + /* Get value from dev */ + config->min_width = 0; + config->min_height = 0; + config->max_width = 4096; + config->max_height = 4096; + config->allow_fb_modifiers = true; + + config->funcs = &komeda_mode_config_funcs; + config->helper_private = &komeda_mode_config_helpers; +} + +struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev) +{ + struct komeda_kms_dev *kms; + struct drm_device *drm; + int err; + + kms = devm_drm_dev_alloc(mdev->dev, &komeda_kms_driver, + struct komeda_kms_dev, base); + if (IS_ERR(kms)) + return kms; + + drm = &kms->base; + + drm->dev_private = mdev; + + komeda_kms_mode_config_init(kms, mdev); + + err = komeda_kms_add_private_objs(kms, mdev); + if (err) + goto cleanup_mode_config; + + err = komeda_kms_add_planes(kms, mdev); + if (err) + goto cleanup_mode_config; + + err = drm_vblank_init(drm, kms->n_crtcs); + if (err) + goto cleanup_mode_config; + + err = komeda_kms_add_crtcs(kms, mdev); + if (err) + goto cleanup_mode_config; + + err = komeda_kms_add_wb_connectors(kms, mdev); + if (err) + goto cleanup_mode_config; + + err = component_bind_all(mdev->dev, kms); + if (err) + goto cleanup_mode_config; + + drm_mode_config_reset(drm); + + err = devm_request_irq(drm->dev, mdev->irq, + komeda_kms_irq_handler, IRQF_SHARED, + drm->driver->name, drm); + if (err) + goto free_component_binding; + + drm->irq_enabled = true; + + drm_kms_helper_poll_init(drm); + + err = drm_dev_register(drm, 0); + if (err) + goto free_interrupts; + + return kms; + +free_interrupts: + drm_kms_helper_poll_fini(drm); + drm->irq_enabled = false; +free_component_binding: + component_unbind_all(mdev->dev, drm); +cleanup_mode_config: + drm_mode_config_cleanup(drm); + komeda_kms_cleanup_private_objs(kms); + drm->dev_private = NULL; + return ERR_PTR(err); +} + +void komeda_kms_detach(struct komeda_kms_dev *kms) +{ + struct drm_device *drm = &kms->base; + struct komeda_dev *mdev = drm->dev_private; + + 
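+	/*
+	 * Tear down in the reverse order of komeda_kms_attach(): unregister
+	 * the device first so userspace can no longer submit requests,
+	 * quiesce the HW via the atomic shutdown, then unbind the components
+	 * and release the mode config.
+	 */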
drm_dev_unregister(drm); + drm_kms_helper_poll_fini(drm); + drm_atomic_helper_shutdown(drm); + drm->irq_enabled = false; + component_unbind_all(mdev->dev, drm); + drm_mode_config_cleanup(drm); + komeda_kms_cleanup_private_objs(kms); + drm->dev_private = NULL; +} diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.h b/drivers/gpu/drm/arm/display/komeda/komeda_kms.h new file mode 100644 index 000000000..456f3c435 --- /dev/null +++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.h @@ -0,0 +1,189 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * (C) COPYRIGHT 2018 ARM Limited. All rights reserved. + * Author: James.Qian.Wang <james.qian.wang@arm.com> + * + */ +#ifndef _KOMEDA_KMS_H_ +#define _KOMEDA_KMS_H_ + +#include <linux/list.h> +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_device.h> +#include <drm/drm_writeback.h> +#include <drm/drm_print.h> + +/** + * struct komeda_plane - komeda instance of drm_plane + */ +struct komeda_plane { + /** @base: &drm_plane */ + struct drm_plane base; + /** + * @layer: + * + * represents available layer input pipelines for this plane. + * + * NOTE: + * the layer is not for a specific Layer, but indicate a group of + * Layers with same capabilities. + */ + struct komeda_layer *layer; +}; + +/** + * struct komeda_plane_state + * + * The plane_state can be split into two data flow (left/right) and handled + * by two layers &komeda_plane.layer and &komeda_plane.layer.right + */ +struct komeda_plane_state { + /** @base: &drm_plane_state */ + struct drm_plane_state base; + /** @zlist_node: zorder list node */ + struct list_head zlist_node; + + /** @layer_split: on/off layer_split */ + u8 layer_split : 1; +}; + +/** + * struct komeda_wb_connector + */ +struct komeda_wb_connector { + /** @base: &drm_writeback_connector */ + struct drm_writeback_connector base; + + /** @wb_layer: represents associated writeback pipeline of komeda */ + struct komeda_layer *wb_layer; +}; + +/** + * struct komeda_crtc + */ +struct komeda_crtc { + /** @base: &drm_crtc */ + struct drm_crtc base; + /** @master: only master has display output */ + struct komeda_pipeline *master; + /** + * @slave: optional + * + * Doesn't have its own display output, the handled data flow will + * merge into the master. 
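+	 * A slave is discovered at pipeline assemble time: if the master's
+	 * compiz accepts the slave's compiz output as one of its inputs,
+	 * the two pipelines are paired (see komeda_pipeline_get_slave()).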
+ */ + struct komeda_pipeline *slave; + + /** @slave_planes: komeda slave planes mask */ + u32 slave_planes; + + /** @wb_conn: komeda write back connector */ + struct komeda_wb_connector *wb_conn; + + /** @disable_done: this flip_done is for tracing the disable */ + struct completion *disable_done; +}; + +/** + * struct komeda_crtc_state + */ +struct komeda_crtc_state { + /** @base: &drm_crtc_state */ + struct drm_crtc_state base; + + /* private properties */ + + /* computed state which are used by validate/check */ + /** + * @affected_pipes: + * the affected pipelines in once display instance + */ + u32 affected_pipes; + /** + * @active_pipes: + * the active pipelines in once display instance + */ + u32 active_pipes; + + /** @clock_ratio: ratio of (aclk << 32)/pxlclk */ + u64 clock_ratio; + + /** @max_slave_zorder: the maximum of slave zorder */ + u32 max_slave_zorder; +}; + +/** struct komeda_kms_dev - for gather KMS related things */ +struct komeda_kms_dev { + /** @base: &drm_device */ + struct drm_device base; + + /** @n_crtcs: valid numbers of crtcs in &komeda_kms_dev.crtcs */ + int n_crtcs; + /** @crtcs: crtcs list */ + struct komeda_crtc crtcs[KOMEDA_MAX_PIPELINES]; +}; + +#define to_kplane(p) container_of(p, struct komeda_plane, base) +#define to_kplane_st(p) container_of(p, struct komeda_plane_state, base) +#define to_kconn(p) container_of(p, struct komeda_wb_connector, base) +#define to_kcrtc(p) container_of(p, struct komeda_crtc, base) +#define to_kcrtc_st(p) container_of(p, struct komeda_crtc_state, base) +#define to_kdev(p) container_of(p, struct komeda_kms_dev, base) +#define to_wb_conn(x) container_of(x, struct drm_writeback_connector, base) + +static inline bool is_writeback_only(struct drm_crtc_state *st) +{ + struct komeda_wb_connector *wb_conn = to_kcrtc(st->crtc)->wb_conn; + struct drm_connector *conn = wb_conn ? 
&wb_conn->base.base : NULL; + + return conn && (st->connector_mask == BIT(drm_connector_index(conn))); +} + +static inline bool +is_only_changed_connector(struct drm_crtc_state *st, struct drm_connector *conn) +{ + struct drm_crtc_state *old_st; + u32 changed_connectors; + + old_st = drm_atomic_get_old_crtc_state(st->state, st->crtc); + changed_connectors = st->connector_mask ^ old_st->connector_mask; + + return BIT(drm_connector_index(conn)) == changed_connectors; +} + +static inline bool has_flip_h(u32 rot) +{ + u32 rotation = drm_rotation_simplify(rot, + DRM_MODE_ROTATE_0 | + DRM_MODE_ROTATE_90 | + DRM_MODE_REFLECT_MASK); + + if (rotation & DRM_MODE_ROTATE_90) + return !!(rotation & DRM_MODE_REFLECT_Y); + else + return !!(rotation & DRM_MODE_REFLECT_X); +} + +void komeda_crtc_get_color_config(struct drm_crtc_state *crtc_st, + u32 *color_depths, u32 *color_formats); +unsigned long komeda_crtc_get_aclk(struct komeda_crtc_state *kcrtc_st); + +int komeda_kms_setup_crtcs(struct komeda_kms_dev *kms, struct komeda_dev *mdev); + +int komeda_kms_add_crtcs(struct komeda_kms_dev *kms, struct komeda_dev *mdev); +int komeda_kms_add_planes(struct komeda_kms_dev *kms, struct komeda_dev *mdev); +int komeda_kms_add_private_objs(struct komeda_kms_dev *kms, + struct komeda_dev *mdev); +int komeda_kms_add_wb_connectors(struct komeda_kms_dev *kms, + struct komeda_dev *mdev); +void komeda_kms_cleanup_private_objs(struct komeda_kms_dev *kms); + +void komeda_crtc_handle_event(struct komeda_crtc *kcrtc, + struct komeda_events *evts); + +struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev); +void komeda_kms_detach(struct komeda_kms_dev *kms); + +#endif /*_KOMEDA_KMS_H_*/ diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c new file mode 100644 index 000000000..79a633984 --- /dev/null +++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c @@ -0,0 +1,376 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * (C) COPYRIGHT 2018 ARM Limited. All rights reserved. 
+ * Author: James.Qian.Wang <james.qian.wang@arm.com> + * + */ +#include <drm/drm_print.h> + +#include "komeda_dev.h" +#include "komeda_pipeline.h" + +/** komeda_pipeline_add - Add a pipeline to &komeda_dev */ +struct komeda_pipeline * +komeda_pipeline_add(struct komeda_dev *mdev, size_t size, + const struct komeda_pipeline_funcs *funcs) +{ + struct komeda_pipeline *pipe; + + if (mdev->n_pipelines + 1 > KOMEDA_MAX_PIPELINES) { + DRM_ERROR("Exceed max support %d pipelines.\n", + KOMEDA_MAX_PIPELINES); + return ERR_PTR(-ENOSPC); + } + + if (size < sizeof(*pipe)) { + DRM_ERROR("Request pipeline size too small.\n"); + return ERR_PTR(-EINVAL); + } + + pipe = devm_kzalloc(mdev->dev, size, GFP_KERNEL); + if (!pipe) + return ERR_PTR(-ENOMEM); + + pipe->mdev = mdev; + pipe->id = mdev->n_pipelines; + pipe->funcs = funcs; + + mdev->pipelines[mdev->n_pipelines] = pipe; + mdev->n_pipelines++; + + return pipe; +} + +void komeda_pipeline_destroy(struct komeda_dev *mdev, + struct komeda_pipeline *pipe) +{ + struct komeda_component *c; + int i; + unsigned long avail_comps = pipe->avail_comps; + + for_each_set_bit(i, &avail_comps, 32) { + c = komeda_pipeline_get_component(pipe, i); + komeda_component_destroy(mdev, c); + } + + clk_put(pipe->pxlclk); + + of_node_put(pipe->of_output_links[0]); + of_node_put(pipe->of_output_links[1]); + of_node_put(pipe->of_output_port); + of_node_put(pipe->of_node); + + devm_kfree(mdev->dev, pipe); +} + +static struct komeda_component ** +komeda_pipeline_get_component_pos(struct komeda_pipeline *pipe, int id) +{ + struct komeda_dev *mdev = pipe->mdev; + struct komeda_pipeline *temp = NULL; + struct komeda_component **pos = NULL; + + switch (id) { + case KOMEDA_COMPONENT_LAYER0: + case KOMEDA_COMPONENT_LAYER1: + case KOMEDA_COMPONENT_LAYER2: + case KOMEDA_COMPONENT_LAYER3: + pos = to_cpos(pipe->layers[id - KOMEDA_COMPONENT_LAYER0]); + break; + case KOMEDA_COMPONENT_WB_LAYER: + pos = to_cpos(pipe->wb_layer); + break; + case KOMEDA_COMPONENT_COMPIZ0: + case KOMEDA_COMPONENT_COMPIZ1: + temp = mdev->pipelines[id - KOMEDA_COMPONENT_COMPIZ0]; + if (!temp) { + DRM_ERROR("compiz-%d doesn't exist.\n", id); + return NULL; + } + pos = to_cpos(temp->compiz); + break; + case KOMEDA_COMPONENT_SCALER0: + case KOMEDA_COMPONENT_SCALER1: + pos = to_cpos(pipe->scalers[id - KOMEDA_COMPONENT_SCALER0]); + break; + case KOMEDA_COMPONENT_SPLITTER: + pos = to_cpos(pipe->splitter); + break; + case KOMEDA_COMPONENT_MERGER: + pos = to_cpos(pipe->merger); + break; + case KOMEDA_COMPONENT_IPS0: + case KOMEDA_COMPONENT_IPS1: + temp = mdev->pipelines[id - KOMEDA_COMPONENT_IPS0]; + if (!temp) { + DRM_ERROR("ips-%d doesn't exist.\n", id); + return NULL; + } + pos = to_cpos(temp->improc); + break; + case KOMEDA_COMPONENT_TIMING_CTRLR: + pos = to_cpos(pipe->ctrlr); + break; + default: + pos = NULL; + DRM_ERROR("Unknown pipeline resource ID: %d.\n", id); + break; + } + + return pos; +} + +struct komeda_component * +komeda_pipeline_get_component(struct komeda_pipeline *pipe, int id) +{ + struct komeda_component **pos = NULL; + struct komeda_component *c = NULL; + + pos = komeda_pipeline_get_component_pos(pipe, id); + if (pos) + c = *pos; + + return c; +} + +struct komeda_component * +komeda_pipeline_get_first_component(struct komeda_pipeline *pipe, + u32 comp_mask) +{ + struct komeda_component *c = NULL; + int id; + + id = find_first_bit((unsigned long *)&comp_mask, 32); + if (id < 32) + c = komeda_pipeline_get_component(pipe, id); + + return c; +} + +static struct komeda_component * 
+komeda_component_pickup_input(struct komeda_component *c, u32 avail_comps) +{ + u32 avail_inputs = c->supported_inputs & (avail_comps); + + return komeda_pipeline_get_first_component(c->pipeline, avail_inputs); +} + +/** komeda_component_add - Add a component to &komeda_pipeline */ +struct komeda_component * +komeda_component_add(struct komeda_pipeline *pipe, + size_t comp_sz, u32 id, u32 hw_id, + const struct komeda_component_funcs *funcs, + u8 max_active_inputs, u32 supported_inputs, + u8 max_active_outputs, u32 __iomem *reg, + const char *name_fmt, ...) +{ + struct komeda_component **pos; + struct komeda_component *c; + int idx, *num = NULL; + + if (max_active_inputs > KOMEDA_COMPONENT_N_INPUTS) { + WARN(1, "please large KOMEDA_COMPONENT_N_INPUTS to %d.\n", + max_active_inputs); + return ERR_PTR(-ENOSPC); + } + + pos = komeda_pipeline_get_component_pos(pipe, id); + if (!pos || (*pos)) + return ERR_PTR(-EINVAL); + + if (has_bit(id, KOMEDA_PIPELINE_LAYERS)) { + idx = id - KOMEDA_COMPONENT_LAYER0; + num = &pipe->n_layers; + if (idx != pipe->n_layers) { + DRM_ERROR("please add Layer by id sequence.\n"); + return ERR_PTR(-EINVAL); + } + } else if (has_bit(id, KOMEDA_PIPELINE_SCALERS)) { + idx = id - KOMEDA_COMPONENT_SCALER0; + num = &pipe->n_scalers; + if (idx != pipe->n_scalers) { + DRM_ERROR("please add Scaler by id sequence.\n"); + return ERR_PTR(-EINVAL); + } + } + + c = devm_kzalloc(pipe->mdev->dev, comp_sz, GFP_KERNEL); + if (!c) + return ERR_PTR(-ENOMEM); + + c->id = id; + c->hw_id = hw_id; + c->reg = reg; + c->pipeline = pipe; + c->max_active_inputs = max_active_inputs; + c->max_active_outputs = max_active_outputs; + c->supported_inputs = supported_inputs; + c->funcs = funcs; + + if (name_fmt) { + va_list args; + + va_start(args, name_fmt); + vsnprintf(c->name, sizeof(c->name), name_fmt, args); + va_end(args); + } + + if (num) + *num = *num + 1; + + pipe->avail_comps |= BIT(c->id); + *pos = c; + + return c; +} + +void komeda_component_destroy(struct komeda_dev *mdev, + struct komeda_component *c) +{ + devm_kfree(mdev->dev, c); +} + +static void komeda_component_dump(struct komeda_component *c) +{ + if (!c) + return; + + DRM_DEBUG(" %s: ID %d-0x%08lx.\n", + c->name, c->id, BIT(c->id)); + DRM_DEBUG(" max_active_inputs:%d, supported_inputs: 0x%08x.\n", + c->max_active_inputs, c->supported_inputs); + DRM_DEBUG(" max_active_outputs:%d, supported_outputs: 0x%08x.\n", + c->max_active_outputs, c->supported_outputs); +} + +static void komeda_pipeline_dump(struct komeda_pipeline *pipe) +{ + struct komeda_component *c; + int id; + unsigned long avail_comps = pipe->avail_comps; + + DRM_INFO("Pipeline-%d: n_layers: %d, n_scalers: %d, output: %s.\n", + pipe->id, pipe->n_layers, pipe->n_scalers, + pipe->dual_link ? "dual-link" : "single-link"); + DRM_INFO(" output_link[0]: %s.\n", + pipe->of_output_links[0] ? + pipe->of_output_links[0]->full_name : "none"); + DRM_INFO(" output_link[1]: %s.\n", + pipe->of_output_links[1] ? 
+ pipe->of_output_links[1]->full_name : "none"); + + for_each_set_bit(id, &avail_comps, 32) { + c = komeda_pipeline_get_component(pipe, id); + + komeda_component_dump(c); + } +} + +static void komeda_component_verify_inputs(struct komeda_component *c) +{ + struct komeda_pipeline *pipe = c->pipeline; + struct komeda_component *input; + int id; + unsigned long supported_inputs = c->supported_inputs; + + for_each_set_bit(id, &supported_inputs, 32) { + input = komeda_pipeline_get_component(pipe, id); + if (!input) { + c->supported_inputs &= ~(BIT(id)); + DRM_WARN("Can not find input(ID-%d) for component: %s.\n", + id, c->name); + continue; + } + + input->supported_outputs |= BIT(c->id); + } +} + +static struct komeda_layer * +komeda_get_layer_split_right_layer(struct komeda_pipeline *pipe, + struct komeda_layer *left) +{ + int index = left->base.id - KOMEDA_COMPONENT_LAYER0; + int i; + + for (i = index + 1; i < pipe->n_layers; i++) + if (left->layer_type == pipe->layers[i]->layer_type) + return pipe->layers[i]; + return NULL; +} + +static void komeda_pipeline_assemble(struct komeda_pipeline *pipe) +{ + struct komeda_component *c; + struct komeda_layer *layer; + int i, id; + unsigned long avail_comps = pipe->avail_comps; + + for_each_set_bit(id, &avail_comps, 32) { + c = komeda_pipeline_get_component(pipe, id); + komeda_component_verify_inputs(c); + } + /* calculate right layer for the layer split */ + for (i = 0; i < pipe->n_layers; i++) { + layer = pipe->layers[i]; + + layer->right = komeda_get_layer_split_right_layer(pipe, layer); + } + + if (pipe->dual_link && !pipe->ctrlr->supports_dual_link) { + pipe->dual_link = false; + DRM_WARN("PIPE-%d doesn't support dual-link, ignore DT dual-link configuration.\n", + pipe->id); + } +} + +/* if pipeline_A accept another pipeline_B's component as input, treat + * pipeline_B as slave of pipeline_A. + */ +struct komeda_pipeline * +komeda_pipeline_get_slave(struct komeda_pipeline *master) +{ + struct komeda_component *slave; + + slave = komeda_component_pickup_input(&master->compiz->base, + KOMEDA_PIPELINE_COMPIZS); + + return slave ? slave->pipeline : NULL; +} + +int komeda_assemble_pipelines(struct komeda_dev *mdev) +{ + struct komeda_pipeline *pipe; + int i; + + for (i = 0; i < mdev->n_pipelines; i++) { + pipe = mdev->pipelines[i]; + + komeda_pipeline_assemble(pipe); + komeda_pipeline_dump(pipe); + } + + return 0; +} + +void komeda_pipeline_dump_register(struct komeda_pipeline *pipe, + struct seq_file *sf) +{ + struct komeda_component *c; + u32 id; + unsigned long avail_comps; + + seq_printf(sf, "\n======== Pipeline-%d ==========\n", pipe->id); + + if (pipe->funcs && pipe->funcs->dump_register) + pipe->funcs->dump_register(pipe, sf); + + avail_comps = pipe->avail_comps; + for_each_set_bit(id, &avail_comps, 32) { + c = komeda_pipeline_get_component(pipe, id); + + seq_printf(sf, "\n------%s------\n", c->name); + if (c->funcs->dump_register) + c->funcs->dump_register(c, sf); + } +} diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h new file mode 100644 index 000000000..ac8725e24 --- /dev/null +++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h @@ -0,0 +1,565 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * (C) COPYRIGHT 2018 ARM Limited. All rights reserved. 
+ * Author: James.Qian.Wang <james.qian.wang@arm.com>
+ *
+ */
+#ifndef _KOMEDA_PIPELINE_H_
+#define _KOMEDA_PIPELINE_H_
+
+#include <linux/types.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include "malidp_utils.h"
+#include "komeda_color_mgmt.h"
+
+#define KOMEDA_MAX_PIPELINES 2
+#define KOMEDA_PIPELINE_MAX_LAYERS 4
+#define KOMEDA_PIPELINE_MAX_SCALERS 2
+#define KOMEDA_COMPONENT_N_INPUTS 5
+
+/* pipeline component IDs */
+enum {
+ KOMEDA_COMPONENT_LAYER0 = 0,
+ KOMEDA_COMPONENT_LAYER1 = 1,
+ KOMEDA_COMPONENT_LAYER2 = 2,
+ KOMEDA_COMPONENT_LAYER3 = 3,
+ KOMEDA_COMPONENT_WB_LAYER = 7, /* write back layer */
+ KOMEDA_COMPONENT_SCALER0 = 8,
+ KOMEDA_COMPONENT_SCALER1 = 9,
+ KOMEDA_COMPONENT_SPLITTER = 12,
+ KOMEDA_COMPONENT_MERGER = 14,
+ KOMEDA_COMPONENT_COMPIZ0 = 16, /* compositor */
+ KOMEDA_COMPONENT_COMPIZ1 = 17,
+ KOMEDA_COMPONENT_IPS0 = 20, /* post image processor */
+ KOMEDA_COMPONENT_IPS1 = 21,
+ KOMEDA_COMPONENT_TIMING_CTRLR = 22, /* timing controller */
+};
+
+#define KOMEDA_PIPELINE_LAYERS (BIT(KOMEDA_COMPONENT_LAYER0) |\
+ BIT(KOMEDA_COMPONENT_LAYER1) |\
+ BIT(KOMEDA_COMPONENT_LAYER2) |\
+ BIT(KOMEDA_COMPONENT_LAYER3))
+
+#define KOMEDA_PIPELINE_SCALERS (BIT(KOMEDA_COMPONENT_SCALER0) |\
+ BIT(KOMEDA_COMPONENT_SCALER1))
+
+#define KOMEDA_PIPELINE_COMPIZS (BIT(KOMEDA_COMPONENT_COMPIZ0) |\
+ BIT(KOMEDA_COMPONENT_COMPIZ1))
+
+#define KOMEDA_PIPELINE_IMPROCS (BIT(KOMEDA_COMPONENT_IPS0) |\
+ BIT(KOMEDA_COMPONENT_IPS1))
+struct komeda_component;
+struct komeda_component_state;
+
+/** komeda_component_funcs - component control functions */
+struct komeda_component_funcs {
+ /** @validate: optional,
+ * a component may have special requirements or limitations; this
+ * function gives the HW the ability to do further HW-specific checks.
+ */
+ int (*validate)(struct komeda_component *c,
+ struct komeda_component_state *state);
+ /** @update: update is an active update */
+ void (*update)(struct komeda_component *c,
+ struct komeda_component_state *state);
+ /** @disable: disable component */
+ void (*disable)(struct komeda_component *c);
+ /** @dump_register: Optional, dump registers to seq_file */
+ void (*dump_register)(struct komeda_component *c, struct seq_file *seq);
+};
+
+/**
+ * struct komeda_component
+ *
+ * struct komeda_component describes the data flow capabilities of a component
+ * and how it can be linked into the display pipeline.
+ * All specific components are subclasses of this structure.
+ */
+struct komeda_component {
+ /** @obj: treat component as private obj */
+ struct drm_private_obj obj;
+ /** @pipeline: the komeda pipeline this component belongs to */
+ struct komeda_pipeline *pipeline;
+ /** @name: component name */
+ char name[32];
+ /**
+ * @reg:
+ * component register base,
+ * which is initialized by the chip and used by the chip only
+ */
+ u32 __iomem *reg;
+ /** @id: component id */
+ u32 id;
+ /**
+ * @hw_id: component hw id,
+ * which is initialized by the chip and used by the chip only
+ */
+ u32 hw_id;
+
+ /**
+ * @max_active_inputs:
+ * @max_active_outputs:
+ *
+ * the maximum number of inputs/outputs that can be active at the
+ * same time.
+ * Note:
+ * this isn't the number of bits set in @supported_inputs or
+ * @supported_outputs, and may be less than it, since a component may
+ * not support enabling all of its @supported_inputs/outputs at the
+ * same time.
+ */
+ u8 max_active_inputs;
+ /** @max_active_outputs: maximum number of outputs */
+ u8 max_active_outputs;
+ /**
+ * @supported_inputs:
+ * @supported_outputs:
+ *
+ * bitmask of BIT(component->id) for the supported inputs/outputs,
+ * describes the possibilities of how a component is linked into a
+ * pipeline.
+ */
+ u32 supported_inputs;
+ /** @supported_outputs: bitmask of supported output component ids */
+ u32 supported_outputs;
+
+ /**
+ * @funcs: chip functions to access HW
+ */
+ const struct komeda_component_funcs *funcs;
+};
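To make the mask-based linkage concrete, here is a minimal, hypothetical sketch (not taken from the driver) of how a chip backend might register a scaler that accepts any layer as input, using the komeda_component_add() API declared later in this header; scaler_hw_id, my_scaler_funcs and scaler_reg are invented placeholders:

    /* illustrative only: a scaler fed by any of the four layers, with at
     * most one active input and one active output at a time
     */
    c = komeda_component_add(pipe, sizeof(struct komeda_scaler),
                             KOMEDA_COMPONENT_SCALER0, scaler_hw_id,
                             &my_scaler_funcs,
                             1, KOMEDA_PIPELINE_LAYERS,
                             1, scaler_reg,
                             "SCALER%d", pipe->id);
    if (IS_ERR(c))
        return PTR_ERR(c);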
+
+/**
+ * struct komeda_component_output
+ *
+ * a component may have multiple outputs, so when tracking where the data
+ * comes from, knowing the component alone is not enough; we also need to
+ * know its output port
+ */
+struct komeda_component_output {
+ /** @component: indicate which component the data comes from */
+ struct komeda_component *component;
+ /**
+ * @output_port:
+ * the output port of the &komeda_component_output.component
+ */
+ u8 output_port;
+};
+
+/**
+ * struct komeda_component_state
+ *
+ * component_state is the data flow configuration of the component, and it's
+ * the superclass of all specific component states like @komeda_layer_state,
+ * @komeda_scaler_state
+ */
+struct komeda_component_state {
+ /** @obj: tracking component_state by drm_atomic_state */
+ struct drm_private_state obj;
+ /** @component: backpointer to the component */
+ struct komeda_component *component;
+ /**
+ * @binding_user:
+ * the currently bound user; the user can be @crtc, @plane or @wb_conn,
+ * and which one is valid is decided by @component and @inputs
+ *
+ * - Layer: its user is always a plane.
+ * - compiz/improc/timing_ctrlr: the user is a crtc.
+ * - wb_layer: a wb_conn;
+ * - scaler: a plane when the input is a layer, a wb_conn if the
+ *   input is compiz.
+ */
+ union {
+ /** @crtc: backpointer for user crtc */
+ struct drm_crtc *crtc;
+ /** @plane: backpointer for user plane */
+ struct drm_plane *plane;
+ /** @wb_conn: backpointer for user wb_connector */
+ struct drm_connector *wb_conn;
+ void *binding_user;
+ };
+
+ /**
+ * @active_inputs:
+ *
+ * active_inputs is a bitmask of @inputs indexes
+ *
+ * - active_inputs = changed_active_inputs | unchanged_active_inputs
+ * - affected_inputs = old->active_inputs | new->active_inputs;
+ * - disabling_inputs = affected_inputs ^ active_inputs;
+ * - changed_inputs = disabling_inputs | changed_active_inputs;
+ *
+ * NOTE:
+ * changed_inputs doesn't include all active_inputs but only the
+ * @changed_active_inputs, and this bitmask can be used at the chip
+ * level for dirty updates.
+ */
+ u16 active_inputs;
+ /** @changed_active_inputs: bitmask of the changed @active_inputs */
+ u16 changed_active_inputs;
+ /** @affected_inputs: bitmask for affected @inputs */
+ u16 affected_inputs;
+ /**
+ * @inputs:
+ *
+ * the specific inputs[i] is only valid when BIT(i) has been set in
+ * @active_inputs; if not, inputs[i] is undefined.
+ */
+ struct komeda_component_output inputs[KOMEDA_COMPONENT_N_INPUTS];
+};
+
+static inline u16 component_disabling_inputs(struct komeda_component_state *st)
+{
+ return st->affected_inputs ^ st->active_inputs;
+}
+
+static inline u16 component_changed_inputs(struct komeda_component_state *st)
+{
+ return component_disabling_inputs(st) | st->changed_active_inputs;
+}
+
+#define for_each_changed_input(st, i) \
+ for ((i) = 0; (i) < (st)->component->max_active_inputs; (i)++) \
+ if (has_bit((i), component_changed_inputs(st)))
+
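As a sanity check of the bitmask algebra documented above, here is a small self-contained userspace sketch with made-up masks (not driver code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* old state used inputs 0 and 1; the new state uses inputs 1 and
         * 2, where input 2 was newly enabled ("changed active")
         */
        uint16_t old_active = 0x3, new_active = 0x6, changed_active = 0x4;

        uint16_t affected = old_active | new_active;   /* 0x7 */
        uint16_t disabling = affected ^ new_active;    /* 0x1: input 0 */
        uint16_t changed = disabling | changed_active; /* 0x5 */

        printf("affected=0x%x disabling=0x%x changed=0x%x\n",
               affected, disabling, changed);
        return 0;
    }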
+#define to_comp(__c) (((__c) == NULL) ? NULL : &((__c)->base))
+#define to_cpos(__c) ((struct komeda_component **)&(__c))
+
+struct komeda_layer {
+ struct komeda_component base;
+ /* accepted h/v input range before rotation */
+ struct malidp_range hsize_in, vsize_in;
+ u32 layer_type; /* RICH, SIMPLE or WB */
+ u32 line_sz;
+ u32 yuv_line_sz; /* maximum line size for YUV422 and YUV420 */
+ u32 supported_rots;
+ /* komeda supports layer split, which splits a whole image into two
+ * parts (left and right) and handles them with two individual layer
+ * processors.
+ * Note: left/right always refer to the final display rect, not the
+ * source buffer.
+ */
+ struct komeda_layer *right;
+};
+
+struct komeda_layer_state {
+ struct komeda_component_state base;
+ /* layer specific configuration state */
+ u16 hsize, vsize;
+ u32 rot;
+ u16 afbc_crop_l;
+ u16 afbc_crop_r;
+ u16 afbc_crop_t;
+ u16 afbc_crop_b;
+ dma_addr_t addr[3];
+};
+
+struct komeda_scaler {
+ struct komeda_component base;
+ struct malidp_range hsize, vsize;
+ u32 max_upscaling;
+ u32 max_downscaling;
+ u8 scaling_split_overlap; /* split overlap for scaling */
+ u8 enh_split_overlap; /* split overlap for image enhancement */
+};
+
+struct komeda_scaler_state {
+ struct komeda_component_state base;
+ u16 hsize_in, vsize_in;
+ u16 hsize_out, vsize_out;
+ u16 total_hsize_in, total_vsize_in;
+ u16 total_hsize_out; /* total_xxxx are the sizes before split */
+ u16 left_crop, right_crop;
+ u8 en_scaling : 1,
+ en_alpha : 1, /* enable alpha processing */
+ en_img_enhancement : 1,
+ en_split : 1,
+ right_part : 1; /* right part of split image */
+};
+
+struct komeda_compiz {
+ struct komeda_component base;
+ struct malidp_range hsize, vsize;
+};
+
+struct komeda_compiz_input_cfg {
+ u16 hsize, vsize;
+ u16 hoffset, voffset;
+ u8 pixel_blend_mode, layer_alpha;
+};
+
+struct komeda_compiz_state {
+ struct komeda_component_state base;
+ /* composition size */
+ u16 hsize, vsize;
+ struct komeda_compiz_input_cfg cins[KOMEDA_COMPONENT_N_INPUTS];
+};
+
+struct komeda_merger {
+ struct komeda_component base;
+ struct malidp_range hsize_merged;
+ struct malidp_range vsize_merged;
+};
+
+struct komeda_merger_state {
+ struct komeda_component_state base;
+ u16 hsize_merged;
+ u16 vsize_merged;
+};
+
+struct komeda_splitter {
+ struct komeda_component base;
+ struct malidp_range hsize, vsize;
+};
+
+struct komeda_splitter_state {
+ struct komeda_component_state base;
+ u16 hsize, vsize;
+ u16 overlap;
+};
+
+struct komeda_improc {
+ struct komeda_component base;
+ u32 supported_color_formats; /* DRM_RGB/YUV444/YUV420 */
+ u32 supported_color_depths; /* BIT(8) | BIT(10) */
+ u8 supports_degamma : 1;
+ u8 supports_csc : 1;
+ u8 supports_gamma : 1;
+};
+
+struct komeda_improc_state {
+ struct komeda_component_state base;
+ u8 color_format, color_depth;
+ u16 hsize, vsize;
+ u32 fgamma_coeffs[KOMEDA_N_GAMMA_COEFFS];
+ u32 ctm_coeffs[KOMEDA_N_CTM_COEFFS];
+};
+
+/* display timing controller */
+struct komeda_timing_ctrlr {
+ struct komeda_component base;
+ u8 supports_dual_link : 1;
+};
+
+struct komeda_timing_ctrlr_state {
+ struct komeda_component_state base;
+};
+
+/* Why define a separate structure instead of using plane_state directly?
+ * 1. Komeda supports layer_split, which means one plane_state can be split
+ * and handled by two layers, each layer handling only half of the plane
+ * image.
+ * 2. To fix up the user properties according to the HW's capabilities, e.g.
+ * the user sets rotation to R180 but the HW only supports REFLECT_X+Y;
+ * the rot here is the rotation after drm_rotation_simplify()
+ */
+struct komeda_data_flow_cfg {
+ struct komeda_component_output input;
+ u16 in_x, in_y, in_w, in_h;
+ u32 out_x, out_y, out_w, out_h;
+ u16 total_in_h, total_in_w;
+ u16 total_out_w;
+ u16 left_crop, right_crop, overlap;
+ u32 rot;
+ int blending_zorder;
+ u8 pixel_blend_mode, layer_alpha;
+ u8 en_scaling : 1,
+ en_img_enhancement : 1,
+ en_split : 1,
+ is_yuv : 1,
+ right_part : 1; /* right part of display image if split enabled */
+};
+
+struct komeda_pipeline_funcs {
+ /* check if the aclk (main engine clock) can satisfy the clock
+ * requirements of the downscaling specified by dflow
+ */
+ int (*downscaling_clk_check)(struct komeda_pipeline *pipe,
+ struct drm_display_mode *mode,
+ unsigned long aclk_rate,
+ struct komeda_data_flow_cfg *dflow);
+ /* dump_register: Optional, dump registers to seq_file */
+ void (*dump_register)(struct komeda_pipeline *pipe,
+ struct seq_file *sf);
+};
+
+/**
+ * struct komeda_pipeline
+ *
+ * Represents a complete display pipeline and holds all functional components.
+ */
+struct komeda_pipeline {
+ /** @obj: link pipeline as private obj of drm_atomic_state */
+ struct drm_private_obj obj;
+ /** @mdev: the parent komeda_dev */
+ struct komeda_dev *mdev;
+ /** @pxlclk: pixel clock */
+ struct clk *pxlclk;
+ /** @id: pipeline id */
+ int id;
+ /** @avail_comps: available components mask of pipeline */
+ u32 avail_comps;
+ /**
+ * @standalone_disabled_comps:
+ *
+ * When disabling the pipeline, some components cannot be disabled
+ * together with the others, but need a separate and standalone
+ * disable. The standalone_disabled_comps are the components which
+ * need to be disabled on their own, and this introduces the concept
+ * of a two-phase disable:
+ * phase 1: disable the common components.
+ * phase 2: disable the standalone_disabled_comps.
+ */
+ u32 standalone_disabled_comps;
+ /** @n_layers: the number of layers in @layers */
+ int n_layers;
+ /** @layers: the pipeline layers */
+ struct komeda_layer *layers[KOMEDA_PIPELINE_MAX_LAYERS];
+ /** @n_scalers: the number of scalers in @scalers */
+ int n_scalers;
+ /** @scalers: the pipeline scalers */
+ struct komeda_scaler *scalers[KOMEDA_PIPELINE_MAX_SCALERS];
+ /** @compiz: compositor */
+ struct komeda_compiz *compiz;
+ /** @splitter: for splitting the compiz output into two half data flows */
+ struct komeda_splitter *splitter;
+ /** @merger: merger */
+ struct komeda_merger *merger;
+ /** @wb_layer: writeback layer */
+ struct komeda_layer *wb_layer;
+ /** @improc: post image processor */
+ struct komeda_improc *improc;
+ /** @ctrlr: timing controller */
+ struct komeda_timing_ctrlr *ctrlr;
+ /** @funcs: chip private pipeline functions */
+ const struct komeda_pipeline_funcs *funcs;
+
+ /** @of_node: pipeline dt node */
+ struct device_node *of_node;
+ /** @of_output_port: pipeline output port */
+ struct device_node *of_output_port;
+ /** @of_output_links: output connector device nodes */
+ struct device_node *of_output_links[2];
+ /** @dual_link: true if of_output_links[0] and [1] are both valid */
+ bool dual_link;
+};
+
+/**
+ * struct komeda_pipeline_state
+ *
+ * NOTE:
+ * Unlike the pipeline, the pipeline_state doesn't gather any component_state
+ * into it, because all component states are managed by drm_atomic_state.
+ */ +struct komeda_pipeline_state { + /** @obj: tracking pipeline_state by drm_atomic_state */ + struct drm_private_state obj; + /** @pipe: backpointer to the pipeline */ + struct komeda_pipeline *pipe; + /** @crtc: currently bound crtc */ + struct drm_crtc *crtc; + /** + * @active_comps: + * + * bitmask - BIT(component->id) of active components + */ + u32 active_comps; +}; + +#define to_layer(c) container_of(c, struct komeda_layer, base) +#define to_compiz(c) container_of(c, struct komeda_compiz, base) +#define to_scaler(c) container_of(c, struct komeda_scaler, base) +#define to_splitter(c) container_of(c, struct komeda_splitter, base) +#define to_merger(c) container_of(c, struct komeda_merger, base) +#define to_improc(c) container_of(c, struct komeda_improc, base) +#define to_ctrlr(c) container_of(c, struct komeda_timing_ctrlr, base) + +#define to_layer_st(c) container_of(c, struct komeda_layer_state, base) +#define to_compiz_st(c) container_of(c, struct komeda_compiz_state, base) +#define to_scaler_st(c) container_of(c, struct komeda_scaler_state, base) +#define to_splitter_st(c) container_of(c, struct komeda_splitter_state, base) +#define to_merger_st(c) container_of(c, struct komeda_merger_state, base) +#define to_improc_st(c) container_of(c, struct komeda_improc_state, base) +#define to_ctrlr_st(c) container_of(c, struct komeda_timing_ctrlr_state, base) + +#define priv_to_comp_st(o) container_of(o, struct komeda_component_state, obj) +#define priv_to_pipe_st(o) container_of(o, struct komeda_pipeline_state, obj) + +/* pipeline APIs */ +struct komeda_pipeline * +komeda_pipeline_add(struct komeda_dev *mdev, size_t size, + const struct komeda_pipeline_funcs *funcs); +void komeda_pipeline_destroy(struct komeda_dev *mdev, + struct komeda_pipeline *pipe); +struct komeda_pipeline * +komeda_pipeline_get_slave(struct komeda_pipeline *master); +int komeda_assemble_pipelines(struct komeda_dev *mdev); +struct komeda_component * +komeda_pipeline_get_component(struct komeda_pipeline *pipe, int id); +struct komeda_component * +komeda_pipeline_get_first_component(struct komeda_pipeline *pipe, + u32 comp_mask); + +void komeda_pipeline_dump_register(struct komeda_pipeline *pipe, + struct seq_file *sf); + +/* component APIs */ +extern __printf(10, 11) +struct komeda_component * +komeda_component_add(struct komeda_pipeline *pipe, + size_t comp_sz, u32 id, u32 hw_id, + const struct komeda_component_funcs *funcs, + u8 max_active_inputs, u32 supported_inputs, + u8 max_active_outputs, u32 __iomem *reg, + const char *name_fmt, ...); + +void komeda_component_destroy(struct komeda_dev *mdev, + struct komeda_component *c); + +static inline struct komeda_component * +komeda_component_pickup_output(struct komeda_component *c, u32 avail_comps) +{ + u32 avail_inputs = c->supported_outputs & (avail_comps); + + return komeda_pipeline_get_first_component(c->pipeline, avail_inputs); +} + +struct komeda_plane_state; +struct komeda_crtc_state; +struct komeda_crtc; + +void pipeline_composition_size(struct komeda_crtc_state *kcrtc_st, + u16 *hsize, u16 *vsize); + +int komeda_build_layer_data_flow(struct komeda_layer *layer, + struct komeda_plane_state *kplane_st, + struct komeda_crtc_state *kcrtc_st, + struct komeda_data_flow_cfg *dflow); +int komeda_build_wb_data_flow(struct komeda_layer *wb_layer, + struct drm_connector_state *conn_st, + struct komeda_crtc_state *kcrtc_st, + struct komeda_data_flow_cfg *dflow); +int komeda_build_display_data_flow(struct komeda_crtc *kcrtc, + struct komeda_crtc_state *kcrtc_st); + +int 
komeda_build_layer_split_data_flow(struct komeda_layer *left, + struct komeda_plane_state *kplane_st, + struct komeda_crtc_state *kcrtc_st, + struct komeda_data_flow_cfg *dflow); +int komeda_build_wb_split_data_flow(struct komeda_layer *wb_layer, + struct drm_connector_state *conn_st, + struct komeda_crtc_state *kcrtc_st, + struct komeda_data_flow_cfg *dflow); + +int komeda_release_unclaimed_resources(struct komeda_pipeline *pipe, + struct komeda_crtc_state *kcrtc_st); + +struct komeda_pipeline_state * +komeda_pipeline_get_old_state(struct komeda_pipeline *pipe, + struct drm_atomic_state *state); +bool komeda_pipeline_disable(struct komeda_pipeline *pipe, + struct drm_atomic_state *old_state); +void komeda_pipeline_update(struct komeda_pipeline *pipe, + struct drm_atomic_state *old_state); + +void komeda_complete_data_flow_cfg(struct komeda_layer *layer, + struct komeda_data_flow_cfg *dflow, + struct drm_framebuffer *fb); + +#endif /* _KOMEDA_PIPELINE_H_*/ diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c new file mode 100644 index 000000000..1e922703e --- /dev/null +++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c @@ -0,0 +1,1356 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * (C) COPYRIGHT 2018 ARM Limited. All rights reserved. + * Author: James.Qian.Wang <james.qian.wang@arm.com> + * + */ + +#include <drm/drm_print.h> +#include <linux/clk.h> +#include "komeda_dev.h" +#include "komeda_kms.h" +#include "komeda_pipeline.h" +#include "komeda_framebuffer.h" + +static inline bool is_switching_user(void *old, void *new) +{ + if (!old || !new) + return false; + + return old != new; +} + +static struct komeda_pipeline_state * +komeda_pipeline_get_state(struct komeda_pipeline *pipe, + struct drm_atomic_state *state) +{ + struct drm_private_state *priv_st; + + priv_st = drm_atomic_get_private_obj_state(state, &pipe->obj); + if (IS_ERR(priv_st)) + return ERR_CAST(priv_st); + + return priv_to_pipe_st(priv_st); +} + +struct komeda_pipeline_state * +komeda_pipeline_get_old_state(struct komeda_pipeline *pipe, + struct drm_atomic_state *state) +{ + struct drm_private_state *priv_st; + + priv_st = drm_atomic_get_old_private_obj_state(state, &pipe->obj); + if (priv_st) + return priv_to_pipe_st(priv_st); + return NULL; +} + +static struct komeda_pipeline_state * +komeda_pipeline_get_new_state(struct komeda_pipeline *pipe, + struct drm_atomic_state *state) +{ + struct drm_private_state *priv_st; + + priv_st = drm_atomic_get_new_private_obj_state(state, &pipe->obj); + if (priv_st) + return priv_to_pipe_st(priv_st); + return NULL; +} + +/* Assign pipeline for crtc */ +static struct komeda_pipeline_state * +komeda_pipeline_get_state_and_set_crtc(struct komeda_pipeline *pipe, + struct drm_atomic_state *state, + struct drm_crtc *crtc) +{ + struct komeda_pipeline_state *st; + + st = komeda_pipeline_get_state(pipe, state); + if (IS_ERR(st)) + return st; + + if (is_switching_user(crtc, st->crtc)) { + DRM_DEBUG_ATOMIC("CRTC%d required pipeline%d is busy.\n", + drm_crtc_index(crtc), pipe->id); + return ERR_PTR(-EBUSY); + } + + /* pipeline only can be disabled when the it is free or unused */ + if (!crtc && st->active_comps) { + DRM_DEBUG_ATOMIC("Disabling a busy pipeline:%d.\n", pipe->id); + return ERR_PTR(-EBUSY); + } + + st->crtc = crtc; + + if (crtc) { + struct komeda_crtc_state *kcrtc_st; + + kcrtc_st = to_kcrtc_st(drm_atomic_get_new_crtc_state(state, + crtc)); + + kcrtc_st->active_pipes |= BIT(pipe->id); + 
kcrtc_st->affected_pipes |= BIT(pipe->id);
+ }
+ return st;
+}
+
+static struct komeda_component_state *
+komeda_component_get_state(struct komeda_component *c,
+ struct drm_atomic_state *state)
+{
+ struct drm_private_state *priv_st;
+
+ WARN_ON(!drm_modeset_is_locked(&c->pipeline->obj.lock));
+
+ priv_st = drm_atomic_get_private_obj_state(state, &c->obj);
+ if (IS_ERR(priv_st))
+ return ERR_CAST(priv_st);
+
+ return priv_to_comp_st(priv_st);
+}
+
+static struct komeda_component_state *
+komeda_component_get_old_state(struct komeda_component *c,
+ struct drm_atomic_state *state)
+{
+ struct drm_private_state *priv_st;
+
+ priv_st = drm_atomic_get_old_private_obj_state(state, &c->obj);
+ if (priv_st)
+ return priv_to_comp_st(priv_st);
+ return NULL;
+}
+
+/**
+ * komeda_component_get_state_and_set_user()
+ *
+ * @c: component to get state and set user
+ * @state: global atomic state
+ * @user: direct user, the binding user
+ * @crtc: the CRTC user, the big boss :)
+ *
+ * This function accepts two users:
+ * - The direct user: a plane/crtc/wb_connector, depending on the component
+ * - The big boss (CRTC)
+ * The CRTC is the big boss (the final user), because all component resources
+ * will eventually be assigned to a CRTC; e.g. a layer is bound to a
+ * kms_plane, but the kms plane is eventually bound to a CRTC.
+ *
+ * The big boss (CRTC) is for pipeline assignment: a &komeda_component isn't
+ * independent and cannot be assigned to a CRTC freely, but belongs to a
+ * specific pipeline; only a pipeline can be shared between CRTCs, and a
+ * pipeline as a whole (including all its internal components) is assigned
+ * to a specific CRTC.
+ *
+ * So when setting a user on a komeda_component, first check the status of
+ * component->pipeline to see if the pipeline is available on this specific
+ * CRTC. If the pipeline is busy (assigned to another CRTC), then even if
+ * the required component is free, it still cannot be assigned to the direct
+ * user.
+ */
+static struct komeda_component_state *
+komeda_component_get_state_and_set_user(struct komeda_component *c,
+ struct drm_atomic_state *state,
+ void *user,
+ struct drm_crtc *crtc)
+{
+ struct komeda_pipeline_state *pipe_st;
+ struct komeda_component_state *st;
+
+ /* First check if the pipeline is available */
+ pipe_st = komeda_pipeline_get_state_and_set_crtc(c->pipeline,
+ state, crtc);
+ if (IS_ERR(pipe_st))
+ return ERR_CAST(pipe_st);
+
+ st = komeda_component_get_state(c, state);
+ if (IS_ERR(st))
+ return st;
+
+ /* check if the component has been occupied */
+ if (is_switching_user(user, st->binding_user)) {
+ DRM_DEBUG_ATOMIC("required %s is busy.\n", c->name);
+ return ERR_PTR(-EBUSY);
+ }
+
+ st->binding_user = user;
+ /* mark the component as active if user is valid */
+ if (st->binding_user)
+ pipe_st->active_comps |= BIT(c->id);
+
+ return st;
+}
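To illustrate the two-user rule, a hypothetical sequence (layer, wb_layer, plane, wb_conn, crtc0 and crtc1 are invented names, not driver code):

    /* a plane commit on crtc0: the layer is bound to the plane, and the
     * whole pipeline is claimed for crtc0
     */
    st = komeda_component_get_state_and_set_user(&layer->base,
                                                 state, plane, crtc0);

    /* a later attempt to claim a component of the same pipeline for a
     * different CRTC fails, even though that component itself is free
     */
    st = komeda_component_get_state_and_set_user(&wb_layer->base,
                                                 state, wb_conn, crtc1);
    if (IS_ERR(st)) /* -EBUSY: the pipeline already belongs to crtc0 */
        return PTR_ERR(st);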
+
+static void
+komeda_component_add_input(struct komeda_component_state *state,
+ struct komeda_component_output *input,
+ int idx)
+{
+ struct komeda_component *c = state->component;
+
+ WARN_ON((idx < 0 || idx >= c->max_active_inputs));
+
+ /* inputs[i] is only valid while it is active. So if input[i] is a
+ * newly enabled input which switches from disabled to enabled, the
+ * old inputs[i] is undefined (NOT zeroed); we cannot rely on memcmp,
+ * but must directly mark it changed
+ */
+ if (!has_bit(idx, state->affected_inputs) ||
+ memcmp(&state->inputs[idx], input, sizeof(*input))) {
+ memcpy(&state->inputs[idx], input, sizeof(*input));
+ state->changed_active_inputs |= BIT(idx);
+ }
+ state->active_inputs |= BIT(idx);
+ state->affected_inputs |= BIT(idx);
+}
+
+static int
+komeda_component_check_input(struct komeda_component_state *state,
+ struct komeda_component_output *input,
+ int idx)
+{
+ struct komeda_component *c = state->component;
+
+ if ((idx < 0) || (idx >= c->max_active_inputs)) {
+ DRM_DEBUG_ATOMIC("%s required an invalid %s-input[%d].\n",
+ input->component->name, c->name, idx);
+ return -EINVAL;
+ }
+
+ if (has_bit(idx, state->active_inputs)) {
+ DRM_DEBUG_ATOMIC("%s required %s-input[%d] has been occupied already.\n",
+ input->component->name, c->name, idx);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+komeda_component_set_output(struct komeda_component_output *output,
+ struct komeda_component *comp,
+ u8 output_port)
+{
+ output->component = comp;
+ output->output_port = output_port;
+}
+
+static int
+komeda_component_validate_private(struct komeda_component *c,
+ struct komeda_component_state *st)
+{
+ int err;
+
+ if (!c->funcs->validate)
+ return 0;
+
+ err = c->funcs->validate(c, st);
+ if (err)
+ DRM_DEBUG_ATOMIC("%s validate private failed.\n", c->name);
+
+ return err;
+}
+
+/* Get a currently available scaler from component->supported_outputs */
+static struct komeda_scaler *
+komeda_component_get_avail_scaler(struct komeda_component *c,
+ struct drm_atomic_state *state)
+{
+ struct komeda_pipeline_state *pipe_st;
+ u32 avail_scalers;
+
+ pipe_st = komeda_pipeline_get_state(c->pipeline, state);
+ if (!pipe_st)
+ return NULL;
+
+ avail_scalers = (pipe_st->active_comps & KOMEDA_PIPELINE_SCALERS) ^
+ KOMEDA_PIPELINE_SCALERS;
+
+ c = komeda_component_pickup_output(c, avail_scalers);
+
+ return to_scaler(c);
+}
+
+static void
+komeda_rotate_data_flow(struct komeda_data_flow_cfg *dflow, u32 rot)
+{
+ if (drm_rotation_90_or_270(rot)) {
+ swap(dflow->in_h, dflow->in_w);
+ swap(dflow->total_in_h, dflow->total_in_w);
+ }
+}
+
+static int
+komeda_layer_check_cfg(struct komeda_layer *layer,
+ struct komeda_fb *kfb,
+ struct komeda_data_flow_cfg *dflow)
+{
+ u32 src_x, src_y, src_w, src_h;
+ u32 line_sz, max_line_sz;
+
+ if (!komeda_fb_is_layer_supported(kfb, layer->layer_type, dflow->rot))
+ return -EINVAL;
+
+ if (layer->base.id == KOMEDA_COMPONENT_WB_LAYER) {
+ src_x = dflow->out_x;
+ src_y = dflow->out_y;
+ src_w = dflow->out_w;
+ src_h = dflow->out_h;
+ } else {
+ src_x = dflow->in_x;
+ src_y = dflow->in_y;
+ src_w = dflow->in_w;
+ src_h = dflow->in_h;
+ }
+
+ if (komeda_fb_check_src_coords(kfb, src_x, src_y, src_w, src_h))
+ return -EINVAL;
+
+ if (!in_range(&layer->hsize_in, src_w)) {
+ DRM_DEBUG_ATOMIC("invalid src_w %d.\n", src_w);
+ return -EINVAL;
+ }
+
+ if (!in_range(&layer->vsize_in, src_h)) {
+ DRM_DEBUG_ATOMIC("invalid src_h %d.\n", src_h);
+ return -EINVAL;
+ }
+
+ if (drm_rotation_90_or_270(dflow->rot))
+ line_sz = dflow->in_h;
+ else
+ line_sz = dflow->in_w;
+
+ if (kfb->base.format->hsub > 1)
+ max_line_sz = layer->yuv_line_sz;
+ else
+ max_line_sz = layer->line_sz;
+
+ if (line_sz > max_line_sz) {
+ DRM_DEBUG_ATOMIC("Required line_sz: %d exceeds the max size %d\n",
+ line_sz, max_line_sz);
+ return -EINVAL;
+ }
+
+ 
return 0; +} + +static int +komeda_layer_validate(struct komeda_layer *layer, + struct komeda_plane_state *kplane_st, + struct komeda_data_flow_cfg *dflow) +{ + struct drm_plane_state *plane_st = &kplane_st->base; + struct drm_framebuffer *fb = plane_st->fb; + struct komeda_fb *kfb = to_kfb(fb); + struct komeda_component_state *c_st; + struct komeda_layer_state *st; + int i, err; + + err = komeda_layer_check_cfg(layer, kfb, dflow); + if (err) + return err; + + c_st = komeda_component_get_state_and_set_user(&layer->base, + plane_st->state, plane_st->plane, plane_st->crtc); + if (IS_ERR(c_st)) + return PTR_ERR(c_st); + + st = to_layer_st(c_st); + + st->rot = dflow->rot; + + if (fb->modifier) { + st->hsize = kfb->aligned_w; + st->vsize = kfb->aligned_h; + st->afbc_crop_l = dflow->in_x; + st->afbc_crop_r = kfb->aligned_w - dflow->in_x - dflow->in_w; + st->afbc_crop_t = dflow->in_y; + st->afbc_crop_b = kfb->aligned_h - dflow->in_y - dflow->in_h; + } else { + st->hsize = dflow->in_w; + st->vsize = dflow->in_h; + st->afbc_crop_l = 0; + st->afbc_crop_r = 0; + st->afbc_crop_t = 0; + st->afbc_crop_b = 0; + } + + for (i = 0; i < fb->format->num_planes; i++) + st->addr[i] = komeda_fb_get_pixel_addr(kfb, dflow->in_x, + dflow->in_y, i); + + err = komeda_component_validate_private(&layer->base, c_st); + if (err) + return err; + + /* update the data flow for the next stage */ + komeda_component_set_output(&dflow->input, &layer->base, 0); + + /* + * The rotation has been handled by layer, so adjusted the data flow for + * the next stage. + */ + komeda_rotate_data_flow(dflow, st->rot); + + return 0; +} + +static int +komeda_wb_layer_validate(struct komeda_layer *wb_layer, + struct drm_connector_state *conn_st, + struct komeda_data_flow_cfg *dflow) +{ + struct komeda_fb *kfb = to_kfb(conn_st->writeback_job->fb); + struct komeda_component_state *c_st; + struct komeda_layer_state *st; + int i, err; + + err = komeda_layer_check_cfg(wb_layer, kfb, dflow); + if (err) + return err; + + c_st = komeda_component_get_state_and_set_user(&wb_layer->base, + conn_st->state, conn_st->connector, conn_st->crtc); + if (IS_ERR(c_st)) + return PTR_ERR(c_st); + + st = to_layer_st(c_st); + + st->hsize = dflow->out_w; + st->vsize = dflow->out_h; + + for (i = 0; i < kfb->base.format->num_planes; i++) + st->addr[i] = komeda_fb_get_pixel_addr(kfb, dflow->out_x, + dflow->out_y, i); + + komeda_component_add_input(&st->base, &dflow->input, 0); + komeda_component_set_output(&dflow->input, &wb_layer->base, 0); + + return 0; +} + +static bool scaling_ratio_valid(u32 size_in, u32 size_out, + u32 max_upscaling, u32 max_downscaling) +{ + if (size_out > size_in * max_upscaling) + return false; + else if (size_in > size_out * max_downscaling) + return false; + return true; +} + +static int +komeda_scaler_check_cfg(struct komeda_scaler *scaler, + struct komeda_crtc_state *kcrtc_st, + struct komeda_data_flow_cfg *dflow) +{ + u32 hsize_in, vsize_in, hsize_out, vsize_out; + u32 max_upscaling; + + hsize_in = dflow->in_w; + vsize_in = dflow->in_h; + hsize_out = dflow->out_w; + vsize_out = dflow->out_h; + + if (!in_range(&scaler->hsize, hsize_in) || + !in_range(&scaler->hsize, hsize_out)) { + DRM_DEBUG_ATOMIC("Invalid horizontal sizes"); + return -EINVAL; + } + + if (!in_range(&scaler->vsize, vsize_in) || + !in_range(&scaler->vsize, vsize_out)) { + DRM_DEBUG_ATOMIC("Invalid vertical sizes"); + return -EINVAL; + } + + /* If input comes from compiz that means the scaling is for writeback + * and scaler can not do upscaling for writeback + */ + if 
(has_bit(dflow->input.component->id, KOMEDA_PIPELINE_COMPIZS)) + max_upscaling = 1; + else + max_upscaling = scaler->max_upscaling; + + if (!scaling_ratio_valid(hsize_in, hsize_out, max_upscaling, + scaler->max_downscaling)) { + DRM_DEBUG_ATOMIC("Invalid horizontal scaling ratio"); + return -EINVAL; + } + + if (!scaling_ratio_valid(vsize_in, vsize_out, max_upscaling, + scaler->max_downscaling)) { + DRM_DEBUG_ATOMIC("Invalid vertical scaling ratio"); + return -EINVAL; + } + + if (hsize_in > hsize_out || vsize_in > vsize_out) { + struct komeda_pipeline *pipe = scaler->base.pipeline; + int err; + + err = pipe->funcs->downscaling_clk_check(pipe, + &kcrtc_st->base.adjusted_mode, + komeda_crtc_get_aclk(kcrtc_st), dflow); + if (err) { + DRM_DEBUG_ATOMIC("aclk can't satisfy the clock requirement of the downscaling\n"); + return err; + } + } + + return 0; +} + +static int +komeda_scaler_validate(void *user, + struct komeda_crtc_state *kcrtc_st, + struct komeda_data_flow_cfg *dflow) +{ + struct drm_atomic_state *drm_st = kcrtc_st->base.state; + struct komeda_component_state *c_st; + struct komeda_scaler_state *st; + struct komeda_scaler *scaler; + int err = 0; + + if (!(dflow->en_scaling || dflow->en_img_enhancement)) + return 0; + + scaler = komeda_component_get_avail_scaler(dflow->input.component, + drm_st); + if (!scaler) { + DRM_DEBUG_ATOMIC("No scaler available"); + return -EINVAL; + } + + err = komeda_scaler_check_cfg(scaler, kcrtc_st, dflow); + if (err) + return err; + + c_st = komeda_component_get_state_and_set_user(&scaler->base, + drm_st, user, kcrtc_st->base.crtc); + if (IS_ERR(c_st)) + return PTR_ERR(c_st); + + st = to_scaler_st(c_st); + + st->hsize_in = dflow->in_w; + st->vsize_in = dflow->in_h; + st->hsize_out = dflow->out_w; + st->vsize_out = dflow->out_h; + st->right_crop = dflow->right_crop; + st->left_crop = dflow->left_crop; + st->total_vsize_in = dflow->total_in_h; + st->total_hsize_in = dflow->total_in_w; + st->total_hsize_out = dflow->total_out_w; + + /* Enable alpha processing if the next stage needs the pixel alpha */ + st->en_alpha = dflow->pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE; + st->en_scaling = dflow->en_scaling; + st->en_img_enhancement = dflow->en_img_enhancement; + st->en_split = dflow->en_split; + st->right_part = dflow->right_part; + + komeda_component_add_input(&st->base, &dflow->input, 0); + komeda_component_set_output(&dflow->input, &scaler->base, 0); + return err; +} + +static void komeda_split_data_flow(struct komeda_scaler *scaler, + struct komeda_data_flow_cfg *dflow, + struct komeda_data_flow_cfg *l_dflow, + struct komeda_data_flow_cfg *r_dflow); + +static int +komeda_splitter_validate(struct komeda_splitter *splitter, + struct drm_connector_state *conn_st, + struct komeda_data_flow_cfg *dflow, + struct komeda_data_flow_cfg *l_output, + struct komeda_data_flow_cfg *r_output) +{ + struct komeda_component_state *c_st; + struct komeda_splitter_state *st; + + if (!splitter) { + DRM_DEBUG_ATOMIC("Current HW doesn't support splitter.\n"); + return -EINVAL; + } + + if (!in_range(&splitter->hsize, dflow->in_w)) { + DRM_DEBUG_ATOMIC("split in_w:%d is out of the acceptable range.\n", + dflow->in_w); + return -EINVAL; + } + + if (!in_range(&splitter->vsize, dflow->in_h)) { + DRM_DEBUG_ATOMIC("split in_h: %d exceeds the acceptable range.\n", + dflow->in_h); + return -EINVAL; + } + + c_st = komeda_component_get_state_and_set_user(&splitter->base, + conn_st->state, conn_st->connector, conn_st->crtc); + + if (IS_ERR(c_st)) + return PTR_ERR(c_st); + + 
komeda_split_data_flow(splitter->base.pipeline->scalers[0], + dflow, l_output, r_output); + + st = to_splitter_st(c_st); + st->hsize = dflow->in_w; + st->vsize = dflow->in_h; + st->overlap = dflow->overlap; + + komeda_component_add_input(&st->base, &dflow->input, 0); + komeda_component_set_output(&l_output->input, &splitter->base, 0); + komeda_component_set_output(&r_output->input, &splitter->base, 1); + + return 0; +} + +static int +komeda_merger_validate(struct komeda_merger *merger, + void *user, + struct komeda_crtc_state *kcrtc_st, + struct komeda_data_flow_cfg *left_input, + struct komeda_data_flow_cfg *right_input, + struct komeda_data_flow_cfg *output) +{ + struct komeda_component_state *c_st; + struct komeda_merger_state *st; + int err = 0; + + if (!merger) { + DRM_DEBUG_ATOMIC("No merger is available"); + return -EINVAL; + } + + if (!in_range(&merger->hsize_merged, output->out_w)) { + DRM_DEBUG_ATOMIC("merged_w: %d is out of the accepted range.\n", + output->out_w); + return -EINVAL; + } + + if (!in_range(&merger->vsize_merged, output->out_h)) { + DRM_DEBUG_ATOMIC("merged_h: %d is out of the accepted range.\n", + output->out_h); + return -EINVAL; + } + + c_st = komeda_component_get_state_and_set_user(&merger->base, + kcrtc_st->base.state, kcrtc_st->base.crtc, kcrtc_st->base.crtc); + + if (IS_ERR(c_st)) + return PTR_ERR(c_st); + + st = to_merger_st(c_st); + st->hsize_merged = output->out_w; + st->vsize_merged = output->out_h; + + komeda_component_add_input(c_st, &left_input->input, 0); + komeda_component_add_input(c_st, &right_input->input, 1); + komeda_component_set_output(&output->input, &merger->base, 0); + + return err; +} + +void pipeline_composition_size(struct komeda_crtc_state *kcrtc_st, + u16 *hsize, u16 *vsize) +{ + struct drm_display_mode *m = &kcrtc_st->base.adjusted_mode; + + if (hsize) + *hsize = m->hdisplay; + if (vsize) + *vsize = m->vdisplay; +} + +static int +komeda_compiz_set_input(struct komeda_compiz *compiz, + struct komeda_crtc_state *kcrtc_st, + struct komeda_data_flow_cfg *dflow) +{ + struct drm_atomic_state *drm_st = kcrtc_st->base.state; + struct komeda_component_state *c_st, *old_st; + struct komeda_compiz_input_cfg *cin; + u16 compiz_w, compiz_h; + int idx = dflow->blending_zorder; + + pipeline_composition_size(kcrtc_st, &compiz_w, &compiz_h); + /* check display rect */ + if ((dflow->out_x + dflow->out_w > compiz_w) || + (dflow->out_y + dflow->out_h > compiz_h) || + dflow->out_w == 0 || dflow->out_h == 0) { + DRM_DEBUG_ATOMIC("invalid disp rect [x=%d, y=%d, w=%d, h=%d]\n", + dflow->out_x, dflow->out_y, + dflow->out_w, dflow->out_h); + return -EINVAL; + } + + c_st = komeda_component_get_state_and_set_user(&compiz->base, drm_st, + kcrtc_st->base.crtc, kcrtc_st->base.crtc); + if (IS_ERR(c_st)) + return PTR_ERR(c_st); + + if (komeda_component_check_input(c_st, &dflow->input, idx)) + return -EINVAL; + + cin = &(to_compiz_st(c_st)->cins[idx]); + + cin->hsize = dflow->out_w; + cin->vsize = dflow->out_h; + cin->hoffset = dflow->out_x; + cin->voffset = dflow->out_y; + cin->pixel_blend_mode = dflow->pixel_blend_mode; + cin->layer_alpha = dflow->layer_alpha; + + old_st = komeda_component_get_old_state(&compiz->base, drm_st); + WARN_ON(!old_st); + + /* compare with old to check if this input has been changed */ + if (memcmp(&(to_compiz_st(old_st)->cins[idx]), cin, sizeof(*cin))) + c_st->changed_active_inputs |= BIT(idx); + + komeda_component_add_input(c_st, &dflow->input, idx); + komeda_component_set_output(&dflow->input, &compiz->base, 0); + + return 0; +} + 
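As a small, self-contained illustration of the display-rect check in komeda_compiz_set_input() above (made-up numbers, not driver code), a plane hanging off the right edge of a 1920x1080 composition is rejected:

    #include <stdbool.h>
    #include <stdio.h>

    /* mirrors the bounds check in komeda_compiz_set_input() */
    static bool disp_rect_valid(unsigned int x, unsigned int y,
                                unsigned int w, unsigned int h,
                                unsigned int compiz_w, unsigned int compiz_h)
    {
        return w && h && (x + w <= compiz_w) && (y + h <= compiz_h);
    }

    int main(void)
    {
        /* x=1800, w=200 overflows a 1920-wide composition by 80 pixels */
        printf("%d\n", disp_rect_valid(1800, 0, 200, 100, 1920, 1080)); /* 0 */
        printf("%d\n", disp_rect_valid(0, 0, 1920, 1080, 1920, 1080));  /* 1 */
        return 0;
    }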
+static int +komeda_compiz_validate(struct komeda_compiz *compiz, + struct komeda_crtc_state *state, + struct komeda_data_flow_cfg *dflow) +{ + struct komeda_component_state *c_st; + struct komeda_compiz_state *st; + + c_st = komeda_component_get_state_and_set_user(&compiz->base, + state->base.state, state->base.crtc, state->base.crtc); + if (IS_ERR(c_st)) + return PTR_ERR(c_st); + + st = to_compiz_st(c_st); + + pipeline_composition_size(state, &st->hsize, &st->vsize); + + komeda_component_set_output(&dflow->input, &compiz->base, 0); + + /* compiz output dflow will be fed to the next pipeline stage, prepare + * the data flow configuration for the next stage + */ + if (dflow) { + dflow->in_w = st->hsize; + dflow->in_h = st->vsize; + dflow->out_w = dflow->in_w; + dflow->out_h = dflow->in_h; + /* the output data of compiz doesn't have alpha, it only can be + * used as bottom layer when blend it with master layers + */ + dflow->pixel_blend_mode = DRM_MODE_BLEND_PIXEL_NONE; + dflow->layer_alpha = 0xFF; + dflow->blending_zorder = 0; + } + + return 0; +} + +static int +komeda_improc_validate(struct komeda_improc *improc, + struct komeda_crtc_state *kcrtc_st, + struct komeda_data_flow_cfg *dflow) +{ + struct drm_crtc *crtc = kcrtc_st->base.crtc; + struct drm_crtc_state *crtc_st = &kcrtc_st->base; + struct komeda_component_state *c_st; + struct komeda_improc_state *st; + + c_st = komeda_component_get_state_and_set_user(&improc->base, + kcrtc_st->base.state, crtc, crtc); + if (IS_ERR(c_st)) + return PTR_ERR(c_st); + + st = to_improc_st(c_st); + + st->hsize = dflow->in_w; + st->vsize = dflow->in_h; + + if (drm_atomic_crtc_needs_modeset(crtc_st)) { + u32 output_depths, output_formats; + u32 avail_depths, avail_formats; + + komeda_crtc_get_color_config(crtc_st, &output_depths, + &output_formats); + + avail_depths = output_depths & improc->supported_color_depths; + if (avail_depths == 0) { + DRM_DEBUG_ATOMIC("No available color depths, conn depths: 0x%x & display: 0x%x\n", + output_depths, + improc->supported_color_depths); + return -EINVAL; + } + + avail_formats = output_formats & + improc->supported_color_formats; + if (!avail_formats) { + DRM_DEBUG_ATOMIC("No available color_formats, conn formats 0x%x & display: 0x%x\n", + output_formats, + improc->supported_color_formats); + return -EINVAL; + } + + st->color_depth = __fls(avail_depths); + st->color_format = BIT(__ffs(avail_formats)); + } + + if (kcrtc_st->base.color_mgmt_changed) { + drm_lut_to_fgamma_coeffs(kcrtc_st->base.gamma_lut, + st->fgamma_coeffs); + drm_ctm_to_coeffs(kcrtc_st->base.ctm, st->ctm_coeffs); + } + + komeda_component_add_input(&st->base, &dflow->input, 0); + komeda_component_set_output(&dflow->input, &improc->base, 0); + + return 0; +} + +static int +komeda_timing_ctrlr_validate(struct komeda_timing_ctrlr *ctrlr, + struct komeda_crtc_state *kcrtc_st, + struct komeda_data_flow_cfg *dflow) +{ + struct drm_crtc *crtc = kcrtc_st->base.crtc; + struct komeda_timing_ctrlr_state *st; + struct komeda_component_state *c_st; + + c_st = komeda_component_get_state_and_set_user(&ctrlr->base, + kcrtc_st->base.state, crtc, crtc); + if (IS_ERR(c_st)) + return PTR_ERR(c_st); + + st = to_ctrlr_st(c_st); + + komeda_component_add_input(&st->base, &dflow->input, 0); + komeda_component_set_output(&dflow->input, &ctrlr->base, 0); + + return 0; +} + +void komeda_complete_data_flow_cfg(struct komeda_layer *layer, + struct komeda_data_flow_cfg *dflow, + struct drm_framebuffer *fb) +{ + struct komeda_scaler *scaler = layer->base.pipeline->scalers[0]; + u32 w 
= dflow->in_w;
+ u32 h = dflow->in_h;
+
+ dflow->total_in_w = dflow->in_w;
+ dflow->total_in_h = dflow->in_h;
+ dflow->total_out_w = dflow->out_w;
+
+ /* if format doesn't have alpha, fix blend mode to PIXEL_NONE */
+ if (!fb->format->has_alpha)
+ dflow->pixel_blend_mode = DRM_MODE_BLEND_PIXEL_NONE;
+
+ if (drm_rotation_90_or_270(dflow->rot))
+ swap(w, h);
+
+ dflow->en_scaling = (w != dflow->out_w) || (h != dflow->out_h);
+ dflow->is_yuv = fb->format->is_yuv;
+
+ /* try to enable image enhancer if data flow is a 2x+ upscaling */
+ dflow->en_img_enhancement = dflow->out_w >= 2 * w ||
+ dflow->out_h >= 2 * h;
+
+ /* try to enable split if the scaling exceeds the scaler's acceptable
+ * input/output range.
+ */
+ if (dflow->en_scaling && scaler)
+ dflow->en_split = !in_range(&scaler->hsize, dflow->in_w) ||
+ !in_range(&scaler->hsize, dflow->out_w);
+}
+
+static bool merger_is_available(struct komeda_pipeline *pipe,
+ struct komeda_data_flow_cfg *dflow)
+{
+ u32 avail_inputs = pipe->merger ?
+ pipe->merger->base.supported_inputs : 0;
+
+ return has_bit(dflow->input.component->id, avail_inputs);
+}
+
+int komeda_build_layer_data_flow(struct komeda_layer *layer,
+ struct komeda_plane_state *kplane_st,
+ struct komeda_crtc_state *kcrtc_st,
+ struct komeda_data_flow_cfg *dflow)
+{
+ struct drm_plane *plane = kplane_st->base.plane;
+ struct komeda_pipeline *pipe = layer->base.pipeline;
+ int err;
+
+ DRM_DEBUG_ATOMIC("%s handling [PLANE:%d:%s]: src[x/y:%d/%d, w/h:%d/%d] disp[x/y:%d/%d, w/h:%d/%d]",
+ layer->base.name, plane->base.id, plane->name,
+ dflow->in_x, dflow->in_y, dflow->in_w, dflow->in_h,
+ dflow->out_x, dflow->out_y, dflow->out_w, dflow->out_h);
+
+ err = komeda_layer_validate(layer, kplane_st, dflow);
+ if (err)
+ return err;
+
+ err = komeda_scaler_validate(plane, kcrtc_st, dflow);
+ if (err)
+ return err;
+
+ /* if split is enabled, check if the data flow can go into the merger */
+ if (dflow->en_split && merger_is_available(pipe, dflow))
+ return 0;
+
+ err = komeda_compiz_set_input(pipe->compiz, kcrtc_st, dflow);
+
+ return err;
+}
+
+/*
+ * Split is introduced to work around the scaler's input/output size
+ * limitation. The idea is simple: if one scaler cannot fit the requirement,
+ * use two. So split divides the big source image into two half parts
+ * (left/right) and does the scaling with two scalers separately and
+ * independently. But split also introduces an edge problem in the middle of
+ * the image when scaling; to avoid it, split isn't a simple half-and-half,
+ * but adds extra pixels (overlap) to both sides. After split, the left/right
+ * parts will be:
+ * - left: [0, src_length/2 + overlap]
+ * - right: [src_length/2 - overlap, src_length]
+ * The extra overlap does eliminate the edge problem, but it may also
+ * generate unnecessary pixels when scaling; we need to crop them before the
+ * scaler outputs the result to the next stage. How to crop depends on where
+ * the unneeded pixels are, in other words on which side the overlap has
+ * been added:
+ * - left: crop the right
+ * - right: crop the left
+ *
+ * The diagram for how to do the split:
+ *
+ * <---------------------left->out_w ---------------->
+ * |--------------------------------|---right_crop-----| <- left after split
+ * \ \ /
+ * \ \<--overlap--->/
+ * |-----------------|-------------|(Middle)------|-----------------| <- src
+ * /<---overlap--->\ \
+ * / \ \
+ * right after split->|-----left_crop---|--------------------------------|
+ * ^<------------------- right->out_w --------------->^
+ *
+ * NOTE: To be consistent with the HW, output_w always contains the crop
+ * size.
+ */
+
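A self-contained numeric walk-through of the scaling-split arithmetic above, with made-up sizes (a 4000-pixel-wide source scaled to 1000, assuming an overlap of 8); this mirrors the math in komeda_split_data_flow() below, but is illustrative userspace code, not driver code:

    #include <stdio.h>

    #define ALIGN2(x) (((x) + 1) & ~1u)

    int main(void)
    {
        unsigned int in_w = 4000, out_w = 1000, overlap = 8;

        /* each half reads half of the source plus the overlap */
        unsigned int l_in = ALIGN2(in_w) / 2 + overlap; /* 2008 */
        unsigned int r_in = l_in;                       /* 2008 */

        /* display halves (scaling case) */
        unsigned int l_disp = (out_w + 1) >> 1;         /* 500 */
        unsigned int r_disp = out_w - l_disp;           /* 500 */

        /* each scaler produces proportionally more, then crops */
        unsigned int l_out = out_w * l_in / in_w;       /* 502 */
        unsigned int r_out = out_w * r_in / in_w;       /* 502 */

        printf("left: out_w=%u right_crop=%u\n", l_out, l_out - l_disp);
        printf("right: out_w=%u left_crop=%u\n", r_out, r_out - r_disp);
        return 0;
    }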
+static void komeda_split_data_flow(struct komeda_scaler *scaler,
+ struct komeda_data_flow_cfg *dflow,
+ struct komeda_data_flow_cfg *l_dflow,
+ struct komeda_data_flow_cfg *r_dflow)
+{
+ bool r90 = drm_rotation_90_or_270(dflow->rot);
+ bool flip_h = has_flip_h(dflow->rot);
+ u32 l_out, r_out, overlap;
+
+ memcpy(l_dflow, dflow, sizeof(*dflow));
+ memcpy(r_dflow, dflow, sizeof(*dflow));
+
+ l_dflow->right_part = false;
+ r_dflow->right_part = true;
+ r_dflow->blending_zorder = dflow->blending_zorder + 1;
+
+ overlap = 0;
+ if (dflow->en_scaling && scaler)
+ overlap += scaler->scaling_split_overlap;
+
+ /* the original dflow may be fed into the splitter, which doesn't need
+ * the enhancement overlap
+ */
+ dflow->overlap = overlap;
+
+ if (dflow->en_img_enhancement && scaler)
+ overlap += scaler->enh_split_overlap;
+
+ l_dflow->overlap = overlap;
+ r_dflow->overlap = overlap;
+
+ /* split the original content */
+ /* left/right here always means the left/right part of the display
+ * image, not the source image
+ */
+ /* DRM rotation is anti-clockwise */
+ if (r90) {
+ if (dflow->en_scaling) {
+ l_dflow->in_h = ALIGN(dflow->in_h, 2) / 2 + l_dflow->overlap;
+ r_dflow->in_h = l_dflow->in_h;
+ } else if (dflow->en_img_enhancement) {
+ /* enhancer only */
+ l_dflow->in_h = ALIGN(dflow->in_h, 2) / 2 + l_dflow->overlap;
+ r_dflow->in_h = dflow->in_h / 2 + r_dflow->overlap;
+ } else {
+ /* split without scaler, no overlap */
+ l_dflow->in_h = ALIGN(((dflow->in_h + 1) >> 1), 2);
+ r_dflow->in_h = dflow->in_h - l_dflow->in_h;
+ }
+
+ /* Consider YUV formats: after split, the split source w/h may
+ * not be aligned to 2. We have two choices for such a case:
+ * 1. scaler is enabled (overlap != 0): we can align both
+ * left/right and crop the extra data with the scaler.
+ * 2. scaler is not enabled: only align the split left src/disp,
+ * and assign the rest to the right
+ */
+ if ((overlap != 0) && dflow->is_yuv) {
+ l_dflow->in_h = ALIGN(l_dflow->in_h, 2);
+ r_dflow->in_h = ALIGN(r_dflow->in_h, 2);
+ }
+
+ if (flip_h)
+ l_dflow->in_y = dflow->in_y + dflow->in_h - l_dflow->in_h;
+ else
+ r_dflow->in_y = dflow->in_y + dflow->in_h - r_dflow->in_h;
+ } else {
+ if (dflow->en_scaling) {
+ l_dflow->in_w = ALIGN(dflow->in_w, 2) / 2 + l_dflow->overlap;
+ r_dflow->in_w = l_dflow->in_w;
+ } else if (dflow->en_img_enhancement) {
+ l_dflow->in_w = ALIGN(dflow->in_w, 2) / 2 + l_dflow->overlap;
+ r_dflow->in_w = dflow->in_w / 2 + r_dflow->overlap;
+ } else {
+ l_dflow->in_w = ALIGN(((dflow->in_w + 1) >> 1), 2);
+ r_dflow->in_w = dflow->in_w - l_dflow->in_w;
+ }
+
+ /* do YUV alignment when the scaler is enabled */
+ if ((overlap != 0) && dflow->is_yuv) {
+ l_dflow->in_w = ALIGN(l_dflow->in_w, 2);
+ r_dflow->in_w = ALIGN(r_dflow->in_w, 2);
+ }
+
+ /* on flip_h, the left display content comes from the right of
+ * the source
+ */
+ if (flip_h)
+ l_dflow->in_x = dflow->in_w + dflow->in_x - l_dflow->in_w;
+ else
+ r_dflow->in_x = dflow->in_w + dflow->in_x - r_dflow->in_w;
+ }
+
+ /* split the disp_rect */
+ if (dflow->en_scaling || dflow->en_img_enhancement)
+ l_dflow->out_w = ((dflow->out_w + 1) >> 1);
+ else
+ l_dflow->out_w = ALIGN(((dflow->out_w + 1) >> 1), 2);
+
+ r_dflow->out_w = dflow->out_w - l_dflow->out_w;
+
+ l_dflow->out_x = dflow->out_x;
+ r_dflow->out_x = l_dflow->out_w + l_dflow->out_x;
+
+ /* calculate the scaling crop */
+ /* each scaler outputs more data, which is then cropped */
+ if (r90) {
+ l_out = (dflow->out_w * l_dflow->in_h) / dflow->in_h;
+ r_out = (dflow->out_w * r_dflow->in_h) / dflow->in_h;
+ } else {
+ l_out = (dflow->out_w * l_dflow->in_w) / dflow->in_w;
+ r_out = (dflow->out_w * r_dflow->in_w) / dflow->in_w;
+ }
+
+ l_dflow->left_crop = 0;
+ l_dflow->right_crop = l_out - l_dflow->out_w;
+ r_dflow->left_crop = r_out - r_dflow->out_w;
+ r_dflow->right_crop = 0;
+
+ /* out_w includes the crop length */
+ l_dflow->out_w += l_dflow->right_crop + l_dflow->left_crop;
+ r_dflow->out_w += r_dflow->right_crop + r_dflow->left_crop;
+}
+
+/* For layer split, a plane state will be split into two data flows and
+ * handled by two separate komeda layer input pipelines. komeda supports two
+ * types of layer split:
+ * - non-scaling split:
+ * / layer-left -> \
+ * plane_state compiz-> ...
+ * \ layer-right-> /
+ *
+ * - scaling split:
+ * / layer-left -> scaler->\
+ * plane_state merger -> compiz-> ...
+ * \ layer-right-> scaler->/
+ *
+ * Since the merger only supports scalers as input, for a non-scaling split
+ * the two layer data flows are output to compiz directly; for a scaling
+ * split, the two data flows are first merged by the merger, which then
+ * outputs one merged data flow to compiz.
+ */ +int komeda_build_layer_split_data_flow(struct komeda_layer *left, + struct komeda_plane_state *kplane_st, + struct komeda_crtc_state *kcrtc_st, + struct komeda_data_flow_cfg *dflow) +{ + struct drm_plane *plane = kplane_st->base.plane; + struct komeda_pipeline *pipe = left->base.pipeline; + struct komeda_layer *right = left->right; + struct komeda_data_flow_cfg l_dflow, r_dflow; + int err; + + komeda_split_data_flow(pipe->scalers[0], dflow, &l_dflow, &r_dflow); + + DRM_DEBUG_ATOMIC("Assign %s + %s to [PLANE:%d:%s]: " + "src[x/y:%d/%d, w/h:%d/%d] disp[x/y:%d/%d, w/h:%d/%d]", + left->base.name, right->base.name, + plane->base.id, plane->name, + dflow->in_x, dflow->in_y, dflow->in_w, dflow->in_h, + dflow->out_x, dflow->out_y, dflow->out_w, dflow->out_h); + + err = komeda_build_layer_data_flow(left, kplane_st, kcrtc_st, &l_dflow); + if (err) + return err; + + err = komeda_build_layer_data_flow(right, kplane_st, kcrtc_st, &r_dflow); + if (err) + return err; + + /* The rotation has been handled by layer, so adjusted the data flow */ + komeda_rotate_data_flow(dflow, dflow->rot); + + /* left and right dflow has been merged to compiz already, + * no need merger to merge them anymore. + */ + if (r_dflow.input.component == l_dflow.input.component) + return 0; + + /* line merger path */ + err = komeda_merger_validate(pipe->merger, plane, kcrtc_st, + &l_dflow, &r_dflow, dflow); + if (err) + return err; + + err = komeda_compiz_set_input(pipe->compiz, kcrtc_st, dflow); + + return err; +} + +/* writeback data path: compiz -> scaler -> wb_layer -> memory */ +int komeda_build_wb_data_flow(struct komeda_layer *wb_layer, + struct drm_connector_state *conn_st, + struct komeda_crtc_state *kcrtc_st, + struct komeda_data_flow_cfg *dflow) +{ + struct drm_connector *conn = conn_st->connector; + int err; + + err = komeda_scaler_validate(conn, kcrtc_st, dflow); + if (err) + return err; + + return komeda_wb_layer_validate(wb_layer, conn_st, dflow); +} + +/* writeback scaling split data path: + * /-> scaler ->\ + * compiz -> splitter merger -> wb_layer -> memory + * \-> scaler ->/ + */ +int komeda_build_wb_split_data_flow(struct komeda_layer *wb_layer, + struct drm_connector_state *conn_st, + struct komeda_crtc_state *kcrtc_st, + struct komeda_data_flow_cfg *dflow) +{ + struct komeda_pipeline *pipe = wb_layer->base.pipeline; + struct drm_connector *conn = conn_st->connector; + struct komeda_data_flow_cfg l_dflow, r_dflow; + int err; + + err = komeda_splitter_validate(pipe->splitter, conn_st, + dflow, &l_dflow, &r_dflow); + if (err) + return err; + err = komeda_scaler_validate(conn, kcrtc_st, &l_dflow); + if (err) + return err; + + err = komeda_scaler_validate(conn, kcrtc_st, &r_dflow); + if (err) + return err; + + err = komeda_merger_validate(pipe->merger, conn_st, kcrtc_st, + &l_dflow, &r_dflow, dflow); + if (err) + return err; + + return komeda_wb_layer_validate(wb_layer, conn_st, dflow); +} + +/* build display output data flow, the data path is: + * compiz -> improc -> timing_ctrlr + */ +int komeda_build_display_data_flow(struct komeda_crtc *kcrtc, + struct komeda_crtc_state *kcrtc_st) +{ + struct komeda_pipeline *master = kcrtc->master; + struct komeda_pipeline *slave = kcrtc->slave; + struct komeda_data_flow_cfg m_dflow; /* master data flow */ + struct komeda_data_flow_cfg s_dflow; /* slave data flow */ + int err; + + memset(&m_dflow, 0, sizeof(m_dflow)); + memset(&s_dflow, 0, sizeof(s_dflow)); + + if (slave && has_bit(slave->id, kcrtc_st->active_pipes)) { + err = komeda_compiz_validate(slave->compiz, 
kcrtc_st, &s_dflow);
+ if (err)
+ return err;
+
+ /* merge the slave dflow into the master pipeline */
+ err = komeda_compiz_set_input(master->compiz, kcrtc_st,
+ &s_dflow);
+ if (err)
+ return err;
+ }
+
+ err = komeda_compiz_validate(master->compiz, kcrtc_st, &m_dflow);
+ if (err)
+ return err;
+
+ err = komeda_improc_validate(master->improc, kcrtc_st, &m_dflow);
+ if (err)
+ return err;
+
+ err = komeda_timing_ctrlr_validate(master->ctrlr, kcrtc_st, &m_dflow);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int
+komeda_pipeline_unbound_components(struct komeda_pipeline *pipe,
+ struct komeda_pipeline_state *new)
+{
+ struct drm_atomic_state *drm_st = new->obj.state;
+ struct komeda_pipeline_state *old = priv_to_pipe_st(pipe->obj.state);
+ struct komeda_component_state *c_st;
+ struct komeda_component *c;
+ u32 id;
+ unsigned long disabling_comps;
+
+ WARN_ON(!old);
+
+ disabling_comps = (~new->active_comps) & old->active_comps;
+
+ /* unbind all disabling components */
+ for_each_set_bit(id, &disabling_comps, 32) {
+ c = komeda_pipeline_get_component(pipe, id);
+ c_st = komeda_component_get_state_and_set_user(c,
+ drm_st, NULL, new->crtc);
+ if (PTR_ERR(c_st) == -EDEADLK)
+ return -EDEADLK;
+ WARN_ON(IS_ERR(c_st));
+ }
+
+ return 0;
+}
+
+/* release unclaimed pipeline resources */
+int komeda_release_unclaimed_resources(struct komeda_pipeline *pipe,
+ struct komeda_crtc_state *kcrtc_st)
+{
+ struct drm_atomic_state *drm_st = kcrtc_st->base.state;
+ struct komeda_pipeline_state *st;
+
+ /* ignore a pipeline which is not affected */
+ if (!pipe || !has_bit(pipe->id, kcrtc_st->affected_pipes))
+ return 0;
+
+ if (has_bit(pipe->id, kcrtc_st->active_pipes))
+ st = komeda_pipeline_get_new_state(pipe, drm_st);
+ else
+ st = komeda_pipeline_get_state_and_set_crtc(pipe, drm_st, NULL);
+
+ if (WARN_ON(IS_ERR_OR_NULL(st)))
+ return -EINVAL;
+
+ return komeda_pipeline_unbound_components(pipe, st);
+
+}
+
+/* Since standalone disabled components must be disabled separately and last,
+ * a complete disable operation may need to call pipeline_disable twice
+ * (two-phase disabling).
+ * Phase 1: disable the common components, then flush.
+ * Phase 2: disable the standalone disabled components, then flush.
+ *
+ * RETURNS:
+ * true: disable is not complete, a phase 2 disable is needed.
+ * false: disable is complete.
+ */
+bool komeda_pipeline_disable(struct komeda_pipeline *pipe,
+ struct drm_atomic_state *old_state)
+{
+ struct komeda_pipeline_state *old;
+ struct komeda_component *c;
+ struct komeda_component_state *c_st;
+ u32 id;
+ unsigned long disabling_comps;
+
+ old = komeda_pipeline_get_old_state(pipe, old_state);
+
+ disabling_comps = old->active_comps &
+ (~pipe->standalone_disabled_comps);
+ if (!disabling_comps)
+ disabling_comps = old->active_comps &
+ pipe->standalone_disabled_comps;
+
+ DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, disabling_comps: 0x%lx.\n",
+ pipe->id, old->active_comps, disabling_comps);
+
+ for_each_set_bit(id, &disabling_comps, 32) {
+ c = komeda_pipeline_get_component(pipe, id);
+ c_st = priv_to_comp_st(c->obj.state);
+
+ /*
+ * If we disabled a component then all active_inputs should be
+ * put in the list of changed_active_inputs, so they get
+ * re-enabled.
+ * This usually happens during a modeset when the pipeline is
+ * first disabled and then the actual state gets committed
+ * again.
+ */
+ c_st->changed_active_inputs |= c_st->active_inputs;
+
+ c->funcs->disable(c);
+ }
+
+ /* Update the pipeline state; if there are components that are still
+ * active, return true so that the phase 2 disable gets called.
+ */
+ old->active_comps &= ~disabling_comps;
+
+ return old->active_comps ? true : false;
+}
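The two-phase contract above can be sketched from the caller's side; a hypothetical sequence in which flush_hw() stands in for the commit/flush step the real driver performs in its CRTC disable path:

    /* phase 1: disable the common components, then flush */
    bool needs_phase2 = komeda_pipeline_disable(pipe, old_state);

    flush_hw(); /* hypothetical flush step, not a driver function */

    if (needs_phase2) {
        /* phase 2: disable the standalone components, then flush */
        komeda_pipeline_disable(pipe, old_state);
        flush_hw();
    }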
+		 */
+		c_st->changed_active_inputs |= c_st->active_inputs;
+
+		c->funcs->disable(c);
+	}
+
+	/* Update the pipeline state; if there are components that are still
+	 * active, return true so that the phase 2 disable gets called.
+	 */
+	old->active_comps &= ~disabling_comps;
+
+	return old->active_comps ? true : false;
+}
+
+void komeda_pipeline_update(struct komeda_pipeline *pipe,
+			    struct drm_atomic_state *old_state)
+{
+	struct komeda_pipeline_state *new = priv_to_pipe_st(pipe->obj.state);
+	struct komeda_pipeline_state *old;
+	struct komeda_component *c;
+	u32 id;
+	unsigned long changed_comps;
+
+	old = komeda_pipeline_get_old_state(pipe, old_state);
+
+	changed_comps = new->active_comps | old->active_comps;
+
+	DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, changed: 0x%lx.\n",
+			 pipe->id, new->active_comps, changed_comps);
+
+	for_each_set_bit(id, &changed_comps, 32) {
+		c = komeda_pipeline_get_component(pipe, id);
+
+		if (new->active_comps & BIT(c->id))
+			c->funcs->update(c, priv_to_comp_st(c->obj.state));
+		else
+			c->funcs->disable(c);
+	}
+}
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_plane.c b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
new file mode 100644
index 000000000..bc3f42e91
--- /dev/null
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
@@ -0,0 +1,343 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ * Author: James.Qian.Wang <james.qian.wang@arm.com>
+ *
+ */
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_print.h>
+#include "komeda_dev.h"
+#include "komeda_kms.h"
+
+static int
+komeda_plane_init_data_flow(struct drm_plane_state *st,
+			    struct komeda_crtc_state *kcrtc_st,
+			    struct komeda_data_flow_cfg *dflow)
+{
+	struct komeda_plane *kplane = to_kplane(st->plane);
+	struct drm_framebuffer *fb = st->fb;
+	const struct komeda_format_caps *caps = to_kfb(fb)->format_caps;
+	struct komeda_pipeline *pipe = kplane->layer->base.pipeline;
+
+	memset(dflow, 0, sizeof(*dflow));
+
+	dflow->blending_zorder = st->normalized_zpos;
+	if (pipe == to_kcrtc(st->crtc)->master)
+		dflow->blending_zorder -= kcrtc_st->max_slave_zorder;
+	if (dflow->blending_zorder < 0) {
+		DRM_DEBUG_ATOMIC("%s zorder:%d < max_slave_zorder: %d.\n",
+				 st->plane->name, st->normalized_zpos,
+				 kcrtc_st->max_slave_zorder);
+		return -EINVAL;
+	}
+
+	dflow->pixel_blend_mode = st->pixel_blend_mode;
+	dflow->layer_alpha = st->alpha >> 8;
+
+	dflow->out_x = st->crtc_x;
+	dflow->out_y = st->crtc_y;
+	dflow->out_w = st->crtc_w;
+	dflow->out_h = st->crtc_h;
+
+	dflow->in_x = st->src_x >> 16;
+	dflow->in_y = st->src_y >> 16;
+	dflow->in_w = st->src_w >> 16;
+	dflow->in_h = st->src_h >> 16;
+
+	dflow->rot = drm_rotation_simplify(st->rotation, caps->supported_rots);
+	if (!has_bits(dflow->rot, caps->supported_rots)) {
+		DRM_DEBUG_ATOMIC("rotation(0x%x) isn't supported by %s.\n",
+				 dflow->rot,
+				 komeda_get_format_name(caps->fourcc,
+							fb->modifier));
+		return -EINVAL;
+	}
+
+	komeda_complete_data_flow_cfg(kplane->layer, dflow, fb);
+
+	return 0;
+}
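+
+/*
+ * Worked example for the zpos handling above (assumed values, not a real
+ * configuration): on a CRTC whose slave pipeline reports
+ * max_slave_zorder == 1, a master-pipeline plane with normalized_zpos 3
+ * gets blending_zorder = 3 - 1 = 2, i.e. the global zpos is rebased into
+ * the master pipeline's own blending order before it reaches the compiz.
+ */
+
+/**
+ * komeda_plane_atomic_check - build input data flow
+ * @plane: DRM plane
+ * @state: the plane state object
+ *
+ * RETURNS:
+ * Zero for success or -errno
+ */
+static int
+komeda_plane_atomic_check(struct drm_plane *plane,
+			  struct drm_plane_state *state)
+{
+	struct komeda_plane *kplane = to_kplane(plane);
+	struct komeda_plane_state *kplane_st = to_kplane_st(state);
+	struct komeda_layer *layer = kplane->layer;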
+	struct drm_crtc_state *crtc_st;
+	struct komeda_crtc_state *kcrtc_st;
+	struct komeda_data_flow_cfg dflow;
+	int err;
+
+	if (!state->crtc || !state->fb)
+		return 0;
+
+	crtc_st = drm_atomic_get_crtc_state(state->state, state->crtc);
+	if (IS_ERR(crtc_st) || !crtc_st->enable) {
+		DRM_DEBUG_ATOMIC("Cannot update plane on a disabled CRTC.\n");
+		return -EINVAL;
+	}
+
+	/* crtc is inactive, skip the resource assignment */
+	if (!crtc_st->active)
+		return 0;
+
+	kcrtc_st = to_kcrtc_st(crtc_st);
+
+	err = komeda_plane_init_data_flow(state, kcrtc_st, &dflow);
+	if (err)
+		return err;
+
+	if (dflow.en_split)
+		err = komeda_build_layer_split_data_flow(layer,
+				kplane_st, kcrtc_st, &dflow);
+	else
+		err = komeda_build_layer_data_flow(layer,
+				kplane_st, kcrtc_st, &dflow);
+
+	return err;
+}
+
+/* The plane doesn't represent real hardware, so there is no HW update at
+ * the plane level. komeda handles all the HW updates in crtc->atomic_flush.
+ */
+static void
+komeda_plane_atomic_update(struct drm_plane *plane,
+			   struct drm_plane_state *old_state)
+{
+}
+
+static const struct drm_plane_helper_funcs komeda_plane_helper_funcs = {
+	.atomic_check	= komeda_plane_atomic_check,
+	.atomic_update	= komeda_plane_atomic_update,
+};
+
+static void komeda_plane_destroy(struct drm_plane *plane)
+{
+	drm_plane_cleanup(plane);
+
+	kfree(to_kplane(plane));
+}
+
+static void komeda_plane_reset(struct drm_plane *plane)
+{
+	struct komeda_plane_state *state;
+	struct komeda_plane *kplane = to_kplane(plane);
+
+	if (plane->state)
+		__drm_atomic_helper_plane_destroy_state(plane->state);
+
+	kfree(plane->state);
+	plane->state = NULL;
+
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
+	if (state) {
+		state->base.rotation = DRM_MODE_ROTATE_0;
+		state->base.pixel_blend_mode = DRM_MODE_BLEND_PREMULTI;
+		state->base.alpha = DRM_BLEND_ALPHA_OPAQUE;
+		state->base.zpos = kplane->layer->base.id;
+		state->base.color_encoding = DRM_COLOR_YCBCR_BT601;
+		state->base.color_range = DRM_COLOR_YCBCR_LIMITED_RANGE;
+		plane->state = &state->base;
+		plane->state->plane = plane;
+	}
+}
+
+static struct drm_plane_state *
+komeda_plane_atomic_duplicate_state(struct drm_plane *plane)
+{
+	struct komeda_plane_state *new;
+
+	if (WARN_ON(!plane->state))
+		return NULL;
+
+	new = kzalloc(sizeof(*new), GFP_KERNEL);
+	if (!new)
+		return NULL;
+
+	__drm_atomic_helper_plane_duplicate_state(plane, &new->base);
+
+	return &new->base;
+}
+
+static void
+komeda_plane_atomic_destroy_state(struct drm_plane *plane,
+				  struct drm_plane_state *state)
+{
+	__drm_atomic_helper_plane_destroy_state(state);
+	kfree(to_kplane_st(state));
+}
+
+static bool
+komeda_plane_format_mod_supported(struct drm_plane *plane,
+				  u32 format, u64 modifier)
+{
+	struct komeda_dev *mdev = plane->dev->dev_private;
+	struct komeda_plane *kplane = to_kplane(plane);
+	u32 layer_type = kplane->layer->layer_type;
+
+	return komeda_format_mod_supported(&mdev->fmt_tbl, layer_type,
+					   format, modifier, 0);
+}
+
+static const struct drm_plane_funcs komeda_plane_funcs = {
+	.update_plane		= drm_atomic_helper_update_plane,
+	.disable_plane		= drm_atomic_helper_disable_plane,
+	.destroy		= komeda_plane_destroy,
+	.reset			= komeda_plane_reset,
+	.atomic_duplicate_state	= komeda_plane_atomic_duplicate_state,
+	.atomic_destroy_state	= komeda_plane_atomic_destroy_state,
+	.format_mod_supported	= komeda_plane_format_mod_supported,
+};
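+
+/*
+ * Illustrative example (hypothetical board, not a real configuration):
+ * with two CRTCs where pipeline0 is the master of CRTC0 and also the
+ * slave of CRTC1, get_possible_crtcs() below returns BIT(0) | BIT(1),
+ * i.e. a plane backed by pipeline0 can be assigned to either CRTC.
+ */
+
+/* For komeda, a pipeline can be shared between CRTCs */
+static u32 get_possible_crtcs(struct komeda_kms_dev *kms,
+			      struct komeda_pipeline *pipe)
+{
+	struct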
komeda_crtc *crtc; + u32 possible_crtcs = 0; + int i; + + for (i = 0; i < kms->n_crtcs; i++) { + crtc = &kms->crtcs[i]; + + if ((pipe == crtc->master) || (pipe == crtc->slave)) + possible_crtcs |= BIT(i); + } + + return possible_crtcs; +} + +static void +komeda_set_crtc_plane_mask(struct komeda_kms_dev *kms, + struct komeda_pipeline *pipe, + struct drm_plane *plane) +{ + struct komeda_crtc *kcrtc; + int i; + + for (i = 0; i < kms->n_crtcs; i++) { + kcrtc = &kms->crtcs[i]; + + if (pipe == kcrtc->slave) + kcrtc->slave_planes |= BIT(drm_plane_index(plane)); + } +} + +/* use Layer0 as primary */ +static u32 get_plane_type(struct komeda_kms_dev *kms, + struct komeda_component *c) +{ + bool is_primary = (c->id == KOMEDA_COMPONENT_LAYER0); + + return is_primary ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY; +} + +static int komeda_plane_add(struct komeda_kms_dev *kms, + struct komeda_layer *layer) +{ + struct komeda_dev *mdev = kms->base.dev_private; + struct komeda_component *c = &layer->base; + struct komeda_plane *kplane; + struct drm_plane *plane; + u32 *formats, n_formats = 0; + int err; + + kplane = kzalloc(sizeof(*kplane), GFP_KERNEL); + if (!kplane) + return -ENOMEM; + + plane = &kplane->base; + kplane->layer = layer; + + formats = komeda_get_layer_fourcc_list(&mdev->fmt_tbl, + layer->layer_type, &n_formats); + if (!formats) { + kfree(kplane); + return -ENOMEM; + } + + err = drm_universal_plane_init(&kms->base, plane, + get_possible_crtcs(kms, c->pipeline), + &komeda_plane_funcs, + formats, n_formats, komeda_supported_modifiers, + get_plane_type(kms, c), + "%s", c->name); + + komeda_put_fourcc_list(formats); + + if (err) { + kfree(kplane); + return err; + } + + drm_plane_helper_add(plane, &komeda_plane_helper_funcs); + + err = drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0, + layer->supported_rots); + if (err) + goto cleanup; + + err = drm_plane_create_alpha_property(plane); + if (err) + goto cleanup; + + err = drm_plane_create_blend_mode_property(plane, + BIT(DRM_MODE_BLEND_PIXEL_NONE) | + BIT(DRM_MODE_BLEND_PREMULTI) | + BIT(DRM_MODE_BLEND_COVERAGE)); + if (err) + goto cleanup; + + err = drm_plane_create_color_properties(plane, + BIT(DRM_COLOR_YCBCR_BT601) | + BIT(DRM_COLOR_YCBCR_BT709) | + BIT(DRM_COLOR_YCBCR_BT2020), + BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) | + BIT(DRM_COLOR_YCBCR_FULL_RANGE), + DRM_COLOR_YCBCR_BT601, + DRM_COLOR_YCBCR_LIMITED_RANGE); + if (err) + goto cleanup; + + err = drm_plane_create_zpos_property(plane, layer->base.id, 0, 8); + if (err) + goto cleanup; + + komeda_set_crtc_plane_mask(kms, c->pipeline, plane); + + return 0; +cleanup: + komeda_plane_destroy(plane); + return err; +} + +int komeda_kms_add_planes(struct komeda_kms_dev *kms, struct komeda_dev *mdev) +{ + struct komeda_pipeline *pipe; + int i, j, err; + + for (i = 0; i < mdev->n_pipelines; i++) { + pipe = mdev->pipelines[i]; + + for (j = 0; j < pipe->n_layers; j++) { + err = komeda_plane_add(kms, pipe->layers[j]); + if (err) + return err; + } + } + + return 0; +} diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_private_obj.c b/drivers/gpu/drm/arm/display/komeda/komeda_private_obj.c new file mode 100644 index 000000000..914400c4a --- /dev/null +++ b/drivers/gpu/drm/arm/display/komeda/komeda_private_obj.c @@ -0,0 +1,432 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * (C) COPYRIGHT 2018 ARM Limited. All rights reserved. 
+ * Author: James.Qian.Wang <james.qian.wang@arm.com> + * + */ +#include "komeda_dev.h" +#include "komeda_kms.h" + +static void +komeda_component_state_reset(struct komeda_component_state *st) +{ + st->binding_user = NULL; + st->affected_inputs = st->active_inputs; + st->active_inputs = 0; + st->changed_active_inputs = 0; +} + +static struct drm_private_state * +komeda_layer_atomic_duplicate_state(struct drm_private_obj *obj) +{ + struct komeda_layer_state *st; + + st = kmemdup(obj->state, sizeof(*st), GFP_KERNEL); + if (!st) + return NULL; + + komeda_component_state_reset(&st->base); + __drm_atomic_helper_private_obj_duplicate_state(obj, &st->base.obj); + + return &st->base.obj; +} + +static void +komeda_layer_atomic_destroy_state(struct drm_private_obj *obj, + struct drm_private_state *state) +{ + struct komeda_layer_state *st = to_layer_st(priv_to_comp_st(state)); + + kfree(st); +} + +static const struct drm_private_state_funcs komeda_layer_obj_funcs = { + .atomic_duplicate_state = komeda_layer_atomic_duplicate_state, + .atomic_destroy_state = komeda_layer_atomic_destroy_state, +}; + +static int komeda_layer_obj_add(struct komeda_kms_dev *kms, + struct komeda_layer *layer) +{ + struct komeda_layer_state *st; + + st = kzalloc(sizeof(*st), GFP_KERNEL); + if (!st) + return -ENOMEM; + + st->base.component = &layer->base; + drm_atomic_private_obj_init(&kms->base, &layer->base.obj, &st->base.obj, + &komeda_layer_obj_funcs); + return 0; +} + +static struct drm_private_state * +komeda_scaler_atomic_duplicate_state(struct drm_private_obj *obj) +{ + struct komeda_scaler_state *st; + + st = kmemdup(obj->state, sizeof(*st), GFP_KERNEL); + if (!st) + return NULL; + + komeda_component_state_reset(&st->base); + __drm_atomic_helper_private_obj_duplicate_state(obj, &st->base.obj); + + return &st->base.obj; +} + +static void +komeda_scaler_atomic_destroy_state(struct drm_private_obj *obj, + struct drm_private_state *state) +{ + kfree(to_scaler_st(priv_to_comp_st(state))); +} + +static const struct drm_private_state_funcs komeda_scaler_obj_funcs = { + .atomic_duplicate_state = komeda_scaler_atomic_duplicate_state, + .atomic_destroy_state = komeda_scaler_atomic_destroy_state, +}; + +static int komeda_scaler_obj_add(struct komeda_kms_dev *kms, + struct komeda_scaler *scaler) +{ + struct komeda_scaler_state *st; + + st = kzalloc(sizeof(*st), GFP_KERNEL); + if (!st) + return -ENOMEM; + + st->base.component = &scaler->base; + drm_atomic_private_obj_init(&kms->base, + &scaler->base.obj, &st->base.obj, + &komeda_scaler_obj_funcs); + return 0; +} + +static struct drm_private_state * +komeda_compiz_atomic_duplicate_state(struct drm_private_obj *obj) +{ + struct komeda_compiz_state *st; + + st = kmemdup(obj->state, sizeof(*st), GFP_KERNEL); + if (!st) + return NULL; + + komeda_component_state_reset(&st->base); + __drm_atomic_helper_private_obj_duplicate_state(obj, &st->base.obj); + + return &st->base.obj; +} + +static void +komeda_compiz_atomic_destroy_state(struct drm_private_obj *obj, + struct drm_private_state *state) +{ + kfree(to_compiz_st(priv_to_comp_st(state))); +} + +static const struct drm_private_state_funcs komeda_compiz_obj_funcs = { + .atomic_duplicate_state = komeda_compiz_atomic_duplicate_state, + .atomic_destroy_state = komeda_compiz_atomic_destroy_state, +}; + +static int komeda_compiz_obj_add(struct komeda_kms_dev *kms, + struct komeda_compiz *compiz) +{ + struct komeda_compiz_state *st; + + st = kzalloc(sizeof(*st), GFP_KERNEL); + if (!st) + return -ENOMEM; + + st->base.component = 
&compiz->base; + drm_atomic_private_obj_init(&kms->base, &compiz->base.obj, &st->base.obj, + &komeda_compiz_obj_funcs); + + return 0; +} + +static struct drm_private_state * +komeda_splitter_atomic_duplicate_state(struct drm_private_obj *obj) +{ + struct komeda_splitter_state *st; + + st = kmemdup(obj->state, sizeof(*st), GFP_KERNEL); + if (!st) + return NULL; + + komeda_component_state_reset(&st->base); + __drm_atomic_helper_private_obj_duplicate_state(obj, &st->base.obj); + + return &st->base.obj; +} + +static void +komeda_splitter_atomic_destroy_state(struct drm_private_obj *obj, + struct drm_private_state *state) +{ + kfree(to_splitter_st(priv_to_comp_st(state))); +} + +static const struct drm_private_state_funcs komeda_splitter_obj_funcs = { + .atomic_duplicate_state = komeda_splitter_atomic_duplicate_state, + .atomic_destroy_state = komeda_splitter_atomic_destroy_state, +}; + +static int komeda_splitter_obj_add(struct komeda_kms_dev *kms, + struct komeda_splitter *splitter) +{ + struct komeda_splitter_state *st; + + st = kzalloc(sizeof(*st), GFP_KERNEL); + if (!st) + return -ENOMEM; + + st->base.component = &splitter->base; + drm_atomic_private_obj_init(&kms->base, + &splitter->base.obj, &st->base.obj, + &komeda_splitter_obj_funcs); + + return 0; +} + +static struct drm_private_state * +komeda_merger_atomic_duplicate_state(struct drm_private_obj *obj) +{ + struct komeda_merger_state *st; + + st = kmemdup(obj->state, sizeof(*st), GFP_KERNEL); + if (!st) + return NULL; + + komeda_component_state_reset(&st->base); + __drm_atomic_helper_private_obj_duplicate_state(obj, &st->base.obj); + + return &st->base.obj; +} + +static void komeda_merger_atomic_destroy_state(struct drm_private_obj *obj, + struct drm_private_state *state) +{ + kfree(to_merger_st(priv_to_comp_st(state))); +} + +static const struct drm_private_state_funcs komeda_merger_obj_funcs = { + .atomic_duplicate_state = komeda_merger_atomic_duplicate_state, + .atomic_destroy_state = komeda_merger_atomic_destroy_state, +}; + +static int komeda_merger_obj_add(struct komeda_kms_dev *kms, + struct komeda_merger *merger) +{ + struct komeda_merger_state *st; + + st = kzalloc(sizeof(*st), GFP_KERNEL); + if (!st) + return -ENOMEM; + + st->base.component = &merger->base; + drm_atomic_private_obj_init(&kms->base, + &merger->base.obj, &st->base.obj, + &komeda_merger_obj_funcs); + + return 0; +} + +static struct drm_private_state * +komeda_improc_atomic_duplicate_state(struct drm_private_obj *obj) +{ + struct komeda_improc_state *st; + + st = kmemdup(obj->state, sizeof(*st), GFP_KERNEL); + if (!st) + return NULL; + + komeda_component_state_reset(&st->base); + __drm_atomic_helper_private_obj_duplicate_state(obj, &st->base.obj); + + return &st->base.obj; +} + +static void +komeda_improc_atomic_destroy_state(struct drm_private_obj *obj, + struct drm_private_state *state) +{ + kfree(to_improc_st(priv_to_comp_st(state))); +} + +static const struct drm_private_state_funcs komeda_improc_obj_funcs = { + .atomic_duplicate_state = komeda_improc_atomic_duplicate_state, + .atomic_destroy_state = komeda_improc_atomic_destroy_state, +}; + +static int komeda_improc_obj_add(struct komeda_kms_dev *kms, + struct komeda_improc *improc) +{ + struct komeda_improc_state *st; + + st = kzalloc(sizeof(*st), GFP_KERNEL); + if (!st) + return -ENOMEM; + + st->base.component = &improc->base; + drm_atomic_private_obj_init(&kms->base, &improc->base.obj, &st->base.obj, + &komeda_improc_obj_funcs); + + return 0; +} + +static struct drm_private_state * 
+komeda_timing_ctrlr_atomic_duplicate_state(struct drm_private_obj *obj) +{ + struct komeda_timing_ctrlr_state *st; + + st = kmemdup(obj->state, sizeof(*st), GFP_KERNEL); + if (!st) + return NULL; + + komeda_component_state_reset(&st->base); + __drm_atomic_helper_private_obj_duplicate_state(obj, &st->base.obj); + + return &st->base.obj; +} + +static void +komeda_timing_ctrlr_atomic_destroy_state(struct drm_private_obj *obj, + struct drm_private_state *state) +{ + kfree(to_ctrlr_st(priv_to_comp_st(state))); +} + +static const struct drm_private_state_funcs komeda_timing_ctrlr_obj_funcs = { + .atomic_duplicate_state = komeda_timing_ctrlr_atomic_duplicate_state, + .atomic_destroy_state = komeda_timing_ctrlr_atomic_destroy_state, +}; + +static int komeda_timing_ctrlr_obj_add(struct komeda_kms_dev *kms, + struct komeda_timing_ctrlr *ctrlr) +{ + struct komeda_compiz_state *st; + + st = kzalloc(sizeof(*st), GFP_KERNEL); + if (!st) + return -ENOMEM; + + st->base.component = &ctrlr->base; + drm_atomic_private_obj_init(&kms->base, &ctrlr->base.obj, &st->base.obj, + &komeda_timing_ctrlr_obj_funcs); + + return 0; +} + +static struct drm_private_state * +komeda_pipeline_atomic_duplicate_state(struct drm_private_obj *obj) +{ + struct komeda_pipeline_state *st; + + st = kmemdup(obj->state, sizeof(*st), GFP_KERNEL); + if (!st) + return NULL; + + st->active_comps = 0; + + __drm_atomic_helper_private_obj_duplicate_state(obj, &st->obj); + + return &st->obj; +} + +static void +komeda_pipeline_atomic_destroy_state(struct drm_private_obj *obj, + struct drm_private_state *state) +{ + kfree(priv_to_pipe_st(state)); +} + +static const struct drm_private_state_funcs komeda_pipeline_obj_funcs = { + .atomic_duplicate_state = komeda_pipeline_atomic_duplicate_state, + .atomic_destroy_state = komeda_pipeline_atomic_destroy_state, +}; + +static int komeda_pipeline_obj_add(struct komeda_kms_dev *kms, + struct komeda_pipeline *pipe) +{ + struct komeda_pipeline_state *st; + + st = kzalloc(sizeof(*st), GFP_KERNEL); + if (!st) + return -ENOMEM; + + st->pipe = pipe; + drm_atomic_private_obj_init(&kms->base, &pipe->obj, &st->obj, + &komeda_pipeline_obj_funcs); + + return 0; +} + +int komeda_kms_add_private_objs(struct komeda_kms_dev *kms, + struct komeda_dev *mdev) +{ + struct komeda_pipeline *pipe; + int i, j, err; + + for (i = 0; i < mdev->n_pipelines; i++) { + pipe = mdev->pipelines[i]; + + err = komeda_pipeline_obj_add(kms, pipe); + if (err) + return err; + + for (j = 0; j < pipe->n_layers; j++) { + err = komeda_layer_obj_add(kms, pipe->layers[j]); + if (err) + return err; + } + + if (pipe->wb_layer) { + err = komeda_layer_obj_add(kms, pipe->wb_layer); + if (err) + return err; + } + + for (j = 0; j < pipe->n_scalers; j++) { + err = komeda_scaler_obj_add(kms, pipe->scalers[j]); + if (err) + return err; + } + + err = komeda_compiz_obj_add(kms, pipe->compiz); + if (err) + return err; + + if (pipe->splitter) { + err = komeda_splitter_obj_add(kms, pipe->splitter); + if (err) + return err; + } + + if (pipe->merger) { + err = komeda_merger_obj_add(kms, pipe->merger); + if (err) + return err; + } + + err = komeda_improc_obj_add(kms, pipe->improc); + if (err) + return err; + + err = komeda_timing_ctrlr_obj_add(kms, pipe->ctrlr); + if (err) + return err; + } + + return 0; +} + +void komeda_kms_cleanup_private_objs(struct komeda_kms_dev *kms) +{ + struct drm_mode_config *config = &kms->base.mode_config; + struct drm_private_obj *obj, *next; + + list_for_each_entry_safe(obj, next, &config->privobj_list, head) + 
drm_atomic_private_obj_fini(obj);
+}
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
new file mode 100644
index 000000000..e465cc487
--- /dev/null
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ * Author: James.Qian.Wang <james.qian.wang@arm.com>
+ *
+ */
+#include "komeda_dev.h"
+#include "komeda_kms.h"
+
+static int
+komeda_wb_init_data_flow(struct komeda_layer *wb_layer,
+			 struct drm_connector_state *conn_st,
+			 struct komeda_crtc_state *kcrtc_st,
+			 struct komeda_data_flow_cfg *dflow)
+{
+	struct drm_framebuffer *fb = conn_st->writeback_job->fb;
+
+	memset(dflow, 0, sizeof(*dflow));
+
+	dflow->out_w = fb->width;
+	dflow->out_h = fb->height;
+
+	/* the write back data comes from the compiz */
+	pipeline_composition_size(kcrtc_st, &dflow->in_w, &dflow->in_h);
+	dflow->input.component = &wb_layer->base.pipeline->compiz->base;
+	/* compiz doesn't output alpha */
+	dflow->pixel_blend_mode = DRM_MODE_BLEND_PIXEL_NONE;
+	dflow->rot = DRM_MODE_ROTATE_0;
+
+	komeda_complete_data_flow_cfg(wb_layer, dflow, fb);
+
+	return 0;
+}
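+
+/*
+ * Illustrative example (assumed sizes, not from real hardware): for a
+ * 1920x1080 composition written back into a 1280x720 buffer, the helper
+ * above would roughly produce:
+ *
+ *	dflow.in_w = 1920;  dflow.in_h = 1080;	(composition size)
+ *	dflow.out_w = 1280; dflow.out_h = 720;	(framebuffer size)
+ *
+ * The in/out mismatch is what later makes the writeback path claim a
+ * scaler via komeda_scaler_validate().
+ */
+
+static int
+komeda_wb_encoder_atomic_check(struct drm_encoder *encoder,
+			       struct drm_crtc_state *crtc_st,
+			       struct drm_connector_state *conn_st)
+{
+	struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(crtc_st);
+	struct drm_writeback_job *writeback_job = conn_st->writeback_job;
+	struct komeda_layer *wb_layer;
+	struct komeda_data_flow_cfg dflow;
+	int err;
+
+	if (!writeback_job)
+		return 0;
+
+	if (!crtc_st->active) {
+		DRM_DEBUG_ATOMIC("Cannot write the composition result out on an inactive CRTC.\n");
+		return -EINVAL;
+	}
+
+	wb_layer = to_kconn(to_wb_conn(conn_st->connector))->wb_layer;
+
+	/*
+	 * No need for a full modeset when the only connector changed is the
+	 * writeback connector.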
+ */ + if (crtc_st->connectors_changed && + is_only_changed_connector(crtc_st, conn_st->connector)) + crtc_st->connectors_changed = false; + + err = komeda_wb_init_data_flow(wb_layer, conn_st, kcrtc_st, &dflow); + if (err) + return err; + + if (dflow.en_split) + err = komeda_build_wb_split_data_flow(wb_layer, + conn_st, kcrtc_st, &dflow); + else + err = komeda_build_wb_data_flow(wb_layer, + conn_st, kcrtc_st, &dflow); + + return err; +} + +static const struct drm_encoder_helper_funcs komeda_wb_encoder_helper_funcs = { + .atomic_check = komeda_wb_encoder_atomic_check, +}; + +static int +komeda_wb_connector_get_modes(struct drm_connector *connector) +{ + return 0; +} + +static enum drm_mode_status +komeda_wb_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + struct drm_device *dev = connector->dev; + struct drm_mode_config *mode_config = &dev->mode_config; + int w = mode->hdisplay, h = mode->vdisplay; + + if ((w < mode_config->min_width) || (w > mode_config->max_width)) + return MODE_BAD_HVALUE; + + if ((h < mode_config->min_height) || (h > mode_config->max_height)) + return MODE_BAD_VVALUE; + + return MODE_OK; +} + +static const struct drm_connector_helper_funcs komeda_wb_conn_helper_funcs = { + .get_modes = komeda_wb_connector_get_modes, + .mode_valid = komeda_wb_connector_mode_valid, +}; + +static enum drm_connector_status +komeda_wb_connector_detect(struct drm_connector *connector, bool force) +{ + return connector_status_connected; +} + +static int +komeda_wb_connector_fill_modes(struct drm_connector *connector, + uint32_t maxX, uint32_t maxY) +{ + return 0; +} + +static void komeda_wb_connector_destroy(struct drm_connector *connector) +{ + drm_connector_cleanup(connector); + kfree(to_kconn(to_wb_conn(connector))); +} + +static const struct drm_connector_funcs komeda_wb_connector_funcs = { + .reset = drm_atomic_helper_connector_reset, + .detect = komeda_wb_connector_detect, + .fill_modes = komeda_wb_connector_fill_modes, + .destroy = komeda_wb_connector_destroy, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static int komeda_wb_connector_add(struct komeda_kms_dev *kms, + struct komeda_crtc *kcrtc) +{ + struct komeda_dev *mdev = kms->base.dev_private; + struct komeda_wb_connector *kwb_conn; + struct drm_writeback_connector *wb_conn; + struct drm_display_info *info; + u32 *formats, n_formats = 0; + int err; + + if (!kcrtc->master->wb_layer) + return 0; + + kwb_conn = kzalloc(sizeof(*kwb_conn), GFP_KERNEL); + if (!kwb_conn) + return -ENOMEM; + + kwb_conn->wb_layer = kcrtc->master->wb_layer; + + wb_conn = &kwb_conn->base; + wb_conn->encoder.possible_crtcs = BIT(drm_crtc_index(&kcrtc->base)); + + formats = komeda_get_layer_fourcc_list(&mdev->fmt_tbl, + kwb_conn->wb_layer->layer_type, + &n_formats); + + err = drm_writeback_connector_init(&kms->base, wb_conn, + &komeda_wb_connector_funcs, + &komeda_wb_encoder_helper_funcs, + formats, n_formats); + komeda_put_fourcc_list(formats); + if (err) { + kfree(kwb_conn); + return err; + } + + drm_connector_helper_add(&wb_conn->base, &komeda_wb_conn_helper_funcs); + + info = &kwb_conn->base.base.display_info; + info->bpc = __fls(kcrtc->master->improc->supported_color_depths); + info->color_formats = kcrtc->master->improc->supported_color_formats; + + kcrtc->wb_conn = kwb_conn; + + return 0; +} + +int komeda_kms_add_wb_connectors(struct komeda_kms_dev *kms, + struct komeda_dev *mdev) +{ + int i, err; + + for (i = 
0; i < kms->n_crtcs; i++) { + err = komeda_wb_connector_add(kms, &kms->crtcs[i]); + if (err) + return err; + } + + return 0; +} diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c new file mode 100644 index 000000000..af67fefed --- /dev/null +++ b/drivers/gpu/drm/arm/hdlcd_crtc.c @@ -0,0 +1,337 @@ +/* + * Copyright (C) 2013-2015 ARM Limited + * Author: Liviu Dudau <Liviu.Dudau@arm.com> + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive + * for more details. + * + * Implementation of a CRTC class for the HDLCD driver. + */ + +#include <linux/clk.h> +#include <linux/of_graph.h> +#include <linux/platform_data/simplefb.h> + +#include <video/videomode.h> + +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_crtc.h> +#include <drm/drm_fb_cma_helper.h> +#include <drm/drm_fb_helper.h> +#include <drm/drm_gem_cma_helper.h> +#include <drm/drm_of.h> +#include <drm/drm_plane_helper.h> +#include <drm/drm_probe_helper.h> +#include <drm/drm_vblank.h> + +#include "hdlcd_drv.h" +#include "hdlcd_regs.h" + +/* + * The HDLCD controller is a dumb RGB streamer that gets connected to + * a single HDMI transmitter or in the case of the ARM Models it gets + * emulated by the software that does the actual rendering. + * + */ + +static void hdlcd_crtc_cleanup(struct drm_crtc *crtc) +{ + struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); + + /* stop the controller on cleanup */ + hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0); + drm_crtc_cleanup(crtc); +} + +static int hdlcd_crtc_enable_vblank(struct drm_crtc *crtc) +{ + struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); + unsigned int mask = hdlcd_read(hdlcd, HDLCD_REG_INT_MASK); + + hdlcd_write(hdlcd, HDLCD_REG_INT_MASK, mask | HDLCD_INTERRUPT_VSYNC); + + return 0; +} + +static void hdlcd_crtc_disable_vblank(struct drm_crtc *crtc) +{ + struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); + unsigned int mask = hdlcd_read(hdlcd, HDLCD_REG_INT_MASK); + + hdlcd_write(hdlcd, HDLCD_REG_INT_MASK, mask & ~HDLCD_INTERRUPT_VSYNC); +} + +static const struct drm_crtc_funcs hdlcd_crtc_funcs = { + .destroy = hdlcd_crtc_cleanup, + .set_config = drm_atomic_helper_set_config, + .page_flip = drm_atomic_helper_page_flip, + .reset = drm_atomic_helper_crtc_reset, + .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, + .enable_vblank = hdlcd_crtc_enable_vblank, + .disable_vblank = hdlcd_crtc_disable_vblank, +}; + +static struct simplefb_format supported_formats[] = SIMPLEFB_FORMATS; + +/* + * Setup the HDLCD registers for decoding the pixels out of the framebuffer + */ +static int hdlcd_set_pxl_fmt(struct drm_crtc *crtc) +{ + unsigned int btpp; + struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); + const struct drm_framebuffer *fb = crtc->primary->state->fb; + uint32_t pixel_format; + struct simplefb_format *format = NULL; + int i; + + pixel_format = fb->format->format; + + for (i = 0; i < ARRAY_SIZE(supported_formats); i++) { + if (supported_formats[i].fourcc == pixel_format) + format = &supported_formats[i]; + } + + if (WARN_ON(!format)) + return 0; + + /* HDLCD uses 'bytes per pixel', zero means 1 byte */ + btpp = (format->bits_per_pixel + 7) / 8; + hdlcd_write(hdlcd, HDLCD_REG_PIXEL_FORMAT, (btpp - 1) << 3); + + /* + * The format of the HDLCD_REG_<color>_SELECT register is: + * - bits[23:16] - default value for that 
color component + * - bits[11:8] - number of bits to extract for each color component + * - bits[4:0] - index of the lowest bit to extract + * + * The default color value is used when bits[11:8] are zero, when the + * pixel is outside the visible frame area or when there is a + * buffer underrun. + */ + hdlcd_write(hdlcd, HDLCD_REG_RED_SELECT, format->red.offset | +#ifdef CONFIG_DRM_HDLCD_SHOW_UNDERRUN + 0x00ff0000 | /* show underruns in red */ +#endif + ((format->red.length & 0xf) << 8)); + hdlcd_write(hdlcd, HDLCD_REG_GREEN_SELECT, format->green.offset | + ((format->green.length & 0xf) << 8)); + hdlcd_write(hdlcd, HDLCD_REG_BLUE_SELECT, format->blue.offset | + ((format->blue.length & 0xf) << 8)); + + return 0; +} + +static void hdlcd_crtc_mode_set_nofb(struct drm_crtc *crtc) +{ + struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); + struct drm_display_mode *m = &crtc->state->adjusted_mode; + struct videomode vm; + unsigned int polarities, err; + + vm.vfront_porch = m->crtc_vsync_start - m->crtc_vdisplay; + vm.vback_porch = m->crtc_vtotal - m->crtc_vsync_end; + vm.vsync_len = m->crtc_vsync_end - m->crtc_vsync_start; + vm.hfront_porch = m->crtc_hsync_start - m->crtc_hdisplay; + vm.hback_porch = m->crtc_htotal - m->crtc_hsync_end; + vm.hsync_len = m->crtc_hsync_end - m->crtc_hsync_start; + + polarities = HDLCD_POLARITY_DATAEN | HDLCD_POLARITY_DATA; + + if (m->flags & DRM_MODE_FLAG_PHSYNC) + polarities |= HDLCD_POLARITY_HSYNC; + if (m->flags & DRM_MODE_FLAG_PVSYNC) + polarities |= HDLCD_POLARITY_VSYNC; + + /* Allow max number of outstanding requests and largest burst size */ + hdlcd_write(hdlcd, HDLCD_REG_BUS_OPTIONS, + HDLCD_BUS_MAX_OUTSTAND | HDLCD_BUS_BURST_16); + + hdlcd_write(hdlcd, HDLCD_REG_V_DATA, m->crtc_vdisplay - 1); + hdlcd_write(hdlcd, HDLCD_REG_V_BACK_PORCH, vm.vback_porch - 1); + hdlcd_write(hdlcd, HDLCD_REG_V_FRONT_PORCH, vm.vfront_porch - 1); + hdlcd_write(hdlcd, HDLCD_REG_V_SYNC, vm.vsync_len - 1); + hdlcd_write(hdlcd, HDLCD_REG_H_DATA, m->crtc_hdisplay - 1); + hdlcd_write(hdlcd, HDLCD_REG_H_BACK_PORCH, vm.hback_porch - 1); + hdlcd_write(hdlcd, HDLCD_REG_H_FRONT_PORCH, vm.hfront_porch - 1); + hdlcd_write(hdlcd, HDLCD_REG_H_SYNC, vm.hsync_len - 1); + hdlcd_write(hdlcd, HDLCD_REG_POLARITIES, polarities); + + err = hdlcd_set_pxl_fmt(crtc); + if (err) + return; + + clk_set_rate(hdlcd->clk, m->crtc_clock * 1000); +} + +static void hdlcd_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) +{ + struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); + + clk_prepare_enable(hdlcd->clk); + hdlcd_crtc_mode_set_nofb(crtc); + hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 1); + drm_crtc_vblank_on(crtc); +} + +static void hdlcd_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) +{ + struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); + + drm_crtc_vblank_off(crtc); + hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0); + clk_disable_unprepare(hdlcd->clk); +} + +static enum drm_mode_status hdlcd_crtc_mode_valid(struct drm_crtc *crtc, + const struct drm_display_mode *mode) +{ + struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); + long rate, clk_rate = mode->clock * 1000; + + rate = clk_round_rate(hdlcd->clk, clk_rate); + /* 0.1% seems a close enough tolerance for the TDA19988 on Juno */ + if (abs(rate - clk_rate) * 1000 > clk_rate) { + /* clock required by mode not supported by hardware */ + return MODE_NOCLOCK; + } + + return MODE_OK; +} + +static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc, + struct drm_crtc_state 
*state)
+{
+	struct drm_pending_vblank_event *event = crtc->state->event;
+
+	if (event) {
+		crtc->state->event = NULL;
+
+		spin_lock_irq(&crtc->dev->event_lock);
+		if (drm_crtc_vblank_get(crtc) == 0)
+			drm_crtc_arm_vblank_event(crtc, event);
+		else
+			drm_crtc_send_vblank_event(crtc, event);
+		spin_unlock_irq(&crtc->dev->event_lock);
+	}
+}
+
+static const struct drm_crtc_helper_funcs hdlcd_crtc_helper_funcs = {
+	.mode_valid	= hdlcd_crtc_mode_valid,
+	.atomic_begin	= hdlcd_crtc_atomic_begin,
+	.atomic_enable	= hdlcd_crtc_atomic_enable,
+	.atomic_disable	= hdlcd_crtc_atomic_disable,
+};
+
+static int hdlcd_plane_atomic_check(struct drm_plane *plane,
+				    struct drm_plane_state *state)
+{
+	int i;
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
+	u32 src_h = state->src_h >> 16;
+
+	/* only the HDLCD_REG_FB_LINE_COUNT register has a limit */
+	if (src_h >= HDLCD_MAX_YRES) {
+		DRM_DEBUG_KMS("Invalid source height: %d\n", src_h);
+		return -EINVAL;
+	}
+
+	for_each_new_crtc_in_state(state->state, crtc, crtc_state, i) {
+		/* we cannot disable the plane while the CRTC is active */
+		if (!state->fb && crtc_state->active)
+			return -EINVAL;
+		return drm_atomic_helper_check_plane_state(state, crtc_state,
+						DRM_PLANE_HELPER_NO_SCALING,
+						DRM_PLANE_HELPER_NO_SCALING,
+						false, true);
+	}
+
+	return 0;
+}
+
+static void hdlcd_plane_atomic_update(struct drm_plane *plane,
+				      struct drm_plane_state *state)
+{
+	struct drm_framebuffer *fb = plane->state->fb;
+	struct hdlcd_drm_private *hdlcd;
+	u32 dest_h;
+	dma_addr_t scanout_start;
+
+	if (!fb)
+		return;
+
+	dest_h = drm_rect_height(&plane->state->dst);
+	scanout_start = drm_fb_cma_get_gem_addr(fb, plane->state, 0);
+
+	hdlcd = plane->dev->dev_private;
+	hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, fb->pitches[0]);
+	hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_PITCH, fb->pitches[0]);
+	hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_COUNT, dest_h - 1);
+	hdlcd_write(hdlcd, HDLCD_REG_FB_BASE, scanout_start);
+}
+
+static const struct drm_plane_helper_funcs hdlcd_plane_helper_funcs = {
+	.atomic_check = hdlcd_plane_atomic_check,
+	.atomic_update = hdlcd_plane_atomic_update,
+};
+
+static const struct drm_plane_funcs hdlcd_plane_funcs = {
+	.update_plane		= drm_atomic_helper_update_plane,
+	.disable_plane		= drm_atomic_helper_disable_plane,
+	.destroy		= drm_plane_cleanup,
+	.reset			= drm_atomic_helper_plane_reset,
+	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static struct drm_plane *hdlcd_plane_init(struct drm_device *drm)
+{
+	struct hdlcd_drm_private *hdlcd = drm->dev_private;
+	struct drm_plane *plane = NULL;
+	u32 formats[ARRAY_SIZE(supported_formats)], i;
+	int ret;
+
+	plane = devm_kzalloc(drm->dev, sizeof(*plane), GFP_KERNEL);
+	if (!plane)
+		return ERR_PTR(-ENOMEM);
+
+	for (i = 0; i < ARRAY_SIZE(supported_formats); i++)
+		formats[i] = supported_formats[i].fourcc;
+
+	ret = drm_universal_plane_init(drm, plane, 0xff, &hdlcd_plane_funcs,
+				       formats, ARRAY_SIZE(formats),
+				       NULL,
+				       DRM_PLANE_TYPE_PRIMARY, NULL);
+	if (ret)
+		return ERR_PTR(ret);
+
+	drm_plane_helper_add(plane, &hdlcd_plane_helper_funcs);
+	hdlcd->plane = plane;
+
+	return plane;
+}
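+
+/*
+ * Illustrative example (assumed mode and format, not a real board dump):
+ * scanning out a 1920x1080 XRGB8888 framebuffer through the plane update
+ * above would roughly program:
+ *
+ *	HDLCD_REG_FB_LINE_LENGTH = 1920 * 4	(7680 bytes per line)
+ *	HDLCD_REG_FB_LINE_PITCH  = 1920 * 4	(lines are contiguous)
+ *	HDLCD_REG_FB_LINE_COUNT  = 1080 - 1	(register holds N - 1)
+ *	HDLCD_REG_FB_BASE        = CMA dma address of the buffer
+ */
+
+int hdlcd_setup_crtc(struct drm_device *drm)
+{
+	struct hdlcd_drm_private *hdlcd = drm->dev_private;
+	struct drm_plane *primary;
+	int ret;
+
+	primary = hdlcd_plane_init(drm);
+	if (IS_ERR(primary))
+		return PTR_ERR(primary);
+
+	ret = drm_crtc_init_with_planes(drm, &hdlcd->crtc, primary, NULL,
+					&hdlcd_crtc_funcs, NULL);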
if (ret) + return ret; + + drm_crtc_helper_add(&hdlcd->crtc, &hdlcd_crtc_helper_funcs); + return 0; +} diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c new file mode 100644 index 000000000..faa8a5a75 --- /dev/null +++ b/drivers/gpu/drm/arm/hdlcd_drv.c @@ -0,0 +1,423 @@ +/* + * Copyright (C) 2013-2015 ARM Limited + * Author: Liviu Dudau <Liviu.Dudau@arm.com> + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive + * for more details. + * + * ARM HDLCD Driver + */ + +#include <linux/module.h> +#include <linux/spinlock.h> +#include <linux/clk.h> +#include <linux/component.h> +#include <linux/console.h> +#include <linux/dma-mapping.h> +#include <linux/list.h> +#include <linux/of_graph.h> +#include <linux/of_reserved_mem.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_crtc.h> +#include <drm/drm_debugfs.h> +#include <drm/drm_drv.h> +#include <drm/drm_fb_cma_helper.h> +#include <drm/drm_fb_helper.h> +#include <drm/drm_gem_cma_helper.h> +#include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_irq.h> +#include <drm/drm_modeset_helper.h> +#include <drm/drm_of.h> +#include <drm/drm_probe_helper.h> +#include <drm/drm_vblank.h> + +#include "hdlcd_drv.h" +#include "hdlcd_regs.h" + +static int hdlcd_load(struct drm_device *drm, unsigned long flags) +{ + struct hdlcd_drm_private *hdlcd = drm->dev_private; + struct platform_device *pdev = to_platform_device(drm->dev); + struct resource *res; + u32 version; + int ret; + + hdlcd->clk = devm_clk_get(drm->dev, "pxlclk"); + if (IS_ERR(hdlcd->clk)) + return PTR_ERR(hdlcd->clk); + +#ifdef CONFIG_DEBUG_FS + atomic_set(&hdlcd->buffer_underrun_count, 0); + atomic_set(&hdlcd->bus_error_count, 0); + atomic_set(&hdlcd->vsync_count, 0); + atomic_set(&hdlcd->dma_end_count, 0); +#endif + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + hdlcd->mmio = devm_ioremap_resource(drm->dev, res); + if (IS_ERR(hdlcd->mmio)) { + DRM_ERROR("failed to map control registers area\n"); + ret = PTR_ERR(hdlcd->mmio); + hdlcd->mmio = NULL; + return ret; + } + + version = hdlcd_read(hdlcd, HDLCD_REG_VERSION); + if ((version & HDLCD_PRODUCT_MASK) != HDLCD_PRODUCT_ID) { + DRM_ERROR("unknown product id: 0x%x\n", version); + return -EINVAL; + } + DRM_INFO("found ARM HDLCD version r%dp%d\n", + (version & HDLCD_VERSION_MAJOR_MASK) >> 8, + version & HDLCD_VERSION_MINOR_MASK); + + /* Get the optional framebuffer memory resource */ + ret = of_reserved_mem_device_init(drm->dev); + if (ret && ret != -ENODEV) + return ret; + + ret = dma_set_mask_and_coherent(drm->dev, DMA_BIT_MASK(32)); + if (ret) + goto setup_fail; + + ret = hdlcd_setup_crtc(drm); + if (ret < 0) { + DRM_ERROR("failed to create crtc\n"); + goto setup_fail; + } + + ret = drm_irq_install(drm, platform_get_irq(pdev, 0)); + if (ret < 0) { + DRM_ERROR("failed to install IRQ handler\n"); + goto irq_fail; + } + + return 0; + +irq_fail: + drm_crtc_cleanup(&hdlcd->crtc); +setup_fail: + of_reserved_mem_device_release(drm->dev); + + return ret; +} + +static const struct drm_mode_config_funcs hdlcd_mode_config_funcs = { + .fb_create = drm_gem_fb_create, + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, +}; + +static void hdlcd_setup_mode_config(struct drm_device *drm) +{ + drm_mode_config_init(drm); + drm->mode_config.min_width = 0; + drm->mode_config.min_height = 0; + 
drm->mode_config.max_width = HDLCD_MAX_XRES; + drm->mode_config.max_height = HDLCD_MAX_YRES; + drm->mode_config.funcs = &hdlcd_mode_config_funcs; +} + +static irqreturn_t hdlcd_irq(int irq, void *arg) +{ + struct drm_device *drm = arg; + struct hdlcd_drm_private *hdlcd = drm->dev_private; + unsigned long irq_status; + + irq_status = hdlcd_read(hdlcd, HDLCD_REG_INT_STATUS); + +#ifdef CONFIG_DEBUG_FS + if (irq_status & HDLCD_INTERRUPT_UNDERRUN) + atomic_inc(&hdlcd->buffer_underrun_count); + + if (irq_status & HDLCD_INTERRUPT_DMA_END) + atomic_inc(&hdlcd->dma_end_count); + + if (irq_status & HDLCD_INTERRUPT_BUS_ERROR) + atomic_inc(&hdlcd->bus_error_count); + + if (irq_status & HDLCD_INTERRUPT_VSYNC) + atomic_inc(&hdlcd->vsync_count); + +#endif + if (irq_status & HDLCD_INTERRUPT_VSYNC) + drm_crtc_handle_vblank(&hdlcd->crtc); + + /* acknowledge interrupt(s) */ + hdlcd_write(hdlcd, HDLCD_REG_INT_CLEAR, irq_status); + + return IRQ_HANDLED; +} + +static void hdlcd_irq_preinstall(struct drm_device *drm) +{ + struct hdlcd_drm_private *hdlcd = drm->dev_private; + /* Ensure interrupts are disabled */ + hdlcd_write(hdlcd, HDLCD_REG_INT_MASK, 0); + hdlcd_write(hdlcd, HDLCD_REG_INT_CLEAR, ~0); +} + +static int hdlcd_irq_postinstall(struct drm_device *drm) +{ +#ifdef CONFIG_DEBUG_FS + struct hdlcd_drm_private *hdlcd = drm->dev_private; + unsigned long irq_mask = hdlcd_read(hdlcd, HDLCD_REG_INT_MASK); + + /* enable debug interrupts */ + irq_mask |= HDLCD_DEBUG_INT_MASK; + + hdlcd_write(hdlcd, HDLCD_REG_INT_MASK, irq_mask); +#endif + return 0; +} + +static void hdlcd_irq_uninstall(struct drm_device *drm) +{ + struct hdlcd_drm_private *hdlcd = drm->dev_private; + /* disable all the interrupts that we might have enabled */ + unsigned long irq_mask = hdlcd_read(hdlcd, HDLCD_REG_INT_MASK); + +#ifdef CONFIG_DEBUG_FS + /* disable debug interrupts */ + irq_mask &= ~HDLCD_DEBUG_INT_MASK; +#endif + + /* disable vsync interrupts */ + irq_mask &= ~HDLCD_INTERRUPT_VSYNC; + + hdlcd_write(hdlcd, HDLCD_REG_INT_MASK, irq_mask); +} + +#ifdef CONFIG_DEBUG_FS +static int hdlcd_show_underrun_count(struct seq_file *m, void *arg) +{ + struct drm_info_node *node = (struct drm_info_node *)m->private; + struct drm_device *drm = node->minor->dev; + struct hdlcd_drm_private *hdlcd = drm->dev_private; + + seq_printf(m, "underrun : %d\n", atomic_read(&hdlcd->buffer_underrun_count)); + seq_printf(m, "dma_end : %d\n", atomic_read(&hdlcd->dma_end_count)); + seq_printf(m, "bus_error: %d\n", atomic_read(&hdlcd->bus_error_count)); + seq_printf(m, "vsync : %d\n", atomic_read(&hdlcd->vsync_count)); + return 0; +} + +static int hdlcd_show_pxlclock(struct seq_file *m, void *arg) +{ + struct drm_info_node *node = (struct drm_info_node *)m->private; + struct drm_device *drm = node->minor->dev; + struct hdlcd_drm_private *hdlcd = drm->dev_private; + unsigned long clkrate = clk_get_rate(hdlcd->clk); + unsigned long mode_clock = hdlcd->crtc.mode.crtc_clock * 1000; + + seq_printf(m, "hw : %lu\n", clkrate); + seq_printf(m, "mode: %lu\n", mode_clock); + return 0; +} + +static struct drm_info_list hdlcd_debugfs_list[] = { + { "interrupt_count", hdlcd_show_underrun_count, 0 }, + { "clocks", hdlcd_show_pxlclock, 0 }, +}; + +static void hdlcd_debugfs_init(struct drm_minor *minor) +{ + drm_debugfs_create_files(hdlcd_debugfs_list, + ARRAY_SIZE(hdlcd_debugfs_list), + minor->debugfs_root, minor); +} +#endif + +DEFINE_DRM_GEM_CMA_FOPS(fops); + +static struct drm_driver hdlcd_driver = { + .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, + 
.irq_handler = hdlcd_irq, + .irq_preinstall = hdlcd_irq_preinstall, + .irq_postinstall = hdlcd_irq_postinstall, + .irq_uninstall = hdlcd_irq_uninstall, + DRM_GEM_CMA_DRIVER_OPS, +#ifdef CONFIG_DEBUG_FS + .debugfs_init = hdlcd_debugfs_init, +#endif + .fops = &fops, + .name = "hdlcd", + .desc = "ARM HDLCD Controller DRM", + .date = "20151021", + .major = 1, + .minor = 0, +}; + +static int hdlcd_drm_bind(struct device *dev) +{ + struct drm_device *drm; + struct hdlcd_drm_private *hdlcd; + int ret; + + hdlcd = devm_kzalloc(dev, sizeof(*hdlcd), GFP_KERNEL); + if (!hdlcd) + return -ENOMEM; + + drm = drm_dev_alloc(&hdlcd_driver, dev); + if (IS_ERR(drm)) + return PTR_ERR(drm); + + drm->dev_private = hdlcd; + dev_set_drvdata(dev, drm); + + hdlcd_setup_mode_config(drm); + ret = hdlcd_load(drm, 0); + if (ret) + goto err_free; + + /* Set the CRTC's port so that the encoder component can find it */ + hdlcd->crtc.port = of_graph_get_port_by_id(dev->of_node, 0); + + ret = component_bind_all(dev, drm); + if (ret) { + DRM_ERROR("Failed to bind all components\n"); + goto err_unload; + } + + ret = pm_runtime_set_active(dev); + if (ret) + goto err_pm_active; + + pm_runtime_enable(dev); + + ret = drm_vblank_init(drm, drm->mode_config.num_crtc); + if (ret < 0) { + DRM_ERROR("failed to initialise vblank\n"); + goto err_vblank; + } + + drm_mode_config_reset(drm); + drm_kms_helper_poll_init(drm); + + ret = drm_dev_register(drm, 0); + if (ret) + goto err_register; + + drm_fbdev_generic_setup(drm, 32); + + return 0; + +err_register: + drm_kms_helper_poll_fini(drm); +err_vblank: + pm_runtime_disable(drm->dev); +err_pm_active: + drm_atomic_helper_shutdown(drm); + component_unbind_all(dev, drm); +err_unload: + of_node_put(hdlcd->crtc.port); + hdlcd->crtc.port = NULL; + drm_irq_uninstall(drm); + of_reserved_mem_device_release(drm->dev); +err_free: + drm_mode_config_cleanup(drm); + dev_set_drvdata(dev, NULL); + drm_dev_put(drm); + + return ret; +} + +static void hdlcd_drm_unbind(struct device *dev) +{ + struct drm_device *drm = dev_get_drvdata(dev); + struct hdlcd_drm_private *hdlcd = drm->dev_private; + + drm_dev_unregister(drm); + drm_kms_helper_poll_fini(drm); + component_unbind_all(dev, drm); + of_node_put(hdlcd->crtc.port); + hdlcd->crtc.port = NULL; + pm_runtime_get_sync(dev); + drm_atomic_helper_shutdown(drm); + drm_irq_uninstall(drm); + pm_runtime_put(dev); + if (pm_runtime_enabled(dev)) + pm_runtime_disable(dev); + of_reserved_mem_device_release(dev); + drm_mode_config_cleanup(drm); + drm->dev_private = NULL; + dev_set_drvdata(dev, NULL); + drm_dev_put(drm); +} + +static const struct component_master_ops hdlcd_master_ops = { + .bind = hdlcd_drm_bind, + .unbind = hdlcd_drm_unbind, +}; + +static int compare_dev(struct device *dev, void *data) +{ + return dev->of_node == data; +} + +static int hdlcd_probe(struct platform_device *pdev) +{ + struct device_node *port; + struct component_match *match = NULL; + + /* there is only one output port inside each device, find it */ + port = of_graph_get_remote_node(pdev->dev.of_node, 0, 0); + if (!port) + return -ENODEV; + + drm_of_component_match_add(&pdev->dev, &match, compare_dev, port); + of_node_put(port); + + return component_master_add_with_match(&pdev->dev, &hdlcd_master_ops, + match); +} + +static int hdlcd_remove(struct platform_device *pdev) +{ + component_master_del(&pdev->dev, &hdlcd_master_ops); + return 0; +} + +static const struct of_device_id hdlcd_of_match[] = { + { .compatible = "arm,hdlcd" }, + {}, +}; +MODULE_DEVICE_TABLE(of, hdlcd_of_match); + +static 
int __maybe_unused hdlcd_pm_suspend(struct device *dev) +{ + struct drm_device *drm = dev_get_drvdata(dev); + + return drm_mode_config_helper_suspend(drm); +} + +static int __maybe_unused hdlcd_pm_resume(struct device *dev) +{ + struct drm_device *drm = dev_get_drvdata(dev); + + drm_mode_config_helper_resume(drm); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(hdlcd_pm_ops, hdlcd_pm_suspend, hdlcd_pm_resume); + +static struct platform_driver hdlcd_platform_driver = { + .probe = hdlcd_probe, + .remove = hdlcd_remove, + .driver = { + .name = "hdlcd", + .pm = &hdlcd_pm_ops, + .of_match_table = hdlcd_of_match, + }, +}; + +module_platform_driver(hdlcd_platform_driver); + +MODULE_AUTHOR("Liviu Dudau"); +MODULE_DESCRIPTION("ARM HDLCD DRM driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/arm/hdlcd_drv.h b/drivers/gpu/drm/arm/hdlcd_drv.h new file mode 100644 index 000000000..fd438d177 --- /dev/null +++ b/drivers/gpu/drm/arm/hdlcd_drv.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * ARM HDLCD Controller register definition + */ + +#ifndef __HDLCD_DRV_H__ +#define __HDLCD_DRV_H__ + +struct hdlcd_drm_private { + void __iomem *mmio; + struct clk *clk; + struct drm_crtc crtc; + struct drm_plane *plane; +#ifdef CONFIG_DEBUG_FS + atomic_t buffer_underrun_count; + atomic_t bus_error_count; + atomic_t vsync_count; + atomic_t dma_end_count; +#endif +}; + +#define crtc_to_hdlcd_priv(x) container_of(x, struct hdlcd_drm_private, crtc) + +static inline void hdlcd_write(struct hdlcd_drm_private *hdlcd, + unsigned int reg, u32 value) +{ + writel(value, hdlcd->mmio + reg); +} + +static inline u32 hdlcd_read(struct hdlcd_drm_private *hdlcd, unsigned int reg) +{ + return readl(hdlcd->mmio + reg); +} + +int hdlcd_setup_crtc(struct drm_device *dev); +void hdlcd_set_scanout(struct hdlcd_drm_private *hdlcd); + +#endif /* __HDLCD_DRV_H__ */ diff --git a/drivers/gpu/drm/arm/hdlcd_regs.h b/drivers/gpu/drm/arm/hdlcd_regs.h new file mode 100644 index 000000000..66799ebef --- /dev/null +++ b/drivers/gpu/drm/arm/hdlcd_regs.h @@ -0,0 +1,87 @@ +/* + * Copyright (C) 2013,2014 ARM Limited + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive + * for more details. 
+ * + * ARM HDLCD Controller register definition + */ + +#ifndef __HDLCD_REGS_H__ +#define __HDLCD_REGS_H__ + +/* register offsets */ +#define HDLCD_REG_VERSION 0x0000 /* ro */ +#define HDLCD_REG_INT_RAWSTAT 0x0010 /* rw */ +#define HDLCD_REG_INT_CLEAR 0x0014 /* wo */ +#define HDLCD_REG_INT_MASK 0x0018 /* rw */ +#define HDLCD_REG_INT_STATUS 0x001c /* ro */ +#define HDLCD_REG_FB_BASE 0x0100 /* rw */ +#define HDLCD_REG_FB_LINE_LENGTH 0x0104 /* rw */ +#define HDLCD_REG_FB_LINE_COUNT 0x0108 /* rw */ +#define HDLCD_REG_FB_LINE_PITCH 0x010c /* rw */ +#define HDLCD_REG_BUS_OPTIONS 0x0110 /* rw */ +#define HDLCD_REG_V_SYNC 0x0200 /* rw */ +#define HDLCD_REG_V_BACK_PORCH 0x0204 /* rw */ +#define HDLCD_REG_V_DATA 0x0208 /* rw */ +#define HDLCD_REG_V_FRONT_PORCH 0x020c /* rw */ +#define HDLCD_REG_H_SYNC 0x0210 /* rw */ +#define HDLCD_REG_H_BACK_PORCH 0x0214 /* rw */ +#define HDLCD_REG_H_DATA 0x0218 /* rw */ +#define HDLCD_REG_H_FRONT_PORCH 0x021c /* rw */ +#define HDLCD_REG_POLARITIES 0x0220 /* rw */ +#define HDLCD_REG_COMMAND 0x0230 /* rw */ +#define HDLCD_REG_PIXEL_FORMAT 0x0240 /* rw */ +#define HDLCD_REG_RED_SELECT 0x0244 /* rw */ +#define HDLCD_REG_GREEN_SELECT 0x0248 /* rw */ +#define HDLCD_REG_BLUE_SELECT 0x024c /* rw */ + +/* version */ +#define HDLCD_PRODUCT_ID 0x1CDC0000 +#define HDLCD_PRODUCT_MASK 0xFFFF0000 +#define HDLCD_VERSION_MAJOR_MASK 0x0000FF00 +#define HDLCD_VERSION_MINOR_MASK 0x000000FF + +/* interrupts */ +#define HDLCD_INTERRUPT_DMA_END (1 << 0) +#define HDLCD_INTERRUPT_BUS_ERROR (1 << 1) +#define HDLCD_INTERRUPT_VSYNC (1 << 2) +#define HDLCD_INTERRUPT_UNDERRUN (1 << 3) +#define HDLCD_DEBUG_INT_MASK (HDLCD_INTERRUPT_DMA_END | \ + HDLCD_INTERRUPT_BUS_ERROR | \ + HDLCD_INTERRUPT_UNDERRUN) + +/* polarities */ +#define HDLCD_POLARITY_VSYNC (1 << 0) +#define HDLCD_POLARITY_HSYNC (1 << 1) +#define HDLCD_POLARITY_DATAEN (1 << 2) +#define HDLCD_POLARITY_DATA (1 << 3) +#define HDLCD_POLARITY_PIXELCLK (1 << 4) + +/* commands */ +#define HDLCD_COMMAND_DISABLE (0 << 0) +#define HDLCD_COMMAND_ENABLE (1 << 0) + +/* pixel format */ +#define HDLCD_PIXEL_FMT_LITTLE_ENDIAN (0 << 31) +#define HDLCD_PIXEL_FMT_BIG_ENDIAN (1 << 31) +#define HDLCD_BYTES_PER_PIXEL_MASK (3 << 3) + +/* bus options */ +#define HDLCD_BUS_BURST_MASK 0x01f +#define HDLCD_BUS_MAX_OUTSTAND 0xf00 +#define HDLCD_BUS_BURST_NONE (0 << 0) +#define HDLCD_BUS_BURST_1 (1 << 0) +#define HDLCD_BUS_BURST_2 (1 << 1) +#define HDLCD_BUS_BURST_4 (1 << 2) +#define HDLCD_BUS_BURST_8 (1 << 3) +#define HDLCD_BUS_BURST_16 (1 << 4) + +/* Max resolution supported is 4096x4096, 32bpp */ +#define HDLCD_MAX_XRES 4096 +#define HDLCD_MAX_YRES 4096 + +#define NR_PALETTE 256 + +#endif /* __HDLCD_REGS_H__ */ diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c new file mode 100644 index 000000000..af7290942 --- /dev/null +++ b/drivers/gpu/drm/arm/malidp_crtc.c @@ -0,0 +1,560 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * (C) COPYRIGHT 2016 ARM Limited. All rights reserved. 
+ * Author: Liviu Dudau <Liviu.Dudau@arm.com> + * + * ARM Mali DP500/DP550/DP650 driver (crtc operations) + */ + +#include <linux/clk.h> +#include <linux/pm_runtime.h> + +#include <video/videomode.h> + +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_crtc.h> +#include <drm/drm_print.h> +#include <drm/drm_probe_helper.h> +#include <drm/drm_vblank.h> + +#include "malidp_drv.h" +#include "malidp_hw.h" + +static enum drm_mode_status malidp_crtc_mode_valid(struct drm_crtc *crtc, + const struct drm_display_mode *mode) +{ + struct malidp_drm *malidp = crtc_to_malidp_device(crtc); + struct malidp_hw_device *hwdev = malidp->dev; + + /* + * check that the hardware can drive the required clock rate, + * but skip the check if the clock is meant to be disabled (req_rate = 0) + */ + long rate, req_rate = mode->crtc_clock * 1000; + + if (req_rate) { + rate = clk_round_rate(hwdev->pxlclk, req_rate); + if (rate != req_rate) { + DRM_DEBUG_DRIVER("pxlclk doesn't support %ld Hz\n", + req_rate); + return MODE_NOCLOCK; + } + } + + return MODE_OK; +} + +static void malidp_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) +{ + struct malidp_drm *malidp = crtc_to_malidp_device(crtc); + struct malidp_hw_device *hwdev = malidp->dev; + struct videomode vm; + int err = pm_runtime_get_sync(crtc->dev->dev); + + if (err < 0) { + DRM_DEBUG_DRIVER("Failed to enable runtime power management: %d\n", err); + return; + } + + drm_display_mode_to_videomode(&crtc->state->adjusted_mode, &vm); + clk_prepare_enable(hwdev->pxlclk); + + /* We rely on firmware to set mclk to a sensible level. */ + clk_set_rate(hwdev->pxlclk, crtc->state->adjusted_mode.crtc_clock * 1000); + + hwdev->hw->modeset(hwdev, &vm); + hwdev->hw->leave_config_mode(hwdev); + drm_crtc_vblank_on(crtc); +} + +static void malidp_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) +{ + struct malidp_drm *malidp = crtc_to_malidp_device(crtc); + struct malidp_hw_device *hwdev = malidp->dev; + int err; + + /* always disable planes on the CRTC that is being turned off */ + drm_atomic_helper_disable_planes_on_crtc(old_state, false); + + drm_crtc_vblank_off(crtc); + hwdev->hw->enter_config_mode(hwdev); + + clk_disable_unprepare(hwdev->pxlclk); + + err = pm_runtime_put(crtc->dev->dev); + if (err < 0) { + DRM_DEBUG_DRIVER("Failed to disable runtime power management: %d\n", err); + } +} + +static const struct gamma_curve_segment { + u16 start; + u16 end; +} segments[MALIDP_COEFFTAB_NUM_COEFFS] = { + /* sector 0 */ + { 0, 0 }, { 1, 1 }, { 2, 2 }, { 3, 3 }, + { 4, 4 }, { 5, 5 }, { 6, 6 }, { 7, 7 }, + { 8, 8 }, { 9, 9 }, { 10, 10 }, { 11, 11 }, + { 12, 12 }, { 13, 13 }, { 14, 14 }, { 15, 15 }, + /* sector 1 */ + { 16, 19 }, { 20, 23 }, { 24, 27 }, { 28, 31 }, + /* sector 2 */ + { 32, 39 }, { 40, 47 }, { 48, 55 }, { 56, 63 }, + /* sector 3 */ + { 64, 79 }, { 80, 95 }, { 96, 111 }, { 112, 127 }, + /* sector 4 */ + { 128, 159 }, { 160, 191 }, { 192, 223 }, { 224, 255 }, + /* sector 5 */ + { 256, 319 }, { 320, 383 }, { 384, 447 }, { 448, 511 }, + /* sector 6 */ + { 512, 639 }, { 640, 767 }, { 768, 895 }, { 896, 1023 }, + { 1024, 1151 }, { 1152, 1279 }, { 1280, 1407 }, { 1408, 1535 }, + { 1536, 1663 }, { 1664, 1791 }, { 1792, 1919 }, { 1920, 2047 }, + { 2048, 2175 }, { 2176, 2303 }, { 2304, 2431 }, { 2432, 2559 }, + { 2560, 2687 }, { 2688, 2815 }, { 2816, 2943 }, { 2944, 3071 }, + { 3072, 3199 }, { 3200, 3327 }, { 3328, 3455 }, { 3456, 3583 }, + { 3584, 3711 }, { 3712, 3839 }, { 3840, 3967 }, { 
3968, 4095 }, +}; + +#define DE_COEFTAB_DATA(a, b) ((((a) & 0xfff) << 16) | (((b) & 0xfff))) + +static void malidp_generate_gamma_table(struct drm_property_blob *lut_blob, + u32 coeffs[MALIDP_COEFFTAB_NUM_COEFFS]) +{ + struct drm_color_lut *lut = (struct drm_color_lut *)lut_blob->data; + int i; + + for (i = 0; i < MALIDP_COEFFTAB_NUM_COEFFS; ++i) { + u32 a, b, delta_in, out_start, out_end; + + delta_in = segments[i].end - segments[i].start; + /* DP has 12-bit internal precision for its LUTs. */ + out_start = drm_color_lut_extract(lut[segments[i].start].green, + 12); + out_end = drm_color_lut_extract(lut[segments[i].end].green, 12); + a = (delta_in == 0) ? 0 : ((out_end - out_start) * 256) / delta_in; + b = out_start; + coeffs[i] = DE_COEFTAB_DATA(a, b); + } +} + +/* + * Check if there is a new gamma LUT and if it is of an acceptable size. Also, + * reject any LUTs that use distinct red, green, and blue curves. + */ +static int malidp_crtc_atomic_check_gamma(struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ + struct malidp_crtc_state *mc = to_malidp_crtc_state(state); + struct drm_color_lut *lut; + size_t lut_size; + int i; + + if (!state->color_mgmt_changed || !state->gamma_lut) + return 0; + + if (crtc->state->gamma_lut && + (crtc->state->gamma_lut->base.id == state->gamma_lut->base.id)) + return 0; + + if (state->gamma_lut->length % sizeof(struct drm_color_lut)) + return -EINVAL; + + lut_size = state->gamma_lut->length / sizeof(struct drm_color_lut); + if (lut_size != MALIDP_GAMMA_LUT_SIZE) + return -EINVAL; + + lut = (struct drm_color_lut *)state->gamma_lut->data; + for (i = 0; i < lut_size; ++i) + if (!((lut[i].red == lut[i].green) && + (lut[i].red == lut[i].blue))) + return -EINVAL; + + if (!state->mode_changed) { + int ret; + + state->mode_changed = true; + /* + * Kerneldoc for drm_atomic_helper_check_modeset mandates that + * it be invoked when the driver sets ->mode_changed. Since + * changing the gamma LUT doesn't depend on any external + * resources, it is safe to call it only once. + */ + ret = drm_atomic_helper_check_modeset(crtc->dev, state->state); + if (ret) + return ret; + } + + malidp_generate_gamma_table(state->gamma_lut, mc->gamma_coeffs); + return 0; +} + +/* + * Check if there is a new CTM and if it contains valid input. Valid here means + * that the number is inside the representable range for a Q3.12 number, + * excluding truncating the fractional part of the input data. + * + * The COLORADJ registers can be changed atomically. + */ +static int malidp_crtc_atomic_check_ctm(struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ + struct malidp_crtc_state *mc = to_malidp_crtc_state(state); + struct drm_color_ctm *ctm; + int i; + + if (!state->color_mgmt_changed) + return 0; + + if (!state->ctm) + return 0; + + if (crtc->state->ctm && (crtc->state->ctm->base.id == + state->ctm->base.id)) + return 0; + + /* + * The size of the ctm is checked in + * drm_atomic_replace_property_blob_from_id. + */ + ctm = (struct drm_color_ctm *)state->ctm->data; + for (i = 0; i < ARRAY_SIZE(ctm->matrix); ++i) { + /* Convert from S31.32 to Q3.12. */ + s64 val = ctm->matrix[i]; + u32 mag = ((((u64)val) & ~BIT_ULL(63)) >> 20) & + GENMASK_ULL(14, 0); + + /* + * Convert to 2s complement and check the destination's top bit + * for overflow. 
NB: Can't check before converting or it'd
+ * incorrectly reject the case:
+ * sign == 1
+ * mag == 0x2000
+ */
+ if (val & BIT_ULL(63))
+ mag = ~mag + 1;
+ if (!!(val & BIT_ULL(63)) != !!(mag & BIT(14)))
+ return -EINVAL;
+ mc->coloradj_coeffs[i] = mag;
+ }
+
+ return 0;
+}
+
+static int malidp_crtc_atomic_check_scaling(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
+ struct malidp_hw_device *hwdev = malidp->dev;
+ struct malidp_crtc_state *cs = to_malidp_crtc_state(state);
+ struct malidp_se_config *s = &cs->scaler_config;
+ struct drm_plane *plane;
+ struct videomode vm;
+ const struct drm_plane_state *pstate;
+ u32 h_upscale_factor = 0; /* U16.16 */
+ u32 v_upscale_factor = 0; /* U16.16 */
+ u8 scaling = cs->scaled_planes_mask;
+ int ret;
+
+ if (!scaling) {
+ s->scale_enable = false;
+ goto mclk_calc;
+ }
+
+ /* The scaling engine can only handle one plane at a time. */
+ if (scaling & (scaling - 1))
+ return -EINVAL;
+
+ drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
+ struct malidp_plane *mp = to_malidp_plane(plane);
+ u32 phase;
+
+ if (!(mp->layer->id & scaling))
+ continue;
+
+ /*
+ * Convert crtc_[w|h] to U32.32, then divide by U16.16 src_[w|h]
+ * to get the U16.16 result.
+ */
+ h_upscale_factor = div_u64((u64)pstate->crtc_w << 32,
+ pstate->src_w);
+ v_upscale_factor = div_u64((u64)pstate->crtc_h << 32,
+ pstate->src_h);
+
+ s->enhancer_enable = ((h_upscale_factor >> 16) >= 2 ||
+ (v_upscale_factor >> 16) >= 2);
+
+ if (pstate->rotation & MALIDP_ROTATED_MASK) {
+ s->input_w = pstate->src_h >> 16;
+ s->input_h = pstate->src_w >> 16;
+ } else {
+ s->input_w = pstate->src_w >> 16;
+ s->input_h = pstate->src_h >> 16;
+ }
+
+ s->output_w = pstate->crtc_w;
+ s->output_h = pstate->crtc_h;
+
+#define SE_N_PHASE 4
+#define SE_SHIFT_N_PHASE 12
+ /* Calculate initial_phase and delta_phase for horizontal. */
+ phase = s->input_w;
+ s->h_init_phase =
+ ((phase << SE_N_PHASE) / s->output_w + 1) / 2;
+
+ phase = s->input_w;
+ phase <<= (SE_SHIFT_N_PHASE + SE_N_PHASE);
+ s->h_delta_phase = phase / s->output_w;
+
+ /* Same for vertical. */
+ phase = s->input_h;
+ s->v_init_phase =
+ ((phase << SE_N_PHASE) / s->output_h + 1) / 2;
+
+ phase = s->input_h;
+ phase <<= (SE_SHIFT_N_PHASE + SE_N_PHASE);
+ s->v_delta_phase = phase / s->output_h;
+#undef SE_N_PHASE
+#undef SE_SHIFT_N_PHASE
+ s->plane_src_id = mp->layer->id;
+ }
+
+ s->scale_enable = true;
+ s->hcoeff = malidp_se_select_coeffs(h_upscale_factor);
+ s->vcoeff = malidp_se_select_coeffs(v_upscale_factor);
+
+mclk_calc:
+ drm_display_mode_to_videomode(&state->adjusted_mode, &vm);
+ ret = hwdev->hw->se_calc_mclk(hwdev, s, &vm);
+ if (ret < 0)
+ return -EINVAL;
+ return 0;
+}
+
+static int malidp_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
+ struct malidp_hw_device *hwdev = malidp->dev;
+ struct drm_plane *plane;
+ const struct drm_plane_state *pstate;
+ u32 rot_mem_free, rot_mem_usable;
+ int rotated_planes = 0;
+ int ret;
+
+ /*
+ * Check that there is enough rotation memory available for planes
+ * that need 90° and 270° rotation, or planes that are compressed.
+ * Each plane has set its required memory size in the ->plane_check()
+ * callback; here we only make sure that the sums are less than the
+ * total usable memory.
+ *
+ * The rotation memory allocation algorithm (for each plane):
+ * a. 
If no more rotated or compressed planes exist, all remaining + * rotate memory in the bank is available for use by the plane. + * b. If other rotated or compressed planes exist, and plane's + * layer ID is DE_VIDEO1, it can use all the memory from first bank + * if secondary rotation memory bank is available, otherwise it can + * use up to half the bank's memory. + * c. If other rotated or compressed planes exist, and plane's layer ID + * is not DE_VIDEO1, it can use half of the available memory. + * + * Note: this algorithm assumes that the order in which the planes are + * checked always has DE_VIDEO1 plane first in the list if it is + * rotated. Because that is how we create the planes in the first + * place, under current DRM version things work, but if ever the order + * in which drm_atomic_crtc_state_for_each_plane() iterates over planes + * changes, we need to pre-sort the planes before validation. + */ + + /* first count the number of rotated planes */ + drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) { + struct drm_framebuffer *fb = pstate->fb; + + if ((pstate->rotation & MALIDP_ROTATED_MASK) || fb->modifier) + rotated_planes++; + } + + rot_mem_free = hwdev->rotation_memory[0]; + /* + * if we have more than 1 plane using rotation memory, use the second + * block of rotation memory as well + */ + if (rotated_planes > 1) + rot_mem_free += hwdev->rotation_memory[1]; + + /* now validate the rotation memory requirements */ + drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) { + struct malidp_plane *mp = to_malidp_plane(plane); + struct malidp_plane_state *ms = to_malidp_plane_state(pstate); + struct drm_framebuffer *fb = pstate->fb; + + if ((pstate->rotation & MALIDP_ROTATED_MASK) || fb->modifier) { + /* process current plane */ + rotated_planes--; + + if (!rotated_planes) { + /* no more rotated planes, we can use what's left */ + rot_mem_usable = rot_mem_free; + } else { + if ((mp->layer->id != DE_VIDEO1) || + (hwdev->rotation_memory[1] == 0)) + rot_mem_usable = rot_mem_free / 2; + else + rot_mem_usable = hwdev->rotation_memory[0]; + } + + rot_mem_free -= rot_mem_usable; + + if (ms->rotmem_size > rot_mem_usable) + return -EINVAL; + } + } + + /* If only the writeback routing has changed, we don't need a modeset */ + if (state->connectors_changed) { + u32 old_mask = crtc->state->connector_mask; + u32 new_mask = state->connector_mask; + + if ((old_mask ^ new_mask) == + (1 << drm_connector_index(&malidp->mw_connector.base))) + state->connectors_changed = false; + } + + ret = malidp_crtc_atomic_check_gamma(crtc, state); + ret = ret ? ret : malidp_crtc_atomic_check_ctm(crtc, state); + ret = ret ? 
ret : malidp_crtc_atomic_check_scaling(crtc, state); + + return ret; +} + +static const struct drm_crtc_helper_funcs malidp_crtc_helper_funcs = { + .mode_valid = malidp_crtc_mode_valid, + .atomic_check = malidp_crtc_atomic_check, + .atomic_enable = malidp_crtc_atomic_enable, + .atomic_disable = malidp_crtc_atomic_disable, +}; + +static struct drm_crtc_state *malidp_crtc_duplicate_state(struct drm_crtc *crtc) +{ + struct malidp_crtc_state *state, *old_state; + + if (WARN_ON(!crtc->state)) + return NULL; + + old_state = to_malidp_crtc_state(crtc->state); + state = kmalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return NULL; + + __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base); + memcpy(state->gamma_coeffs, old_state->gamma_coeffs, + sizeof(state->gamma_coeffs)); + memcpy(state->coloradj_coeffs, old_state->coloradj_coeffs, + sizeof(state->coloradj_coeffs)); + memcpy(&state->scaler_config, &old_state->scaler_config, + sizeof(state->scaler_config)); + state->scaled_planes_mask = 0; + + return &state->base; +} + +static void malidp_crtc_destroy_state(struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ + struct malidp_crtc_state *mali_state = NULL; + + if (state) { + mali_state = to_malidp_crtc_state(state); + __drm_atomic_helper_crtc_destroy_state(state); + } + + kfree(mali_state); +} + +static void malidp_crtc_reset(struct drm_crtc *crtc) +{ + struct malidp_crtc_state *state = + kzalloc(sizeof(*state), GFP_KERNEL); + + if (crtc->state) + malidp_crtc_destroy_state(crtc, crtc->state); + + if (state) + __drm_atomic_helper_crtc_reset(crtc, &state->base); + else + __drm_atomic_helper_crtc_reset(crtc, NULL); +} + +static int malidp_crtc_enable_vblank(struct drm_crtc *crtc) +{ + struct malidp_drm *malidp = crtc_to_malidp_device(crtc); + struct malidp_hw_device *hwdev = malidp->dev; + + malidp_hw_enable_irq(hwdev, MALIDP_DE_BLOCK, + hwdev->hw->map.de_irq_map.vsync_irq); + return 0; +} + +static void malidp_crtc_disable_vblank(struct drm_crtc *crtc) +{ + struct malidp_drm *malidp = crtc_to_malidp_device(crtc); + struct malidp_hw_device *hwdev = malidp->dev; + + malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK, + hwdev->hw->map.de_irq_map.vsync_irq); +} + +static const struct drm_crtc_funcs malidp_crtc_funcs = { + .gamma_set = drm_atomic_helper_legacy_gamma_set, + .destroy = drm_crtc_cleanup, + .set_config = drm_atomic_helper_set_config, + .page_flip = drm_atomic_helper_page_flip, + .reset = malidp_crtc_reset, + .atomic_duplicate_state = malidp_crtc_duplicate_state, + .atomic_destroy_state = malidp_crtc_destroy_state, + .enable_vblank = malidp_crtc_enable_vblank, + .disable_vblank = malidp_crtc_disable_vblank, +}; + +int malidp_crtc_init(struct drm_device *drm) +{ + struct malidp_drm *malidp = drm->dev_private; + struct drm_plane *primary = NULL, *plane; + int ret; + + ret = malidp_de_planes_init(drm); + if (ret < 0) { + DRM_ERROR("Failed to initialise planes\n"); + return ret; + } + + drm_for_each_plane(plane, drm) { + if (plane->type == DRM_PLANE_TYPE_PRIMARY) { + primary = plane; + break; + } + } + + if (!primary) { + DRM_ERROR("no primary plane found\n"); + return -EINVAL; + } + + ret = drm_crtc_init_with_planes(drm, &malidp->crtc, primary, NULL, + &malidp_crtc_funcs, NULL); + if (ret) + return ret; + + drm_crtc_helper_add(&malidp->crtc, &malidp_crtc_helper_funcs); + drm_mode_crtc_set_gamma_size(&malidp->crtc, MALIDP_GAMMA_LUT_SIZE); + /* No inverse-gamma: it is per-plane. 
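+ * Hence the degamma_lut_size argument passed to
+ * drm_crtc_enable_color_mgmt() below is 0, while a CTM and a
+ * MALIDP_GAMMA_LUT_SIZE-entry gamma LUT are advertised.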
*/ + drm_crtc_enable_color_mgmt(&malidp->crtc, 0, true, MALIDP_GAMMA_LUT_SIZE); + + malidp_se_set_enh_coeffs(malidp->dev); + + return 0; +} diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c new file mode 100644 index 000000000..69fee05c2 --- /dev/null +++ b/drivers/gpu/drm/arm/malidp_drv.c @@ -0,0 +1,1017 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * (C) COPYRIGHT 2016 ARM Limited. All rights reserved. + * Author: Liviu Dudau <Liviu.Dudau@arm.com> + * + * ARM Mali DP500/DP550/DP650 KMS/DRM driver + */ + +#include <linux/module.h> +#include <linux/clk.h> +#include <linux/component.h> +#include <linux/of_device.h> +#include <linux/of_graph.h> +#include <linux/of_reserved_mem.h> +#include <linux/pm_runtime.h> +#include <linux/debugfs.h> + +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_crtc.h> +#include <drm/drm_drv.h> +#include <drm/drm_fb_cma_helper.h> +#include <drm/drm_fb_helper.h> +#include <drm/drm_fourcc.h> +#include <drm/drm_gem_cma_helper.h> +#include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_modeset_helper.h> +#include <drm/drm_of.h> +#include <drm/drm_probe_helper.h> +#include <drm/drm_vblank.h> + +#include "malidp_drv.h" +#include "malidp_mw.h" +#include "malidp_regs.h" +#include "malidp_hw.h" + +#define MALIDP_CONF_VALID_TIMEOUT 250 +#define AFBC_HEADER_SIZE 16 +#define AFBC_SUPERBLK_ALIGNMENT 128 + +static void malidp_write_gamma_table(struct malidp_hw_device *hwdev, + u32 data[MALIDP_COEFFTAB_NUM_COEFFS]) +{ + int i; + /* Update all channels with a single gamma curve. */ + const u32 gamma_write_mask = GENMASK(18, 16); + /* + * Always write an entire table, so the address field in + * DE_COEFFTAB_ADDR is 0 and we can use the gamma_write_mask bitmask + * directly. 
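+ * (The three bits covered by GENMASK(18, 16) appear to be the
+ * per-channel write enables, which is what lets a single pass
+ * through the loop below program red, green and blue alike.)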
+ */ + malidp_hw_write(hwdev, gamma_write_mask, + hwdev->hw->map.coeffs_base + MALIDP_COEF_TABLE_ADDR); + for (i = 0; i < MALIDP_COEFFTAB_NUM_COEFFS; ++i) + malidp_hw_write(hwdev, data[i], + hwdev->hw->map.coeffs_base + + MALIDP_COEF_TABLE_DATA); +} + +static void malidp_atomic_commit_update_gamma(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) +{ + struct malidp_drm *malidp = crtc_to_malidp_device(crtc); + struct malidp_hw_device *hwdev = malidp->dev; + + if (!crtc->state->color_mgmt_changed) + return; + + if (!crtc->state->gamma_lut) { + malidp_hw_clearbits(hwdev, + MALIDP_DISP_FUNC_GAMMA, + MALIDP_DE_DISPLAY_FUNC); + } else { + struct malidp_crtc_state *mc = + to_malidp_crtc_state(crtc->state); + + if (!old_state->gamma_lut || (crtc->state->gamma_lut->base.id != + old_state->gamma_lut->base.id)) + malidp_write_gamma_table(hwdev, mc->gamma_coeffs); + + malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_GAMMA, + MALIDP_DE_DISPLAY_FUNC); + } +} + +static +void malidp_atomic_commit_update_coloradj(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) +{ + struct malidp_drm *malidp = crtc_to_malidp_device(crtc); + struct malidp_hw_device *hwdev = malidp->dev; + int i; + + if (!crtc->state->color_mgmt_changed) + return; + + if (!crtc->state->ctm) { + malidp_hw_clearbits(hwdev, MALIDP_DISP_FUNC_CADJ, + MALIDP_DE_DISPLAY_FUNC); + } else { + struct malidp_crtc_state *mc = + to_malidp_crtc_state(crtc->state); + + if (!old_state->ctm || (crtc->state->ctm->base.id != + old_state->ctm->base.id)) + for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; ++i) + malidp_hw_write(hwdev, + mc->coloradj_coeffs[i], + hwdev->hw->map.coeffs_base + + MALIDP_COLOR_ADJ_COEF + 4 * i); + + malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_CADJ, + MALIDP_DE_DISPLAY_FUNC); + } +} + +static void malidp_atomic_commit_se_config(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) +{ + struct malidp_crtc_state *cs = to_malidp_crtc_state(crtc->state); + struct malidp_crtc_state *old_cs = to_malidp_crtc_state(old_state); + struct malidp_drm *malidp = crtc_to_malidp_device(crtc); + struct malidp_hw_device *hwdev = malidp->dev; + struct malidp_se_config *s = &cs->scaler_config; + struct malidp_se_config *old_s = &old_cs->scaler_config; + u32 se_control = hwdev->hw->map.se_base + + ((hwdev->hw->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ? + 0x10 : 0xC); + u32 layer_control = se_control + MALIDP_SE_LAYER_CONTROL; + u32 scr = se_control + MALIDP_SE_SCALING_CONTROL; + u32 val; + + /* Set SE_CONTROL */ + if (!s->scale_enable) { + val = malidp_hw_read(hwdev, se_control); + val &= ~MALIDP_SE_SCALING_EN; + malidp_hw_write(hwdev, val, se_control); + return; + } + + hwdev->hw->se_set_scaling_coeffs(hwdev, s, old_s); + val = malidp_hw_read(hwdev, se_control); + val |= MALIDP_SE_SCALING_EN | MALIDP_SE_ALPHA_EN; + + val &= ~MALIDP_SE_ENH(MALIDP_SE_ENH_MASK); + val |= s->enhancer_enable ? MALIDP_SE_ENH(3) : 0; + + val |= MALIDP_SE_RGBO_IF_EN; + malidp_hw_write(hwdev, val, se_control); + + /* Set IN_SIZE & OUT_SIZE. */ + val = MALIDP_SE_SET_V_SIZE(s->input_h) | + MALIDP_SE_SET_H_SIZE(s->input_w); + malidp_hw_write(hwdev, val, layer_control + MALIDP_SE_L0_IN_SIZE); + val = MALIDP_SE_SET_V_SIZE(s->output_h) | + MALIDP_SE_SET_H_SIZE(s->output_w); + malidp_hw_write(hwdev, val, layer_control + MALIDP_SE_L0_OUT_SIZE); + + /* Set phase regs. 
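+ * The phase values were computed in malidp_crtc_atomic_check_scaling():
+ * init_phase is the initial sampling offset, delta_phase the
+ * per-output-pixel increment.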
*/ + malidp_hw_write(hwdev, s->h_init_phase, scr + MALIDP_SE_H_INIT_PH); + malidp_hw_write(hwdev, s->h_delta_phase, scr + MALIDP_SE_H_DELTA_PH); + malidp_hw_write(hwdev, s->v_init_phase, scr + MALIDP_SE_V_INIT_PH); + malidp_hw_write(hwdev, s->v_delta_phase, scr + MALIDP_SE_V_DELTA_PH); +} + +/* + * set the "config valid" bit and wait until the hardware acts on it + */ +static int malidp_set_and_wait_config_valid(struct drm_device *drm) +{ + struct malidp_drm *malidp = drm->dev_private; + struct malidp_hw_device *hwdev = malidp->dev; + int ret; + + hwdev->hw->set_config_valid(hwdev, 1); + /* don't wait for config_valid flag if we are in config mode */ + if (hwdev->hw->in_config_mode(hwdev)) { + atomic_set(&malidp->config_valid, MALIDP_CONFIG_VALID_DONE); + return 0; + } + + ret = wait_event_interruptible_timeout(malidp->wq, + atomic_read(&malidp->config_valid) == MALIDP_CONFIG_VALID_DONE, + msecs_to_jiffies(MALIDP_CONF_VALID_TIMEOUT)); + + return (ret > 0) ? 0 : -ETIMEDOUT; +} + +static void malidp_atomic_commit_hw_done(struct drm_atomic_state *state) +{ + struct drm_device *drm = state->dev; + struct malidp_drm *malidp = drm->dev_private; + int loop = 5; + + malidp->event = malidp->crtc.state->event; + malidp->crtc.state->event = NULL; + + if (malidp->crtc.state->active) { + /* + * if we have an event to deliver to userspace, make sure + * the vblank is enabled as we are sending it from the IRQ + * handler. + */ + if (malidp->event) + drm_crtc_vblank_get(&malidp->crtc); + + /* only set config_valid if the CRTC is enabled */ + if (malidp_set_and_wait_config_valid(drm) < 0) { + /* + * make a loop around the second CVAL setting and + * try 5 times before giving up. + */ + while (loop--) { + if (!malidp_set_and_wait_config_valid(drm)) + break; + } + DRM_DEBUG_DRIVER("timed out waiting for updated configuration\n"); + } + + } else if (malidp->event) { + /* CRTC inactive means vblank IRQ is disabled, send event directly */ + spin_lock_irq(&drm->event_lock); + drm_crtc_send_vblank_event(&malidp->crtc, malidp->event); + malidp->event = NULL; + spin_unlock_irq(&drm->event_lock); + } + drm_atomic_helper_commit_hw_done(state); +} + +static void malidp_atomic_commit_tail(struct drm_atomic_state *state) +{ + struct drm_device *drm = state->dev; + struct malidp_drm *malidp = drm->dev_private; + struct drm_crtc *crtc; + struct drm_crtc_state *old_crtc_state; + int i; + + pm_runtime_get_sync(drm->dev); + + /* + * set config_valid to a special value to let IRQ handlers + * know that we are updating registers + */ + atomic_set(&malidp->config_valid, MALIDP_CONFIG_START); + malidp->dev->hw->set_config_valid(malidp->dev, 0); + + drm_atomic_helper_commit_modeset_disables(drm, state); + + for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) { + malidp_atomic_commit_update_gamma(crtc, old_crtc_state); + malidp_atomic_commit_update_coloradj(crtc, old_crtc_state); + malidp_atomic_commit_se_config(crtc, old_crtc_state); + } + + drm_atomic_helper_commit_planes(drm, state, DRM_PLANE_COMMIT_ACTIVE_ONLY); + + malidp_mw_atomic_commit(drm, state); + + drm_atomic_helper_commit_modeset_enables(drm, state); + + malidp_atomic_commit_hw_done(state); + + pm_runtime_put(drm->dev); + + drm_atomic_helper_cleanup_planes(drm, state); +} + +static const struct drm_mode_config_helper_funcs malidp_mode_config_helpers = { + .atomic_commit_tail = malidp_atomic_commit_tail, +}; + +static bool +malidp_verify_afbc_framebuffer_caps(struct drm_device *dev, + const struct drm_mode_fb_cmd2 *mode_cmd) +{ + if 
(malidp_format_mod_supported(dev, mode_cmd->pixel_format, + mode_cmd->modifier[0]) == false) + return false; + + if (mode_cmd->offsets[0] != 0) { + DRM_DEBUG_KMS("AFBC buffers' plane offset should be 0\n"); + return false; + } + + switch (mode_cmd->modifier[0] & AFBC_SIZE_MASK) { + case AFBC_SIZE_16X16: + if ((mode_cmd->width % 16) || (mode_cmd->height % 16)) { + DRM_DEBUG_KMS("AFBC buffers must be aligned to 16 pixels\n"); + return false; + } + break; + default: + DRM_DEBUG_KMS("Unsupported AFBC block size\n"); + return false; + } + + return true; +} + +static bool +malidp_verify_afbc_framebuffer_size(struct drm_device *dev, + struct drm_file *file, + const struct drm_mode_fb_cmd2 *mode_cmd) +{ + int n_superblocks = 0; + const struct drm_format_info *info; + struct drm_gem_object *objs = NULL; + u32 afbc_superblock_size = 0, afbc_superblock_height = 0; + u32 afbc_superblock_width = 0, afbc_size = 0; + int bpp = 0; + + switch (mode_cmd->modifier[0] & AFBC_SIZE_MASK) { + case AFBC_SIZE_16X16: + afbc_superblock_height = 16; + afbc_superblock_width = 16; + break; + default: + DRM_DEBUG_KMS("AFBC superblock size is not supported\n"); + return false; + } + + info = drm_get_format_info(dev, mode_cmd); + + n_superblocks = (mode_cmd->width / afbc_superblock_width) * + (mode_cmd->height / afbc_superblock_height); + + bpp = malidp_format_get_bpp(info->format); + + afbc_superblock_size = (bpp * afbc_superblock_width * afbc_superblock_height) + / BITS_PER_BYTE; + + afbc_size = ALIGN(n_superblocks * AFBC_HEADER_SIZE, AFBC_SUPERBLK_ALIGNMENT); + afbc_size += n_superblocks * ALIGN(afbc_superblock_size, AFBC_SUPERBLK_ALIGNMENT); + + if ((mode_cmd->width * bpp) != (mode_cmd->pitches[0] * BITS_PER_BYTE)) { + DRM_DEBUG_KMS("Invalid value of (pitch * BITS_PER_BYTE) (=%u) " + "should be same as width (=%u) * bpp (=%u)\n", + (mode_cmd->pitches[0] * BITS_PER_BYTE), + mode_cmd->width, bpp); + return false; + } + + objs = drm_gem_object_lookup(file, mode_cmd->handles[0]); + if (!objs) { + DRM_DEBUG_KMS("Failed to lookup GEM object\n"); + return false; + } + + if (objs->size < afbc_size) { + DRM_DEBUG_KMS("buffer size (%zu) too small for AFBC buffer size = %u\n", + objs->size, afbc_size); + drm_gem_object_put(objs); + return false; + } + + drm_gem_object_put(objs); + + return true; +} + +static bool +malidp_verify_afbc_framebuffer(struct drm_device *dev, struct drm_file *file, + const struct drm_mode_fb_cmd2 *mode_cmd) +{ + if (malidp_verify_afbc_framebuffer_caps(dev, mode_cmd)) + return malidp_verify_afbc_framebuffer_size(dev, file, mode_cmd); + + return false; +} + +static struct drm_framebuffer * +malidp_fb_create(struct drm_device *dev, struct drm_file *file, + const struct drm_mode_fb_cmd2 *mode_cmd) +{ + if (mode_cmd->modifier[0]) { + if (!malidp_verify_afbc_framebuffer(dev, file, mode_cmd)) + return ERR_PTR(-EINVAL); + } + + return drm_gem_fb_create(dev, file, mode_cmd); +} + +static const struct drm_mode_config_funcs malidp_mode_config_funcs = { + .fb_create = malidp_fb_create, + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, +}; + +static int malidp_init(struct drm_device *drm) +{ + int ret; + struct malidp_drm *malidp = drm->dev_private; + struct malidp_hw_device *hwdev = malidp->dev; + + drm_mode_config_init(drm); + + drm->mode_config.min_width = hwdev->min_line_size; + drm->mode_config.min_height = hwdev->min_line_size; + drm->mode_config.max_width = hwdev->max_line_size; + drm->mode_config.max_height = hwdev->max_line_size; + drm->mode_config.funcs = 
&malidp_mode_config_funcs; + drm->mode_config.helper_private = &malidp_mode_config_helpers; + drm->mode_config.allow_fb_modifiers = true; + + ret = malidp_crtc_init(drm); + if (ret) + goto crtc_fail; + + ret = malidp_mw_connector_init(drm); + if (ret) + goto crtc_fail; + + return 0; + +crtc_fail: + drm_mode_config_cleanup(drm); + return ret; +} + +static void malidp_fini(struct drm_device *drm) +{ + drm_mode_config_cleanup(drm); +} + +static int malidp_irq_init(struct platform_device *pdev) +{ + int irq_de, irq_se, ret = 0; + struct drm_device *drm = dev_get_drvdata(&pdev->dev); + struct malidp_drm *malidp = drm->dev_private; + struct malidp_hw_device *hwdev = malidp->dev; + + /* fetch the interrupts from DT */ + irq_de = platform_get_irq_byname(pdev, "DE"); + if (irq_de < 0) { + DRM_ERROR("no 'DE' IRQ specified!\n"); + return irq_de; + } + irq_se = platform_get_irq_byname(pdev, "SE"); + if (irq_se < 0) { + DRM_ERROR("no 'SE' IRQ specified!\n"); + return irq_se; + } + + ret = malidp_de_irq_init(drm, irq_de); + if (ret) + return ret; + + ret = malidp_se_irq_init(drm, irq_se); + if (ret) { + malidp_de_irq_fini(hwdev); + return ret; + } + + return 0; +} + +DEFINE_DRM_GEM_CMA_FOPS(fops); + +static int malidp_dumb_create(struct drm_file *file_priv, + struct drm_device *drm, + struct drm_mode_create_dumb *args) +{ + struct malidp_drm *malidp = drm->dev_private; + /* allocate for the worst case scenario, i.e. rotated buffers */ + u8 alignment = malidp_hw_get_pitch_align(malidp->dev, 1); + + args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), alignment); + + return drm_gem_cma_dumb_create_internal(file_priv, drm, args); +} + +#ifdef CONFIG_DEBUG_FS + +static void malidp_error_stats_init(struct malidp_error_stats *error_stats) +{ + error_stats->num_errors = 0; + error_stats->last_error_status = 0; + error_stats->last_error_vblank = -1; +} + +void malidp_error(struct malidp_drm *malidp, + struct malidp_error_stats *error_stats, u32 status, + u64 vblank) +{ + unsigned long irqflags; + + spin_lock_irqsave(&malidp->errors_lock, irqflags); + error_stats->last_error_status = status; + error_stats->last_error_vblank = vblank; + error_stats->num_errors++; + spin_unlock_irqrestore(&malidp->errors_lock, irqflags); +} + +static void malidp_error_stats_dump(const char *prefix, + struct malidp_error_stats error_stats, + struct seq_file *m) +{ + seq_printf(m, "[%s] num_errors : %d\n", prefix, + error_stats.num_errors); + seq_printf(m, "[%s] last_error_status : 0x%08x\n", prefix, + error_stats.last_error_status); + seq_printf(m, "[%s] last_error_vblank : %lld\n", prefix, + error_stats.last_error_vblank); +} + +static int malidp_show_stats(struct seq_file *m, void *arg) +{ + struct drm_device *drm = m->private; + struct malidp_drm *malidp = drm->dev_private; + unsigned long irqflags; + struct malidp_error_stats de_errors, se_errors; + + spin_lock_irqsave(&malidp->errors_lock, irqflags); + de_errors = malidp->de_errors; + se_errors = malidp->se_errors; + spin_unlock_irqrestore(&malidp->errors_lock, irqflags); + malidp_error_stats_dump("DE", de_errors, m); + malidp_error_stats_dump("SE", se_errors, m); + return 0; +} + +static int malidp_debugfs_open(struct inode *inode, struct file *file) +{ + return single_open(file, malidp_show_stats, inode->i_private); +} + +static ssize_t malidp_debugfs_write(struct file *file, const char __user *ubuf, + size_t len, loff_t *offp) +{ + struct seq_file *m = file->private_data; + struct drm_device *drm = m->private; + struct malidp_drm *malidp = drm->dev_private; + 
unsigned long irqflags; + + spin_lock_irqsave(&malidp->errors_lock, irqflags); + malidp_error_stats_init(&malidp->de_errors); + malidp_error_stats_init(&malidp->se_errors); + spin_unlock_irqrestore(&malidp->errors_lock, irqflags); + return len; +} + +static const struct file_operations malidp_debugfs_fops = { + .owner = THIS_MODULE, + .open = malidp_debugfs_open, + .read = seq_read, + .write = malidp_debugfs_write, + .llseek = seq_lseek, + .release = single_release, +}; + +static void malidp_debugfs_init(struct drm_minor *minor) +{ + struct malidp_drm *malidp = minor->dev->dev_private; + + malidp_error_stats_init(&malidp->de_errors); + malidp_error_stats_init(&malidp->se_errors); + spin_lock_init(&malidp->errors_lock); + debugfs_create_file("debug", S_IRUGO | S_IWUSR, minor->debugfs_root, + minor->dev, &malidp_debugfs_fops); +} + +#endif //CONFIG_DEBUG_FS + +static struct drm_driver malidp_driver = { + .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, + DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(malidp_dumb_create), +#ifdef CONFIG_DEBUG_FS + .debugfs_init = malidp_debugfs_init, +#endif + .fops = &fops, + .name = "mali-dp", + .desc = "ARM Mali Display Processor driver", + .date = "20160106", + .major = 1, + .minor = 0, +}; + +static const struct of_device_id malidp_drm_of_match[] = { + { + .compatible = "arm,mali-dp500", + .data = &malidp_device[MALIDP_500] + }, + { + .compatible = "arm,mali-dp550", + .data = &malidp_device[MALIDP_550] + }, + { + .compatible = "arm,mali-dp650", + .data = &malidp_device[MALIDP_650] + }, + {}, +}; +MODULE_DEVICE_TABLE(of, malidp_drm_of_match); + +static bool malidp_is_compatible_hw_id(struct malidp_hw_device *hwdev, + const struct of_device_id *dev_id) +{ + u32 core_id; + const char *compatstr_dp500 = "arm,mali-dp500"; + bool is_dp500; + bool dt_is_dp500; + + /* + * The DP500 CORE_ID register is in a different location, so check it + * first. If the product id field matches, then this is DP500, otherwise + * check the DP550/650 CORE_ID register. + */ + core_id = malidp_hw_read(hwdev, MALIDP500_DC_BASE + MALIDP_DE_CORE_ID); + /* Offset 0x18 will never read 0x500 on products other than DP500. */ + is_dp500 = (MALIDP_PRODUCT_ID(core_id) == 0x500); + dt_is_dp500 = strnstr(dev_id->compatible, compatstr_dp500, + sizeof(dev_id->compatible)) != NULL; + if (is_dp500 != dt_is_dp500) { + DRM_ERROR("Device-tree expects %s, but hardware %s DP500.\n", + dev_id->compatible, is_dp500 ? 
"is" : "is not"); + return false; + } else if (!dt_is_dp500) { + u16 product_id; + char buf[32]; + + core_id = malidp_hw_read(hwdev, + MALIDP550_DC_BASE + MALIDP_DE_CORE_ID); + product_id = MALIDP_PRODUCT_ID(core_id); + snprintf(buf, sizeof(buf), "arm,mali-dp%X", product_id); + if (!strnstr(dev_id->compatible, buf, + sizeof(dev_id->compatible))) { + DRM_ERROR("Device-tree expects %s, but hardware is DP%03X.\n", + dev_id->compatible, product_id); + return false; + } + } + return true; +} + +static bool malidp_has_sufficient_address_space(const struct resource *res, + const struct of_device_id *dev_id) +{ + resource_size_t res_size = resource_size(res); + const char *compatstr_dp500 = "arm,mali-dp500"; + + if (!strnstr(dev_id->compatible, compatstr_dp500, + sizeof(dev_id->compatible))) + return res_size >= MALIDP550_ADDR_SPACE_SIZE; + else if (res_size < MALIDP500_ADDR_SPACE_SIZE) + return false; + return true; +} + +static ssize_t core_id_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct drm_device *drm = dev_get_drvdata(dev); + struct malidp_drm *malidp = drm->dev_private; + + return snprintf(buf, PAGE_SIZE, "%08x\n", malidp->core_id); +} + +static DEVICE_ATTR_RO(core_id); + +static struct attribute *mali_dp_attrs[] = { + &dev_attr_core_id.attr, + NULL, +}; +ATTRIBUTE_GROUPS(mali_dp); + +#define MAX_OUTPUT_CHANNELS 3 + +static int malidp_runtime_pm_suspend(struct device *dev) +{ + struct drm_device *drm = dev_get_drvdata(dev); + struct malidp_drm *malidp = drm->dev_private; + struct malidp_hw_device *hwdev = malidp->dev; + + /* we can only suspend if the hardware is in config mode */ + WARN_ON(!hwdev->hw->in_config_mode(hwdev)); + + malidp_se_irq_fini(hwdev); + malidp_de_irq_fini(hwdev); + hwdev->pm_suspended = true; + clk_disable_unprepare(hwdev->mclk); + clk_disable_unprepare(hwdev->aclk); + clk_disable_unprepare(hwdev->pclk); + + return 0; +} + +static int malidp_runtime_pm_resume(struct device *dev) +{ + struct drm_device *drm = dev_get_drvdata(dev); + struct malidp_drm *malidp = drm->dev_private; + struct malidp_hw_device *hwdev = malidp->dev; + + clk_prepare_enable(hwdev->pclk); + clk_prepare_enable(hwdev->aclk); + clk_prepare_enable(hwdev->mclk); + hwdev->pm_suspended = false; + malidp_de_irq_hw_init(hwdev); + malidp_se_irq_hw_init(hwdev); + + return 0; +} + +static int malidp_bind(struct device *dev) +{ + struct resource *res; + struct drm_device *drm; + struct malidp_drm *malidp; + struct malidp_hw_device *hwdev; + struct platform_device *pdev = to_platform_device(dev); + struct of_device_id const *dev_id; + struct drm_encoder *encoder; + /* number of lines for the R, G and B output */ + u8 output_width[MAX_OUTPUT_CHANNELS]; + int ret = 0, i; + u32 version, out_depth = 0; + + malidp = devm_kzalloc(dev, sizeof(*malidp), GFP_KERNEL); + if (!malidp) + return -ENOMEM; + + hwdev = devm_kzalloc(dev, sizeof(*hwdev), GFP_KERNEL); + if (!hwdev) + return -ENOMEM; + + hwdev->hw = (struct malidp_hw *)of_device_get_match_data(dev); + malidp->dev = hwdev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + hwdev->regs = devm_ioremap_resource(dev, res); + if (IS_ERR(hwdev->regs)) + return PTR_ERR(hwdev->regs); + + hwdev->pclk = devm_clk_get(dev, "pclk"); + if (IS_ERR(hwdev->pclk)) + return PTR_ERR(hwdev->pclk); + + hwdev->aclk = devm_clk_get(dev, "aclk"); + if (IS_ERR(hwdev->aclk)) + return PTR_ERR(hwdev->aclk); + + hwdev->mclk = devm_clk_get(dev, "mclk"); + if (IS_ERR(hwdev->mclk)) + return PTR_ERR(hwdev->mclk); + + hwdev->pxlclk = devm_clk_get(dev, 
"pxlclk"); + if (IS_ERR(hwdev->pxlclk)) + return PTR_ERR(hwdev->pxlclk); + + /* Get the optional framebuffer memory resource */ + ret = of_reserved_mem_device_init(dev); + if (ret && ret != -ENODEV) + return ret; + + drm = drm_dev_alloc(&malidp_driver, dev); + if (IS_ERR(drm)) { + ret = PTR_ERR(drm); + goto alloc_fail; + } + + drm->dev_private = malidp; + dev_set_drvdata(dev, drm); + + /* Enable power management */ + pm_runtime_enable(dev); + + /* Resume device to enable the clocks */ + if (pm_runtime_enabled(dev)) + pm_runtime_get_sync(dev); + else + malidp_runtime_pm_resume(dev); + + dev_id = of_match_device(malidp_drm_of_match, dev); + if (!dev_id) { + ret = -EINVAL; + goto query_hw_fail; + } + + if (!malidp_has_sufficient_address_space(res, dev_id)) { + DRM_ERROR("Insufficient address space in device-tree.\n"); + ret = -EINVAL; + goto query_hw_fail; + } + + if (!malidp_is_compatible_hw_id(hwdev, dev_id)) { + ret = -EINVAL; + goto query_hw_fail; + } + + ret = hwdev->hw->query_hw(hwdev); + if (ret) { + DRM_ERROR("Invalid HW configuration\n"); + goto query_hw_fail; + } + + version = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_DE_CORE_ID); + DRM_INFO("found ARM Mali-DP%3x version r%dp%d\n", version >> 16, + (version >> 12) & 0xf, (version >> 8) & 0xf); + + malidp->core_id = version; + + ret = of_property_read_u32(dev->of_node, + "arm,malidp-arqos-value", + &hwdev->arqos_value); + if (ret) + hwdev->arqos_value = 0x0; + + /* set the number of lines used for output of RGB data */ + ret = of_property_read_u8_array(dev->of_node, + "arm,malidp-output-port-lines", + output_width, MAX_OUTPUT_CHANNELS); + if (ret) + goto query_hw_fail; + + for (i = 0; i < MAX_OUTPUT_CHANNELS; i++) + out_depth = (out_depth << 8) | (output_width[i] & 0xf); + malidp_hw_write(hwdev, out_depth, hwdev->hw->map.out_depth_base); + hwdev->output_color_depth = out_depth; + + atomic_set(&malidp->config_valid, MALIDP_CONFIG_VALID_INIT); + init_waitqueue_head(&malidp->wq); + + ret = malidp_init(drm); + if (ret < 0) + goto query_hw_fail; + + /* Set the CRTC's port so that the encoder component can find it */ + malidp->crtc.port = of_graph_get_port_by_id(dev->of_node, 0); + + ret = component_bind_all(dev, drm); + if (ret) { + DRM_ERROR("Failed to bind all components\n"); + goto bind_fail; + } + + /* We expect to have a maximum of two encoders one for the actual + * display and a virtual one for the writeback connector + */ + WARN_ON(drm->mode_config.num_encoder > 2); + list_for_each_entry(encoder, &drm->mode_config.encoder_list, head) { + encoder->possible_clones = + (1 << drm->mode_config.num_encoder) - 1; + } + + ret = malidp_irq_init(pdev); + if (ret < 0) + goto irq_init_fail; + + drm->irq_enabled = true; + + ret = drm_vblank_init(drm, drm->mode_config.num_crtc); + if (ret < 0) { + DRM_ERROR("failed to initialise vblank\n"); + goto vblank_fail; + } + pm_runtime_put(dev); + + drm_mode_config_reset(drm); + + drm_kms_helper_poll_init(drm); + + ret = drm_dev_register(drm, 0); + if (ret) + goto register_fail; + + drm_fbdev_generic_setup(drm, 32); + + return 0; + +register_fail: + drm_kms_helper_poll_fini(drm); + pm_runtime_get_sync(dev); +vblank_fail: + malidp_se_irq_fini(hwdev); + malidp_de_irq_fini(hwdev); + drm->irq_enabled = false; +irq_init_fail: + drm_atomic_helper_shutdown(drm); + component_unbind_all(dev, drm); +bind_fail: + of_node_put(malidp->crtc.port); + malidp->crtc.port = NULL; + malidp_fini(drm); +query_hw_fail: + pm_runtime_put(dev); + if (pm_runtime_enabled(dev)) + pm_runtime_disable(dev); + else + 
malidp_runtime_pm_suspend(dev); + drm->dev_private = NULL; + dev_set_drvdata(dev, NULL); + drm_dev_put(drm); +alloc_fail: + of_reserved_mem_device_release(dev); + + return ret; +} + +static void malidp_unbind(struct device *dev) +{ + struct drm_device *drm = dev_get_drvdata(dev); + struct malidp_drm *malidp = drm->dev_private; + struct malidp_hw_device *hwdev = malidp->dev; + + drm_dev_unregister(drm); + drm_kms_helper_poll_fini(drm); + pm_runtime_get_sync(dev); + drm_atomic_helper_shutdown(drm); + malidp_se_irq_fini(hwdev); + malidp_de_irq_fini(hwdev); + drm->irq_enabled = false; + component_unbind_all(dev, drm); + of_node_put(malidp->crtc.port); + malidp->crtc.port = NULL; + malidp_fini(drm); + pm_runtime_put(dev); + if (pm_runtime_enabled(dev)) + pm_runtime_disable(dev); + else + malidp_runtime_pm_suspend(dev); + drm->dev_private = NULL; + dev_set_drvdata(dev, NULL); + drm_dev_put(drm); + of_reserved_mem_device_release(dev); +} + +static const struct component_master_ops malidp_master_ops = { + .bind = malidp_bind, + .unbind = malidp_unbind, +}; + +static int malidp_compare_dev(struct device *dev, void *data) +{ + struct device_node *np = data; + + return dev->of_node == np; +} + +static int malidp_platform_probe(struct platform_device *pdev) +{ + struct device_node *port; + struct component_match *match = NULL; + + if (!pdev->dev.of_node) + return -ENODEV; + + /* there is only one output port inside each device, find it */ + port = of_graph_get_remote_node(pdev->dev.of_node, 0, 0); + if (!port) + return -ENODEV; + + drm_of_component_match_add(&pdev->dev, &match, malidp_compare_dev, + port); + of_node_put(port); + return component_master_add_with_match(&pdev->dev, &malidp_master_ops, + match); +} + +static int malidp_platform_remove(struct platform_device *pdev) +{ + component_master_del(&pdev->dev, &malidp_master_ops); + return 0; +} + +static int __maybe_unused malidp_pm_suspend(struct device *dev) +{ + struct drm_device *drm = dev_get_drvdata(dev); + + return drm_mode_config_helper_suspend(drm); +} + +static int __maybe_unused malidp_pm_resume(struct device *dev) +{ + struct drm_device *drm = dev_get_drvdata(dev); + + drm_mode_config_helper_resume(drm); + + return 0; +} + +static int __maybe_unused malidp_pm_suspend_late(struct device *dev) +{ + if (!pm_runtime_status_suspended(dev)) { + malidp_runtime_pm_suspend(dev); + pm_runtime_set_suspended(dev); + } + return 0; +} + +static int __maybe_unused malidp_pm_resume_early(struct device *dev) +{ + malidp_runtime_pm_resume(dev); + pm_runtime_set_active(dev); + return 0; +} + +static const struct dev_pm_ops malidp_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(malidp_pm_suspend, malidp_pm_resume) \ + SET_LATE_SYSTEM_SLEEP_PM_OPS(malidp_pm_suspend_late, malidp_pm_resume_early) \ + SET_RUNTIME_PM_OPS(malidp_runtime_pm_suspend, malidp_runtime_pm_resume, NULL) +}; + +static struct platform_driver malidp_platform_driver = { + .probe = malidp_platform_probe, + .remove = malidp_platform_remove, + .driver = { + .name = "mali-dp", + .pm = &malidp_pm_ops, + .of_match_table = malidp_drm_of_match, + .dev_groups = mali_dp_groups, + }, +}; + +module_platform_driver(malidp_platform_driver); + +MODULE_AUTHOR("Liviu Dudau <Liviu.Dudau@arm.com>"); +MODULE_DESCRIPTION("ARM Mali DP DRM driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/arm/malidp_drv.h b/drivers/gpu/drm/arm/malidp_drv.h new file mode 100644 index 000000000..cdfddfabf --- /dev/null +++ b/drivers/gpu/drm/arm/malidp_drv.h @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* 
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved. + * Author: Liviu Dudau <Liviu.Dudau@arm.com> + * + * ARM Mali DP500/DP550/DP650 KMS/DRM driver structures + */ + +#ifndef __MALIDP_DRV_H__ +#define __MALIDP_DRV_H__ + +#include <linux/mutex.h> +#include <linux/wait.h> +#include <linux/spinlock.h> + +#include <drm/drm_writeback.h> +#include <drm/drm_encoder.h> + +#include "malidp_hw.h" + +#define MALIDP_CONFIG_VALID_INIT 0 +#define MALIDP_CONFIG_VALID_DONE 1 +#define MALIDP_CONFIG_START 0xd0 + +struct malidp_error_stats { + s32 num_errors; + u32 last_error_status; + s64 last_error_vblank; +}; + +struct malidp_drm { + struct malidp_hw_device *dev; + struct drm_crtc crtc; + struct drm_writeback_connector mw_connector; + wait_queue_head_t wq; + struct drm_pending_vblank_event *event; + atomic_t config_valid; + u32 core_id; +#ifdef CONFIG_DEBUG_FS + struct malidp_error_stats de_errors; + struct malidp_error_stats se_errors; + /* Protects errors stats */ + spinlock_t errors_lock; +#endif +}; + +#define crtc_to_malidp_device(x) container_of(x, struct malidp_drm, crtc) + +struct malidp_plane { + struct drm_plane base; + struct malidp_hw_device *hwdev; + const struct malidp_layer *layer; +}; + +enum mmu_prefetch_mode { + MALIDP_PREFETCH_MODE_NONE, + MALIDP_PREFETCH_MODE_PARTIAL, + MALIDP_PREFETCH_MODE_FULL, +}; + +struct malidp_plane_state { + struct drm_plane_state base; + + /* size of the required rotation memory if plane is rotated */ + u32 rotmem_size; + /* internal format ID */ + u8 format; + u8 n_planes; + enum mmu_prefetch_mode mmu_prefetch_mode; + u32 mmu_prefetch_pgsize; +}; + +#define to_malidp_plane(x) container_of(x, struct malidp_plane, base) +#define to_malidp_plane_state(x) container_of(x, struct malidp_plane_state, base) + +struct malidp_crtc_state { + struct drm_crtc_state base; + u32 gamma_coeffs[MALIDP_COEFFTAB_NUM_COEFFS]; + u32 coloradj_coeffs[MALIDP_COLORADJ_NUM_COEFFS]; + struct malidp_se_config scaler_config; + /* Bitfield of all the planes that have requested a scaled output. */ + u8 scaled_planes_mask; +}; + +#define to_malidp_crtc_state(x) container_of(x, struct malidp_crtc_state, base) + +int malidp_de_planes_init(struct drm_device *drm); +int malidp_crtc_init(struct drm_device *drm); + +bool malidp_hw_format_is_linear_only(u32 format); +bool malidp_hw_format_is_afbc_only(u32 format); + +bool malidp_format_mod_supported(struct drm_device *drm, + u32 format, u64 modifier); + +#ifdef CONFIG_DEBUG_FS +void malidp_error(struct malidp_drm *malidp, + struct malidp_error_stats *error_stats, u32 status, + u64 vblank); +#endif + +/* often used combination of rotational bits */ +#define MALIDP_ROTATED_MASK (DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270) + +#endif /* __MALIDP_DRV_H__ */ diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c new file mode 100644 index 000000000..e9de542f9 --- /dev/null +++ b/drivers/gpu/drm/arm/malidp_hw.c @@ -0,0 +1,1392 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * (C) COPYRIGHT 2016 ARM Limited. All rights reserved. + * Author: Liviu Dudau <Liviu.Dudau@arm.com> + * + * ARM Mali DP500/DP550/DP650 hardware manipulation routines. 
This is where + * the difference between various versions of the hardware is being dealt with + * in an attempt to provide to the rest of the driver code a unified view + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/types.h> +#include <linux/io.h> + +#include <video/videomode.h> +#include <video/display_timing.h> + +#include <drm/drm_fourcc.h> +#include <drm/drm_vblank.h> +#include <drm/drm_print.h> + +#include "malidp_drv.h" +#include "malidp_hw.h" +#include "malidp_mw.h" + +enum { + MW_NOT_ENABLED = 0, /* SE writeback not enabled */ + MW_ONESHOT, /* SE in one-shot mode for writeback */ + MW_START, /* SE started writeback */ + MW_RESTART, /* SE will start another writeback after this one */ + MW_STOP, /* SE needs to stop after this writeback */ +}; + +static const struct malidp_format_id malidp500_de_formats[] = { + /* fourcc, layers supporting the format, internal id */ + { DRM_FORMAT_ARGB2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2 | SE_MEMWRITE, 0 }, + { DRM_FORMAT_ABGR2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2 | SE_MEMWRITE, 1 }, + { DRM_FORMAT_ARGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 2 }, + { DRM_FORMAT_ABGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 3 }, + { DRM_FORMAT_XRGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2 | SE_MEMWRITE, 4 }, + { DRM_FORMAT_XBGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2 | SE_MEMWRITE, 5 }, + { DRM_FORMAT_RGB888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 6 }, + { DRM_FORMAT_BGR888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 7 }, + { DRM_FORMAT_RGBA5551, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 8 }, + { DRM_FORMAT_ABGR1555, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 9 }, + { DRM_FORMAT_RGB565, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 10 }, + { DRM_FORMAT_BGR565, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 11 }, + { DRM_FORMAT_UYVY, DE_VIDEO1, 12 }, + { DRM_FORMAT_YUYV, DE_VIDEO1, 13 }, + { DRM_FORMAT_NV12, DE_VIDEO1 | SE_MEMWRITE, 14 }, + { DRM_FORMAT_YUV420, DE_VIDEO1, 15 }, + { DRM_FORMAT_XYUV8888, DE_VIDEO1, 16 }, + /* These are supported with AFBC only */ + { DRM_FORMAT_YUV420_8BIT, DE_VIDEO1, 14 }, + { DRM_FORMAT_VUY888, DE_VIDEO1, 16 }, + { DRM_FORMAT_VUY101010, DE_VIDEO1, 17 }, + { DRM_FORMAT_YUV420_10BIT, DE_VIDEO1, 18 } +}; + +#define MALIDP_ID(__group, __format) \ + ((((__group) & 0x7) << 3) | ((__format) & 0x7)) + +#define AFBC_YUV_422_FORMAT_ID MALIDP_ID(5, 1) + +#define MALIDP_COMMON_FORMATS \ + /* fourcc, layers supporting the format, internal id */ \ + { DRM_FORMAT_ARGB2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(0, 0) }, \ + { DRM_FORMAT_ABGR2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(0, 1) }, \ + { DRM_FORMAT_RGBA1010102, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(0, 2) }, \ + { DRM_FORMAT_BGRA1010102, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(0, 3) }, \ + { DRM_FORMAT_ARGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(1, 0) }, \ + { DRM_FORMAT_ABGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(1, 1) }, \ + { DRM_FORMAT_RGBA8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(1, 2) }, \ + { DRM_FORMAT_BGRA8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(1, 3) }, \ + { DRM_FORMAT_XRGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART | SE_MEMWRITE, MALIDP_ID(2, 0) }, \ + { DRM_FORMAT_XBGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART | SE_MEMWRITE, MALIDP_ID(2, 1) }, \ + { DRM_FORMAT_RGBX8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | 
DE_SMART | SE_MEMWRITE, MALIDP_ID(2, 2) }, \ + { DRM_FORMAT_BGRX8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART | SE_MEMWRITE, MALIDP_ID(2, 3) }, \ + { DRM_FORMAT_RGB888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(3, 0) }, \ + { DRM_FORMAT_BGR888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(3, 1) }, \ + { DRM_FORMAT_RGBA5551, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 0) }, \ + { DRM_FORMAT_ABGR1555, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 1) }, \ + { DRM_FORMAT_RGB565, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 2) }, \ + { DRM_FORMAT_BGR565, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 3) }, \ + /* This is only supported with linear modifier */ \ + { DRM_FORMAT_XYUV8888, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 0) },\ + /* This is only supported with AFBC modifier */ \ + { DRM_FORMAT_VUY888, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 0) }, \ + { DRM_FORMAT_YUYV, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 2) }, \ + /* This is only supported with linear modifier */ \ + { DRM_FORMAT_UYVY, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 3) }, \ + { DRM_FORMAT_NV12, DE_VIDEO1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(5, 6) }, \ + /* This is only supported with AFBC modifier */ \ + { DRM_FORMAT_YUV420_8BIT, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 6) }, \ + { DRM_FORMAT_YUV420, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 7) }, \ + /* This is only supported with linear modifier */ \ + { DRM_FORMAT_XVYU2101010, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(6, 0)}, \ + /* This is only supported with AFBC modifier */ \ + { DRM_FORMAT_VUY101010, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(6, 0)}, \ + { DRM_FORMAT_X0L2, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(6, 6)}, \ + /* This is only supported with AFBC modifier */ \ + { DRM_FORMAT_YUV420_10BIT, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(6, 7)}, \ + { DRM_FORMAT_P010, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(6, 7)} + +static const struct malidp_format_id malidp550_de_formats[] = { + MALIDP_COMMON_FORMATS, +}; + +static const struct malidp_format_id malidp650_de_formats[] = { + MALIDP_COMMON_FORMATS, + { DRM_FORMAT_X0L0, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 4)}, +}; + +static const struct malidp_layer malidp500_layers[] = { + /* id, base address, fb pointer address base, stride offset, + * yuv2rgb matrix offset, mmu control register offset, rotation_features + */ + { DE_VIDEO1, MALIDP500_DE_LV_BASE, MALIDP500_DE_LV_PTR_BASE, + MALIDP_DE_LV_STRIDE0, MALIDP500_LV_YUV2RGB, 0, ROTATE_ANY, + MALIDP500_DE_LV_AD_CTRL }, + { DE_GRAPHICS1, MALIDP500_DE_LG1_BASE, MALIDP500_DE_LG1_PTR_BASE, + MALIDP_DE_LG_STRIDE, 0, 0, ROTATE_ANY, + MALIDP500_DE_LG1_AD_CTRL }, + { DE_GRAPHICS2, MALIDP500_DE_LG2_BASE, MALIDP500_DE_LG2_PTR_BASE, + MALIDP_DE_LG_STRIDE, 0, 0, ROTATE_ANY, + MALIDP500_DE_LG2_AD_CTRL }, +}; + +static const struct malidp_layer malidp550_layers[] = { + /* id, base address, fb pointer address base, stride offset, + * yuv2rgb matrix offset, mmu control register offset, rotation_features + */ + { DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE, + MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB, 0, ROTATE_ANY, + MALIDP550_DE_LV1_AD_CTRL }, + { DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE, + MALIDP_DE_LG_STRIDE, 0, 0, ROTATE_ANY, + MALIDP550_DE_LG_AD_CTRL }, + { DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE, + MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB, 0, ROTATE_ANY, + MALIDP550_DE_LV2_AD_CTRL }, + { DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE, + MALIDP550_DE_LS_R1_STRIDE, 0, 0, ROTATE_NONE, 0 }, +}; + +static const struct 
malidp_layer malidp650_layers[] = { + /* id, base address, fb pointer address base, stride offset, + * yuv2rgb matrix offset, mmu control register offset, + * rotation_features + */ + { DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE, + MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB, + MALIDP650_DE_LV_MMU_CTRL, ROTATE_ANY, + MALIDP550_DE_LV1_AD_CTRL }, + { DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE, + MALIDP_DE_LG_STRIDE, 0, MALIDP650_DE_LG_MMU_CTRL, + ROTATE_COMPRESSED, MALIDP550_DE_LG_AD_CTRL }, + { DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE, + MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB, + MALIDP650_DE_LV_MMU_CTRL, ROTATE_ANY, + MALIDP550_DE_LV2_AD_CTRL }, + { DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE, + MALIDP550_DE_LS_R1_STRIDE, 0, MALIDP650_DE_LS_MMU_CTRL, + ROTATE_NONE, 0 }, +}; + +const u64 malidp_format_modifiers[] = { + /* All RGB formats (except XRGB, RGBX, XBGR, BGRX) */ + DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_YTR | AFBC_SPARSE), + DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_YTR), + + /* All RGB formats > 16bpp (except XRGB, RGBX, XBGR, BGRX) */ + DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_YTR | AFBC_SPARSE | AFBC_SPLIT), + + /* All 8 or 10 bit YUV 444 formats. */ + /* In DP550, 10 bit YUV 420 format also supported */ + DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_SPARSE | AFBC_SPLIT), + + /* YUV 420, 422 P1 8 bit and YUV 444 8 bit/10 bit formats */ + DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_SPARSE), + DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16), + + /* YUV 420, 422 P1 8, 10 bit formats */ + DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_CBR | AFBC_SPARSE), + DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_CBR), + + /* All formats */ + DRM_FORMAT_MOD_LINEAR, + + DRM_FORMAT_MOD_INVALID +}; + +#define SE_N_SCALING_COEFFS 96 +static const u16 dp500_se_scaling_coeffs[][SE_N_SCALING_COEFFS] = { + [MALIDP_UPSCALING_COEFFS - 1] = { + 0x0000, 0x0001, 0x0007, 0x0011, 0x001e, 0x002e, 0x003f, 0x0052, + 0x0064, 0x0073, 0x007d, 0x0080, 0x007a, 0x006c, 0x0053, 0x002f, + 0x0000, 0x3fc6, 0x3f83, 0x3f39, 0x3eea, 0x3e9b, 0x3e4f, 0x3e0a, + 0x3dd4, 0x3db0, 0x3da2, 0x3db1, 0x3dde, 0x3e2f, 0x3ea5, 0x3f40, + 0x0000, 0x00e5, 0x01ee, 0x0315, 0x0456, 0x05aa, 0x0709, 0x086c, + 0x09c9, 0x0b15, 0x0c4a, 0x0d5d, 0x0e4a, 0x0f06, 0x0f91, 0x0fe5, + 0x1000, 0x0fe5, 0x0f91, 0x0f06, 0x0e4a, 0x0d5d, 0x0c4a, 0x0b15, + 0x09c9, 0x086c, 0x0709, 0x05aa, 0x0456, 0x0315, 0x01ee, 0x00e5, + 0x0000, 0x3f40, 0x3ea5, 0x3e2f, 0x3dde, 0x3db1, 0x3da2, 0x3db0, + 0x3dd4, 0x3e0a, 0x3e4f, 0x3e9b, 0x3eea, 0x3f39, 0x3f83, 0x3fc6, + 0x0000, 0x002f, 0x0053, 0x006c, 0x007a, 0x0080, 0x007d, 0x0073, + 0x0064, 0x0052, 0x003f, 0x002e, 0x001e, 0x0011, 0x0007, 0x0001 + }, + [MALIDP_DOWNSCALING_1_5_COEFFS - 1] = { + 0x0059, 0x004f, 0x0041, 0x002e, 0x0016, 0x3ffb, 0x3fd9, 0x3fb4, + 0x3f8c, 0x3f62, 0x3f36, 0x3f09, 0x3edd, 0x3eb3, 0x3e8d, 0x3e6c, + 0x3e52, 0x3e3f, 0x3e35, 0x3e37, 0x3e46, 0x3e61, 0x3e8c, 0x3ec5, + 0x3f0f, 0x3f68, 0x3fd1, 0x004a, 0x00d3, 0x0169, 0x020b, 0x02b8, + 0x036e, 0x042d, 0x04f2, 0x05b9, 0x0681, 0x0745, 0x0803, 0x08ba, + 0x0965, 0x0a03, 0x0a91, 0x0b0d, 0x0b75, 0x0bc6, 0x0c00, 0x0c20, + 0x0c28, 0x0c20, 0x0c00, 0x0bc6, 0x0b75, 0x0b0d, 0x0a91, 0x0a03, + 0x0965, 0x08ba, 0x0803, 0x0745, 0x0681, 0x05b9, 0x04f2, 0x042d, + 0x036e, 0x02b8, 0x020b, 0x0169, 0x00d3, 0x004a, 0x3fd1, 0x3f68, + 0x3f0f, 0x3ec5, 0x3e8c, 0x3e61, 0x3e46, 0x3e37, 0x3e35, 0x3e3f, + 0x3e52, 0x3e6c, 0x3e8d, 0x3eb3, 0x3edd, 0x3f09, 0x3f36, 0x3f62, + 0x3f8c, 0x3fb4, 0x3fd9, 0x3ffb, 0x0016, 
0x002e, 0x0041, 0x004f + }, + [MALIDP_DOWNSCALING_2_COEFFS - 1] = { + 0x3f19, 0x3f03, 0x3ef0, 0x3edf, 0x3ed0, 0x3ec5, 0x3ebd, 0x3eb9, + 0x3eb9, 0x3ebf, 0x3eca, 0x3ed9, 0x3eef, 0x3f0a, 0x3f2c, 0x3f52, + 0x3f7f, 0x3fb0, 0x3fe8, 0x0026, 0x006a, 0x00b4, 0x0103, 0x0158, + 0x01b1, 0x020d, 0x026c, 0x02cd, 0x032f, 0x0392, 0x03f4, 0x0455, + 0x04b4, 0x051e, 0x0585, 0x05eb, 0x064c, 0x06a8, 0x06fe, 0x074e, + 0x0796, 0x07d5, 0x080c, 0x0839, 0x085c, 0x0875, 0x0882, 0x0887, + 0x0881, 0x0887, 0x0882, 0x0875, 0x085c, 0x0839, 0x080c, 0x07d5, + 0x0796, 0x074e, 0x06fe, 0x06a8, 0x064c, 0x05eb, 0x0585, 0x051e, + 0x04b4, 0x0455, 0x03f4, 0x0392, 0x032f, 0x02cd, 0x026c, 0x020d, + 0x01b1, 0x0158, 0x0103, 0x00b4, 0x006a, 0x0026, 0x3fe8, 0x3fb0, + 0x3f7f, 0x3f52, 0x3f2c, 0x3f0a, 0x3eef, 0x3ed9, 0x3eca, 0x3ebf, + 0x3eb9, 0x3eb9, 0x3ebd, 0x3ec5, 0x3ed0, 0x3edf, 0x3ef0, 0x3f03 + }, + [MALIDP_DOWNSCALING_2_75_COEFFS - 1] = { + 0x3f51, 0x3f60, 0x3f71, 0x3f84, 0x3f98, 0x3faf, 0x3fc8, 0x3fe3, + 0x0000, 0x001f, 0x0040, 0x0064, 0x008a, 0x00b1, 0x00da, 0x0106, + 0x0133, 0x0160, 0x018e, 0x01bd, 0x01ec, 0x021d, 0x024e, 0x0280, + 0x02b2, 0x02e4, 0x0317, 0x0349, 0x037c, 0x03ad, 0x03df, 0x0410, + 0x0440, 0x0468, 0x048f, 0x04b3, 0x04d6, 0x04f8, 0x0516, 0x0533, + 0x054e, 0x0566, 0x057c, 0x0590, 0x05a0, 0x05ae, 0x05ba, 0x05c3, + 0x05c9, 0x05c3, 0x05ba, 0x05ae, 0x05a0, 0x0590, 0x057c, 0x0566, + 0x054e, 0x0533, 0x0516, 0x04f8, 0x04d6, 0x04b3, 0x048f, 0x0468, + 0x0440, 0x0410, 0x03df, 0x03ad, 0x037c, 0x0349, 0x0317, 0x02e4, + 0x02b2, 0x0280, 0x024e, 0x021d, 0x01ec, 0x01bd, 0x018e, 0x0160, + 0x0133, 0x0106, 0x00da, 0x00b1, 0x008a, 0x0064, 0x0040, 0x001f, + 0x0000, 0x3fe3, 0x3fc8, 0x3faf, 0x3f98, 0x3f84, 0x3f71, 0x3f60 + }, + [MALIDP_DOWNSCALING_4_COEFFS - 1] = { + 0x0094, 0x00a9, 0x00be, 0x00d4, 0x00ea, 0x0101, 0x0118, 0x012f, + 0x0148, 0x0160, 0x017a, 0x0193, 0x01ae, 0x01c8, 0x01e4, 0x01ff, + 0x021c, 0x0233, 0x024a, 0x0261, 0x0278, 0x028f, 0x02a6, 0x02bd, + 0x02d4, 0x02eb, 0x0302, 0x0319, 0x032f, 0x0346, 0x035d, 0x0374, + 0x038a, 0x0397, 0x03a3, 0x03af, 0x03bb, 0x03c6, 0x03d1, 0x03db, + 0x03e4, 0x03ed, 0x03f6, 0x03fe, 0x0406, 0x040d, 0x0414, 0x041a, + 0x0420, 0x041a, 0x0414, 0x040d, 0x0406, 0x03fe, 0x03f6, 0x03ed, + 0x03e4, 0x03db, 0x03d1, 0x03c6, 0x03bb, 0x03af, 0x03a3, 0x0397, + 0x038a, 0x0374, 0x035d, 0x0346, 0x032f, 0x0319, 0x0302, 0x02eb, + 0x02d4, 0x02bd, 0x02a6, 0x028f, 0x0278, 0x0261, 0x024a, 0x0233, + 0x021c, 0x01ff, 0x01e4, 0x01c8, 0x01ae, 0x0193, 0x017a, 0x0160, + 0x0148, 0x012f, 0x0118, 0x0101, 0x00ea, 0x00d4, 0x00be, 0x00a9 + }, +}; + +#define MALIDP_DE_DEFAULT_PREFETCH_START 5 + +static int malidp500_query_hw(struct malidp_hw_device *hwdev) +{ + u32 conf = malidp_hw_read(hwdev, MALIDP500_CONFIG_ID); + /* bit 4 of the CONFIG_ID register holds the line size multiplier */ + u8 ln_size_mult = conf & 0x10 ? 
2 : 1; + + hwdev->min_line_size = 2; + hwdev->max_line_size = SZ_2K * ln_size_mult; + hwdev->rotation_memory[0] = SZ_1K * 64 * ln_size_mult; + hwdev->rotation_memory[1] = 0; /* no second rotation memory bank */ + + return 0; +} + +static void malidp500_enter_config_mode(struct malidp_hw_device *hwdev) +{ + u32 status, count = 100; + + malidp_hw_setbits(hwdev, MALIDP500_DC_CONFIG_REQ, MALIDP500_DC_CONTROL); + while (count) { + status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS); + if ((status & MALIDP500_DC_CONFIG_REQ) == MALIDP500_DC_CONFIG_REQ) + break; + /* + * entering config mode can take as long as the rendering + * of a full frame, hence the long sleep here + */ + usleep_range(1000, 10000); + count--; + } + WARN(count == 0, "timeout while entering config mode"); +} + +static void malidp500_leave_config_mode(struct malidp_hw_device *hwdev) +{ + u32 status, count = 100; + + malidp_hw_clearbits(hwdev, MALIDP_CFG_VALID, MALIDP500_CONFIG_VALID); + malidp_hw_clearbits(hwdev, MALIDP500_DC_CONFIG_REQ, MALIDP500_DC_CONTROL); + while (count) { + status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS); + if ((status & MALIDP500_DC_CONFIG_REQ) == 0) + break; + usleep_range(100, 1000); + count--; + } + WARN(count == 0, "timeout while leaving config mode"); +} + +static bool malidp500_in_config_mode(struct malidp_hw_device *hwdev) +{ + u32 status; + + status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS); + if ((status & MALIDP500_DC_CONFIG_REQ) == MALIDP500_DC_CONFIG_REQ) + return true; + + return false; +} + +static void malidp500_set_config_valid(struct malidp_hw_device *hwdev, u8 value) +{ + if (value) + malidp_hw_setbits(hwdev, MALIDP_CFG_VALID, MALIDP500_CONFIG_VALID); + else + malidp_hw_clearbits(hwdev, MALIDP_CFG_VALID, MALIDP500_CONFIG_VALID); +} + +static void malidp500_modeset(struct malidp_hw_device *hwdev, struct videomode *mode) +{ + u32 val = 0; + + malidp_hw_write(hwdev, hwdev->output_color_depth, + hwdev->hw->map.out_depth_base); + malidp_hw_clearbits(hwdev, MALIDP500_DC_CLEAR_MASK, MALIDP500_DC_CONTROL); + if (mode->flags & DISPLAY_FLAGS_HSYNC_HIGH) + val |= MALIDP500_HSYNCPOL; + if (mode->flags & DISPLAY_FLAGS_VSYNC_HIGH) + val |= MALIDP500_VSYNCPOL; + val |= MALIDP_DE_DEFAULT_PREFETCH_START; + malidp_hw_setbits(hwdev, val, MALIDP500_DC_CONTROL); + + /* + * Mali-DP500 encodes the background color like this: + * - red @ MALIDP500_BGND_COLOR[12:0] + * - green @ MALIDP500_BGND_COLOR[27:16] + * - blue @ (MALIDP500_BGND_COLOR + 4)[12:0] + */ + val = ((MALIDP_BGND_COLOR_G & 0xfff) << 16) | + (MALIDP_BGND_COLOR_R & 0xfff); + malidp_hw_write(hwdev, val, MALIDP500_BGND_COLOR); + malidp_hw_write(hwdev, MALIDP_BGND_COLOR_B, MALIDP500_BGND_COLOR + 4); + + val = MALIDP_DE_H_FRONTPORCH(mode->hfront_porch) | + MALIDP_DE_H_BACKPORCH(mode->hback_porch); + malidp_hw_write(hwdev, val, MALIDP500_TIMINGS_BASE + MALIDP_DE_H_TIMINGS); + + val = MALIDP500_DE_V_FRONTPORCH(mode->vfront_porch) | + MALIDP_DE_V_BACKPORCH(mode->vback_porch); + malidp_hw_write(hwdev, val, MALIDP500_TIMINGS_BASE + MALIDP_DE_V_TIMINGS); + + val = MALIDP_DE_H_SYNCWIDTH(mode->hsync_len) | + MALIDP_DE_V_SYNCWIDTH(mode->vsync_len); + malidp_hw_write(hwdev, val, MALIDP500_TIMINGS_BASE + MALIDP_DE_SYNC_WIDTH); + + val = MALIDP_DE_H_ACTIVE(mode->hactive) | MALIDP_DE_V_ACTIVE(mode->vactive); + malidp_hw_write(hwdev, val, MALIDP500_TIMINGS_BASE + MALIDP_DE_HV_ACTIVE); + + if (mode->flags & DISPLAY_FLAGS_INTERLACED) + malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_ILACED, 
MALIDP_DE_DISPLAY_FUNC); + else + malidp_hw_clearbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC); + + /* + * Program the RQoS register to avoid the flicker issue seen at + * high resolutions on the LS1028A. + */ + if (hwdev->arqos_value) { + val = hwdev->arqos_value; + malidp_hw_setbits(hwdev, val, MALIDP500_RQOS_QUALITY); + } +} + +int malidp_format_get_bpp(u32 fmt) +{ + const struct drm_format_info *info = drm_format_info(fmt); + int bpp = info->cpp[0] * 8; + + if (bpp == 0) { + switch (fmt) { + case DRM_FORMAT_VUY101010: + bpp = 30; + break; + case DRM_FORMAT_YUV420_10BIT: + bpp = 15; + break; + case DRM_FORMAT_YUV420_8BIT: + bpp = 12; + break; + default: + bpp = 0; + } + } + + return bpp; +} + +static int malidp500_rotmem_required(struct malidp_hw_device *hwdev, u16 w, + u16 h, u32 fmt, bool has_modifier) +{ + /* + * Each layer needs enough rotation memory to fit 8 lines + * worth of pixel data. Required size is then: + * size = rotated_width * (bpp / 8) * 8; + */ + int bpp = malidp_format_get_bpp(fmt); + + return w * bpp; +} + +static void malidp500_se_write_pp_coefftab(struct malidp_hw_device *hwdev, + u32 direction, + u16 addr, + u8 coeffs_id) +{ + int i; + u16 scaling_control = MALIDP500_SE_CONTROL + MALIDP_SE_SCALING_CONTROL; + + malidp_hw_write(hwdev, + direction | (addr & MALIDP_SE_COEFFTAB_ADDR_MASK), + scaling_control + MALIDP_SE_COEFFTAB_ADDR); + for (i = 0; i < ARRAY_SIZE(dp500_se_scaling_coeffs); ++i) + malidp_hw_write(hwdev, MALIDP_SE_SET_COEFFTAB_DATA( + dp500_se_scaling_coeffs[coeffs_id][i]), + scaling_control + MALIDP_SE_COEFFTAB_DATA); +} + +static int malidp500_se_set_scaling_coeffs(struct malidp_hw_device *hwdev, + struct malidp_se_config *se_config, + struct malidp_se_config *old_config) +{ + /* Get array indices into dp500_se_scaling_coeffs. */ + u8 h = (u8)se_config->hcoeff - 1; + u8 v = (u8)se_config->vcoeff - 1; + + if (WARN_ON(h >= ARRAY_SIZE(dp500_se_scaling_coeffs) || + v >= ARRAY_SIZE(dp500_se_scaling_coeffs))) + return -EINVAL; + + if ((h == v) && (se_config->hcoeff != old_config->hcoeff || + se_config->vcoeff != old_config->vcoeff)) { + malidp500_se_write_pp_coefftab(hwdev, + (MALIDP_SE_V_COEFFTAB | + MALIDP_SE_H_COEFFTAB), + 0, v); + } else { + if (se_config->vcoeff != old_config->vcoeff) + malidp500_se_write_pp_coefftab(hwdev, + MALIDP_SE_V_COEFFTAB, + 0, v); + if (se_config->hcoeff != old_config->hcoeff) + malidp500_se_write_pp_coefftab(hwdev, + MALIDP_SE_H_COEFFTAB, + 0, h); + } + + return 0; +} + +static long malidp500_se_calc_mclk(struct malidp_hw_device *hwdev, + struct malidp_se_config *se_config, + struct videomode *vm) +{ + unsigned long mclk; + unsigned long pxlclk = vm->pixelclock; /* Hz */ + unsigned long htotal = vm->hactive + vm->hfront_porch + + vm->hback_porch + vm->hsync_len; + unsigned long input_size = se_config->input_w * se_config->input_h; + unsigned long a = 10; + long ret; + + /* + * mclk = max(a, 1.5) * pxlclk + * + * To avoid float calculation, use 15 instead of 1.5 and divide by + * 10 to get mclk.
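 + * + * Worked example (illustrative numbers only): downscaling a 1920x1080 + * source to 1280x720 on a mode with htotal = 2200 gives + * a = 15 * (1920 * 1080) / (2200 * 720) = 19, so mclk must be at + * least 1.9 * pxlclk.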
+ */ + if (se_config->scale_enable) { + a = 15 * input_size / (htotal * se_config->output_h); + if (a < 15) + a = 15; + } + mclk = a * pxlclk / 10; + ret = clk_get_rate(hwdev->mclk); + if (ret < mclk) { + DRM_DEBUG_DRIVER("mclk requirement of %lu kHz can't be met.\n", + mclk / 1000); + return -EINVAL; + } + return ret; +} + +static int malidp500_enable_memwrite(struct malidp_hw_device *hwdev, + dma_addr_t *addrs, s32 *pitches, + int num_planes, u16 w, u16 h, u32 fmt_id, + const s16 *rgb2yuv_coeffs) +{ + u32 base = MALIDP500_SE_MEMWRITE_BASE; + u32 de_base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK); + + /* enable the scaling engine block */ + malidp_hw_setbits(hwdev, MALIDP_SCALE_ENGINE_EN, de_base + MALIDP_DE_DISPLAY_FUNC); + + /* restart the writeback if already enabled */ + if (hwdev->mw_state != MW_NOT_ENABLED) + hwdev->mw_state = MW_RESTART; + else + hwdev->mw_state = MW_START; + + malidp_hw_write(hwdev, fmt_id, base + MALIDP_MW_FORMAT); + switch (num_planes) { + case 2: + malidp_hw_write(hwdev, lower_32_bits(addrs[1]), base + MALIDP_MW_P2_PTR_LOW); + malidp_hw_write(hwdev, upper_32_bits(addrs[1]), base + MALIDP_MW_P2_PTR_HIGH); + malidp_hw_write(hwdev, pitches[1], base + MALIDP_MW_P2_STRIDE); + fallthrough; + case 1: + malidp_hw_write(hwdev, lower_32_bits(addrs[0]), base + MALIDP_MW_P1_PTR_LOW); + malidp_hw_write(hwdev, upper_32_bits(addrs[0]), base + MALIDP_MW_P1_PTR_HIGH); + malidp_hw_write(hwdev, pitches[0], base + MALIDP_MW_P1_STRIDE); + break; + default: + WARN(1, "Invalid number of planes"); + } + + malidp_hw_write(hwdev, MALIDP_DE_H_ACTIVE(w) | MALIDP_DE_V_ACTIVE(h), + MALIDP500_SE_MEMWRITE_OUT_SIZE); + + if (rgb2yuv_coeffs) { + int i; + + for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; i++) { + malidp_hw_write(hwdev, rgb2yuv_coeffs[i], + MALIDP500_SE_RGB_YUV_COEFFS + i * 4); + } + } + + malidp_hw_setbits(hwdev, MALIDP_SE_MEMWRITE_EN, MALIDP500_SE_CONTROL); + + return 0; +} + +static void malidp500_disable_memwrite(struct malidp_hw_device *hwdev) +{ + u32 base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK); + + if (hwdev->mw_state == MW_START || hwdev->mw_state == MW_RESTART) + hwdev->mw_state = MW_STOP; + malidp_hw_clearbits(hwdev, MALIDP_SE_MEMWRITE_EN, MALIDP500_SE_CONTROL); + malidp_hw_clearbits(hwdev, MALIDP_SCALE_ENGINE_EN, base + MALIDP_DE_DISPLAY_FUNC); +} + +static int malidp550_query_hw(struct malidp_hw_device *hwdev) +{ + u32 conf = malidp_hw_read(hwdev, MALIDP550_CONFIG_ID); + u8 ln_size = (conf >> 4) & 0x3, rsize; + + hwdev->min_line_size = 2; + + switch (ln_size) { + case 0: + hwdev->max_line_size = SZ_2K; + /* two banks of 64KB for rotation memory */ + rsize = 64; + break; + case 1: + hwdev->max_line_size = SZ_4K; + /* two banks of 128KB for rotation memory */ + rsize = 128; + break; + case 2: + hwdev->max_line_size = 1280; + /* two banks of 40KB for rotation memory */ + rsize = 40; + break; + case 3: + /* reserved value */ + hwdev->max_line_size = 0; + return -EINVAL; + } + + hwdev->rotation_memory[0] = hwdev->rotation_memory[1] = rsize * SZ_1K; + return 0; +} + +static void malidp550_enter_config_mode(struct malidp_hw_device *hwdev) +{ + u32 status, count = 100; + + malidp_hw_setbits(hwdev, MALIDP550_DC_CONFIG_REQ, MALIDP550_DC_CONTROL); + while (count) { + status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS); + if ((status & MALIDP550_DC_CONFIG_REQ) == MALIDP550_DC_CONFIG_REQ) + break; + /* + * entering config mode can take as long as the rendering + * of a full frame, hence the long sleep here + */ + usleep_range(1000, 10000); + 
count--; + } + WARN(count == 0, "timeout while entering config mode"); +} + +static void malidp550_leave_config_mode(struct malidp_hw_device *hwdev) +{ + u32 status, count = 100; + + malidp_hw_clearbits(hwdev, MALIDP_CFG_VALID, MALIDP550_CONFIG_VALID); + malidp_hw_clearbits(hwdev, MALIDP550_DC_CONFIG_REQ, MALIDP550_DC_CONTROL); + while (count) { + status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS); + if ((status & MALIDP550_DC_CONFIG_REQ) == 0) + break; + usleep_range(100, 1000); + count--; + } + WARN(count == 0, "timeout while leaving config mode"); +} + +static bool malidp550_in_config_mode(struct malidp_hw_device *hwdev) +{ + u32 status; + + status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS); + if ((status & MALIDP550_DC_CONFIG_REQ) == MALIDP550_DC_CONFIG_REQ) + return true; + + return false; +} + +static void malidp550_set_config_valid(struct malidp_hw_device *hwdev, u8 value) +{ + if (value) + malidp_hw_setbits(hwdev, MALIDP_CFG_VALID, MALIDP550_CONFIG_VALID); + else + malidp_hw_clearbits(hwdev, MALIDP_CFG_VALID, MALIDP550_CONFIG_VALID); +} + +static void malidp550_modeset(struct malidp_hw_device *hwdev, struct videomode *mode) +{ + u32 val = MALIDP_DE_DEFAULT_PREFETCH_START; + + malidp_hw_write(hwdev, hwdev->output_color_depth, + hwdev->hw->map.out_depth_base); + malidp_hw_write(hwdev, val, MALIDP550_DE_CONTROL); + /* + * Mali-DP550 and Mali-DP650 encode the background color like this: + * - red @ MALIDP550_DE_BGND_COLOR[23:16] + * - green @ MALIDP550_DE_BGND_COLOR[15:8] + * - blue @ MALIDP550_DE_BGND_COLOR[7:0] + * + * We need to truncate the least significant 4 bits from the default + * MALIDP_BGND_COLOR_x values + */ + val = (((MALIDP_BGND_COLOR_R >> 4) & 0xff) << 16) | + (((MALIDP_BGND_COLOR_G >> 4) & 0xff) << 8) | + ((MALIDP_BGND_COLOR_B >> 4) & 0xff); + malidp_hw_write(hwdev, val, MALIDP550_DE_BGND_COLOR); + + val = MALIDP_DE_H_FRONTPORCH(mode->hfront_porch) | + MALIDP_DE_H_BACKPORCH(mode->hback_porch); + malidp_hw_write(hwdev, val, MALIDP550_TIMINGS_BASE + MALIDP_DE_H_TIMINGS); + + val = MALIDP550_DE_V_FRONTPORCH(mode->vfront_porch) | + MALIDP_DE_V_BACKPORCH(mode->vback_porch); + malidp_hw_write(hwdev, val, MALIDP550_TIMINGS_BASE + MALIDP_DE_V_TIMINGS); + + val = MALIDP_DE_H_SYNCWIDTH(mode->hsync_len) | + MALIDP_DE_V_SYNCWIDTH(mode->vsync_len); + if (mode->flags & DISPLAY_FLAGS_HSYNC_HIGH) + val |= MALIDP550_HSYNCPOL; + if (mode->flags & DISPLAY_FLAGS_VSYNC_HIGH) + val |= MALIDP550_VSYNCPOL; + malidp_hw_write(hwdev, val, MALIDP550_TIMINGS_BASE + MALIDP_DE_SYNC_WIDTH); + + val = MALIDP_DE_H_ACTIVE(mode->hactive) | MALIDP_DE_V_ACTIVE(mode->vactive); + malidp_hw_write(hwdev, val, MALIDP550_TIMINGS_BASE + MALIDP_DE_HV_ACTIVE); + + if (mode->flags & DISPLAY_FLAGS_INTERLACED) + malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC); + else + malidp_hw_clearbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC); +} + +static int malidpx50_get_bytes_per_column(u32 fmt) +{ + u32 bytes_per_column; + + switch (fmt) { + /* 8 lines at 4 bytes per pixel */ + case DRM_FORMAT_ARGB2101010: + case DRM_FORMAT_ABGR2101010: + case DRM_FORMAT_RGBA1010102: + case DRM_FORMAT_BGRA1010102: + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_ABGR8888: + case DRM_FORMAT_RGBA8888: + case DRM_FORMAT_BGRA8888: + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_RGBX8888: + case DRM_FORMAT_BGRX8888: + case DRM_FORMAT_RGB888: + case DRM_FORMAT_BGR888: + /* 16 lines at 2 bytes per pixel */ + case DRM_FORMAT_RGBA5551: + 
case DRM_FORMAT_ABGR1555: + case DRM_FORMAT_RGB565: + case DRM_FORMAT_BGR565: + case DRM_FORMAT_UYVY: + case DRM_FORMAT_YUYV: + case DRM_FORMAT_X0L0: + bytes_per_column = 32; + break; + /* 16 lines at 1.5 bytes per pixel */ + case DRM_FORMAT_NV12: + case DRM_FORMAT_YUV420: + /* 8 lines at 3 bytes per pixel */ + case DRM_FORMAT_VUY888: + /* 16 lines at 12 bits per pixel */ + case DRM_FORMAT_YUV420_8BIT: + /* 8 lines at 3 bytes per pixel */ + case DRM_FORMAT_P010: + bytes_per_column = 24; + break; + /* 8 lines at 30 bits per pixel */ + case DRM_FORMAT_VUY101010: + /* 16 lines at 15 bits per pixel */ + case DRM_FORMAT_YUV420_10BIT: + bytes_per_column = 30; + break; + default: + return -EINVAL; + } + + return bytes_per_column; +} + +static int malidp550_rotmem_required(struct malidp_hw_device *hwdev, u16 w, + u16 h, u32 fmt, bool has_modifier) +{ + int bytes_per_column = 0; + + switch (fmt) { + /* 8 lines at 15 bits per pixel */ + case DRM_FORMAT_YUV420_10BIT: + bytes_per_column = 15; + break; + /* Uncompressed YUV 420 10 bit single plane cannot be rotated */ + case DRM_FORMAT_X0L2: + if (has_modifier) + bytes_per_column = 8; + else + return -EINVAL; + break; + default: + bytes_per_column = malidpx50_get_bytes_per_column(fmt); + } + + if (bytes_per_column == -EINVAL) + return bytes_per_column; + + return w * bytes_per_column; +} + +static int malidp650_rotmem_required(struct malidp_hw_device *hwdev, u16 w, + u16 h, u32 fmt, bool has_modifier) +{ + int bytes_per_column = 0; + + switch (fmt) { + /* 16 lines at 2 bytes per pixel */ + case DRM_FORMAT_X0L2: + bytes_per_column = 32; + break; + default: + bytes_per_column = malidpx50_get_bytes_per_column(fmt); + } + + if (bytes_per_column == -EINVAL) + return bytes_per_column; + + return w * bytes_per_column; +} + +static int malidp550_se_set_scaling_coeffs(struct malidp_hw_device *hwdev, + struct malidp_se_config *se_config, + struct malidp_se_config *old_config) +{ + u32 mask = MALIDP550_SE_CTL_VCSEL(MALIDP550_SE_CTL_SEL_MASK) | + MALIDP550_SE_CTL_HCSEL(MALIDP550_SE_CTL_SEL_MASK); + u32 new_value = MALIDP550_SE_CTL_VCSEL(se_config->vcoeff) | + MALIDP550_SE_CTL_HCSEL(se_config->hcoeff); + + malidp_hw_clearbits(hwdev, mask, MALIDP550_SE_CONTROL); + malidp_hw_setbits(hwdev, new_value, MALIDP550_SE_CONTROL); + return 0; +} + +static long malidp550_se_calc_mclk(struct malidp_hw_device *hwdev, + struct malidp_se_config *se_config, + struct videomode *vm) +{ + unsigned long mclk; + unsigned long pxlclk = vm->pixelclock; + unsigned long htotal = vm->hactive + vm->hfront_porch + + vm->hback_porch + vm->hsync_len; + unsigned long numerator = 1, denominator = 1; + long ret; + + if (se_config->scale_enable) { + numerator = max(se_config->input_w, se_config->output_w) * + se_config->input_h; + numerator += se_config->output_w * + (se_config->output_h - + min(se_config->input_h, se_config->output_h)); + denominator = (htotal - 2) * se_config->output_h; + } + + /* mclk can't be slower than pxlclk. 
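For example (illustrative numbers only): upscaling 1280x720 to + * 1920x1080 on a mode with htotal = 2200 gives + * numerator = 1920 * 720 + 1920 * (1080 - 720) = 2073600 and + * denominator = 2198 * 1080 = 2373840; numerator < denominator, so + * the ratio is clamped to 1:1 and mclk = pxlclk.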
*/ + if (numerator < denominator) + numerator = denominator = 1; + mclk = (pxlclk * numerator) / denominator; + ret = clk_get_rate(hwdev->mclk); + if (ret < mclk) { + DRM_DEBUG_DRIVER("mclk requirement of %lu kHz can't be met.\n", + mclk / 1000); + return -EINVAL; + } + return ret; +} + +static int malidp550_enable_memwrite(struct malidp_hw_device *hwdev, + dma_addr_t *addrs, s32 *pitches, + int num_planes, u16 w, u16 h, u32 fmt_id, + const s16 *rgb2yuv_coeffs) +{ + u32 base = MALIDP550_SE_MEMWRITE_BASE; + u32 de_base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK); + + /* enable the scaling engine block */ + malidp_hw_setbits(hwdev, MALIDP_SCALE_ENGINE_EN, de_base + MALIDP_DE_DISPLAY_FUNC); + + hwdev->mw_state = MW_ONESHOT; + + malidp_hw_write(hwdev, fmt_id, base + MALIDP_MW_FORMAT); + switch (num_planes) { + case 2: + malidp_hw_write(hwdev, lower_32_bits(addrs[1]), base + MALIDP_MW_P2_PTR_LOW); + malidp_hw_write(hwdev, upper_32_bits(addrs[1]), base + MALIDP_MW_P2_PTR_HIGH); + malidp_hw_write(hwdev, pitches[1], base + MALIDP_MW_P2_STRIDE); + fallthrough; + case 1: + malidp_hw_write(hwdev, lower_32_bits(addrs[0]), base + MALIDP_MW_P1_PTR_LOW); + malidp_hw_write(hwdev, upper_32_bits(addrs[0]), base + MALIDP_MW_P1_PTR_HIGH); + malidp_hw_write(hwdev, pitches[0], base + MALIDP_MW_P1_STRIDE); + break; + default: + WARN(1, "Invalid number of planes"); + } + + malidp_hw_write(hwdev, MALIDP_DE_H_ACTIVE(w) | MALIDP_DE_V_ACTIVE(h), + MALIDP550_SE_MEMWRITE_OUT_SIZE); + malidp_hw_setbits(hwdev, MALIDP550_SE_MEMWRITE_ONESHOT | MALIDP_SE_MEMWRITE_EN, + MALIDP550_SE_CONTROL); + + if (rgb2yuv_coeffs) { + int i; + + for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; i++) { + malidp_hw_write(hwdev, rgb2yuv_coeffs[i], + MALIDP550_SE_RGB_YUV_COEFFS + i * 4); + } + } + + return 0; +} + +static void malidp550_disable_memwrite(struct malidp_hw_device *hwdev) +{ + u32 base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK); + + malidp_hw_clearbits(hwdev, MALIDP550_SE_MEMWRITE_ONESHOT | MALIDP_SE_MEMWRITE_EN, + MALIDP550_SE_CONTROL); + malidp_hw_clearbits(hwdev, MALIDP_SCALE_ENGINE_EN, base + MALIDP_DE_DISPLAY_FUNC); +} + +static int malidp650_query_hw(struct malidp_hw_device *hwdev) +{ + u32 conf = malidp_hw_read(hwdev, MALIDP550_CONFIG_ID); + u8 ln_size = (conf >> 4) & 0x3, rsize; + + hwdev->min_line_size = 4; + + switch (ln_size) { + case 0: + case 2: + /* reserved values */ + hwdev->max_line_size = 0; + return -EINVAL; + case 1: + hwdev->max_line_size = SZ_4K; + /* two banks of 128KB for rotation memory */ + rsize = 128; + break; + case 3: + hwdev->max_line_size = 2560; + /* two banks of 80KB for rotation memory */ + rsize = 80; + } + + hwdev->rotation_memory[0] = hwdev->rotation_memory[1] = rsize * SZ_1K; + return 0; +} + +const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = { + [MALIDP_500] = { + .map = { + .coeffs_base = MALIDP500_COEFFS_BASE, + .se_base = MALIDP500_SE_BASE, + .dc_base = MALIDP500_DC_BASE, + .out_depth_base = MALIDP500_OUTPUT_DEPTH, + .features = 0, /* no CLEARIRQ register */ + .n_layers = ARRAY_SIZE(malidp500_layers), + .layers = malidp500_layers, + .de_irq_map = { + .irq_mask = MALIDP_DE_IRQ_UNDERRUN | + MALIDP500_DE_IRQ_AXI_ERR | + MALIDP500_DE_IRQ_VSYNC | + MALIDP500_DE_IRQ_GLOBAL, + .vsync_irq = MALIDP500_DE_IRQ_VSYNC, + .err_mask = MALIDP_DE_IRQ_UNDERRUN | + MALIDP500_DE_IRQ_AXI_ERR | + MALIDP500_DE_IRQ_SATURATION, + }, + .se_irq_map = { + .irq_mask = MALIDP500_SE_IRQ_CONF_MODE | + MALIDP500_SE_IRQ_CONF_VALID | + MALIDP500_SE_IRQ_GLOBAL, + .vsync_irq = MALIDP500_SE_IRQ_CONF_VALID, + 
.err_mask = MALIDP500_SE_IRQ_INIT_BUSY | + MALIDP500_SE_IRQ_AXI_ERROR | + MALIDP500_SE_IRQ_OVERRUN, + }, + .dc_irq_map = { + .irq_mask = MALIDP500_DE_IRQ_CONF_VALID, + .vsync_irq = MALIDP500_DE_IRQ_CONF_VALID, + }, + .pixel_formats = malidp500_de_formats, + .n_pixel_formats = ARRAY_SIZE(malidp500_de_formats), + .bus_align_bytes = 8, + }, + .query_hw = malidp500_query_hw, + .enter_config_mode = malidp500_enter_config_mode, + .leave_config_mode = malidp500_leave_config_mode, + .in_config_mode = malidp500_in_config_mode, + .set_config_valid = malidp500_set_config_valid, + .modeset = malidp500_modeset, + .rotmem_required = malidp500_rotmem_required, + .se_set_scaling_coeffs = malidp500_se_set_scaling_coeffs, + .se_calc_mclk = malidp500_se_calc_mclk, + .enable_memwrite = malidp500_enable_memwrite, + .disable_memwrite = malidp500_disable_memwrite, + .features = MALIDP_DEVICE_LV_HAS_3_STRIDES, + }, + [MALIDP_550] = { + .map = { + .coeffs_base = MALIDP550_COEFFS_BASE, + .se_base = MALIDP550_SE_BASE, + .dc_base = MALIDP550_DC_BASE, + .out_depth_base = MALIDP550_DE_OUTPUT_DEPTH, + .features = MALIDP_REGMAP_HAS_CLEARIRQ | + MALIDP_DEVICE_AFBC_SUPPORT_SPLIT | + MALIDP_DEVICE_AFBC_YUV_420_10_SUPPORT_SPLIT | + MALIDP_DEVICE_AFBC_YUYV_USE_422_P2, + .n_layers = ARRAY_SIZE(malidp550_layers), + .layers = malidp550_layers, + .de_irq_map = { + .irq_mask = MALIDP_DE_IRQ_UNDERRUN | + MALIDP550_DE_IRQ_VSYNC, + .vsync_irq = MALIDP550_DE_IRQ_VSYNC, + .err_mask = MALIDP_DE_IRQ_UNDERRUN | + MALIDP550_DE_IRQ_SATURATION | + MALIDP550_DE_IRQ_AXI_ERR, + }, + .se_irq_map = { + .irq_mask = MALIDP550_SE_IRQ_EOW, + .vsync_irq = MALIDP550_SE_IRQ_EOW, + .err_mask = MALIDP550_SE_IRQ_AXI_ERR | + MALIDP550_SE_IRQ_OVR | + MALIDP550_SE_IRQ_IBSY, + }, + .dc_irq_map = { + .irq_mask = MALIDP550_DC_IRQ_CONF_VALID | + MALIDP550_DC_IRQ_SE, + .vsync_irq = MALIDP550_DC_IRQ_CONF_VALID, + }, + .pixel_formats = malidp550_de_formats, + .n_pixel_formats = ARRAY_SIZE(malidp550_de_formats), + .bus_align_bytes = 8, + }, + .query_hw = malidp550_query_hw, + .enter_config_mode = malidp550_enter_config_mode, + .leave_config_mode = malidp550_leave_config_mode, + .in_config_mode = malidp550_in_config_mode, + .set_config_valid = malidp550_set_config_valid, + .modeset = malidp550_modeset, + .rotmem_required = malidp550_rotmem_required, + .se_set_scaling_coeffs = malidp550_se_set_scaling_coeffs, + .se_calc_mclk = malidp550_se_calc_mclk, + .enable_memwrite = malidp550_enable_memwrite, + .disable_memwrite = malidp550_disable_memwrite, + .features = 0, + }, + [MALIDP_650] = { + .map = { + .coeffs_base = MALIDP550_COEFFS_BASE, + .se_base = MALIDP550_SE_BASE, + .dc_base = MALIDP550_DC_BASE, + .out_depth_base = MALIDP550_DE_OUTPUT_DEPTH, + .features = MALIDP_REGMAP_HAS_CLEARIRQ | + MALIDP_DEVICE_AFBC_SUPPORT_SPLIT | + MALIDP_DEVICE_AFBC_YUYV_USE_422_P2, + .n_layers = ARRAY_SIZE(malidp650_layers), + .layers = malidp650_layers, + .de_irq_map = { + .irq_mask = MALIDP_DE_IRQ_UNDERRUN | + MALIDP650_DE_IRQ_DRIFT | + MALIDP550_DE_IRQ_VSYNC, + .vsync_irq = MALIDP550_DE_IRQ_VSYNC, + .err_mask = MALIDP_DE_IRQ_UNDERRUN | + MALIDP650_DE_IRQ_DRIFT | + MALIDP550_DE_IRQ_SATURATION | + MALIDP550_DE_IRQ_AXI_ERR | + MALIDP650_DE_IRQ_ACEV1 | + MALIDP650_DE_IRQ_ACEV2 | + MALIDP650_DE_IRQ_ACEG | + MALIDP650_DE_IRQ_AXIEP, + }, + .se_irq_map = { + .irq_mask = MALIDP550_SE_IRQ_EOW, + .vsync_irq = MALIDP550_SE_IRQ_EOW, + .err_mask = MALIDP550_SE_IRQ_AXI_ERR | + MALIDP550_SE_IRQ_OVR | + MALIDP550_SE_IRQ_IBSY, + }, + .dc_irq_map = { + .irq_mask = MALIDP550_DC_IRQ_CONF_VALID | + 
MALIDP550_DC_IRQ_SE, + .vsync_irq = MALIDP550_DC_IRQ_CONF_VALID, + }, + .pixel_formats = malidp650_de_formats, + .n_pixel_formats = ARRAY_SIZE(malidp650_de_formats), + .bus_align_bytes = 16, + }, + .query_hw = malidp650_query_hw, + .enter_config_mode = malidp550_enter_config_mode, + .leave_config_mode = malidp550_leave_config_mode, + .in_config_mode = malidp550_in_config_mode, + .set_config_valid = malidp550_set_config_valid, + .modeset = malidp550_modeset, + .rotmem_required = malidp650_rotmem_required, + .se_set_scaling_coeffs = malidp550_se_set_scaling_coeffs, + .se_calc_mclk = malidp550_se_calc_mclk, + .enable_memwrite = malidp550_enable_memwrite, + .disable_memwrite = malidp550_disable_memwrite, + .features = 0, + }, +}; + +u8 malidp_hw_get_format_id(const struct malidp_hw_regmap *map, + u8 layer_id, u32 format, bool has_modifier) +{ + unsigned int i; + + for (i = 0; i < map->n_pixel_formats; i++) { + if (((map->pixel_formats[i].layer & layer_id) == layer_id) && + (map->pixel_formats[i].format == format)) { + /* + * In some DP550 and DP650, DRM_FORMAT_YUYV + AFBC modifier + * is supported by a different h/w format id than + * DRM_FORMAT_YUYV (only). + */ + if (format == DRM_FORMAT_YUYV && + (has_modifier) && + (map->features & MALIDP_DEVICE_AFBC_YUYV_USE_422_P2)) + return AFBC_YUV_422_FORMAT_ID; + else + return map->pixel_formats[i].id; + } + } + + return MALIDP_INVALID_FORMAT_ID; +} + +bool malidp_hw_format_is_linear_only(u32 format) +{ + switch (format) { + case DRM_FORMAT_ARGB2101010: + case DRM_FORMAT_RGBA1010102: + case DRM_FORMAT_BGRA1010102: + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_RGBA8888: + case DRM_FORMAT_BGRA8888: + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_RGBX8888: + case DRM_FORMAT_BGRX8888: + case DRM_FORMAT_RGB888: + case DRM_FORMAT_RGB565: + case DRM_FORMAT_ARGB1555: + case DRM_FORMAT_RGBA5551: + case DRM_FORMAT_BGRA5551: + case DRM_FORMAT_UYVY: + case DRM_FORMAT_XYUV8888: + case DRM_FORMAT_XVYU2101010: + case DRM_FORMAT_X0L2: + case DRM_FORMAT_X0L0: + return true; + default: + return false; + } +} + +bool malidp_hw_format_is_afbc_only(u32 format) +{ + switch (format) { + case DRM_FORMAT_VUY888: + case DRM_FORMAT_VUY101010: + case DRM_FORMAT_YUV420_8BIT: + case DRM_FORMAT_YUV420_10BIT: + return true; + default: + return false; + } +} + +static void malidp_hw_clear_irq(struct malidp_hw_device *hwdev, u8 block, u32 irq) +{ + u32 base = malidp_get_block_base(hwdev, block); + + if (hwdev->hw->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) + malidp_hw_write(hwdev, irq, base + MALIDP_REG_CLEARIRQ); + else + malidp_hw_write(hwdev, irq, base + MALIDP_REG_STATUS); +} + +static irqreturn_t malidp_de_irq(int irq, void *arg) +{ + struct drm_device *drm = arg; + struct malidp_drm *malidp = drm->dev_private; + struct malidp_hw_device *hwdev; + struct malidp_hw *hw; + const struct malidp_irq_map *de; + u32 status, mask, dc_status; + irqreturn_t ret = IRQ_NONE; + + hwdev = malidp->dev; + hw = hwdev->hw; + de = &hw->map.de_irq_map; + + /* + * if we are suspended it is likely that we were invoked because + * we share an interrupt line with some other driver, don't try + * to read the hardware registers + */ + if (hwdev->pm_suspended) + return IRQ_NONE; + + /* first handle the config valid IRQ */ + dc_status = malidp_hw_read(hwdev, hw->map.dc_base + MALIDP_REG_STATUS); + if (dc_status & hw->map.dc_irq_map.vsync_irq) { + malidp_hw_clear_irq(hwdev, MALIDP_DC_BLOCK, dc_status); + /* do we have a page flip event? 
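CONFIG_VALID firing means the hardware has latched the + * new scene, so any pending page flip event can be delivered now.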
*/ + if (malidp->event != NULL) { + spin_lock(&drm->event_lock); + drm_crtc_send_vblank_event(&malidp->crtc, malidp->event); + malidp->event = NULL; + spin_unlock(&drm->event_lock); + } + atomic_set(&malidp->config_valid, MALIDP_CONFIG_VALID_DONE); + ret = IRQ_WAKE_THREAD; + } + + status = malidp_hw_read(hwdev, MALIDP_REG_STATUS); + if (!(status & de->irq_mask)) + return ret; + + mask = malidp_hw_read(hwdev, MALIDP_REG_MASKIRQ); + /* keep the status of the enabled interrupts, plus the error bits */ + status &= (mask | de->err_mask); + if ((status & de->vsync_irq) && malidp->crtc.enabled) + drm_crtc_handle_vblank(&malidp->crtc); + +#ifdef CONFIG_DEBUG_FS + if (status & de->err_mask) { + malidp_error(malidp, &malidp->de_errors, status, + drm_crtc_vblank_count(&malidp->crtc)); + } +#endif + malidp_hw_clear_irq(hwdev, MALIDP_DE_BLOCK, status); + + return (ret == IRQ_NONE) ? IRQ_HANDLED : ret; +} + +static irqreturn_t malidp_de_irq_thread_handler(int irq, void *arg) +{ + struct drm_device *drm = arg; + struct malidp_drm *malidp = drm->dev_private; + + wake_up(&malidp->wq); + + return IRQ_HANDLED; +} + +void malidp_de_irq_hw_init(struct malidp_hw_device *hwdev) +{ + /* ensure interrupts are disabled */ + malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK, 0xffffffff); + malidp_hw_clear_irq(hwdev, MALIDP_DE_BLOCK, 0xffffffff); + malidp_hw_disable_irq(hwdev, MALIDP_DC_BLOCK, 0xffffffff); + malidp_hw_clear_irq(hwdev, MALIDP_DC_BLOCK, 0xffffffff); + + /* first enable the DC block IRQs */ + malidp_hw_enable_irq(hwdev, MALIDP_DC_BLOCK, + hwdev->hw->map.dc_irq_map.irq_mask); + + /* now enable the DE block IRQs */ + malidp_hw_enable_irq(hwdev, MALIDP_DE_BLOCK, + hwdev->hw->map.de_irq_map.irq_mask); +} + +int malidp_de_irq_init(struct drm_device *drm, int irq) +{ + struct malidp_drm *malidp = drm->dev_private; + struct malidp_hw_device *hwdev = malidp->dev; + int ret; + + /* ensure interrupts are disabled */ + malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK, 0xffffffff); + malidp_hw_clear_irq(hwdev, MALIDP_DE_BLOCK, 0xffffffff); + malidp_hw_disable_irq(hwdev, MALIDP_DC_BLOCK, 0xffffffff); + malidp_hw_clear_irq(hwdev, MALIDP_DC_BLOCK, 0xffffffff); + + ret = devm_request_threaded_irq(drm->dev, irq, malidp_de_irq, + malidp_de_irq_thread_handler, + IRQF_SHARED, "malidp-de", drm); + if (ret < 0) { + DRM_ERROR("failed to install DE IRQ handler\n"); + return ret; + } + + malidp_de_irq_hw_init(hwdev); + + return 0; +} + +void malidp_de_irq_fini(struct malidp_hw_device *hwdev) +{ + malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK, + hwdev->hw->map.de_irq_map.irq_mask); + malidp_hw_disable_irq(hwdev, MALIDP_DC_BLOCK, + hwdev->hw->map.dc_irq_map.irq_mask); +} + +static irqreturn_t malidp_se_irq(int irq, void *arg) +{ + struct drm_device *drm = arg; + struct malidp_drm *malidp = drm->dev_private; + struct malidp_hw_device *hwdev = malidp->dev; + struct malidp_hw *hw = hwdev->hw; + const struct malidp_irq_map *se = &hw->map.se_irq_map; + u32 status, mask; + + /* + * if we are suspended it is likely that we were invoked because + * we share an interrupt line with some other driver, don't try + * to read the hardware registers + */ + if (hwdev->pm_suspended) + return IRQ_NONE; + + status = malidp_hw_read(hwdev, hw->map.se_base + MALIDP_REG_STATUS); + if (!(status & (se->irq_mask | se->err_mask))) + return IRQ_NONE; + +#ifdef CONFIG_DEBUG_FS + if (status & se->err_mask) + malidp_error(malidp, &malidp->se_errors, status, + drm_crtc_vblank_count(&malidp->crtc)); +#endif + mask = malidp_hw_read(hwdev, hw->map.se_base + 
MALIDP_REG_MASKIRQ); + status &= mask; + + if (status & se->vsync_irq) { + switch (hwdev->mw_state) { + case MW_ONESHOT: + drm_writeback_signal_completion(&malidp->mw_connector, 0); + break; + case MW_STOP: + drm_writeback_signal_completion(&malidp->mw_connector, 0); + /* disable writeback after stop */ + hwdev->mw_state = MW_NOT_ENABLED; + break; + case MW_RESTART: + drm_writeback_signal_completion(&malidp->mw_connector, 0); + fallthrough; /* to a new start */ + case MW_START: + /* writeback started, need to emulate one-shot mode */ + hw->disable_memwrite(hwdev); + /* + * only set config_valid HW bit if there is no other update + * in progress or if we raced ahead of the DE IRQ handler + * and config_valid flag will not be updated until later + */ + status = malidp_hw_read(hwdev, hw->map.dc_base + MALIDP_REG_STATUS); + if ((atomic_read(&malidp->config_valid) != MALIDP_CONFIG_START) || + (status & hw->map.dc_irq_map.vsync_irq)) + hw->set_config_valid(hwdev, 1); + break; + } + } + + malidp_hw_clear_irq(hwdev, MALIDP_SE_BLOCK, status); + + return IRQ_HANDLED; +} + +void malidp_se_irq_hw_init(struct malidp_hw_device *hwdev) +{ + /* ensure interrupts are disabled */ + malidp_hw_disable_irq(hwdev, MALIDP_SE_BLOCK, 0xffffffff); + malidp_hw_clear_irq(hwdev, MALIDP_SE_BLOCK, 0xffffffff); + + malidp_hw_enable_irq(hwdev, MALIDP_SE_BLOCK, + hwdev->hw->map.se_irq_map.irq_mask); +} + +static irqreturn_t malidp_se_irq_thread_handler(int irq, void *arg) +{ + return IRQ_HANDLED; +} + +int malidp_se_irq_init(struct drm_device *drm, int irq) +{ + struct malidp_drm *malidp = drm->dev_private; + struct malidp_hw_device *hwdev = malidp->dev; + int ret; + + /* ensure interrupts are disabled */ + malidp_hw_disable_irq(hwdev, MALIDP_SE_BLOCK, 0xffffffff); + malidp_hw_clear_irq(hwdev, MALIDP_SE_BLOCK, 0xffffffff); + + ret = devm_request_threaded_irq(drm->dev, irq, malidp_se_irq, + malidp_se_irq_thread_handler, + IRQF_SHARED, "malidp-se", drm); + if (ret < 0) { + DRM_ERROR("failed to install SE IRQ handler\n"); + return ret; + } + + hwdev->mw_state = MW_NOT_ENABLED; + malidp_se_irq_hw_init(hwdev); + + return 0; +} + +void malidp_se_irq_fini(struct malidp_hw_device *hwdev) +{ + malidp_hw_disable_irq(hwdev, MALIDP_SE_BLOCK, + hwdev->hw->map.se_irq_map.irq_mask); +} diff --git a/drivers/gpu/drm/arm/malidp_hw.h b/drivers/gpu/drm/arm/malidp_hw.h new file mode 100644 index 000000000..e4c36bc90 --- /dev/null +++ b/drivers/gpu/drm/arm/malidp_hw.h @@ -0,0 +1,412 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * + * (C) COPYRIGHT 2013-2016 ARM Limited. All rights reserved. + * + * ARM Mali DP hardware manipulation routines.
+ */ + +#ifndef __MALIDP_HW_H__ +#define __MALIDP_HW_H__ + +#include <linux/bitops.h> +#include "malidp_regs.h" + +struct videomode; +struct clk; + +/* Mali DP IP blocks */ +enum { + MALIDP_DE_BLOCK = 0, + MALIDP_SE_BLOCK, + MALIDP_DC_BLOCK +}; + +/* Mali DP layer IDs */ +enum { + DE_VIDEO1 = BIT(0), + DE_GRAPHICS1 = BIT(1), + DE_GRAPHICS2 = BIT(2), /* used only in DP500 */ + DE_VIDEO2 = BIT(3), + DE_SMART = BIT(4), + SE_MEMWRITE = BIT(5), +}; + +enum rotation_features { + ROTATE_NONE, /* does not support rotation at all */ + ROTATE_ANY, /* supports rotation on any buffers */ + ROTATE_COMPRESSED, /* supports rotation only on compressed buffers */ +}; + +struct malidp_format_id { + u32 format; /* DRM fourcc */ + u8 layer; /* bitmask of layers supporting it */ + u8 id; /* used internally */ +}; + +#define MALIDP_INVALID_FORMAT_ID 0xff + +/* + * hide the differences between register maps + * by using a common structure to hold the + * base register offsets + */ + +struct malidp_irq_map { + u32 irq_mask; /* mask of IRQs that can be enabled in the block */ + u32 vsync_irq; /* IRQ bit used for signaling during VSYNC */ + u32 err_mask; /* mask of bits that represent errors */ +}; + +struct malidp_layer { + u16 id; /* layer ID */ + u16 base; /* address offset for the register bank */ + u16 ptr; /* address offset for the pointer register */ + u16 stride_offset; /* offset to the first stride register. */ + s16 yuv2rgb_offset; /* offset to the YUV->RGB matrix entries */ + u16 mmu_ctrl_offset; /* offset to the MMU control register */ + enum rotation_features rot; /* type of rotation supported */ + /* address offset for the AFBC decoder registers */ + u16 afbc_decoder_offset; +}; + +enum malidp_scaling_coeff_set { + MALIDP_UPSCALING_COEFFS = 1, + MALIDP_DOWNSCALING_1_5_COEFFS = 2, + MALIDP_DOWNSCALING_2_COEFFS = 3, + MALIDP_DOWNSCALING_2_75_COEFFS = 4, + MALIDP_DOWNSCALING_4_COEFFS = 5, +}; + +struct malidp_se_config { + u8 scale_enable : 1; + u8 enhancer_enable : 1; + u8 hcoeff : 3; + u8 vcoeff : 3; + u8 plane_src_id; + u16 input_w, input_h; + u16 output_w, output_h; + u32 h_init_phase, h_delta_phase; + u32 v_init_phase, v_delta_phase; +}; + +/* regmap features */ +#define MALIDP_REGMAP_HAS_CLEARIRQ BIT(0) +#define MALIDP_DEVICE_AFBC_SUPPORT_SPLIT BIT(1) +#define MALIDP_DEVICE_AFBC_YUV_420_10_SUPPORT_SPLIT BIT(2) +#define MALIDP_DEVICE_AFBC_YUYV_USE_422_P2 BIT(3) + +struct malidp_hw_regmap { + /* address offset of the DE register bank */ + /* is always 0x0000 */ + /* address offset of the DE coefficients registers */ + const u16 coeffs_base; + /* address offset of the SE registers bank */ + const u16 se_base; + /* address offset of the DC registers bank */ + const u16 dc_base; + + /* address offset for the output depth register */ + const u16 out_depth_base; + + /* bitmap with register map features */ + const u8 features; + + /* list of supported layers */ + const u8 n_layers; + const struct malidp_layer *layers; + + const struct malidp_irq_map de_irq_map; + const struct malidp_irq_map se_irq_map; + const struct malidp_irq_map dc_irq_map; + + /* list of supported pixel formats for each layer */ + const struct malidp_format_id *pixel_formats; + const u8 n_pixel_formats; + + /* pitch alignment requirement in bytes */ + const u8 bus_align_bytes; +}; + +/* device features */ +/* Unlike DP550/650, DP500 has 3 stride registers in its video layer. 
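One stride + * per plane lets fully planar YUV buffers use three distinct strides; + * devices without this feature program only two strides and reuse the + * second one for the third plane.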
*/ +#define MALIDP_DEVICE_LV_HAS_3_STRIDES BIT(0) + +struct malidp_hw_device; + +/* + * Static structure containing hardware specific data and pointers to + * functions that behave differently between various versions of the IP. + */ +struct malidp_hw { + const struct malidp_hw_regmap map; + + /* + * Validate the driver instance against the hardware bits + */ + int (*query_hw)(struct malidp_hw_device *hwdev); + + /* + * Set the hardware into config mode, ready to accept mode changes + */ + void (*enter_config_mode)(struct malidp_hw_device *hwdev); + + /* + * Tell hardware to exit configuration mode + */ + void (*leave_config_mode)(struct malidp_hw_device *hwdev); + + /* + * Query if hardware is in configuration mode + */ + bool (*in_config_mode)(struct malidp_hw_device *hwdev); + + /* + * Set/clear configuration valid flag for hardware parameters that can + * be changed outside the configuration mode to the given value. + * Hardware will use the new settings when config valid is set, + * after the end of the current buffer scanout, and will ignore + * any new values for those parameters if config valid flag is cleared + */ + void (*set_config_valid)(struct malidp_hw_device *hwdev, u8 value); + + /* + * Set a new mode in hardware. Requires the hardware to be in + * configuration mode before this function is called. + */ + void (*modeset)(struct malidp_hw_device *hwdev, struct videomode *m); + + /* + * Calculate the required rotation memory given the active area + * and the buffer format. + */ + int (*rotmem_required)(struct malidp_hw_device *hwdev, u16 w, u16 h, + u32 fmt, bool has_modifier); + + int (*se_set_scaling_coeffs)(struct malidp_hw_device *hwdev, + struct malidp_se_config *se_config, + struct malidp_se_config *old_config); + + long (*se_calc_mclk)(struct malidp_hw_device *hwdev, + struct malidp_se_config *se_config, + struct videomode *vm); + /* + * Enable writing to memory the content of the next frame + * @param hwdev - malidp_hw_device structure containing the HW description + * @param addrs - array of addresses for each plane + * @param pitches - array of pitches for each plane + * @param num_planes - number of planes to be written + * @param w - width of the output frame + * @param h - height of the output frame + * @param fmt_id - internal format ID of output buffer + */ + int (*enable_memwrite)(struct malidp_hw_device *hwdev, dma_addr_t *addrs, + s32 *pitches, int num_planes, u16 w, u16 h, u32 fmt_id, + const s16 *rgb2yuv_coeffs); + + /* + * Disable the writing to memory of the next frame's content. + */ + void (*disable_memwrite)(struct malidp_hw_device *hwdev); + + u8 features; +}; + +/* Supported variants of the hardware */ +enum { + MALIDP_500 = 0, + MALIDP_550, + MALIDP_650, + /* keep the next entry last */ + MALIDP_MAX_DEVICES +}; + +extern const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES]; + +/* + * Structure used by the driver during runtime operation. 
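It couples the + * static malidp_hw description with per-device resources (register + * mapping, clocks) and runtime state.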
 + */ +struct malidp_hw_device { + struct malidp_hw *hw; + void __iomem *regs; + + /* APB clock */ + struct clk *pclk; + /* AXI clock */ + struct clk *aclk; + /* main clock for display core */ + struct clk *mclk; + /* pixel clock for display core */ + struct clk *pxlclk; + + u8 min_line_size; + u16 max_line_size; + u32 output_color_depth; + + /* track the device PM state */ + bool pm_suspended; + + /* track the SE memory writeback state */ + u8 mw_state; + + /* size of memory used for rotating layers, up to two banks available */ + u32 rotation_memory[2]; + + /* priority level of RQOS register used to drive the ARQOS signal */ + u32 arqos_value; +}; + +static inline u32 malidp_hw_read(struct malidp_hw_device *hwdev, u32 reg) +{ + WARN_ON(hwdev->pm_suspended); + return readl(hwdev->regs + reg); +} + +static inline void malidp_hw_write(struct malidp_hw_device *hwdev, + u32 value, u32 reg) +{ + WARN_ON(hwdev->pm_suspended); + writel(value, hwdev->regs + reg); +} + +static inline void malidp_hw_setbits(struct malidp_hw_device *hwdev, + u32 mask, u32 reg) +{ + u32 data = malidp_hw_read(hwdev, reg); + + data |= mask; + malidp_hw_write(hwdev, data, reg); +} + +static inline void malidp_hw_clearbits(struct malidp_hw_device *hwdev, + u32 mask, u32 reg) +{ + u32 data = malidp_hw_read(hwdev, reg); + + data &= ~mask; + malidp_hw_write(hwdev, data, reg); +} + +static inline u32 malidp_get_block_base(struct malidp_hw_device *hwdev, + u8 block) +{ + switch (block) { + case MALIDP_SE_BLOCK: + return hwdev->hw->map.se_base; + case MALIDP_DC_BLOCK: + return hwdev->hw->map.dc_base; + } + + return 0; +} + +static inline void malidp_hw_disable_irq(struct malidp_hw_device *hwdev, + u8 block, u32 irq) +{ + u32 base = malidp_get_block_base(hwdev, block); + + malidp_hw_clearbits(hwdev, irq, base + MALIDP_REG_MASKIRQ); +} + +static inline void malidp_hw_enable_irq(struct malidp_hw_device *hwdev, + u8 block, u32 irq) +{ + u32 base = malidp_get_block_base(hwdev, block); + + malidp_hw_setbits(hwdev, irq, base + MALIDP_REG_MASKIRQ); +} + +int malidp_de_irq_init(struct drm_device *drm, int irq); +void malidp_se_irq_hw_init(struct malidp_hw_device *hwdev); +void malidp_de_irq_hw_init(struct malidp_hw_device *hwdev); +void malidp_de_irq_fini(struct malidp_hw_device *hwdev); +int malidp_se_irq_init(struct drm_device *drm, int irq); +void malidp_se_irq_fini(struct malidp_hw_device *hwdev); + +u8 malidp_hw_get_format_id(const struct malidp_hw_regmap *map, + u8 layer_id, u32 format, bool has_modifier); + +int malidp_format_get_bpp(u32 fmt); + +static inline u8 malidp_hw_get_pitch_align(struct malidp_hw_device *hwdev, bool rotated) +{ + /* + * only hardware that cannot do 8-byte bus alignment has further + * constraints on rotated planes + */ + if (hwdev->hw->map.bus_align_bytes == 8) + return 8; + else + return hwdev->hw->map.bus_align_bytes << (rotated ? 2 : 0); +} + +/* U16.16 */ +#define FP_1_00000 0x00010000 /* 1.0 */ +#define FP_0_66667 0x0000AAAA /* 0.6667 = 1/1.5 */ +#define FP_0_50000 0x00008000 /* 0.5 = 1/2 */ +#define FP_0_36363 0x00005D17 /* 0.36363 = 1/2.75 */ +#define FP_0_25000 0x00004000 /* 0.25 = 1/4 */ + +static inline enum malidp_scaling_coeff_set +malidp_se_select_coeffs(u32 upscale_factor) +{ + return (upscale_factor >= FP_1_00000) ? MALIDP_UPSCALING_COEFFS : + (upscale_factor >= FP_0_66667) ? MALIDP_DOWNSCALING_1_5_COEFFS : + (upscale_factor >= FP_0_50000) ? MALIDP_DOWNSCALING_2_COEFFS : + (upscale_factor >= FP_0_36363) ?
MALIDP_DOWNSCALING_2_75_COEFFS : + MALIDP_DOWNSCALING_4_COEFFS; +} + +#undef FP_0_25000 +#undef FP_0_36363 +#undef FP_0_50000 +#undef FP_0_66667 +#undef FP_1_00000 + +static inline void malidp_se_set_enh_coeffs(struct malidp_hw_device *hwdev) +{ + static const s32 enhancer_coeffs[] = { + -8, -8, -8, -8, 128, -8, -8, -8, -8 + }; + u32 val = MALIDP_SE_SET_ENH_LIMIT_LOW(MALIDP_SE_ENH_LOW_LEVEL) | + MALIDP_SE_SET_ENH_LIMIT_HIGH(MALIDP_SE_ENH_HIGH_LEVEL); + u32 image_enh = hwdev->hw->map.se_base + + ((hwdev->hw->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ? + 0x10 : 0xC) + MALIDP_SE_IMAGE_ENH; + u32 enh_coeffs = image_enh + MALIDP_SE_ENH_COEFF0; + int i; + + malidp_hw_write(hwdev, val, image_enh); + for (i = 0; i < ARRAY_SIZE(enhancer_coeffs); ++i) + malidp_hw_write(hwdev, enhancer_coeffs[i], enh_coeffs + i * 4); +} + +/* + * background color components are defined as 12bits values, + * they will be shifted right when stored on hardware that + * supports only 8bits per channel + */ +#define MALIDP_BGND_COLOR_R 0x000 +#define MALIDP_BGND_COLOR_G 0x000 +#define MALIDP_BGND_COLOR_B 0x000 + +#define MALIDP_COLORADJ_NUM_COEFFS 12 +#define MALIDP_COEFFTAB_NUM_COEFFS 64 + +#define MALIDP_GAMMA_LUT_SIZE 4096 + +#define AFBC_SIZE_MASK AFBC_FORMAT_MOD_BLOCK_SIZE_MASK +#define AFBC_SIZE_16X16 AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 +#define AFBC_YTR AFBC_FORMAT_MOD_YTR +#define AFBC_SPARSE AFBC_FORMAT_MOD_SPARSE +#define AFBC_CBR AFBC_FORMAT_MOD_CBR +#define AFBC_SPLIT AFBC_FORMAT_MOD_SPLIT +#define AFBC_TILED AFBC_FORMAT_MOD_TILED +#define AFBC_SC AFBC_FORMAT_MOD_SC + +#define AFBC_MOD_VALID_BITS (AFBC_SIZE_MASK | AFBC_YTR | AFBC_SPLIT | \ + AFBC_SPARSE | AFBC_CBR | AFBC_TILED | AFBC_SC) + +extern const u64 malidp_format_modifiers[]; + +#endif /* __MALIDP_HW_H__ */ diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c new file mode 100644 index 000000000..7d0e7b031 --- /dev/null +++ b/drivers/gpu/drm/arm/malidp_mw.c @@ -0,0 +1,272 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * (C) COPYRIGHT 2016 ARM Limited. All rights reserved. 
+ * Author: Brian Starkey <brian.starkey@arm.com> + * + * ARM Mali DP Writeback connector implementation + */ + +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_crtc.h> +#include <drm/drm_fb_cma_helper.h> +#include <drm/drm_fourcc.h> +#include <drm/drm_gem_cma_helper.h> +#include <drm/drm_probe_helper.h> +#include <drm/drm_writeback.h> + +#include "malidp_drv.h" +#include "malidp_hw.h" +#include "malidp_mw.h" + +#define to_mw_state(_state) (struct malidp_mw_connector_state *)(_state) + +struct malidp_mw_connector_state { + struct drm_connector_state base; + dma_addr_t addrs[2]; + s32 pitches[2]; + u8 format; + u8 n_planes; + bool rgb2yuv_initialized; + const s16 *rgb2yuv_coeffs; +}; + +static int malidp_mw_connector_get_modes(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + + return drm_add_modes_noedid(connector, dev->mode_config.max_width, + dev->mode_config.max_height); +} + +static enum drm_mode_status +malidp_mw_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + struct drm_device *dev = connector->dev; + struct drm_mode_config *mode_config = &dev->mode_config; + int w = mode->hdisplay, h = mode->vdisplay; + + if ((w < mode_config->min_width) || (w > mode_config->max_width)) + return MODE_BAD_HVALUE; + + if ((h < mode_config->min_height) || (h > mode_config->max_height)) + return MODE_BAD_VVALUE; + + return MODE_OK; +} + +static const struct drm_connector_helper_funcs malidp_mw_connector_helper_funcs = { + .get_modes = malidp_mw_connector_get_modes, + .mode_valid = malidp_mw_connector_mode_valid, +}; + +static void malidp_mw_connector_reset(struct drm_connector *connector) +{ + struct malidp_mw_connector_state *mw_state = + kzalloc(sizeof(*mw_state), GFP_KERNEL); + + if (connector->state) + __drm_atomic_helper_connector_destroy_state(connector->state); + + kfree(connector->state); + __drm_atomic_helper_connector_reset(connector, &mw_state->base); +} + +static enum drm_connector_status +malidp_mw_connector_detect(struct drm_connector *connector, bool force) +{ + return connector_status_connected; +} + +static void malidp_mw_connector_destroy(struct drm_connector *connector) +{ + drm_connector_cleanup(connector); +} + +static struct drm_connector_state * +malidp_mw_connector_duplicate_state(struct drm_connector *connector) +{ + struct malidp_mw_connector_state *mw_state, *mw_current_state; + + if (WARN_ON(!connector->state)) + return NULL; + + mw_state = kzalloc(sizeof(*mw_state), GFP_KERNEL); + if (!mw_state) + return NULL; + + mw_current_state = to_mw_state(connector->state); + mw_state->rgb2yuv_coeffs = mw_current_state->rgb2yuv_coeffs; + mw_state->rgb2yuv_initialized = mw_current_state->rgb2yuv_initialized; + + __drm_atomic_helper_connector_duplicate_state(connector, &mw_state->base); + + return &mw_state->base; +} + +static const struct drm_connector_funcs malidp_mw_connector_funcs = { + .reset = malidp_mw_connector_reset, + .detect = malidp_mw_connector_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = malidp_mw_connector_destroy, + .atomic_duplicate_state = malidp_mw_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static const s16 rgb2yuv_coeffs_bt709_limited[MALIDP_COLORADJ_NUM_COEFFS] = { + 47, 157, 16, + -26, -87, 112, + 112, -102, -10, + 16, 128, 128 +}; + +static int +malidp_mw_encoder_atomic_check(struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct 
drm_connector_state *conn_state) +{ + struct malidp_mw_connector_state *mw_state = to_mw_state(conn_state); + struct malidp_drm *malidp = encoder->dev->dev_private; + struct drm_framebuffer *fb; + int i, n_planes; + + if (!conn_state->writeback_job) + return 0; + + fb = conn_state->writeback_job->fb; + if ((fb->width != crtc_state->mode.hdisplay) || + (fb->height != crtc_state->mode.vdisplay)) { + DRM_DEBUG_KMS("Invalid framebuffer size %ux%u\n", + fb->width, fb->height); + return -EINVAL; + } + + if (fb->modifier) { + DRM_DEBUG_KMS("Writeback framebuffer does not support modifiers\n"); + return -EINVAL; + } + + mw_state->format = + malidp_hw_get_format_id(&malidp->dev->hw->map, SE_MEMWRITE, + fb->format->format, !!fb->modifier); + if (mw_state->format == MALIDP_INVALID_FORMAT_ID) { + struct drm_format_name_buf format_name; + + DRM_DEBUG_KMS("Invalid pixel format %s\n", + drm_get_format_name(fb->format->format, + &format_name)); + return -EINVAL; + } + + n_planes = fb->format->num_planes; + for (i = 0; i < n_planes; i++) { + struct drm_gem_cma_object *obj = drm_fb_cma_get_gem_obj(fb, i); + /* memory write buffers are never rotated */ + u8 alignment = malidp_hw_get_pitch_align(malidp->dev, 0); + + if (fb->pitches[i] & (alignment - 1)) { + DRM_DEBUG_KMS("Invalid pitch %u for plane %d\n", + fb->pitches[i], i); + return -EINVAL; + } + mw_state->pitches[i] = fb->pitches[i]; + mw_state->addrs[i] = obj->paddr + fb->offsets[i]; + } + mw_state->n_planes = n_planes; + + if (fb->format->is_yuv) + mw_state->rgb2yuv_coeffs = rgb2yuv_coeffs_bt709_limited; + + return 0; +} + +static const struct drm_encoder_helper_funcs malidp_mw_encoder_helper_funcs = { + .atomic_check = malidp_mw_encoder_atomic_check, +}; + +static u32 *get_writeback_formats(struct malidp_drm *malidp, int *n_formats) +{ + const struct malidp_hw_regmap *map = &malidp->dev->hw->map; + u32 *formats; + int n, i; + + formats = kcalloc(map->n_pixel_formats, sizeof(*formats), + GFP_KERNEL); + if (!formats) + return NULL; + + for (n = 0, i = 0; i < map->n_pixel_formats; i++) { + if (map->pixel_formats[i].layer & SE_MEMWRITE) + formats[n++] = map->pixel_formats[i].format; + } + + *n_formats = n; + + return formats; +} + +int malidp_mw_connector_init(struct drm_device *drm) +{ + struct malidp_drm *malidp = drm->dev_private; + u32 *formats; + int ret, n_formats; + + if (!malidp->dev->hw->enable_memwrite) + return 0; + + malidp->mw_connector.encoder.possible_crtcs = 1 << drm_crtc_index(&malidp->crtc); + drm_connector_helper_add(&malidp->mw_connector.base, + &malidp_mw_connector_helper_funcs); + + formats = get_writeback_formats(malidp, &n_formats); + if (!formats) + return -ENOMEM; + + ret = drm_writeback_connector_init(drm, &malidp->mw_connector, + &malidp_mw_connector_funcs, + &malidp_mw_encoder_helper_funcs, + formats, n_formats); + kfree(formats); + if (ret) + return ret; + + return 0; +} + +void malidp_mw_atomic_commit(struct drm_device *drm, + struct drm_atomic_state *old_state) +{ + struct malidp_drm *malidp = drm->dev_private; + struct drm_writeback_connector *mw_conn = &malidp->mw_connector; + struct drm_connector_state *conn_state = mw_conn->base.state; + struct malidp_hw_device *hwdev = malidp->dev; + struct malidp_mw_connector_state *mw_state; + + if (!conn_state) + return; + + mw_state = to_mw_state(conn_state); + + if (conn_state->writeback_job) { + struct drm_framebuffer *fb = conn_state->writeback_job->fb; + + DRM_DEV_DEBUG_DRIVER(drm->dev, + "Enable memwrite %ux%u:%d %pad fmt: %u\n", + fb->width, fb->height, + 
mw_state->pitches[0], + &mw_state->addrs[0], + mw_state->format); + + drm_writeback_queue_job(mw_conn, conn_state); + hwdev->hw->enable_memwrite(hwdev, mw_state->addrs, + mw_state->pitches, mw_state->n_planes, + fb->width, fb->height, mw_state->format, + !mw_state->rgb2yuv_initialized ? + mw_state->rgb2yuv_coeffs : NULL); + mw_state->rgb2yuv_initialized = !!mw_state->rgb2yuv_coeffs; + } else { + DRM_DEV_DEBUG_DRIVER(drm->dev, "Disable memwrite\n"); + hwdev->hw->disable_memwrite(hwdev); + } +} diff --git a/drivers/gpu/drm/arm/malidp_mw.h b/drivers/gpu/drm/arm/malidp_mw.h new file mode 100644 index 000000000..19a007676 --- /dev/null +++ b/drivers/gpu/drm/arm/malidp_mw.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * (C) COPYRIGHT 2016 ARM Limited. All rights reserved. + * Author: Brian Starkey <brian.starkey@arm.com> + * + */ + +#ifndef __MALIDP_MW_H__ +#define __MALIDP_MW_H__ + +int malidp_mw_connector_init(struct drm_device *drm); +void malidp_mw_atomic_commit(struct drm_device *drm, + struct drm_atomic_state *old_state); +#endif diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c new file mode 100644 index 000000000..24604b410 --- /dev/null +++ b/drivers/gpu/drm/arm/malidp_planes.c @@ -0,0 +1,1045 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * (C) COPYRIGHT 2016 ARM Limited. All rights reserved. + * Author: Liviu Dudau <Liviu.Dudau@arm.com> + * + * ARM Mali DP plane manipulation routines. + */ + +#include <linux/iommu.h> +#include <linux/platform_device.h> + +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_drv.h> +#include <drm/drm_fb_cma_helper.h> +#include <drm/drm_fourcc.h> +#include <drm/drm_gem_cma_helper.h> +#include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_plane_helper.h> +#include <drm/drm_print.h> + +#include "malidp_hw.h" +#include "malidp_drv.h" + +/* Layer specific register offsets */ +#define MALIDP_LAYER_FORMAT 0x000 +#define LAYER_FORMAT_MASK 0x3f +#define MALIDP_LAYER_CONTROL 0x004 +#define LAYER_ENABLE (1 << 0) +#define LAYER_FLOWCFG_MASK 7 +#define LAYER_FLOWCFG(x) (((x) & LAYER_FLOWCFG_MASK) << 1) +#define LAYER_FLOWCFG_SCALE_SE 3 +#define LAYER_ROT_OFFSET 8 +#define LAYER_H_FLIP (1 << 10) +#define LAYER_V_FLIP (1 << 11) +#define LAYER_ROT_MASK (0xf << 8) +#define LAYER_COMP_MASK (0x3 << 12) +#define LAYER_COMP_PIXEL (0x3 << 12) +#define LAYER_COMP_PLANE (0x2 << 12) +#define LAYER_PMUL_ENABLE (0x1 << 14) +#define LAYER_ALPHA_OFFSET (16) +#define LAYER_ALPHA_MASK (0xff) +#define LAYER_ALPHA(x) (((x) & LAYER_ALPHA_MASK) << LAYER_ALPHA_OFFSET) +#define MALIDP_LAYER_COMPOSE 0x008 +#define MALIDP_LAYER_SIZE 0x00c +#define LAYER_H_VAL(x) (((x) & 0x1fff) << 0) +#define LAYER_V_VAL(x) (((x) & 0x1fff) << 16) +#define MALIDP_LAYER_COMP_SIZE 0x010 +#define MALIDP_LAYER_OFFSET 0x014 +#define MALIDP550_LS_ENABLE 0x01c +#define MALIDP550_LS_R1_IN_SIZE 0x020 + +#define MODIFIERS_COUNT_MAX 15 + +/* + * This 4-entry look-up-table is used to determine the full 8-bit alpha value + * for formats with 1- or 2-bit alpha channels. + * We set it to give 100%/0% opacity for 1-bit formats and 100%/66%/33%/0% + * opacity for 2-bit formats. 
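 + * Read as four bytes, low byte first, 0xffaa5500 gives the LUT entries + * 0x00, 0x55, 0xaa and 0xff, i.e. the 0%/33%/66%/100% opacity steps + * listed above.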
+ */ +#define MALIDP_ALPHA_LUT 0xffaa5500 + +/* page sizes the MMU prefetcher can support */ +#define MALIDP_MMU_PREFETCH_PARTIAL_PGSIZES (SZ_4K | SZ_64K) +#define MALIDP_MMU_PREFETCH_FULL_PGSIZES (SZ_1M | SZ_2M) + +/* readahead for partial-frame prefetch */ +#define MALIDP_MMU_PREFETCH_READAHEAD 8 + +static void malidp_de_plane_destroy(struct drm_plane *plane) +{ + struct malidp_plane *mp = to_malidp_plane(plane); + + drm_plane_cleanup(plane); + kfree(mp); +} + +/* + * Replicate what the default ->reset hook does: free the state pointer and + * allocate a new empty object. We just need enough space to store + * a malidp_plane_state instead of a drm_plane_state. + */ +static void malidp_plane_reset(struct drm_plane *plane) +{ + struct malidp_plane_state *state = to_malidp_plane_state(plane->state); + + if (state) + __drm_atomic_helper_plane_destroy_state(&state->base); + kfree(state); + plane->state = NULL; + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (state) + __drm_atomic_helper_plane_reset(plane, &state->base); +} + +static struct +drm_plane_state *malidp_duplicate_plane_state(struct drm_plane *plane) +{ + struct malidp_plane_state *state, *m_state; + + if (!plane->state) + return NULL; + + state = kmalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return NULL; + + m_state = to_malidp_plane_state(plane->state); + __drm_atomic_helper_plane_duplicate_state(plane, &state->base); + state->rotmem_size = m_state->rotmem_size; + state->format = m_state->format; + state->n_planes = m_state->n_planes; + + state->mmu_prefetch_mode = m_state->mmu_prefetch_mode; + state->mmu_prefetch_pgsize = m_state->mmu_prefetch_pgsize; + + return &state->base; +} + +static void malidp_destroy_plane_state(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct malidp_plane_state *m_state = to_malidp_plane_state(state); + + __drm_atomic_helper_plane_destroy_state(state); + kfree(m_state); +} + +static const char * const prefetch_mode_names[] = { + [MALIDP_PREFETCH_MODE_NONE] = "MMU_PREFETCH_NONE", + [MALIDP_PREFETCH_MODE_PARTIAL] = "MMU_PREFETCH_PARTIAL", + [MALIDP_PREFETCH_MODE_FULL] = "MMU_PREFETCH_FULL", +}; + +static void malidp_plane_atomic_print_state(struct drm_printer *p, + const struct drm_plane_state *state) +{ + struct malidp_plane_state *ms = to_malidp_plane_state(state); + + drm_printf(p, "\trotmem_size=%u\n", ms->rotmem_size); + drm_printf(p, "\tformat_id=%u\n", ms->format); + drm_printf(p, "\tn_planes=%u\n", ms->n_planes); + drm_printf(p, "\tmmu_prefetch_mode=%s\n", + prefetch_mode_names[ms->mmu_prefetch_mode]); + drm_printf(p, "\tmmu_prefetch_pgsize=%d\n", ms->mmu_prefetch_pgsize); +} + +bool malidp_format_mod_supported(struct drm_device *drm, + u32 format, u64 modifier) +{ + const struct drm_format_info *info; + const u64 *modifiers; + struct malidp_drm *malidp = drm->dev_private; + const struct malidp_hw_regmap *map = &malidp->dev->hw->map; + + if (WARN_ON(modifier == DRM_FORMAT_MOD_INVALID)) + return false; + + /* Some pixel formats are supported without any modifier */ + if (modifier == DRM_FORMAT_MOD_LINEAR) { + /* + * However these pixel formats need to be supported with + * modifiers only + */ + return !malidp_hw_format_is_afbc_only(format); + } + + if ((modifier >> 56) != DRM_FORMAT_MOD_VENDOR_ARM) { + DRM_ERROR("Unknown modifier (not Arm)\n"); + return false; + } + + if (modifier & + ~DRM_FORMAT_MOD_ARM_AFBC(AFBC_MOD_VALID_BITS)) { + DRM_DEBUG_KMS("Unsupported modifiers\n"); + return false; + } + + modifiers = malidp_format_modifiers; + + /* SPLIT buffers must use 
SPARSE layout */ + if (WARN_ON_ONCE((modifier & AFBC_SPLIT) && !(modifier & AFBC_SPARSE))) + return false; + + /* CBR only applies to YUV formats, where YTR should always be 0 */ + if (WARN_ON_ONCE((modifier & AFBC_CBR) && (modifier & AFBC_YTR))) + return false; + + while (*modifiers != DRM_FORMAT_MOD_INVALID) { + if (*modifiers == modifier) + break; + + modifiers++; + } + + /* return false if the modifier was not found */ + if (*modifiers == DRM_FORMAT_MOD_INVALID) { + DRM_DEBUG_KMS("Unsupported modifier\n"); + return false; + } + + info = drm_format_info(format); + + if (info->num_planes != 1) { + DRM_DEBUG_KMS("AFBC buffers expect one plane\n"); + return false; + } + + if (malidp_hw_format_is_linear_only(format) == true) { + DRM_DEBUG_KMS("Given format (0x%x) is supported in linear mode only\n", + format); + return false; + } + + /* + * RGB formats need to provide YTR modifier and YUV formats should not + * provide YTR modifier. + */ + if (!(info->is_yuv) != !!(modifier & AFBC_FORMAT_MOD_YTR)) { + DRM_DEBUG_KMS("AFBC_FORMAT_MOD_YTR is %s for %s formats\n", + info->is_yuv ? "disallowed" : "mandatory", + info->is_yuv ? "YUV" : "RGB"); + return false; + } + + if (modifier & AFBC_SPLIT) { + if (!info->is_yuv) { + if (info->cpp[0] <= 2) { + DRM_DEBUG_KMS("RGB formats <= 16bpp are not supported with SPLIT\n"); + return false; + } + } + + if ((info->hsub != 1) || (info->vsub != 1)) { + if (!(format == DRM_FORMAT_YUV420_10BIT && + (map->features & MALIDP_DEVICE_AFBC_YUV_420_10_SUPPORT_SPLIT))) { + DRM_DEBUG_KMS("Formats which are sub-sampled should never be split\n"); + return false; + } + } + } + + if (modifier & AFBC_CBR) { + if ((info->hsub == 1) || (info->vsub == 1)) { + DRM_DEBUG_KMS("Formats which are not sub-sampled should not have CBR set\n"); + return false; + } + } + + return true; +} + +static bool malidp_format_mod_supported_per_plane(struct drm_plane *plane, + u32 format, u64 modifier) +{ + return malidp_format_mod_supported(plane->dev, format, modifier); +} + +static const struct drm_plane_funcs malidp_de_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = malidp_de_plane_destroy, + .reset = malidp_plane_reset, + .atomic_duplicate_state = malidp_duplicate_plane_state, + .atomic_destroy_state = malidp_destroy_plane_state, + .atomic_print_state = malidp_plane_atomic_print_state, + .format_mod_supported = malidp_format_mod_supported_per_plane, +}; + +static int malidp_se_check_scaling(struct malidp_plane *mp, + struct drm_plane_state *state) +{ + struct drm_crtc_state *crtc_state = + drm_atomic_get_existing_crtc_state(state->state, state->crtc); + struct malidp_crtc_state *mc; + u32 src_w, src_h; + int ret; + + if (!crtc_state) + return -EINVAL; + + mc = to_malidp_crtc_state(crtc_state); + + ret = drm_atomic_helper_check_plane_state(state, crtc_state, + 0, INT_MAX, true, true); + if (ret) + return ret; + + if (state->rotation & MALIDP_ROTATED_MASK) { + src_w = state->src_h >> 16; + src_h = state->src_w >> 16; + } else { + src_w = state->src_w >> 16; + src_h = state->src_h >> 16; + } + + if ((state->crtc_w == src_w) && (state->crtc_h == src_h)) { + /* Scaling not necessary for this plane. */ + mc->scaled_planes_mask &= ~(mp->layer->id); + return 0; + } + + if (mp->layer->id & (DE_SMART | DE_GRAPHICS2)) + return -EINVAL; + + mc->scaled_planes_mask |= mp->layer->id; + /* Defer scaling requirements calculation to the crtc check.
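The crtc-level + * check sees scaled_planes_mask for every plane in the new state and + * can derive a single scaling engine configuration for the frame.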
*/ + return 0; +} + +static u32 malidp_get_pgsize_bitmap(struct malidp_plane *mp) +{ + u32 pgsize_bitmap = 0; + + if (iommu_present(&platform_bus_type)) { + struct iommu_domain *mmu_dom = + iommu_get_domain_for_dev(mp->base.dev->dev); + + if (mmu_dom) + pgsize_bitmap = mmu_dom->pgsize_bitmap; + } + + return pgsize_bitmap; +} + +/* + * Check if the framebuffer is entirely made up of pages at least pgsize in + * size. Only a heuristic: assumes that each scatterlist entry has been aligned + * to the largest page size smaller than its length and that the MMU maps to + * the largest page size possible. + */ +static bool malidp_check_pages_threshold(struct malidp_plane_state *ms, + u32 pgsize) +{ + int i; + + for (i = 0; i < ms->n_planes; i++) { + struct drm_gem_object *obj; + struct drm_gem_cma_object *cma_obj; + struct sg_table *sgt; + struct scatterlist *sgl; + + obj = drm_gem_fb_get_obj(ms->base.fb, i); + cma_obj = to_drm_gem_cma_obj(obj); + + if (cma_obj->sgt) + sgt = cma_obj->sgt; + else + sgt = obj->funcs->get_sg_table(obj); + + if (IS_ERR(sgt)) + return false; + + sgl = sgt->sgl; + + while (sgl) { + if (sgl->length < pgsize) { + if (!cma_obj->sgt) + kfree(sgt); + return false; + } + + sgl = sg_next(sgl); + } + if (!cma_obj->sgt) + kfree(sgt); + } + + return true; +} + +/* + * Check if it is possible to enable partial-frame MMU prefetch given the + * current format, AFBC state and rotation. + */ +static bool malidp_partial_prefetch_supported(u32 format, u64 modifier, + unsigned int rotation) +{ + bool afbc, sparse; + + /* rotation and horizontal flip not supported for partial prefetch */ + if (rotation & (DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_180 | + DRM_MODE_ROTATE_270 | DRM_MODE_REFLECT_X)) + return false; + + afbc = modifier & DRM_FORMAT_MOD_ARM_AFBC(0); + sparse = modifier & AFBC_FORMAT_MOD_SPARSE; + + switch (format) { + case DRM_FORMAT_ARGB2101010: + case DRM_FORMAT_RGBA1010102: + case DRM_FORMAT_BGRA1010102: + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_RGBA8888: + case DRM_FORMAT_BGRA8888: + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_RGBX8888: + case DRM_FORMAT_BGRX8888: + case DRM_FORMAT_RGB888: + case DRM_FORMAT_RGBA5551: + case DRM_FORMAT_RGB565: + /* always supported */ + return true; + + case DRM_FORMAT_ABGR2101010: + case DRM_FORMAT_ABGR8888: + case DRM_FORMAT_ABGR1555: + case DRM_FORMAT_BGR565: + /* supported, but if AFBC then must be sparse mode */ + return (!afbc) || (afbc && sparse); + + case DRM_FORMAT_BGR888: + /* supported, but not for AFBC */ + return !afbc; + + case DRM_FORMAT_YUYV: + case DRM_FORMAT_UYVY: + case DRM_FORMAT_NV12: + case DRM_FORMAT_YUV420: + /* not supported */ + return false; + + default: + return false; + } +} + +/* + * Select the preferred MMU prefetch mode. Full-frame prefetch is preferred as + * long as the framebuffer is all large pages. Otherwise partial-frame prefetch + * is selected as long as it is supported for the current format. The selected + * page size for prefetch is returned in pgsize_bitmap. 
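+ * For example, with an IOMMU whose pgsize_bitmap advertises 4K, 64K and 2M
+ * pages (an assumed configuration, for illustration): full-frame prefetch is
+ * tried with 2M pages first; if the framebuffer is not backed entirely by
+ * 2M pages, the code falls back to partial-frame prefetch with 4K pages,
+ * the smallest supported partial size, provided the format and rotation
+ * allow it.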
+ */ +static enum mmu_prefetch_mode malidp_mmu_prefetch_select_mode + (struct malidp_plane_state *ms, u32 *pgsize_bitmap) +{ + u32 pgsizes; + + /* get the full-frame prefetch page size(s) supported by the MMU */ + pgsizes = *pgsize_bitmap & MALIDP_MMU_PREFETCH_FULL_PGSIZES; + + while (pgsizes) { + u32 largest_pgsize = 1 << __fls(pgsizes); + + if (malidp_check_pages_threshold(ms, largest_pgsize)) { + *pgsize_bitmap = largest_pgsize; + return MALIDP_PREFETCH_MODE_FULL; + } + + pgsizes -= largest_pgsize; + } + + /* get the partial-frame prefetch page size(s) supported by the MMU */ + pgsizes = *pgsize_bitmap & MALIDP_MMU_PREFETCH_PARTIAL_PGSIZES; + + if (malidp_partial_prefetch_supported(ms->base.fb->format->format, + ms->base.fb->modifier, + ms->base.rotation)) { + /* partial prefetch using the smallest page size */ + *pgsize_bitmap = 1 << __ffs(pgsizes); + return MALIDP_PREFETCH_MODE_PARTIAL; + } + *pgsize_bitmap = 0; + return MALIDP_PREFETCH_MODE_NONE; +} + +static u32 malidp_calc_mmu_control_value(enum mmu_prefetch_mode mode, + u8 readahead, u8 n_planes, u32 pgsize) +{ + u32 mmu_ctrl = 0; + + if (mode != MALIDP_PREFETCH_MODE_NONE) { + mmu_ctrl |= MALIDP_MMU_CTRL_EN; + + if (mode == MALIDP_PREFETCH_MODE_PARTIAL) { + mmu_ctrl |= MALIDP_MMU_CTRL_MODE; + mmu_ctrl |= MALIDP_MMU_CTRL_PP_NUM_REQ(readahead); + } + + if (pgsize == SZ_64K || pgsize == SZ_2M) { + int i; + + for (i = 0; i < n_planes; i++) + mmu_ctrl |= MALIDP_MMU_CTRL_PX_PS(i); + } + } + + return mmu_ctrl; +} + +static void malidp_de_prefetch_settings(struct malidp_plane *mp, + struct malidp_plane_state *ms) +{ + if (!mp->layer->mmu_ctrl_offset) + return; + + /* get the page sizes supported by the MMU */ + ms->mmu_prefetch_pgsize = malidp_get_pgsize_bitmap(mp); + ms->mmu_prefetch_mode = + malidp_mmu_prefetch_select_mode(ms, &ms->mmu_prefetch_pgsize); +} + +static int malidp_de_plane_check(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct malidp_plane *mp = to_malidp_plane(plane); + struct malidp_plane_state *ms = to_malidp_plane_state(state); + bool rotated = state->rotation & MALIDP_ROTATED_MASK; + struct drm_framebuffer *fb; + u16 pixel_alpha = state->pixel_blend_mode; + int i, ret; + unsigned int block_w, block_h; + + if (!state->crtc || WARN_ON(!state->fb)) + return 0; + + fb = state->fb; + + ms->format = malidp_hw_get_format_id(&mp->hwdev->hw->map, + mp->layer->id, fb->format->format, + !!fb->modifier); + if (ms->format == MALIDP_INVALID_FORMAT_ID) + return -EINVAL; + + ms->n_planes = fb->format->num_planes; + for (i = 0; i < ms->n_planes; i++) { + u8 alignment = malidp_hw_get_pitch_align(mp->hwdev, rotated); + + if (((fb->pitches[i] * drm_format_info_block_height(fb->format, i)) + & (alignment - 1)) && !(fb->modifier)) { + DRM_DEBUG_KMS("Invalid pitch %u for plane %d\n", + fb->pitches[i], i); + return -EINVAL; + } + } + + block_w = drm_format_info_block_width(fb->format, 0); + block_h = drm_format_info_block_height(fb->format, 0); + if (fb->width % block_w || fb->height % block_h) { + DRM_DEBUG_KMS("Buffer width/height needs to be a multiple of tile sizes"); + return -EINVAL; + } + if ((state->src_x >> 16) % block_w || (state->src_y >> 16) % block_h) { + DRM_DEBUG_KMS("Plane src_x/src_y needs to be a multiple of tile sizes"); + return -EINVAL; + } + + if ((state->crtc_w > mp->hwdev->max_line_size) || + (state->crtc_h > mp->hwdev->max_line_size) || + (state->crtc_w < mp->hwdev->min_line_size) || + (state->crtc_h < mp->hwdev->min_line_size)) + return -EINVAL; + + /* + * DP550/650 video layers can accept 3 
plane formats only if + * fb->pitches[1] == fb->pitches[2] since they don't have a + * third plane stride register. + */ + if (ms->n_planes == 3 && + !(mp->hwdev->hw->features & MALIDP_DEVICE_LV_HAS_3_STRIDES) && + (state->fb->pitches[1] != state->fb->pitches[2])) + return -EINVAL; + + ret = malidp_se_check_scaling(mp, state); + if (ret) + return ret; + + /* validate the rotation constraints for each layer */ + if (state->rotation != DRM_MODE_ROTATE_0) { + if (mp->layer->rot == ROTATE_NONE) + return -EINVAL; + if ((mp->layer->rot == ROTATE_COMPRESSED) && !(fb->modifier)) + return -EINVAL; + /* + * packed RGB888 / BGR888 can't be rotated or flipped + * unless they are stored in a compressed way + */ + if ((fb->format->format == DRM_FORMAT_RGB888 || + fb->format->format == DRM_FORMAT_BGR888) && !(fb->modifier)) + return -EINVAL; + } + + /* SMART layer does not support AFBC */ + if (mp->layer->id == DE_SMART && fb->modifier) { + DRM_ERROR("AFBC framebuffer not supported in SMART layer"); + return -EINVAL; + } + + ms->rotmem_size = 0; + if (state->rotation & MALIDP_ROTATED_MASK) { + int val; + + val = mp->hwdev->hw->rotmem_required(mp->hwdev, state->crtc_w, + state->crtc_h, + fb->format->format, + !!(fb->modifier)); + if (val < 0) + return val; + + ms->rotmem_size = val; + } + + /* HW can't support plane + pixel blending */ + if ((state->alpha != DRM_BLEND_ALPHA_OPAQUE) && + (pixel_alpha != DRM_MODE_BLEND_PIXEL_NONE) && + fb->format->has_alpha) + return -EINVAL; + + malidp_de_prefetch_settings(mp, ms); + + return 0; +} + +static void malidp_de_set_plane_pitches(struct malidp_plane *mp, + int num_planes, unsigned int pitches[3]) +{ + int i; + int num_strides = num_planes; + + if (!mp->layer->stride_offset) + return; + + if (num_planes == 3) + num_strides = (mp->hwdev->hw->features & + MALIDP_DEVICE_LV_HAS_3_STRIDES) ? 3 : 2; + + /* + * The drm convention for pitch is that it needs to cover width * cpp, + * but our hardware wants the pitch/stride to cover all rows included + * in a tile. 
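+ * For example, a linear format has a block height of one row, so the
+ * register simply receives pitches[i]; a tiled format whose blocks span
+ * N rows is programmed with pitches[i] * N.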
+ */ + for (i = 0; i < num_strides; ++i) { + unsigned int block_h = drm_format_info_block_height(mp->base.state->fb->format, i); + + malidp_hw_write(mp->hwdev, pitches[i] * block_h, + mp->layer->base + + mp->layer->stride_offset + i * 4); + } +} + +static const s16 +malidp_yuv2rgb_coeffs[][DRM_COLOR_RANGE_MAX][MALIDP_COLORADJ_NUM_COEFFS] = { + [DRM_COLOR_YCBCR_BT601][DRM_COLOR_YCBCR_LIMITED_RANGE] = { + 1192, 0, 1634, + 1192, -401, -832, + 1192, 2066, 0, + 64, 512, 512 + }, + [DRM_COLOR_YCBCR_BT601][DRM_COLOR_YCBCR_FULL_RANGE] = { + 1024, 0, 1436, + 1024, -352, -731, + 1024, 1815, 0, + 0, 512, 512 + }, + [DRM_COLOR_YCBCR_BT709][DRM_COLOR_YCBCR_LIMITED_RANGE] = { + 1192, 0, 1836, + 1192, -218, -546, + 1192, 2163, 0, + 64, 512, 512 + }, + [DRM_COLOR_YCBCR_BT709][DRM_COLOR_YCBCR_FULL_RANGE] = { + 1024, 0, 1613, + 1024, -192, -479, + 1024, 1900, 0, + 0, 512, 512 + }, + [DRM_COLOR_YCBCR_BT2020][DRM_COLOR_YCBCR_LIMITED_RANGE] = { + 1024, 0, 1476, + 1024, -165, -572, + 1024, 1884, 0, + 0, 512, 512 + }, + [DRM_COLOR_YCBCR_BT2020][DRM_COLOR_YCBCR_FULL_RANGE] = { + 1024, 0, 1510, + 1024, -168, -585, + 1024, 1927, 0, + 0, 512, 512 + } +}; + +static void malidp_de_set_color_encoding(struct malidp_plane *plane, + enum drm_color_encoding enc, + enum drm_color_range range) +{ + unsigned int i; + + for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; i++) { + /* coefficients are signed, two's complement values */ + malidp_hw_write(plane->hwdev, malidp_yuv2rgb_coeffs[enc][range][i], + plane->layer->base + plane->layer->yuv2rgb_offset + + i * 4); + } +} + +static void malidp_de_set_mmu_control(struct malidp_plane *mp, + struct malidp_plane_state *ms) +{ + u32 mmu_ctrl; + + /* check hardware supports MMU prefetch */ + if (!mp->layer->mmu_ctrl_offset) + return; + + mmu_ctrl = malidp_calc_mmu_control_value(ms->mmu_prefetch_mode, + MALIDP_MMU_PREFETCH_READAHEAD, + ms->n_planes, + ms->mmu_prefetch_pgsize); + + malidp_hw_write(mp->hwdev, mmu_ctrl, + mp->layer->base + mp->layer->mmu_ctrl_offset); +} + +static void malidp_set_plane_base_addr(struct drm_framebuffer *fb, + struct malidp_plane *mp, + int plane_index) +{ + dma_addr_t paddr; + u16 ptr; + struct drm_plane *plane = &mp->base; + bool afbc = fb->modifier ? true : false; + + ptr = mp->layer->ptr + (plane_index << 4); + + /* + * drm_fb_cma_get_gem_addr() alters the physical base address of the + * framebuffer as per the plane's src_x, src_y co-ordinates (ie to + * take care of source cropping). + * For AFBC, this is not needed as the cropping is handled by _AD_CROP_H + * and _AD_CROP_V registers. 
+ */ + if (!afbc) { + paddr = drm_fb_cma_get_gem_addr(fb, plane->state, + plane_index); + } else { + struct drm_gem_cma_object *obj; + + obj = drm_fb_cma_get_gem_obj(fb, plane_index); + + if (WARN_ON(!obj)) + return; + paddr = obj->paddr; + } + + malidp_hw_write(mp->hwdev, lower_32_bits(paddr), ptr); + malidp_hw_write(mp->hwdev, upper_32_bits(paddr), ptr + 4); +} + +static void malidp_de_set_plane_afbc(struct drm_plane *plane) +{ + struct malidp_plane *mp; + u32 src_w, src_h, val = 0, src_x, src_y; + struct drm_framebuffer *fb = plane->state->fb; + + mp = to_malidp_plane(plane); + + /* no afbc_decoder_offset means AFBC is not supported on this plane */ + if (!mp->layer->afbc_decoder_offset) + return; + + if (!fb->modifier) { + malidp_hw_write(mp->hwdev, 0, mp->layer->afbc_decoder_offset); + return; + } + + /* convert src values from Q16 fixed point to integer */ + src_w = plane->state->src_w >> 16; + src_h = plane->state->src_h >> 16; + src_x = plane->state->src_x >> 16; + src_y = plane->state->src_y >> 16; + + val = ((fb->width - (src_x + src_w)) << MALIDP_AD_CROP_RIGHT_OFFSET) | + src_x; + malidp_hw_write(mp->hwdev, val, + mp->layer->afbc_decoder_offset + MALIDP_AD_CROP_H); + + val = ((fb->height - (src_y + src_h)) << MALIDP_AD_CROP_BOTTOM_OFFSET) | + src_y; + malidp_hw_write(mp->hwdev, val, + mp->layer->afbc_decoder_offset + MALIDP_AD_CROP_V); + + val = MALIDP_AD_EN; + if (fb->modifier & AFBC_FORMAT_MOD_SPLIT) + val |= MALIDP_AD_BS; + if (fb->modifier & AFBC_FORMAT_MOD_YTR) + val |= MALIDP_AD_YTR; + + malidp_hw_write(mp->hwdev, val, mp->layer->afbc_decoder_offset); +} + +static void malidp_de_plane_update(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct malidp_plane *mp; + struct malidp_plane_state *ms = to_malidp_plane_state(plane->state); + struct drm_plane_state *state = plane->state; + u16 pixel_alpha = state->pixel_blend_mode; + u8 plane_alpha = state->alpha >> 8; + u32 src_w, src_h, dest_w, dest_h, val; + int i; + struct drm_framebuffer *fb = plane->state->fb; + + mp = to_malidp_plane(plane); + + /* + * For AFBC framebuffer, use the framebuffer width and height for + * configuring layer input size register. + */ + if (fb->modifier) { + src_w = fb->width; + src_h = fb->height; + } else { + /* convert src values from Q16 fixed point to integer */ + src_w = state->src_w >> 16; + src_h = state->src_h >> 16; + } + + dest_w = state->crtc_w; + dest_h = state->crtc_h; + + val = malidp_hw_read(mp->hwdev, mp->layer->base); + val = (val & ~LAYER_FORMAT_MASK) | ms->format; + malidp_hw_write(mp->hwdev, val, mp->layer->base); + + for (i = 0; i < ms->n_planes; i++) + malidp_set_plane_base_addr(fb, mp, i); + + malidp_de_set_mmu_control(mp, ms); + + malidp_de_set_plane_pitches(mp, ms->n_planes, + state->fb->pitches); + + if ((plane->state->color_encoding != old_state->color_encoding) || + (plane->state->color_range != old_state->color_range)) + malidp_de_set_color_encoding(mp, plane->state->color_encoding, + plane->state->color_range); + + malidp_hw_write(mp->hwdev, LAYER_H_VAL(src_w) | LAYER_V_VAL(src_h), + mp->layer->base + MALIDP_LAYER_SIZE); + + malidp_hw_write(mp->hwdev, LAYER_H_VAL(dest_w) | LAYER_V_VAL(dest_h), + mp->layer->base + MALIDP_LAYER_COMP_SIZE); + + malidp_hw_write(mp->hwdev, LAYER_H_VAL(state->crtc_x) | + LAYER_V_VAL(state->crtc_y), + mp->layer->base + MALIDP_LAYER_OFFSET); + + if (mp->layer->id == DE_SMART) { + /* + * Enable the first rectangle in the SMART layer to be + * able to use it as a drm plane. 
+		 */
+		malidp_hw_write(mp->hwdev, 1,
+				mp->layer->base + MALIDP550_LS_ENABLE);
+		malidp_hw_write(mp->hwdev,
+				LAYER_H_VAL(src_w) | LAYER_V_VAL(src_h),
+				mp->layer->base + MALIDP550_LS_R1_IN_SIZE);
+	}
+
+	malidp_de_set_plane_afbc(plane);
+
+	/* first clear the rotation bits */
+	val = malidp_hw_read(mp->hwdev, mp->layer->base + MALIDP_LAYER_CONTROL);
+	val &= ~LAYER_ROT_MASK;
+
+	/* set up the rotation and axis flip bits */
+	if (state->rotation & DRM_MODE_ROTATE_MASK)
+		val |= ilog2(plane->state->rotation & DRM_MODE_ROTATE_MASK) <<
+		       LAYER_ROT_OFFSET;
+	if (state->rotation & DRM_MODE_REFLECT_X)
+		val |= LAYER_H_FLIP;
+	if (state->rotation & DRM_MODE_REFLECT_Y)
+		val |= LAYER_V_FLIP;
+
+	val &= ~(LAYER_COMP_MASK | LAYER_PMUL_ENABLE | LAYER_ALPHA(0xff));
+
+	if (state->alpha != DRM_BLEND_ALPHA_OPAQUE) {
+		val |= LAYER_COMP_PLANE;
+	} else if (state->fb->format->has_alpha) {
+		/* We only care about blend mode if the format has alpha */
+		switch (pixel_alpha) {
+		case DRM_MODE_BLEND_PREMULTI:
+			val |= LAYER_COMP_PIXEL | LAYER_PMUL_ENABLE;
+			break;
+		case DRM_MODE_BLEND_COVERAGE:
+			val |= LAYER_COMP_PIXEL;
+			break;
+		}
+	}
+	val |= LAYER_ALPHA(plane_alpha);
+
+	val &= ~LAYER_FLOWCFG(LAYER_FLOWCFG_MASK);
+	if (state->crtc) {
+		struct malidp_crtc_state *m =
+			to_malidp_crtc_state(state->crtc->state);
+
+		if (m->scaler_config.scale_enable &&
+		    m->scaler_config.plane_src_id == mp->layer->id)
+			val |= LAYER_FLOWCFG(LAYER_FLOWCFG_SCALE_SE);
+	}
+
+	/* set the 'enable layer' bit */
+	val |= LAYER_ENABLE;
+
+	malidp_hw_write(mp->hwdev, val,
+			mp->layer->base + MALIDP_LAYER_CONTROL);
+}
+
+static void malidp_de_plane_disable(struct drm_plane *plane,
+				    struct drm_plane_state *state)
+{
+	struct malidp_plane *mp = to_malidp_plane(plane);
+
+	malidp_hw_clearbits(mp->hwdev,
+			    LAYER_ENABLE | LAYER_FLOWCFG(LAYER_FLOWCFG_MASK),
+			    mp->layer->base + MALIDP_LAYER_CONTROL);
+}
+
+static const struct drm_plane_helper_funcs malidp_de_plane_helper_funcs = {
+	.atomic_check = malidp_de_plane_check,
+	.atomic_update = malidp_de_plane_update,
+	.atomic_disable = malidp_de_plane_disable,
+};
+
+static const uint64_t linear_only_modifiers[] = {
+	DRM_FORMAT_MOD_LINEAR,
+	DRM_FORMAT_MOD_INVALID
+};
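
The rotation field written in malidp_de_plane_update() above relies on the DRM rotation property encoding angles as single bits (DRM_MODE_ROTATE_0..270 are BIT(0)..BIT(3)), so ilog2() of the set bit yields the number of 90-degree steps. A minimal sketch of that mapping, with a hypothetical helper name, valid in this file's context:

	/* Hypothetical helper mirroring the encoding used above:
	 * DRM_MODE_ROTATE_90 == BIT(1), so ilog2() returns 1, i.e. one
	 * 90-degree step for the hardware's rotation field. */
	static u32 example_rot_steps(unsigned int rotation)
	{
		return ilog2(rotation & DRM_MODE_ROTATE_MASK);
	}
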
+int malidp_de_planes_init(struct drm_device *drm)
+{
+	struct malidp_drm *malidp = drm->dev_private;
+	const struct malidp_hw_regmap *map = &malidp->dev->hw->map;
+	struct malidp_plane *plane = NULL;
+	enum drm_plane_type plane_type;
+	unsigned long crtcs = BIT(drm->mode_config.num_crtc);
+	unsigned long flags = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_180 |
+			      DRM_MODE_ROTATE_270 | DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y;
+	unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
+				  BIT(DRM_MODE_BLEND_PREMULTI) |
+				  BIT(DRM_MODE_BLEND_COVERAGE);
+	u32 *formats;
+	int ret, i = 0, j = 0, n;
+	u64 supported_modifiers[MODIFIERS_COUNT_MAX];
+	const u64 *modifiers;
+
+	modifiers = malidp_format_modifiers;
+
+	if (!(map->features & MALIDP_DEVICE_AFBC_SUPPORT_SPLIT)) {
+		/*
+		 * Our hardware does not support SPLIT, so build the list
+		 * of supported modifiers excluding SPLIT ones.
+		 */
+		while (*modifiers != DRM_FORMAT_MOD_INVALID) {
+			if (!(*modifiers & AFBC_SPLIT))
+				supported_modifiers[j++] = *modifiers;
+
+			modifiers++;
+		}
+		supported_modifiers[j++] = DRM_FORMAT_MOD_INVALID;
+		modifiers = supported_modifiers;
+	}
+
+	formats = kcalloc(map->n_pixel_formats, sizeof(*formats), GFP_KERNEL);
+	if (!formats) {
+		ret = -ENOMEM;
+		goto cleanup;
+	}
+
+	for (i = 0; i < map->n_layers; i++) {
+		u8 id = map->layers[i].id;
+
+		plane = kzalloc(sizeof(*plane), GFP_KERNEL);
+		if (!plane) {
+			ret = -ENOMEM;
+			goto cleanup;
+		}
+
+		/* build the list of DRM supported formats based on the map */
+		for (n = 0, j = 0; j < map->n_pixel_formats; j++) {
+			if ((map->pixel_formats[j].layer & id) == id)
+				formats[n++] = map->pixel_formats[j].format;
+		}
+
+		plane_type = (i == 0) ? DRM_PLANE_TYPE_PRIMARY :
+					DRM_PLANE_TYPE_OVERLAY;
+
+		/*
+		 * All layers except the SMART layer support AFBC modifiers.
+		 */
+		ret = drm_universal_plane_init(drm, &plane->base, crtcs,
+				&malidp_de_plane_funcs, formats, n,
+				(id == DE_SMART) ? linear_only_modifiers : modifiers,
+				plane_type, NULL);
+
+		if (ret < 0)
+			goto cleanup;
+
+		drm_plane_helper_add(&plane->base,
+				     &malidp_de_plane_helper_funcs);
+		plane->hwdev = malidp->dev;
+		plane->layer = &map->layers[i];
+
+		drm_plane_create_alpha_property(&plane->base);
+		drm_plane_create_blend_mode_property(&plane->base, blend_caps);
+
+		if (id == DE_SMART) {
+			/* Skip the features which the SMART layer doesn't have. */
+			continue;
+		}
+
+		drm_plane_create_rotation_property(&plane->base, DRM_MODE_ROTATE_0, flags);
+		malidp_hw_write(malidp->dev, MALIDP_ALPHA_LUT,
+				plane->layer->base + MALIDP_LAYER_COMPOSE);
+
+		/* Attach the YUV->RGB property only to video layers */
+		if (id & (DE_VIDEO1 | DE_VIDEO2)) {
+			/* default encoding for YUV->RGB is BT601 NARROW */
+			enum drm_color_encoding enc = DRM_COLOR_YCBCR_BT601;
+			enum drm_color_range range = DRM_COLOR_YCBCR_LIMITED_RANGE;
+
+			ret = drm_plane_create_color_properties(&plane->base,
+					BIT(DRM_COLOR_YCBCR_BT601) | \
+					BIT(DRM_COLOR_YCBCR_BT709) | \
+					BIT(DRM_COLOR_YCBCR_BT2020),
+					BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) | \
+					BIT(DRM_COLOR_YCBCR_FULL_RANGE),
+					enc, range);
+			if (!ret)
+				/* program the HW registers */
+				malidp_de_set_color_encoding(plane, enc, range);
+			else
+				DRM_WARN("Failed to create video layer %d color properties\n", id);
+		}
+	}
+
+	kfree(formats);
+
+	return 0;
+
+cleanup:
+	kfree(formats);
+
+	return ret;
+}
diff --git a/drivers/gpu/drm/arm/malidp_regs.h b/drivers/gpu/drm/arm/malidp_regs.h
new file mode 100644
index 000000000..514c50dcb
--- /dev/null
+++ b/drivers/gpu/drm/arm/malidp_regs.h
@@ -0,0 +1,301 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ * Author: Liviu Dudau <Liviu.Dudau@arm.com>
+ *
+ * ARM Mali DP500/DP550/DP650 registers definition.
+ */ + +#ifndef __MALIDP_REGS_H__ +#define __MALIDP_REGS_H__ + +/* + * abbreviations used: + * - DC - display core (general settings) + * - DE - display engine + * - SE - scaling engine + */ + +/* interrupt bit masks */ +#define MALIDP_DE_IRQ_UNDERRUN (1 << 0) + +#define MALIDP500_DE_IRQ_AXI_ERR (1 << 4) +#define MALIDP500_DE_IRQ_VSYNC (1 << 5) +#define MALIDP500_DE_IRQ_PROG_LINE (1 << 6) +#define MALIDP500_DE_IRQ_SATURATION (1 << 7) +#define MALIDP500_DE_IRQ_CONF_VALID (1 << 8) +#define MALIDP500_DE_IRQ_CONF_MODE (1 << 11) +#define MALIDP500_DE_IRQ_CONF_ACTIVE (1 << 17) +#define MALIDP500_DE_IRQ_PM_ACTIVE (1 << 18) +#define MALIDP500_DE_IRQ_TESTMODE_ACTIVE (1 << 19) +#define MALIDP500_DE_IRQ_FORCE_BLNK_ACTIVE (1 << 24) +#define MALIDP500_DE_IRQ_AXI_BUSY (1 << 28) +#define MALIDP500_DE_IRQ_GLOBAL (1 << 31) +#define MALIDP500_SE_IRQ_CONF_MODE (1 << 0) +#define MALIDP500_SE_IRQ_CONF_VALID (1 << 4) +#define MALIDP500_SE_IRQ_INIT_BUSY (1 << 5) +#define MALIDP500_SE_IRQ_AXI_ERROR (1 << 8) +#define MALIDP500_SE_IRQ_OVERRUN (1 << 9) +#define MALIDP500_SE_IRQ_PROG_LINE1 (1 << 12) +#define MALIDP500_SE_IRQ_PROG_LINE2 (1 << 13) +#define MALIDP500_SE_IRQ_CONF_ACTIVE (1 << 17) +#define MALIDP500_SE_IRQ_PM_ACTIVE (1 << 18) +#define MALIDP500_SE_IRQ_AXI_BUSY (1 << 28) +#define MALIDP500_SE_IRQ_GLOBAL (1 << 31) + +#define MALIDP550_DE_IRQ_SATURATION (1 << 8) +#define MALIDP550_DE_IRQ_VSYNC (1 << 12) +#define MALIDP550_DE_IRQ_PROG_LINE (1 << 13) +#define MALIDP550_DE_IRQ_AXI_ERR (1 << 16) +#define MALIDP550_SE_IRQ_EOW (1 << 0) +#define MALIDP550_SE_IRQ_AXI_ERR (1 << 16) +#define MALIDP550_SE_IRQ_OVR (1 << 17) +#define MALIDP550_SE_IRQ_IBSY (1 << 18) +#define MALIDP550_DC_IRQ_CONF_VALID (1 << 0) +#define MALIDP550_DC_IRQ_CONF_MODE (1 << 4) +#define MALIDP550_DC_IRQ_CONF_ACTIVE (1 << 16) +#define MALIDP550_DC_IRQ_DE (1 << 20) +#define MALIDP550_DC_IRQ_SE (1 << 24) + +#define MALIDP650_DE_IRQ_DRIFT (1 << 4) +#define MALIDP650_DE_IRQ_ACEV1 (1 << 17) +#define MALIDP650_DE_IRQ_ACEV2 (1 << 18) +#define MALIDP650_DE_IRQ_ACEG (1 << 19) +#define MALIDP650_DE_IRQ_AXIEP (1 << 28) + +/* bit masks that are common between products */ +#define MALIDP_CFG_VALID (1 << 0) +#define MALIDP_DISP_FUNC_GAMMA (1 << 0) +#define MALIDP_DISP_FUNC_CADJ (1 << 4) +#define MALIDP_DISP_FUNC_ILACED (1 << 8) +#define MALIDP_SCALE_ENGINE_EN (1 << 16) +#define MALIDP_SE_MEMWRITE_EN (2 << 5) + +/* register offsets for IRQ management */ +#define MALIDP_REG_STATUS 0x00000 +#define MALIDP_REG_SETIRQ 0x00004 +#define MALIDP_REG_MASKIRQ 0x00008 +#define MALIDP_REG_CLEARIRQ 0x0000c + +/* register offsets */ +#define MALIDP_DE_CORE_ID 0x00018 +#define MALIDP_DE_DISPLAY_FUNC 0x00020 + +/* these offsets are relative to MALIDP5x0_TIMINGS_BASE */ +#define MALIDP_DE_H_TIMINGS 0x0 +#define MALIDP_DE_V_TIMINGS 0x4 +#define MALIDP_DE_SYNC_WIDTH 0x8 +#define MALIDP_DE_HV_ACTIVE 0xc + +/* Stride register offsets relative to Lx_BASE */ +#define MALIDP_DE_LG_STRIDE 0x18 +#define MALIDP_DE_LV_STRIDE0 0x18 +#define MALIDP550_DE_LS_R1_STRIDE 0x28 + +/* macros to set values into registers */ +#define MALIDP_DE_H_FRONTPORCH(x) (((x) & 0xfff) << 0) +#define MALIDP_DE_H_BACKPORCH(x) (((x) & 0x3ff) << 16) +#define MALIDP500_DE_V_FRONTPORCH(x) (((x) & 0xff) << 0) +#define MALIDP550_DE_V_FRONTPORCH(x) (((x) & 0xfff) << 0) +#define MALIDP_DE_V_BACKPORCH(x) (((x) & 0xff) << 16) +#define MALIDP_DE_H_SYNCWIDTH(x) (((x) & 0x3ff) << 0) +#define MALIDP_DE_V_SYNCWIDTH(x) (((x) & 0xff) << 16) +#define MALIDP_DE_H_ACTIVE(x) (((x) & 0x1fff) << 0) +#define MALIDP_DE_V_ACTIVE(x) 
(((x) & 0x1fff) << 16)
+
+#define MALIDP_PRODUCT_ID(__core_id) ((u32)(__core_id) >> 16)
+
+/* register offsets relative to MALIDP5x0_COEFFS_BASE */
+#define MALIDP_COLOR_ADJ_COEF		0x00000
+#define MALIDP_COEF_TABLE_ADDR		0x00030
+#define MALIDP_COEF_TABLE_DATA		0x00034
+
+/* Scaling engine registers and masks. */
+#define MALIDP_SE_SCALING_EN		(1 << 0)
+#define MALIDP_SE_ALPHA_EN		(1 << 1)
+#define MALIDP_SE_ENH_MASK		3
+#define MALIDP_SE_ENH(x)		(((x) & MALIDP_SE_ENH_MASK) << 2)
+#define MALIDP_SE_RGBO_IF_EN		(1 << 4)
+#define MALIDP550_SE_CTL_SEL_MASK	7
+#define MALIDP550_SE_CTL_VCSEL(x) \
+		(((x) & MALIDP550_SE_CTL_SEL_MASK) << 20)
+#define MALIDP550_SE_CTL_HCSEL(x) \
+		(((x) & MALIDP550_SE_CTL_SEL_MASK) << 16)
+
+/* Blocks with offsets from SE_CONTROL register. */
+#define MALIDP_SE_LAYER_CONTROL		0x14
+#define MALIDP_SE_L0_IN_SIZE		0x00
+#define MALIDP_SE_L0_OUT_SIZE		0x04
+#define MALIDP_SE_SET_V_SIZE(x)		(((x) & 0x1fff) << 16)
+#define MALIDP_SE_SET_H_SIZE(x)		(((x) & 0x1fff) << 0)
+#define MALIDP_SE_SCALING_CONTROL	0x24
+#define MALIDP_SE_H_INIT_PH		0x00
+#define MALIDP_SE_H_DELTA_PH		0x04
+#define MALIDP_SE_V_INIT_PH		0x08
+#define MALIDP_SE_V_DELTA_PH		0x0c
+#define MALIDP_SE_COEFFTAB_ADDR		0x10
+#define MALIDP_SE_COEFFTAB_ADDR_MASK	0x7f
+#define MALIDP_SE_V_COEFFTAB		(1 << 8)
+#define MALIDP_SE_H_COEFFTAB		(1 << 9)
+#define MALIDP_SE_SET_V_COEFFTAB_ADDR(x) \
+		(MALIDP_SE_V_COEFFTAB | ((x) & MALIDP_SE_COEFFTAB_ADDR_MASK))
+#define MALIDP_SE_SET_H_COEFFTAB_ADDR(x) \
+		(MALIDP_SE_H_COEFFTAB | ((x) & MALIDP_SE_COEFFTAB_ADDR_MASK))
+#define MALIDP_SE_COEFFTAB_DATA		0x14
+#define MALIDP_SE_COEFFTAB_DATA_MASK	0x3fff
+#define MALIDP_SE_SET_COEFFTAB_DATA(x) \
+		((x) & MALIDP_SE_COEFFTAB_DATA_MASK)
+/* Enhancement coefficients register offset */
+#define MALIDP_SE_IMAGE_ENH		0x3C
+/* ENH_LIMITS offset 0x0 */
+#define MALIDP_SE_ENH_LOW_LEVEL		24
+#define MALIDP_SE_ENH_HIGH_LEVEL	63
+#define MALIDP_SE_ENH_LIMIT_MASK	0xfff
+#define MALIDP_SE_SET_ENH_LIMIT_LOW(x) \
+		((x) & MALIDP_SE_ENH_LIMIT_MASK)
+#define MALIDP_SE_SET_ENH_LIMIT_HIGH(x) \
+		(((x) & MALIDP_SE_ENH_LIMIT_MASK) << 16)
+#define MALIDP_SE_ENH_COEFF0		0x04
+
+
+/* register offsets relative to MALIDP5x0_SE_MEMWRITE_BASE */
+#define MALIDP_MW_FORMAT		0x00000
+#define MALIDP_MW_P1_STRIDE		0x00004
+#define MALIDP_MW_P2_STRIDE		0x00008
+#define MALIDP_MW_P1_PTR_LOW		0x0000c
+#define MALIDP_MW_P1_PTR_HIGH		0x00010
+#define MALIDP_MW_P2_PTR_LOW		0x0002c
+#define MALIDP_MW_P2_PTR_HIGH		0x00030
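
As a worked illustration (editorial, with a hypothetical variable name) of the timing macros above: for a 1920x1080 mode, both dimensions pack into the single register at MALIDP_DE_HV_ACTIVE.

	/* Illustrative only: MALIDP_DE_H_ACTIVE(1920) = 0x780 in bits [12:0]
	 * and MALIDP_DE_V_ACTIVE(1080) = 0x438 << 16 in bits [28:16], so the
	 * packed MALIDP_DE_HV_ACTIVE value is 0x04380780. */
	static const u32 example_hv_active =
		MALIDP_DE_H_ACTIVE(1920) | MALIDP_DE_V_ACTIVE(1080);
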
+/* register offsets and bits specific to DP500 */
+#define MALIDP500_ADDR_SPACE_SIZE	0x01000
+#define MALIDP500_DC_BASE		0x00000
+#define MALIDP500_DC_CONTROL		0x0000c
+#define MALIDP500_DC_CONFIG_REQ		(1 << 17)
+#define MALIDP500_HSYNCPOL		(1 << 20)
+#define MALIDP500_VSYNCPOL		(1 << 21)
+#define MALIDP500_DC_CLEAR_MASK		0x300fff
+#define MALIDP500_DE_LINE_COUNTER	0x00010
+#define MALIDP500_DE_AXI_CONTROL	0x00014
+#define MALIDP500_DE_SECURE_CTRL	0x0001c
+#define MALIDP500_DE_CHROMA_KEY		0x00024
+#define MALIDP500_TIMINGS_BASE		0x00028
+
+#define MALIDP500_CONFIG_3D		0x00038
+#define MALIDP500_BGND_COLOR		0x0003c
+#define MALIDP500_OUTPUT_DEPTH		0x00044
+#define MALIDP500_COEFFS_BASE		0x00078
+
+/*
+ * The YUV2RGB coefficients on the DP500 are not in the video layer's register
+ * block. They belong in a separate block above the layer's registers, hence
+ * the negative offset.
+ */
+#define MALIDP500_LV_YUV2RGB		((s16)(-0xB8))
+#define MALIDP500_DE_LV_BASE		0x00100
+#define MALIDP500_DE_LV_PTR_BASE	0x00124
+#define MALIDP500_DE_LV_AD_CTRL		0x00400
+#define MALIDP500_DE_LG1_BASE		0x00200
+#define MALIDP500_DE_LG1_PTR_BASE	0x0021c
+#define MALIDP500_DE_LG1_AD_CTRL	0x0040c
+#define MALIDP500_DE_LG2_BASE		0x00300
+#define MALIDP500_DE_LG2_PTR_BASE	0x0031c
+#define MALIDP500_DE_LG2_AD_CTRL	0x00418
+#define MALIDP500_SE_BASE		0x00c00
+#define MALIDP500_SE_CONTROL		0x00c0c
+#define MALIDP500_SE_MEMWRITE_OUT_SIZE	0x00c2c
+#define MALIDP500_SE_RGB_YUV_COEFFS	0x00C74
+#define MALIDP500_SE_MEMWRITE_BASE	0x00e00
+#define MALIDP500_DC_IRQ_BASE		0x00f00
+#define MALIDP500_CONFIG_VALID		0x00f00
+#define MALIDP500_CONFIG_ID		0x00fd4
+
+/*
+ * The quality of service (QoS) register on the DP500. RQOS register values
+ * are driven by the ARQOS signal, using AXI transactions, depending on the
+ * FIFO input level.
+ * The RQOS register can also set QoS levels for:
+ *    - RED_ARQOS   @ A 4-bit signal value for close to underflow conditions
+ *    - GREEN_ARQOS @ A 4-bit signal value for normal conditions
+ */
+#define MALIDP500_RQOS_QUALITY          0x00500
+
+/* register offsets and bits specific to DP550/DP650 */
+#define MALIDP550_ADDR_SPACE_SIZE	0x10000
+#define MALIDP550_DE_CONTROL		0x00010
+#define MALIDP550_DE_LINE_COUNTER	0x00014
+#define MALIDP550_DE_AXI_CONTROL	0x00018
+#define MALIDP550_DE_QOS		0x0001c
+#define MALIDP550_TIMINGS_BASE		0x00030
+#define MALIDP550_HSYNCPOL		(1 << 12)
+#define MALIDP550_VSYNCPOL		(1 << 28)
+
+#define MALIDP550_DE_DISP_SIDEBAND	0x00040
+#define MALIDP550_DE_BGND_COLOR		0x00044
+#define MALIDP550_DE_OUTPUT_DEPTH	0x0004c
+#define MALIDP550_COEFFS_BASE		0x00050
+#define MALIDP550_LV_YUV2RGB		0x00084
+#define MALIDP550_DE_LV1_BASE		0x00100
+#define MALIDP550_DE_LV1_PTR_BASE	0x00124
+#define MALIDP550_DE_LV1_AD_CTRL	0x001B8
+#define MALIDP550_DE_LV2_BASE		0x00200
+#define MALIDP550_DE_LV2_PTR_BASE	0x00224
+#define MALIDP550_DE_LV2_AD_CTRL	0x002B8
+#define MALIDP550_DE_LG_BASE		0x00300
+#define MALIDP550_DE_LG_PTR_BASE	0x0031c
+#define MALIDP550_DE_LG_AD_CTRL		0x00330
+#define MALIDP550_DE_LS_BASE		0x00400
+#define MALIDP550_DE_LS_PTR_BASE	0x0042c
+#define MALIDP550_DE_PERF_BASE		0x00500
+#define MALIDP550_SE_BASE		0x08000
+#define MALIDP550_SE_CONTROL		0x08010
+#define MALIDP550_SE_MEMWRITE_ONESHOT	(1 << 7)
+#define MALIDP550_SE_MEMWRITE_OUT_SIZE	0x08030
+#define MALIDP550_SE_RGB_YUV_COEFFS	0x08078
+#define MALIDP550_SE_MEMWRITE_BASE	0x08100
+#define MALIDP550_DC_BASE		0x0c000
+#define MALIDP550_DC_CONTROL		0x0c010
+#define MALIDP550_DC_CONFIG_REQ		(1 << 16)
+#define MALIDP550_CONFIG_VALID		0x0c014
+#define MALIDP550_CONFIG_ID		0x0ffd4
+
+/* register offsets specific to DP650 */
+#define MALIDP650_DE_LV_MMU_CTRL	0x000D0
+#define MALIDP650_DE_LG_MMU_CTRL	0x00048
+#define MALIDP650_DE_LS_MMU_CTRL	0x00078
+
+/* bit masks to set the MMU control register */
+#define MALIDP_MMU_CTRL_EN		(1 << 0)
+#define MALIDP_MMU_CTRL_MODE		(1 << 4)
+#define MALIDP_MMU_CTRL_PX_PS(x)	(1 << (8 + (x)))
+#define MALIDP_MMU_CTRL_PP_NUM_REQ(x)	(((x) & 0x7f) << 12)
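
An editorial worked example (hypothetical variable name) of the MMU control bits above, mirroring malidp_calc_mmu_control_value() in malidp_planes.c: partial-frame prefetch with the driver's readahead of 8 on a single-plane framebuffer backed by 64K pages.

	/* Illustrative only:
	 *   EN (bit 0) | MODE (bit 4) | PX_PS(0) (bit 8) | PP_NUM_REQ(8)
	 *   = 0x1 | 0x10 | 0x100 | (8 << 12) = 0x8111 */
	static const u32 example_mmu_ctrl = MALIDP_MMU_CTRL_EN |
					    MALIDP_MMU_CTRL_MODE |
					    MALIDP_MMU_CTRL_PX_PS(0) |
					    MALIDP_MMU_CTRL_PP_NUM_REQ(8);
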
+/* AFBC register offsets relative to MALIDPXXX_DE_LX_AD_CTRL */
+/* The following register offsets are common for DP500, DP550 and DP650 */
+#define MALIDP_AD_CROP_H		0x4
+#define MALIDP_AD_CROP_V		0x8
+#define MALIDP_AD_END_PTR_LOW		0xc
+#define MALIDP_AD_END_PTR_HIGH		0x10
+
+/* AFBC decoder registers */
+#define MALIDP_AD_EN			BIT(0)
+#define MALIDP_AD_YTR			BIT(4)
+#define MALIDP_AD_BS			BIT(8)
+#define MALIDP_AD_CROP_RIGHT_OFFSET	16
+#define MALIDP_AD_CROP_BOTTOM_OFFSET	16
+
+/*
+ * Starting with DP550 the register map blocks have been standardised to the
+ * following layout:
+ *
+ *   Offset        Block registers
+ *   0x00000       Display Engine
+ *   0x08000       Scaling Engine
+ *   0x0c000       Display Core
+ *   0x10000       Secure control
+ *
+ * The old DP500 IP mixes some DC with the DE registers, hence the need
+ * for a mapping structure.
+ */
+
+#endif /* __MALIDP_REGS_H__ */
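
Finally, an editorial sketch (hypothetical helper, mirroring malidp_de_set_plane_afbc() in malidp_planes.c) of how the crop offsets above combine the source position and framebuffer size into one register value:

	/* Illustrative only: for a 1920-wide buffer showing a 1280-wide
	 * window at src_x = 320, the right crop is 1920 - (320 + 1280) = 320,
	 * giving (320 << MALIDP_AD_CROP_RIGHT_OFFSET) | 320. */
	static u32 example_ad_crop_h(u32 fb_width, u32 src_x, u32 src_w)
	{
		return ((fb_width - (src_x + src_w)) <<
			MALIDP_AD_CROP_RIGHT_OFFSET) | src_x;
	}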