Diffstat (limited to 'drivers/gpu/drm/vc4/vc4_kms.c')
-rw-r--r-- drivers/gpu/drm/vc4/vc4_kms.c | 208
1 file changed, 187 insertions(+), 21 deletions(-)
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index 78d4fb0499e3..7f857af77898 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -11,6 +11,10 @@
* crtc, HDMI encoder).
*/
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
@@ -18,6 +22,7 @@
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
+#include <drm/drm_drv.h>
#include "vc4_drv.h"
#include "vc4_regs.h"
@@ -116,6 +121,9 @@ vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(vc4->ctm_manager.state);
struct drm_color_ctm *ctm = ctm_state->ctm;
+ if (vc4->firmware_kms)
+ return;
+
if (ctm_state->fifo) {
HVS_WRITE(SCALER_OLEDCOEF2,
VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[0]),
@@ -144,29 +152,106 @@ vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
VC4_SET_FIELD(ctm_state->fifo, SCALER_OLEDOFFS_DISPFIFO));
}
+static void vc4_hvs_pv_muxing_commit(struct vc4_dev *vc4,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc *crtc;
+ unsigned char dsp2_mux = 0;
+ unsigned char dsp3_mux = 3;
+ unsigned char dsp4_mux = 3;
+ unsigned char dsp5_mux = 3;
+ unsigned int i;
+ u32 reg;
+
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+ struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
+ struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+
+ if (!crtc_state->active)
+ continue;
+
+ switch (vc4_crtc->data->hvs_output) {
+ case 2:
+ dsp2_mux = (vc4_state->assigned_channel == 2) ? 1 : 0;
+ break;
+
+ case 3:
+ dsp3_mux = vc4_state->assigned_channel;
+ break;
+
+ case 4:
+ dsp4_mux = vc4_state->assigned_channel;
+ break;
+
+ case 5:
+ dsp5_mux = vc4_state->assigned_channel;
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ reg = HVS_READ(SCALER_DISPECTRL);
+ if (FIELD_GET(SCALER_DISPECTRL_DSP2_MUX_MASK, reg) != dsp2_mux)
+ HVS_WRITE(SCALER_DISPECTRL,
+ (reg & ~SCALER_DISPECTRL_DSP2_MUX_MASK) |
+ VC4_SET_FIELD(dsp2_mux, SCALER_DISPECTRL_DSP2_MUX));
+
+ reg = HVS_READ(SCALER_DISPCTRL);
+ if (FIELD_GET(SCALER_DISPCTRL_DSP3_MUX_MASK, reg) != dsp3_mux)
+ HVS_WRITE(SCALER_DISPCTRL,
+ (reg & ~SCALER_DISPCTRL_DSP3_MUX_MASK) |
+ VC4_SET_FIELD(dsp3_mux, SCALER_DISPCTRL_DSP3_MUX));
+
+ reg = HVS_READ(SCALER_DISPEOLN);
+ if (FIELD_GET(SCALER_DISPEOLN_DSP4_MUX_MASK, reg) != dsp4_mux)
+ HVS_WRITE(SCALER_DISPEOLN,
+ (reg & ~SCALER_DISPEOLN_DSP4_MUX_MASK) |
+ VC4_SET_FIELD(dsp4_mux, SCALER_DISPEOLN_DSP4_MUX));
+
+ reg = HVS_READ(SCALER_DISPDITHER);
+ if (FIELD_GET(SCALER_DISPDITHER_DSP5_MUX_MASK, reg) != dsp5_mux)
+ HVS_WRITE(SCALER_DISPDITHER,
+ (reg & ~SCALER_DISPDITHER_DSP5_MUX_MASK) |
+ VC4_SET_FIELD(dsp5_mux, SCALER_DISPDITHER_DSP5_MUX));
+}
+
static void
vc4_atomic_complete_commit(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_hvs *hvs = vc4->hvs;
struct vc4_crtc *vc4_crtc;
int i;
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- if (!state->crtcs[i].ptr || !state->crtcs[i].commit)
+ for (i = 0; vc4->hvs && i < dev->mode_config.num_crtc; i++) {
+ struct __drm_crtcs_state *_state = &state->crtcs[i];
+ struct vc4_crtc_state *vc4_crtc_state;
+
+ if (!_state->ptr || !_state->commit)
continue;
- vc4_crtc = to_vc4_crtc(state->crtcs[i].ptr);
- vc4_hvs_mask_underrun(dev, vc4_crtc->channel);
+ vc4_crtc = to_vc4_crtc(_state->ptr);
+ vc4_crtc_state = to_vc4_crtc_state(_state->state);
+ vc4_hvs_mask_underrun(dev, vc4_crtc_state->assigned_channel);
}
+ if (!vc4->firmware_kms)
+ clk_set_rate(hvs->core_clk, 500000000);
+
drm_atomic_helper_wait_for_fences(dev, state, false);
drm_atomic_helper_wait_for_dependencies(state);
drm_atomic_helper_commit_modeset_disables(dev, state);
- vc4_ctm_commit(vc4, state);
+ if (!vc4->firmware_kms) {
+ vc4_ctm_commit(vc4, state);
+ vc4_hvs_pv_muxing_commit(vc4, state);
+ }
drm_atomic_helper_commit_planes(dev, state, 0);
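The four mux updates in vc4_hvs_pv_muxing_commit() above all follow the same read-modify-write pattern: read the routing register, compare the current field against the desired mux value, and only write the register back when the route actually changes. Below is a minimal sketch of that pattern as a helper, shown for the DSP2 field only; it is illustrative rather than part of the patch (the patch open-codes the pattern once per register), and the helper name is made up, while the register and field macros are the ones used in the hunk above.

static void vc4_hvs_update_dsp2_mux(struct vc4_dev *vc4, u8 mux)
{
	u32 reg = HVS_READ(SCALER_DISPECTRL);

	/* Skip the register write when DSP2 is already routed as requested. */
	if (FIELD_GET(SCALER_DISPECTRL_DSP2_MUX_MASK, reg) == mux)
		return;

	HVS_WRITE(SCALER_DISPECTRL,
		  (reg & ~SCALER_DISPECTRL_DSP2_MUX_MASK) |
		  VC4_SET_FIELD(mux, SCALER_DISPECTRL_DSP2_MUX));
}

Skipping the redundant write is worthwhile because these routing registers are global to the HVS and this path runs on every atomic commit.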
@@ -182,6 +267,9 @@ vc4_atomic_complete_commit(struct drm_atomic_state *state)
drm_atomic_helper_commit_cleanup_done(state);
+ if (!vc4->firmware_kms)
+ clk_set_rate(hvs->core_clk, 200000000);
+
drm_atomic_state_put(state);
up(&vc4->async_modeset);
@@ -240,7 +328,8 @@ static int vc4_atomic_commit(struct drm_device *dev,
* drm_atomic_helper_setup_commit() from auto-completing
* commit->flip_done.
*/
- state->legacy_cursor_update = false;
+ if (!vc4->firmware_kms)
+ state->legacy_cursor_update = false;
ret = drm_atomic_helper_setup_commit(state, nonblock);
if (ret)
return ret;
@@ -374,8 +463,11 @@ vc4_ctm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
/* CTM is being enabled or the matrix changed. */
if (new_crtc_state->ctm) {
+ struct vc4_crtc_state *vc4_crtc_state =
+ to_vc4_crtc_state(new_crtc_state);
+
/* fifo is 1-based since 0 disables CTM. */
- int fifo = to_vc4_crtc(crtc)->channel + 1;
+ int fifo = vc4_crtc_state->assigned_channel + 1;
/* Check userland isn't trying to turn on CTM for more
* than one CRTC at a time.
@@ -415,6 +507,9 @@ static int vc4_load_tracker_atomic_check(struct drm_atomic_state *state)
struct drm_plane *plane;
int i;
+ if (!vc4->load_tracker_available)
+ return 0;
+
priv_state = drm_atomic_get_private_obj_state(state,
&vc4->load_tracker);
if (IS_ERR(priv_state))
@@ -485,10 +580,67 @@ static const struct drm_private_state_funcs vc4_load_tracker_state_funcs = {
.atomic_destroy_state = vc4_load_tracker_destroy_state,
};
+#define NUM_OUTPUTS 6
+#define NUM_CHANNELS 3
+
static int
vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
- int ret;
+ unsigned long unassigned_channels = GENMASK(NUM_CHANNELS - 1, 0);
+ struct vc4_dev *vc4 = to_vc4_dev(state->dev);
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc *crtc;
+ int i, ret;
+
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+ struct vc4_crtc_state *vc4_crtc_state =
+ to_vc4_crtc_state(crtc_state);
+ struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+ bool is_assigned = false;
+ unsigned int channel;
+
+ if (!crtc_state->active || vc4->firmware_kms)
+ continue;
+
+ /*
+ * The problem we have to solve here is that we have
+ * up to 7 encoders, connected to up to 6 CRTCs.
+ *
+ * Those CRTCs, depending on the instance, can be
+ * routed to 1, 2 or 3 HVS FIFOs, and we need to set
+ * up the muxing between FIFOs and outputs in the
+ * HVS accordingly.
+ *
+ * It would be pretty hard to come up with an
+ * algorithm that would solve this generically.
+ * However, the routing trees we currently support
+ * let us simplify the problem a bit.
+ *
+ * Indeed, with the currently supported layouts, if
+ * we assign the FIFOs in ascending CRTC index order,
+ * we can never end up in a situation where an earlier
+ * CRTC with several possible routes takes the only
+ * FIFO a later CRTC could have used.
+ *
+ * If a future layout no longer gives us that
+ * guarantee, we will need something smarter, but
+ * this works so far.
+ */
+ for_each_set_bit(channel, &unassigned_channels,
+ NUM_CHANNELS) {
+
+ if (!(BIT(channel) & vc4_crtc->data->hvs_available_channels))
+ continue;
+
+ vc4_crtc_state->assigned_channel = channel;
+ unassigned_channels &= ~BIT(channel);
+ is_assigned = true;
+ break;
+ }
+
+ if (!is_assigned)
+ return -EINVAL;
+ }
ret = vc4_ctm_atomic_check(dev, state);
if (ret < 0)
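The assignment loop above is easier to see with a concrete input. Here is a standalone, userspace-only sketch (not part of the patch) of the same greedy strategy, with each CRTC's hvs_available_channels modelled as a plain bitmask; the two-CRTC setup and the masks are illustrative assumptions, not values taken from the driver.

#include <stdio.h>

#define NUM_CHANNELS 3

int main(void)
{
	/* Hypothetical per-CRTC FIFO masks: CRTC 0 -> {0}, CRTC 1 -> {0, 1}. */
	unsigned long available[] = { 0x1, 0x3 };
	unsigned long unassigned = (1UL << NUM_CHANNELS) - 1;
	unsigned int crtc, channel;

	for (crtc = 0; crtc < 2; crtc++) {
		int assigned = -1;

		/* Greedy pick: first still-free FIFO this CRTC can reach. */
		for (channel = 0; channel < NUM_CHANNELS; channel++) {
			if (!(unassigned & (1UL << channel)))
				continue;
			if (!(available[crtc] & (1UL << channel)))
				continue;

			unassigned &= ~(1UL << channel);
			assigned = channel;
			break;
		}

		/* assigned == -1 corresponds to the -EINVAL case above. */
		printf("crtc %u -> fifo %d\n", crtc, assigned);
	}

	return 0;
}

Walking the CRTCs in ascending order prints "crtc 0 -> fifo 0" and "crtc 1 -> fifo 1". If CRTC 1 were assigned first it would grab FIFO 0, the only channel CRTC 0 can use, which is exactly the starvation the comment in the hunk above says the supported routing trees avoid when FIFOs are handed out in ascending CRTC index order.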
@@ -514,10 +666,14 @@ int vc4_kms_load(struct drm_device *dev)
struct vc4_load_tracker_state *load_state;
int ret;
- /* Start with the load tracker enabled. Can be disabled through the
- * debugfs load_tracker file.
- */
- vc4->load_tracker_enabled = true;
+ if (!of_device_is_compatible(dev->dev->of_node, "brcm,bcm2711-vc5")) {
+ vc4->load_tracker_available = true;
+
+ /* Start with the load tracker enabled. Can be
+ * disabled through the debugfs load_tracker file.
+ */
+ vc4->load_tracker_enabled = true;
+ }
sema_init(&vc4->async_modeset, 1);
@@ -531,12 +687,19 @@ int vc4_kms_load(struct drm_device *dev)
return ret;
}
- dev->mode_config.max_width = 2048;
- dev->mode_config.max_height = 2048;
+ if (!drm_core_check_feature(dev, DRIVER_RENDER)) {
+ /* No V3D as part of vc4. Assume this is Pi4. */
+ dev->mode_config.max_width = 7680;
+ dev->mode_config.max_height = 7680;
+ } else {
+ dev->mode_config.max_width = 2048;
+ dev->mode_config.max_height = 2048;
+ }
dev->mode_config.funcs = &vc4_mode_funcs;
dev->mode_config.preferred_depth = 24;
dev->mode_config.async_page_flip = true;
dev->mode_config.allow_fb_modifiers = true;
+ dev->mode_config.normalize_zpos = true;
drm_modeset_lock_init(&vc4->ctm_state_lock);
@@ -547,14 +710,17 @@ int vc4_kms_load(struct drm_device *dev)
drm_atomic_private_obj_init(dev, &vc4->ctm_manager, &ctm_state->base,
&vc4_ctm_state_funcs);
- load_state = kzalloc(sizeof(*load_state), GFP_KERNEL);
- if (!load_state) {
- drm_atomic_private_obj_fini(&vc4->ctm_manager);
- return -ENOMEM;
- }
+ if (vc4->load_tracker_available) {
+ load_state = kzalloc(sizeof(*load_state), GFP_KERNEL);
+ if (!load_state) {
+ drm_atomic_private_obj_fini(&vc4->ctm_manager);
+ return -ENOMEM;
+ }
- drm_atomic_private_obj_init(dev, &vc4->load_tracker, &load_state->base,
- &vc4_load_tracker_state_funcs);
+ drm_atomic_private_obj_init(dev, &vc4->load_tracker,
+ &load_state->base,
+ &vc4_load_tracker_state_funcs);
+ }
drm_mode_config_reset(dev);