backend/drm: add infrastructure for device-wide commits

master
Simon Ser 11 months ago committed by Kenny Levinsen
parent 4636f8c407
commit 805807fd5b

@ -61,8 +61,8 @@ static void atomic_begin(struct atomic *atom) {
}
static bool atomic_commit(struct atomic *atom, struct wlr_drm_backend *drm,
struct wlr_drm_connector *conn, struct wlr_drm_page_flip *page_flip,
uint32_t flags) {
const struct wlr_drm_device_state *state,
struct wlr_drm_page_flip *page_flip, uint32_t flags) {
if (atom->failed) {
return false;
}
@ -74,12 +74,12 @@ static bool atomic_commit(struct atomic *atom, struct wlr_drm_backend *drm,
log_level = WLR_DEBUG;
}
if (conn != NULL) {
if (state->connectors_len == 1) {
struct wlr_drm_connector *conn = state->connectors[0].connector;
wlr_drm_conn_log_errno(conn, log_level, "Atomic commit failed");
} else {
wlr_log_errno(log_level, "Atomic commit failed");
}
char *flags_str = atomic_commit_flags_str(flags);
wlr_log(WLR_DEBUG, "(Atomic commit flags: %s)",
flags_str ? flags_str : "<error>");
@ -350,82 +350,96 @@ static void set_plane_props(struct atomic *atom, struct wlr_drm_backend *drm,
atomic_add(atom, id, props->crtc_y, (uint64_t)y);
}
static bool atomic_crtc_commit(struct wlr_drm_connector *conn,
struct wlr_drm_connector_state *state,
struct wlr_drm_page_flip *page_flip, uint32_t flags, bool test_only) {
static void atomic_connector_add(struct atomic *atom,
const struct wlr_drm_connector_state *state, bool modeset) {
struct wlr_drm_connector *conn = state->connector;
struct wlr_drm_backend *drm = conn->backend;
struct wlr_drm_crtc *crtc = conn->crtc;
bool modeset = state->modeset;
bool active = state->active;
if (!drm_atomic_connector_prepare(state, modeset)) {
return false;
}
if (test_only) {
flags |= DRM_MODE_ATOMIC_TEST_ONLY;
}
if (modeset) {
flags |= DRM_MODE_ATOMIC_ALLOW_MODESET;
}
if (!test_only && state->nonblock) {
flags |= DRM_MODE_ATOMIC_NONBLOCK;
}
struct atomic atom;
atomic_begin(&atom);
atomic_add(&atom, conn->id, conn->props.crtc_id, active ? crtc->id : 0);
atomic_add(atom, conn->id, conn->props.crtc_id, active ? crtc->id : 0);
if (modeset && active && conn->props.link_status != 0) {
atomic_add(&atom, conn->id, conn->props.link_status,
atomic_add(atom, conn->id, conn->props.link_status,
DRM_MODE_LINK_STATUS_GOOD);
}
if (active && conn->props.content_type != 0) {
atomic_add(&atom, conn->id, conn->props.content_type,
atomic_add(atom, conn->id, conn->props.content_type,
DRM_MODE_CONTENT_TYPE_GRAPHICS);
}
if (modeset && active && conn->props.max_bpc != 0 && conn->max_bpc_bounds[1] != 0) {
atomic_add(&atom, conn->id, conn->props.max_bpc, pick_max_bpc(conn, state->primary_fb));
atomic_add(atom, conn->id, conn->props.max_bpc, pick_max_bpc(conn, state->primary_fb));
}
atomic_add(&atom, crtc->id, crtc->props.mode_id, state->mode_id);
atomic_add(&atom, crtc->id, crtc->props.active, active);
atomic_add(atom, crtc->id, crtc->props.mode_id, state->mode_id);
atomic_add(atom, crtc->id, crtc->props.active, active);
if (active) {
if (crtc->props.gamma_lut != 0) {
atomic_add(&atom, crtc->id, crtc->props.gamma_lut, state->gamma_lut);
atomic_add(atom, crtc->id, crtc->props.gamma_lut, state->gamma_lut);
}
if (crtc->props.vrr_enabled != 0) {
atomic_add(&atom, crtc->id, crtc->props.vrr_enabled, state->vrr_enabled);
atomic_add(atom, crtc->id, crtc->props.vrr_enabled, state->vrr_enabled);
}
set_plane_props(&atom, drm, crtc->primary, state->primary_fb, crtc->id,
set_plane_props(atom, drm, crtc->primary, state->primary_fb, crtc->id,
0, 0);
if (crtc->primary->props.fb_damage_clips != 0) {
atomic_add(&atom, crtc->primary->id,
atomic_add(atom, crtc->primary->id,
crtc->primary->props.fb_damage_clips, state->fb_damage_clips);
}
if (crtc->cursor) {
if (drm_connector_is_cursor_visible(conn)) {
set_plane_props(&atom, drm, crtc->cursor, state->cursor_fb,
set_plane_props(atom, drm, crtc->cursor, state->cursor_fb,
crtc->id, conn->cursor_x, conn->cursor_y);
} else {
plane_disable(&atom, crtc->cursor);
plane_disable(atom, crtc->cursor);
}
}
} else {
plane_disable(&atom, crtc->primary);
plane_disable(atom, crtc->primary);
if (crtc->cursor) {
plane_disable(&atom, crtc->cursor);
plane_disable(atom, crtc->cursor);
}
}
}
bool ok = atomic_commit(&atom, drm, conn, page_flip, flags);
atomic_finish(&atom);
static bool atomic_device_commit(struct wlr_drm_backend *drm,
const struct wlr_drm_device_state *state,
struct wlr_drm_page_flip *page_flip, uint32_t flags, bool test_only) {
bool ok = false;
if (ok && !test_only) {
drm_atomic_connector_apply_commit(state);
} else {
drm_atomic_connector_rollback_commit(state);
for (size_t i = 0; i < state->connectors_len; i++) {
if (!drm_atomic_connector_prepare(&state->connectors[i], state->modeset)) {
goto out;
}
}
struct atomic atom;
atomic_begin(&atom);
for (size_t i = 0; i < state->connectors_len; i++) {
atomic_connector_add(&atom, &state->connectors[i], state->modeset);
}
if (test_only) {
flags |= DRM_MODE_ATOMIC_TEST_ONLY;
}
if (state->modeset) {
flags |= DRM_MODE_ATOMIC_ALLOW_MODESET;
}
if (!test_only && state->nonblock) {
flags |= DRM_MODE_ATOMIC_NONBLOCK;
}
ok = atomic_commit(&atom, drm, state, page_flip, flags);
atomic_finish(&atom);
out:
for (size_t i = 0; i < state->connectors_len; i++) {
struct wlr_drm_connector_state *conn_state = &state->connectors[i];
if (ok && !test_only) {
drm_atomic_connector_apply_commit(conn_state);
} else {
drm_atomic_connector_rollback_commit(conn_state);
}
}
return ok;
}
@ -456,6 +470,6 @@ bool drm_atomic_reset(struct wlr_drm_backend *drm) {
}
const struct wlr_drm_interface atomic_iface = {
.crtc_commit = atomic_crtc_commit,
.commit = atomic_device_commit,
.reset = drm_atomic_reset,
};

@ -488,26 +488,30 @@ static void drm_connector_rollback_commit(const struct wlr_drm_connector_state *
}
}
static bool drm_crtc_commit(struct wlr_drm_connector *conn,
struct wlr_drm_connector_state *state,
static bool drm_commit(struct wlr_drm_backend *drm,
const struct wlr_drm_device_state *state,
uint32_t flags, bool test_only) {
// Disallow atomic-only flags
assert((flags & ~DRM_MODE_PAGE_FLIP_FLAGS) == 0);
struct wlr_drm_page_flip *page_flip = NULL;
if (flags & DRM_MODE_PAGE_FLIP_EVENT) {
page_flip = drm_page_flip_create(conn);
assert(state->connectors_len == 1);
page_flip = drm_page_flip_create(state->connectors[0].connector);
if (page_flip == NULL) {
return false;
}
}
struct wlr_drm_backend *drm = conn->backend;
bool ok = drm->iface->crtc_commit(conn, state, page_flip, flags, test_only);
bool ok = drm->iface->commit(drm, state, page_flip, flags, test_only);
if (ok && !test_only) {
drm_connector_apply_commit(state, page_flip);
for (size_t i = 0; i < state->connectors_len; i++) {
drm_connector_apply_commit(&state->connectors[i], page_flip);
}
} else {
drm_connector_rollback_commit(state);
for (size_t i = 0; i < state->connectors_len; i++) {
drm_connector_rollback_commit(&state->connectors[i]);
}
drm_page_flip_destroy(page_flip);
}
return ok;
@ -519,15 +523,7 @@ static void drm_connector_state_init(struct wlr_drm_connector_state *state,
*state = (struct wlr_drm_connector_state){
.connector = conn,
.base = base,
.modeset = base->allow_reconfiguration,
.active = output_pending_enabled(&conn->output, base),
// The wlr_output API requires non-modeset commits with a new buffer to
// wait for the frame event. However compositors often perform
non-modeset commits without a new buffer without waiting for the
// frame event. In that case we need to make the KMS commit blocking,
// otherwise the kernel will error out with EBUSY.
.nonblock = !base->allow_reconfiguration &&
(base->committed & WLR_OUTPUT_STATE_BUFFER),
};
struct wlr_output_mode *mode = conn->output.current_mode;
@ -582,6 +578,22 @@ static void drm_connector_state_init(struct wlr_drm_connector_state *state,
}
}
static void drm_device_state_init_single(struct wlr_drm_device_state *dev_state,
struct wlr_drm_connector_state *conn_state) {
*dev_state = (struct wlr_drm_device_state){
.modeset = conn_state->base->allow_reconfiguration,
// The wlr_output API requires non-modeset commits with a new buffer to
// wait for the frame event. However compositors often perform
// non-modesets commits without a new buffer without waiting for the
// frame event. In that case we need to make the KMS commit blocking,
// otherwise the kernel will error out with EBUSY.
.nonblock = !conn_state->base->allow_reconfiguration &&
(conn_state->base->committed & WLR_OUTPUT_STATE_BUFFER),
.connectors = conn_state,
.connectors_len = 1,
};
}
static void drm_connector_state_finish(struct wlr_drm_connector_state *state) {
drm_fb_clear(&state->primary_fb);
drm_fb_clear(&state->cursor_fb);
@ -708,6 +720,8 @@ static bool drm_connector_test(struct wlr_output *output,
bool ok = false;
struct wlr_drm_connector_state pending = {0};
drm_connector_state_init(&pending, conn, state);
struct wlr_drm_device_state pending_dev = {0};
drm_device_state_init_single(&pending_dev, &pending);
if ((state->committed & WLR_OUTPUT_STATE_ADAPTIVE_SYNC_ENABLED) &&
state->adaptive_sync_enabled &&
@ -751,7 +765,7 @@ static bool drm_connector_test(struct wlr_output *output,
goto out;
}
ok = drm_crtc_commit(conn, &pending, 0, true);
ok = drm_commit(conn->backend, &pending_dev, 0, true);
out:
drm_connector_state_finish(&pending);
@ -795,6 +809,8 @@ static bool drm_connector_commit_state(struct wlr_drm_connector *conn,
bool ok = false;
struct wlr_drm_connector_state pending = {0};
drm_connector_state_init(&pending, conn, base);
struct wlr_drm_device_state pending_dev = {0};
drm_device_state_init_single(&pending_dev, &pending);
if (!pending.active && conn->crtc == NULL) {
// Disabling an already-disabled connector
@ -821,7 +837,7 @@ static bool drm_connector_commit_state(struct wlr_drm_connector *conn,
}
}
if (pending.modeset) {
if (pending_dev.modeset) {
if (pending.active) {
wlr_drm_conn_log(conn, WLR_INFO, "Modesetting with %dx%d @ %.3f Hz",
pending.mode.hdisplay, pending.mode.vdisplay,
@ -835,7 +851,7 @@ static bool drm_connector_commit_state(struct wlr_drm_connector *conn,
// page-flip, either a blocking modeset. When performing a blocking modeset
// we'll wait for all queued page-flips to complete, so we don't need this
// safeguard.
if (pending.nonblock && conn->pending_page_flip != NULL) {
if (pending_dev.nonblock && conn->pending_page_flip != NULL) {
wlr_drm_conn_log(conn, WLR_ERROR, "Failed to page-flip output: "
"a page-flip is already pending");
goto out;
@ -849,7 +865,7 @@ static bool drm_connector_commit_state(struct wlr_drm_connector *conn,
flags |= DRM_MODE_PAGE_FLIP_ASYNC;
}
ok = drm_crtc_commit(conn, &pending, flags, false);
ok = drm_commit(drm, &pending_dev, flags, false);
if (!ok) {
goto out;
}

@ -34,11 +34,12 @@ static bool legacy_fb_props_match(struct wlr_drm_fb *fb1,
return true;
}
static bool legacy_crtc_test(struct wlr_drm_connector *conn,
const struct wlr_drm_connector_state *state) {
static bool legacy_crtc_test(const struct wlr_drm_connector_state *state,
bool modeset) {
struct wlr_drm_connector *conn = state->connector;
struct wlr_drm_crtc *crtc = conn->crtc;
if ((state->base->committed & WLR_OUTPUT_STATE_BUFFER) && !state->modeset) {
if ((state->base->committed & WLR_OUTPUT_STATE_BUFFER) && !modeset) {
struct wlr_drm_fb *pending_fb = state->primary_fb;
struct wlr_drm_fb *prev_fb = crtc->primary->queued_fb;
@ -58,16 +59,9 @@ static bool legacy_crtc_test(struct wlr_drm_connector *conn,
return true;
}
static bool legacy_crtc_commit(struct wlr_drm_connector *conn,
struct wlr_drm_connector_state *state,
struct wlr_drm_page_flip *page_flip, uint32_t flags, bool test_only) {
if (!legacy_crtc_test(conn, state)) {
return false;
}
if (test_only) {
return true;
}
static bool legacy_crtc_commit(const struct wlr_drm_connector_state *state,
struct wlr_drm_page_flip *page_flip, uint32_t flags, bool modeset) {
struct wlr_drm_connector *conn = state->connector;
struct wlr_drm_backend *drm = conn->backend;
struct wlr_output *output = &conn->output;
struct wlr_drm_crtc *crtc = conn->crtc;
@ -83,7 +77,7 @@ static bool legacy_crtc_commit(struct wlr_drm_connector *conn,
fb_id = state->primary_fb->id;
}
if (state->modeset) {
if (modeset) {
uint32_t *conns = NULL;
size_t conns_len = 0;
drmModeModeInfo *mode = NULL;
@ -185,6 +179,32 @@ static bool legacy_crtc_commit(struct wlr_drm_connector *conn,
return true;
}
// Legacy (non-atomic) implementation of a device-wide commit: validate every
// connector first, then apply each one through the legacy KMS API. Returns
// false as soon as any connector fails either phase.
static bool legacy_commit(struct wlr_drm_backend *drm,
		const struct wlr_drm_device_state *state,
		struct wlr_drm_page_flip *page_flip, uint32_t flags,
		bool test_only) {
	// A single invalid connector fails the whole commit.
	for (size_t idx = 0; idx < state->connectors_len; idx++) {
		if (!legacy_crtc_test(&state->connectors[idx], state->modeset)) {
			return false;
		}
	}

	// A test-only commit stops after the validation pass.
	if (test_only) {
		return true;
	}

	for (size_t idx = 0; idx < state->connectors_len; idx++) {
		if (!legacy_crtc_commit(&state->connectors[idx], page_flip, flags,
				state->modeset)) {
			return false;
		}
	}

	return true;
}
static void fill_empty_gamma_table(size_t size,
uint16_t *r, uint16_t *g, uint16_t *b) {
assert(0xFFFF < UINT64_MAX / (size - 1));
@ -241,6 +261,6 @@ static bool legacy_reset(struct wlr_drm_backend *drm) {
}
const struct wlr_drm_interface legacy_iface = {
.crtc_commit = legacy_crtc_commit,
.commit = legacy_commit,
.reset = legacy_reset,
};

@ -1,5 +1,6 @@
#include <fcntl.h>
#include <libliftoff.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <unistd.h>
#include <wlr/util/log.h>
@ -279,41 +280,16 @@ static void update_layer_feedback(struct wlr_drm_backend *drm,
wlr_drm_format_set_finish(&formats);
}
static bool crtc_commit(struct wlr_drm_connector *conn,
struct wlr_drm_connector_state *state,
struct wlr_drm_page_flip *page_flip, uint32_t flags, bool test_only) {
struct wlr_drm_backend *drm = conn->backend;
static bool add_connector(drmModeAtomicReq *req,
const struct wlr_drm_connector_state *state,
bool modeset, struct wl_array *fb_damage_clips_arr) {
struct wlr_drm_connector *conn = state->connector;
struct wlr_drm_crtc *crtc = conn->crtc;
bool modeset = state->modeset;
struct wlr_drm_backend *drm = conn->backend;
bool active = state->active;
bool ok = true;
if (modeset && !register_planes_for_crtc(drm, crtc)) {
return false;
}
if (!drm_atomic_connector_prepare(state, modeset)) {
return false;
}
if (test_only) {
flags |= DRM_MODE_ATOMIC_TEST_ONLY;
}
if (modeset) {
flags |= DRM_MODE_ATOMIC_ALLOW_MODESET;
}
if (!test_only && state->nonblock) {
flags |= DRM_MODE_ATOMIC_NONBLOCK;
}
struct wl_array fb_damage_clips_arr = {0};
drmModeAtomicReq *req = drmModeAtomicAlloc();
if (req == NULL) {
wlr_log(WLR_ERROR, "drmModeAtomicAlloc failed");
return false;
}
bool ok = add_prop(req, conn->id, conn->props.crtc_id,
ok = ok && add_prop(req, conn->id, conn->props.crtc_id,
active ? crtc->id : 0);
if (modeset && active && conn->props.link_status != 0) {
ok = ok && add_prop(req, conn->id, conn->props.link_status,
@ -346,7 +322,7 @@ static bool crtc_commit(struct wlr_drm_connector *conn,
for (size_t i = 0; i < state->base->layers_len; i++) {
const struct wlr_output_layer_state *layer_state = &state->base->layers[i];
ok = ok && set_layer_props(drm, layer_state, i + 1,
&fb_damage_clips_arr);
fb_damage_clips_arr);
}
}
@ -366,52 +342,101 @@ static bool crtc_commit(struct wlr_drm_connector *conn,
}
}
if (!ok) {
goto out;
return ok;
}
static void connector_update_layers_feedback(const struct wlr_drm_connector_state *state) {
struct wlr_drm_backend *drm = state->connector->backend;
if (!(state->base->committed & WLR_OUTPUT_STATE_LAYERS)) {
return;
}
int ret = liftoff_output_apply(crtc->liftoff, req, flags);
if (ret != 0) {
wlr_drm_conn_log(conn, test_only ? WLR_DEBUG : WLR_ERROR,
"liftoff_output_apply failed: %s", strerror(-ret));
ok = false;
goto out;
for (size_t i = 0; i < state->base->layers_len; i++) {
struct wlr_output_layer_state *layer_state = &state->base->layers[i];
struct wlr_drm_layer *layer = get_drm_layer(drm, layer_state->layer);
layer_state->accepted =
!liftoff_layer_needs_composition(layer->liftoff);
if (!layer_state->accepted) {
update_layer_feedback(drm, layer);
}
}
}
if (crtc->cursor &&
liftoff_layer_needs_composition(crtc->cursor->liftoff_layer)) {
wlr_drm_conn_log(conn, WLR_DEBUG, "Failed to scan-out cursor plane");
ok = false;
goto out;
static bool commit(struct wlr_drm_backend *drm,
const struct wlr_drm_device_state *state,
struct wlr_drm_page_flip *page_flip, uint32_t flags, bool test_only) {
bool ok = false;
struct wl_array fb_damage_clips_arr = {0};
drmModeAtomicReq *req = NULL;
if (test_only) {
flags |= DRM_MODE_ATOMIC_TEST_ONLY;
}
if (state->modeset) {
flags |= DRM_MODE_ATOMIC_ALLOW_MODESET;
}
if (!test_only && state->nonblock) {
flags |= DRM_MODE_ATOMIC_NONBLOCK;
}
ret = drmModeAtomicCommit(drm->fd, req, flags, page_flip);
if (ret != 0) {
wlr_drm_conn_log_errno(conn, test_only ? WLR_DEBUG : WLR_ERROR,
"Atomic commit failed");
ok = false;
for (size_t i = 0; i < state->connectors_len; i++) {
struct wlr_drm_connector_state *conn_state = &state->connectors[i];
struct wlr_drm_connector *conn = conn_state->connector;
if (state->modeset && !register_planes_for_crtc(drm, conn->crtc)) {
goto out;
}
if (!drm_atomic_connector_prepare(conn_state, state->modeset)) {
goto out;
}
}
req = drmModeAtomicAlloc();
if (req == NULL) {
wlr_log(WLR_ERROR, "drmModeAtomicAlloc failed");
goto out;
}
if (state->base->committed & WLR_OUTPUT_STATE_LAYERS) {
for (size_t i = 0; i < state->base->layers_len; i++) {
struct wlr_output_layer_state *layer_state = &state->base->layers[i];
struct wlr_drm_layer *layer = get_drm_layer(drm, layer_state->layer);
layer_state->accepted =
!liftoff_layer_needs_composition(layer->liftoff);
if (!test_only && !layer_state->accepted) {
update_layer_feedback(drm, layer);
}
for (size_t i = 0; i < state->connectors_len; i++) {
if (!add_connector(req, &state->connectors[i], state->modeset, &fb_damage_clips_arr)) {
goto out;
}
}
for (size_t i = 0; i < state->connectors_len; i++) {
struct wlr_drm_connector *conn = state->connectors[i].connector;
struct wlr_drm_crtc *crtc = conn->crtc;
int ret = liftoff_output_apply(crtc->liftoff, req, flags);
if (ret != 0) {
wlr_drm_conn_log(conn, test_only ? WLR_DEBUG : WLR_ERROR,
"liftoff_output_apply failed: %s", strerror(-ret));
goto out;
}
if (crtc->cursor &&
liftoff_layer_needs_composition(crtc->cursor->liftoff_layer)) {
wlr_drm_conn_log(conn, WLR_DEBUG, "Failed to scan-out cursor plane");
goto out;
}
}
ok = drmModeAtomicCommit(drm->fd, req, flags, page_flip) == 0;
if (!ok) {
wlr_log_errno(test_only ? WLR_DEBUG : WLR_ERROR,
"Atomic commit failed");
}
out:
drmModeAtomicFree(req);
if (ok && !test_only) {
drm_atomic_connector_apply_commit(state);
} else {
drm_atomic_connector_rollback_commit(state);
for (size_t i = 0; i < state->connectors_len; i++) {
struct wlr_drm_connector_state *conn_state = &state->connectors[i];
if (ok && !test_only) {
drm_atomic_connector_apply_commit(conn_state);
connector_update_layers_feedback(conn_state);
} else {
drm_atomic_connector_rollback_commit(conn_state);
}
}
uint32_t *fb_damage_clips_ptr;
@ -428,6 +453,6 @@ out:
const struct wlr_drm_interface liftoff_iface = {
.init = init,
.finish = finish,
.crtc_commit = crtc_commit,
.commit = commit,
.reset = drm_atomic_reset,
};

@ -122,11 +122,17 @@ struct wlr_drm_mode {
drmModeModeInfo drm_mode;
};
// State for a commit spanning a whole DRM device: device-wide commit options
// plus the set of per-connector states included in the commit.
struct wlr_drm_device_state {
// Commit may reconfigure the pipeline (maps to DRM_MODE_ATOMIC_ALLOW_MODESET)
bool modeset;
// Commit should not block (maps to DRM_MODE_ATOMIC_NONBLOCK)
bool nonblock;
// Array of connectors updated by this commit, and its length
struct wlr_drm_connector_state *connectors;
size_t connectors_len;
};
struct wlr_drm_connector_state {
struct wlr_drm_connector *connector;
const struct wlr_output_state *base;
bool modeset;
bool nonblock;
bool active;
drmModeModeInfo mode;
struct wlr_drm_fb *primary_fb;

@ -10,6 +10,7 @@
struct wlr_drm_backend;
struct wlr_drm_connector;
struct wlr_drm_crtc;
struct wlr_drm_device_state;
struct wlr_drm_connector_state;
struct wlr_drm_fb;
struct wlr_drm_page_flip;
@ -18,9 +19,8 @@ struct wlr_drm_page_flip;
struct wlr_drm_interface {
bool (*init)(struct wlr_drm_backend *drm);
void (*finish)(struct wlr_drm_backend *drm);
// Commit all pending changes on a CRTC.
bool (*crtc_commit)(struct wlr_drm_connector *conn,
struct wlr_drm_connector_state *state,
bool (*commit)(struct wlr_drm_backend *drm,
const struct wlr_drm_device_state *state,
struct wlr_drm_page_flip *page_flip, uint32_t flags, bool test_only);
// Turn off everything
bool (*reset)(struct wlr_drm_backend *drm);

Loading…
Cancel
Save