render: replace wlr_texture_write_pixels with update_from_buffer

This lets the renderer handle the wlr_buffer directly, just like it
does in texture_from_buffer. It also allows the renderer to batch the
rectangle updates and to update more than the damage region if
desirable (e.g. when there are too many rects), so it can be more
efficient.
Simon Ser 3 years ago
parent 8c3c6987db
commit 98cf38601f
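
A caller-side illustration of the change described above (the helper name and includes are assumptions for this sketch, not part of the commit): where a user of the old API looped over the damage rectangles and issued one wlr_texture_write_pixels() call per rect, the new API takes the whole wlr_buffer plus a damage region in a single call and leaves the batching strategy to the renderer.

#include <pixman.h>
#include <wlr/render/wlr_texture.h>
#include <wlr/types/wlr_buffer.h>

// Hypothetical helper: push the contents of "buffer" into the existing
// mutable texture, limited to "damage" (in buffer-local coordinates).
// One call replaces the old per-rectangle wlr_texture_write_pixels() loop;
// the renderer may batch the rectangles or upload a larger region.
static bool upload_buffer_damage(struct wlr_texture *texture,
		struct wlr_buffer *buffer, pixman_region32_t *damage) {
	// Returns false if the renderer rejects the update (immutable texture,
	// mismatched format, buffer type it cannot read, ...).
	return wlr_texture_update_from_buffer(texture, buffer, damage);
}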

@@ -54,10 +54,8 @@ void wlr_renderer_init(struct wlr_renderer *renderer,
const struct wlr_renderer_impl *impl);
struct wlr_texture_impl {
bool (*write_pixels)(struct wlr_texture *texture,
uint32_t stride, uint32_t width, uint32_t height,
uint32_t src_x, uint32_t src_y, uint32_t dst_x, uint32_t dst_y,
const void *data);
bool (*update_from_buffer)(struct wlr_texture *texture,
struct wlr_buffer *buffer, pixman_region32_t *damage);
void (*destroy)(struct wlr_texture *texture);
};

@@ -9,6 +9,7 @@
#ifndef WLR_RENDER_WLR_TEXTURE_H
#define WLR_RENDER_WLR_TEXTURE_H
#include <pixman.h>
#include <stdint.h>
#include <wayland-server-core.h>
#include <wlr/render/dmabuf.h>
@@ -37,13 +38,17 @@ struct wlr_texture *wlr_texture_from_dmabuf(struct wlr_renderer *renderer,
struct wlr_dmabuf_attributes *attribs);
/**
* Update a texture with raw pixels. The texture must be mutable, and the input
* data must have the same pixel format that the texture was created with.
* Update a texture with a struct wlr_buffer's contents.
*
* The update might be rejected (in case the texture is immutable, the buffer
* has an unsupported type/format, etc), so callers must be prepared to fall
* back to re-creating the texture from scratch via wlr_texture_from_buffer().
*
* The damage can be used by the renderer as an optimization: only the supplied
* region needs to be updated.
*/
bool wlr_texture_write_pixels(struct wlr_texture *texture,
uint32_t stride, uint32_t width, uint32_t height,
uint32_t src_x, uint32_t src_y, uint32_t dst_x, uint32_t dst_y,
const void *data);
bool wlr_texture_update_from_buffer(struct wlr_texture *texture,
struct wlr_buffer *buffer, pixman_region32_t *damage);
/**
* Destroys the texture.
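
The "must be prepared to fall back" requirement in the comment above is easiest to see from the caller's side. A minimal sketch, assuming the usual wlroots headers and an illustrative helper name:

#include <pixman.h>
#include <wlr/render/wlr_renderer.h>
#include <wlr/render/wlr_texture.h>
#include <wlr/types/wlr_buffer.h>

// Illustrative fallback path: try the in-place update first; if the renderer
// rejects it, re-create the texture from scratch from the same buffer.
static struct wlr_texture *texture_apply_buffer(struct wlr_renderer *renderer,
		struct wlr_texture *texture, struct wlr_buffer *buffer,
		pixman_region32_t *damage) {
	if (texture != NULL &&
			wlr_texture_update_from_buffer(texture, buffer, damage)) {
		return texture; // fast path: only the damaged region was uploaded
	}
	// Slow path: immutable texture, size/format mismatch, or a buffer type
	// the renderer cannot read back.
	if (texture != NULL) {
		wlr_texture_destroy(texture);
	}
	return wlr_texture_from_buffer(renderer, buffer);
}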

@@ -151,9 +151,6 @@ struct wlr_client_buffer {
// private state
struct wl_listener source_destroy;
// If the client buffer has been created from a wl_shm buffer
uint32_t shm_source_format;
};
/**

@@ -44,14 +44,24 @@ static bool check_stride(const struct wlr_pixel_format_info *fmt,
return true;
}
static bool gles2_texture_write_pixels(struct wlr_texture *wlr_texture,
uint32_t stride, uint32_t width, uint32_t height,
uint32_t src_x, uint32_t src_y, uint32_t dst_x, uint32_t dst_y,
const void *data) {
static bool gles2_texture_update_from_buffer(struct wlr_texture *wlr_texture,
struct wlr_buffer *buffer, pixman_region32_t *damage) {
struct wlr_gles2_texture *texture = gles2_get_texture(wlr_texture);
if (texture->target != GL_TEXTURE_2D || texture->image != EGL_NO_IMAGE_KHR) {
wlr_log(WLR_ERROR, "Cannot write pixels to immutable texture");
return false;
}
void *data;
uint32_t format;
size_t stride;
if (!wlr_buffer_begin_data_ptr_access(buffer,
WLR_BUFFER_DATA_PTR_ACCESS_READ, &data, &format, &stride)) {
return false;
}
if (format != texture->drm_format) {
wlr_buffer_end_data_ptr_access(buffer);
return false;
}
@@ -63,7 +73,8 @@ static bool gles2_texture_write_pixels(struct wlr_texture *wlr_texture,
drm_get_pixel_format_info(texture->drm_format);
assert(drm_fmt);
if (!check_stride(drm_fmt, stride, width)) {
if (!check_stride(drm_fmt, stride, buffer->width)) {
wlr_buffer_end_data_ptr_access(buffer);
return false;
}
@@ -75,12 +86,21 @@ static bool gles2_texture_write_pixels(struct wlr_texture *wlr_texture,
glBindTexture(GL_TEXTURE_2D, texture->tex);
glPixelStorei(GL_UNPACK_ROW_LENGTH_EXT, stride / (drm_fmt->bpp / 8));
glPixelStorei(GL_UNPACK_SKIP_PIXELS_EXT, src_x);
glPixelStorei(GL_UNPACK_SKIP_ROWS_EXT, src_y);
int rects_len = 0;
pixman_box32_t *rects = pixman_region32_rectangles(damage, &rects_len);
glTexSubImage2D(GL_TEXTURE_2D, 0, dst_x, dst_y, width, height,
fmt->gl_format, fmt->gl_type, data);
for (int i = 0; i < rects_len; i++) {
pixman_box32_t rect = rects[i];
glPixelStorei(GL_UNPACK_ROW_LENGTH_EXT, stride / (drm_fmt->bpp / 8));
glPixelStorei(GL_UNPACK_SKIP_PIXELS_EXT, rect.x1);
glPixelStorei(GL_UNPACK_SKIP_ROWS_EXT, rect.y1);
int width = rect.x2 - rect.x1;
int height = rect.y2 - rect.y1;
glTexSubImage2D(GL_TEXTURE_2D, 0, rect.x1, rect.y1, width, height,
fmt->gl_format, fmt->gl_type, data);
}
glPixelStorei(GL_UNPACK_ROW_LENGTH_EXT, 0);
glPixelStorei(GL_UNPACK_SKIP_PIXELS_EXT, 0);
@@ -92,6 +112,8 @@ static bool gles2_texture_write_pixels(struct wlr_texture *wlr_texture,
wlr_egl_restore_context(&prev_ctx);
wlr_buffer_end_data_ptr_access(buffer);
return true;
}
@@ -156,7 +178,7 @@ static void gles2_texture_unref(struct wlr_texture *wlr_texture) {
}
static const struct wlr_texture_impl texture_impl = {
.write_pixels = gles2_texture_write_pixels,
.update_from_buffer = gles2_texture_update_from_buffer,
.destroy = gles2_texture_unref,
};
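
The GLES2 implementation above issues one glTexSubImage2D() per damage rectangle. As the commit message notes, a renderer is also free to upload more than the damage region when that is cheaper, e.g. by collapsing a very fragmented region to its bounding box. A possible sketch (the threshold and helper are illustrative, not part of the commit):

#include <pixman.h>

#define MAX_DAMAGE_RECTS 64 // arbitrary threshold for this sketch

// Illustrative: replace an overly fragmented damage region with its extents
// so that a single upload is issued instead of many small ones.
static void simplify_damage(pixman_region32_t *damage) {
	int rects_len = 0;
	pixman_region32_rectangles(damage, &rects_len);
	if (rects_len > MAX_DAMAGE_RECTS) {
		pixman_box32_t extents = *pixman_region32_extents(damage);
		pixman_region32_fini(damage);
		pixman_region32_init_rect(damage, extents.x1, extents.y1,
			extents.x2 - extents.x1, extents.y2 - extents.y1);
	}
}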

@@ -136,12 +136,45 @@ static bool write_pixels(struct wlr_texture *wlr_texture,
return true;
}
static bool vulkan_texture_write_pixels(struct wlr_texture *wlr_texture,
uint32_t stride, uint32_t width, uint32_t height, uint32_t src_x,
uint32_t src_y, uint32_t dst_x, uint32_t dst_y, const void *vdata) {
return write_pixels(wlr_texture, stride, width, height, src_x, src_y,
dst_x, dst_y, vdata, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_ACCESS_SHADER_READ_BIT);
static bool vulkan_texture_update_from_buffer(struct wlr_texture *wlr_texture,
struct wlr_buffer *buffer, pixman_region32_t *damage) {
struct wlr_vk_texture *texture = vulkan_get_texture(wlr_texture);
void *data;
uint32_t format;
size_t stride;
if (!wlr_buffer_begin_data_ptr_access(buffer,
WLR_BUFFER_DATA_PTR_ACCESS_READ, &data, &format, &stride)) {
return false;
}
bool ok = true;
if (format != texture->format->drm_format) {
ok = false;
goto out;
}
int rects_len = 0;
pixman_box32_t *rects = pixman_region32_rectangles(damage, &rects_len);
for (int i = 0; i < rects_len; i++) {
pixman_box32_t rect = rects[i];
uint32_t width = rect.x2 - rect.x1;
uint32_t height = rect.y2 - rect.y1;
// TODO: only map memory once
ok = write_pixels(wlr_texture, stride, width, height, rect.x1, rect.y1,
rect.x1, rect.y1, data, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_ACCESS_SHADER_READ_BIT);
if (!ok) {
goto out;
}
}
out:
wlr_buffer_end_data_ptr_access(buffer);
return ok;
}
void vulkan_texture_destroy(struct wlr_vk_texture *texture) {
@@ -191,7 +224,7 @@ static void vulkan_texture_unref(struct wlr_texture *wlr_texture) {
}
static const struct wlr_texture_impl texture_impl = {
.write_pixels = vulkan_texture_write_pixels,
.update_from_buffer = vulkan_texture_update_from_buffer,
.destroy = vulkan_texture_unref,
};
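
The Vulkan path above keeps one write_pixels() call per rectangle and leaves a TODO about mapping the staging memory only once. One way to reduce the repeated work, at the cost of uploading more than strictly necessary, would be a single call covering the damage extents; a purely illustrative fragment against the static write_pixels() helper shown in the hunk above (not what the commit does):

// Illustrative replacement for the per-rect loop: upload the bounding box of
// the damage in one write_pixels() call.
pixman_box32_t *ext = pixman_region32_extents(damage);
ok = write_pixels(wlr_texture, stride,
	ext->x2 - ext->x1, ext->y2 - ext->y1, // width and height of the extents
	ext->x1, ext->y1, ext->x1, ext->y1,   // same source and destination offsets
	data, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
	VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_ACCESS_SHADER_READ_BIT);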

@@ -71,13 +71,19 @@ struct wlr_texture *wlr_texture_from_buffer(struct wlr_renderer *renderer,
return renderer->impl->texture_from_buffer(renderer, buffer);
}
bool wlr_texture_write_pixels(struct wlr_texture *texture,
uint32_t stride, uint32_t width, uint32_t height,
uint32_t src_x, uint32_t src_y, uint32_t dst_x, uint32_t dst_y,
const void *data) {
if (!texture->impl->write_pixels) {
bool wlr_texture_update_from_buffer(struct wlr_texture *texture,
struct wlr_buffer *buffer, pixman_region32_t *damage) {
if (!texture->impl->update_from_buffer) {
return false;
}
if (texture->width != (uint32_t)buffer->width ||
texture->height != (uint32_t)buffer->height) {
return false;
}
const pixman_box32_t *extents = pixman_region32_extents(damage);
if (extents->x1 < 0 || extents->y1 < 0 || extents->x2 > buffer->width ||
extents->y2 > buffer->height) {
return false;
}
return texture->impl->write_pixels(texture, stride, width, height,
src_x, src_y, dst_x, dst_y, data);
return texture->impl->update_from_buffer(texture, buffer, damage);
}
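
Because the common code above rejects damage whose extents fall outside the buffer, callers that track damage in another coordinate space may want to clip the region first. A hedged sketch using plain pixman (the helper name is an assumption):

#include <pixman.h>
#include <wlr/render/wlr_texture.h>
#include <wlr/types/wlr_buffer.h>

// Illustrative: intersect the damage with the buffer bounds before handing
// it to wlr_texture_update_from_buffer(), so the extents check cannot fail.
static bool update_with_clipped_damage(struct wlr_texture *texture,
		struct wlr_buffer *buffer, pixman_region32_t *damage) {
	pixman_region32_t clipped;
	pixman_region32_init(&clipped);
	pixman_region32_intersect_rect(&clipped, damage, 0, 0,
		buffer->width, buffer->height);
	bool ok = wlr_texture_update_from_buffer(texture, buffer, &clipped);
	pixman_region32_fini(&clipped);
	return ok;
}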

@@ -285,14 +285,6 @@ struct wlr_client_buffer *wlr_client_buffer_create(struct wlr_buffer *buffer,
wl_signal_add(&buffer->events.destroy, &client_buffer->source_destroy);
client_buffer->source_destroy.notify = client_buffer_handle_source_destroy;
if (buffer_is_shm_client_buffer(buffer)) {
struct wlr_shm_client_buffer *shm_client_buffer =
shm_client_buffer_from_buffer(buffer);
client_buffer->shm_source_format = shm_client_buffer->format;
} else {
client_buffer->shm_source_format = DRM_FORMAT_INVALID;
}
// Ensure the buffer will be released before being destroyed
wlr_buffer_lock(&client_buffer->base);
wlr_buffer_drop(&client_buffer->base);
@@ -307,46 +299,7 @@ bool wlr_client_buffer_apply_damage(struct wlr_client_buffer *client_buffer,
return false;
}
if ((uint32_t)next->width != client_buffer->texture->width ||
(uint32_t)next->height != client_buffer->texture->height) {
return false;
}
if (client_buffer->shm_source_format == DRM_FORMAT_INVALID) {
// Uploading only damaged regions only works for wl_shm buffers and
// mutable textures (created from wl_shm buffer)
return false;
}
void *data;
uint32_t format;
size_t stride;
if (!wlr_buffer_begin_data_ptr_access(next, WLR_BUFFER_DATA_PTR_ACCESS_READ,
&data, &format, &stride)) {
return false;
}
if (format != client_buffer->shm_source_format) {
// Uploading to textures can't change the format
wlr_buffer_end_data_ptr_access(next);
return false;
}
int n;
pixman_box32_t *rects = pixman_region32_rectangles(damage, &n);
for (int i = 0; i < n; ++i) {
pixman_box32_t *r = &rects[i];
if (!wlr_texture_write_pixels(client_buffer->texture, stride,
r->x2 - r->x1, r->y2 - r->y1, r->x1, r->y1,
r->x1, r->y1, data)) {
wlr_buffer_end_data_ptr_access(next);
return false;
}
}
wlr_buffer_end_data_ptr_access(next);
return true;
return wlr_texture_update_from_buffer(client_buffer->texture, next, damage);
}
static const struct wlr_buffer_impl shm_client_buffer_impl;
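
With the per-rectangle loop gone, wlr_client_buffer_apply_damage() becomes a thin wrapper around the texture update, and callers keep the same try-then-fall-back shape. An illustrative sketch of a surface commit path (the surface struct, helper name, and variable names are assumptions, not wlroots code):

#include <pixman.h>
#include <wlr/render/wlr_renderer.h>
#include <wlr/types/wlr_buffer.h>

// Illustrative stand-in for whatever structure tracks the current buffer.
struct example_surface {
	struct wlr_client_buffer *buffer;
};

static void surface_attach_buffer(struct example_surface *surface,
		struct wlr_renderer *renderer, struct wlr_buffer *next,
		pixman_region32_t *damage) {
	if (surface->buffer != NULL &&
			wlr_client_buffer_apply_damage(surface->buffer, next, damage)) {
		return; // in-place texture update succeeded
	}
	// Rejected (size changed, no readable data pointer, ...): import again.
	struct wlr_client_buffer *client_buffer =
		wlr_client_buffer_create(next, renderer);
	if (client_buffer == NULL) {
		return;
	}
	if (surface->buffer != NULL) {
		wlr_buffer_unlock(&surface->buffer->base);
	}
	surface->buffer = client_buffer;
}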
