diff --git a/configure.ac b/configure.ac
@@ -2361,6 +2361,7 @@ AC_CONFIG_FILES([Makefile
src/gallium/drivers/radeon/Makefile
src/gallium/drivers/radeonsi/Makefile
src/gallium/drivers/rbug/Makefile
+ src/gallium/drivers/renderonly/Makefile
src/gallium/drivers/softpipe/Makefile
src/gallium/drivers/svga/Makefile
src/gallium/drivers/trace/Makefile
diff --git a/src/gallium/drivers/renderonly/Makefile.am b/src/gallium/drivers/renderonly/Makefile.am
new file mode 100644
--- /dev/null
+++ b/src/gallium/drivers/renderonly/Makefile.am
@@ -0,0 +1,11 @@
+include Makefile.sources
+include $(top_srcdir)/src/gallium/Automake.inc
+
+AM_CPPFLAGS = \
+ $(LIBDRM_CFLAGS) \
+ $(GALLIUM_CFLAGS)
+
+noinst_LTLIBRARIES = librenderonly.la
+
+librenderonly_la_SOURCES = \
+ $(C_SOURCES)
diff --git a/src/gallium/drivers/renderonly/Makefile.sources b/src/gallium/drivers/renderonly/Makefile.sources
new file mode 100644
--- /dev/null
+++ b/src/gallium/drivers/renderonly/Makefile.sources
@@ -0,0 +1,4 @@
+C_SOURCES := \
+ renderonly_context.c \
+ renderonly_resource.c \
+ renderonly_screen.c
diff --git a/src/gallium/drivers/renderonly/renderonly_context.c b/src/gallium/drivers/renderonly/renderonly_context.c
new file mode 100644
--- /dev/null
+++ b/src/gallium/drivers/renderonly/renderonly_context.c
@@ -0,0 +1,721 @@
+/*
+ * Copyright © 2014 NVIDIA Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "util/u_debug.h"
+#include "util/u_inlines.h"
+
+#include "renderonly_context.h"
+#include "renderonly_resource.h"
+#include "renderonly_screen.h"
+
+static void
+renderonly_destroy(struct pipe_context *pcontext)
+{
+ struct renderonly_context *context = to_renderonly_context(pcontext);
+
+ context->gpu->destroy(context->gpu);
+ free(context);
+}
+
+static void
+renderonly_draw_vbo(struct pipe_context *pcontext,
+ const struct pipe_draw_info *pinfo)
+{
+ struct renderonly_context *context = to_renderonly_context(pcontext);
+ struct pipe_draw_info info;
+
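+ /* Resources reaching us are renderonly wrappers; unwrap the indirect
+ * draw buffer so the GPU driver sees its own resource. */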
+ if (pinfo && pinfo->indirect) {
+ memcpy(&info, pinfo, sizeof(info));
+ info.indirect = renderonly_resource_unwrap(info.indirect);
+ pinfo = &info;
+ }
+
+ context->gpu->draw_vbo(context->gpu, pinfo);
+}
+
+static void *
+renderonly_create_blend_state(struct pipe_context *pcontext,
+ const struct pipe_blend_state *cso)
+{
+ struct renderonly_context *context = to_renderonly_context(pcontext);
+
+ return context->gpu->create_blend_state(context->gpu, cso);
+}
+
+static void
+renderonly_bind_blend_state(struct pipe_context *pcontext,
+ void *so)
+{
+ struct renderonly_context *context = to_renderonly_context(pcontext);
+
+ context->gpu->bind_blend_state(context->gpu, so);
+}
+
+static void
+renderonly_delete_blend_state(struct pipe_context *pcontext,
+ void *so)
+{
+ struct renderonly_context *context = to_renderonly_context(pcontext);
+
+ context->gpu->delete_blend_state(context->gpu, so);
+}
+
+static void *
+renderonly_create_sampler_state(struct pipe_context *pcontext,
+ const struct pipe_sampler_state *cso)
+{
+ struct renderonly_context *context = to_renderonly_context(pcontext);
+
+ return context->gpu->create_sampler_state(context->gpu, cso);
+}
+
+static void
+renderonly_bind_sampler_states(struct pipe_context *pcontext,
+ unsigned shader,
+ unsigned start_slot,
+ unsigned num_samplers,
+ void **samplers)
+{
+ struct renderonly_context *context = to_renderonly_context(pcontext);
+
+ context->gpu->bind_sampler_states(context->gpu, shader, start_slot,
+ num_samplers, samplers);
+}
+
+static void
+renderonly_delete_sampler_state(struct pipe_context *pcontext,
+ void *so)
+{
+ struct renderonly_context *context = to_renderonly_context(pcontext);
+
+ context->gpu->delete_sampler_state(context->gpu, so);
+}
+
+static void *
+renderonly_create_rasterizer_state(struct pipe_context *pcontext,
+ const struct pipe_rasterizer_state *cso)
+{
+ struct renderonly_context *context = to_renderonly_context(pcontext);
+
+ return context->gpu->create_rasterizer_state(context->gpu, cso);
+}
+
+static void
+renderonly_bind_rasterizer_state(struct pipe_context *pcontext,
+ void *so)
+{
+ struct renderonly_context *context = to_renderonly_context(pcontext);
+
+ context->gpu->bind_rasterizer_state(context->gpu, so);
+}
+
+static void
+renderonly_delete_rasterizer_state(struct pipe_context *pcontext,
+ void *so)
+{
+ struct renderonly_context *context = to_renderonly_context(pcontext);
+
+ context->gpu->delete_rasterizer_state(context->gpu, so);
+}
+
+static void *
+renderonly_create_depth_stencil_alpha_state(struct pipe_context *pcontext,
+ const struct pipe_depth_stencil_alpha_state *cso)
+{
+ struct renderonly_context *context = to_renderonly_context(pcontext);
+
+ return context->gpu->create_depth_stencil_alpha_state(context->gpu,
+ cso);
+}
+
+static void
+renderonly_bind_depth_stencil_alpha_state(struct pipe_context *pcontext,
+ void *so)
+{
+ struct renderonly_context *context = to_renderonly_context(pcontext);
+
+ context->gpu->bind_depth_stencil_alpha_state(context->gpu, so);
+}
+
+static void
+renderonly_delete_depth_stencil_alpha_state(struct pipe_context *pcontext,
+ void *so)
+{
+ struct renderonly_context *context = to_renderonly_context(pcontext);
+
+ context->gpu->delete_depth_stencil_alpha_state(context->gpu, so);
+}
+
+static void *
+renderonly_create_fs_state(struct pipe_context *pcontext,
+ const struct pipe_shader_state *cso)
+{
+ struct renderonly_context *context = to_renderonly_context(pcontext);
+
+ return context->gpu->create_fs_state(context->gpu, cso);
+}
+
+static void
+renderonly_bind_fs_state(struct pipe_context *pcontext,
+ void *so)
+{
+ struct renderonly_context *context = to_renderonly_context(pcontext);
+
+ context->gpu->bind_fs_state(context->gpu, so);
+}
+
+static void
+renderonly_delete_fs_state(struct pipe_context *pcontext,
+ void *so)
+{
+ struct renderonly_context *context = to_renderonly_context(pcontext);
+
+ context->gpu->delete_fs_state(context->gpu, so);
+}
+
+static void *
+renderonly_create_vs_state(struct pipe_context *pcontext,
+ const struct pipe_shader_state *cso)
+{
+ struct renderonly_context *context = to_renderonly_context(pcontext);
+
+ return context->gpu->create_vs_state(context->gpu, cso);
+}
+
+static void
+renderonly_bind_vs_state(struct pipe_context *pcontext,
+ void *so)
+{
+ struct renderonly_context *context = to_renderonly_context(pcontext);
+
+ context->gpu->bind_vs_state(context->gpu, so);
+}
+
+static void
+renderonly_delete_vs_state(struct pipe_context *pcontext,
+ void *so)
+{
+ struct renderonly_context *context = to_renderonly_context(pcontext);
+
+ context->gpu->delete_vs_state(context->gpu, so);
+}
+
+static void *
+renderonly_create_gs_state(struct pipe_context *pcontext,
+ const struct pipe_shader_state *cso)
+{
+ struct renderonly_context *context = to_renderonly_context(pcontext);
+
+ return context->gpu->create_gs_state(context->gpu, cso);
+}
+
+static void
+renderonly_bind_gs_state(struct pipe_context *pcontext,
+ void *so)
+{
+ struct renderonly_context *context = to_renderonly_context(pcontext);
+
+ context->gpu->bind_gs_state(context->gpu, so);
+}
+
+static void
+renderonly_delete_gs_state(struct pipe_context *pcontext,
+ void *so)
+{
+ struct renderonly_context *context = to_renderonly_context(pcontext);
+
+ context->gpu->delete_gs_state(context->gpu, so);
+}
+
+static void *
+renderonly_create_vertex_elements_state(struct pipe_context *pcontext,
+ unsigned num_elements,
+ const struct pipe_vertex_element *elements)
+{
+ struct renderonly_context *context = to_renderonly_context(pcontext);
+
+ return context->gpu->create_vertex_elements_state(context->gpu,
+ num_elements,
+ elements);
+}
+
+static void
+renderonly_bind_vertex_elements_state(struct pipe_context *pcontext,
+ void *so)
+{
+ struct renderonly_context *context = to_renderonly_context(pcontext);
+
+ context->gpu->bind_vertex_elements_state(context->gpu, so);
+}
+
+static void
+renderonly_delete_vertex_elements_state(struct pipe_context *pcontext,
+ void *so)
+{
+ struct renderonly_context *context = to_renderonly_context(pcontext);
+
+ context->gpu->delete_vertex_elements_state(context->gpu, so);
+}
+
+static void
+renderonly_set_constant_buffer(struct pipe_context *pcontext,
+ uint shader,
+ uint index,
+ struct pipe_constant_buffer *buf)
+{
+ struct renderonly_context *context = to_renderonly_context(pcontext);
+ struct pipe_constant_buffer buffer;
+
+ if (buf && buf->buffer) {
+ memcpy(&buffer, buf, sizeof(buffer));
+ buffer.buffer = renderonly_resource_unwrap(buffer.buffer);
+ buf = &buffer;
+ }
+
+ context->gpu->set_constant_buffer(context->gpu, shader, index, buf);
+}
+
+static void
+renderonly_set_framebuffer_state(struct pipe_context *pcontext,
+ const struct pipe_framebuffer_state *fb)
+{
+ struct renderonly_context *context = to_renderonly_context(pcontext);
+ struct pipe_framebuffer_state state;
+ unsigned i;
+
+ if (fb) {
+ memcpy(&state, fb, sizeof(state));
+
+ for (i = 0; i < fb->nr_cbufs; i++)
+ state.cbufs[i] = renderonly_surface_unwrap(fb->cbufs[i]);
+
+ while (i < PIPE_MAX_COLOR_BUFS)
+ state.cbufs[i++] = NULL;
+
+ state.zsbuf = renderonly_surface_unwrap(fb->zsbuf);
+
+ fb = &state;
+ }
+
+ context->gpu->set_framebuffer_state(context->gpu, fb);
+}
+
+static void
+renderonly_set_polygon_stipple(struct pipe_context *pcontext,
+ const struct pipe_poly_stipple *stipple)
+{
+ struct renderonly_context *context = to_renderonly_context(pcontext);
+
+ context->gpu->set_polygon_stipple(context->gpu, stipple);
+}
+
+static void
+renderonly_set_scissor_states(struct pipe_context *pcontext,
+ unsigned start_slot,
+ unsigned num_scissors,
+ const struct pipe_scissor_state *scissors)
+{
+ struct renderonly_context *context = to_renderonly_context(pcontext);
+
+ context->gpu->set_scissor_states(context->gpu, start_slot,
+ num_scissors, scissors);
+}
+
+static void
+renderonly_set_viewport_states(struct pipe_context *pcontext,
+ unsigned start_slot,
+ unsigned num_viewports,
+ const struct pipe_viewport_state *viewports)
+{
+ struct renderonly_context *context = to_renderonly_context(pcontext);
+
+ context->gpu->set_viewport_states(context->gpu, start_slot,
+ num_viewports, viewports);
+}
+
+static void
+renderonly_set_sampler_views(struct pipe_context *pcontext,
+ unsigned shader,
+ unsigned start_slot,
+ unsigned num_views,
+ struct pipe_sampler_view **pviews)
+{
+ struct pipe_sampler_view *views[PIPE_MAX_SHADER_SAMPLER_VIEWS];
+ struct renderonly_context *context = to_renderonly_context(pcontext);
+ unsigned i;
+
+ for (i = 0; i < num_views; i++)
+ views[i] = renderonly_sampler_view_unwrap(pviews[i]);
+
+ context->gpu->set_sampler_views(context->gpu, shader, start_slot,
+ num_views, views);
+}
+
+static void
+renderonly_set_shader_images(struct pipe_context *pcontext,
+ unsigned shader,
+ unsigned start_slot, unsigned count,
+ struct pipe_image_view **images)
+{
+ struct renderonly_context *context = to_renderonly_context(pcontext);
+
+ context->gpu->set_shader_images(context->gpu, shader, start_slot,
+ count, images);
+}
+
+static void
+renderonly_set_vertex_buffers(struct pipe_context *pcontext,
+ unsigned start_slot,
+ unsigned num_buffers,
+ const struct pipe_vertex_buffer *buffers)
+{
+ struct renderonly_context *context = to_renderonly_context(pcontext);
+ struct pipe_vertex_buffer buf[PIPE_MAX_SHADER_INPUTS];
+ unsigned i;
+
+ if (num_buffers && buffers) {
+ memcpy(buf, buffers, num_buffers * sizeof(struct pipe_vertex_buffer));
+
+ for (i = 0; i < num_buffers; i++)
+ buf[i].buffer = renderonly_resource_unwrap(buf[i].buffer);
+
+ buffers = buf;
+ }
+
+ context->gpu->set_vertex_buffers(context->gpu, start_slot,
+ num_buffers, buffers);
+}
+
+static void
+renderonly_set_index_buffer(struct pipe_context *pcontext,
+ const struct pipe_index_buffer *buffer)
+{
+ struct renderonly_context *context = to_renderonly_context(pcontext);
+ struct pipe_index_buffer buf;
+
+ if (buffer) {
+ memcpy(&buf, buffer, sizeof(buf));
+ buf.buffer = renderonly_resource_unwrap(buf.buffer);
+ buffer = &buf;
+ }
+
+ context->gpu->set_index_buffer(context->gpu, buffer);
+}
+
+static struct pipe_stream_output_target *
+renderonly_create_stream_output_target(struct pipe_context *pcontext,
+ struct pipe_resource *presource,
+ unsigned buffer_offset,
+ unsigned buffer_size)
+{
+ struct renderonly_resource *resource = to_renderonly_resource(presource);
+ struct renderonly_context *context = to_renderonly_context(pcontext);
+
+ return context->gpu->create_stream_output_target(context->gpu,
+ resource->gpu,
+ buffer_offset,
+ buffer_size);
+}
+
+static void
+renderonly_stream_output_target_destroy(struct pipe_context *pcontext,
+ struct pipe_stream_output_target *target)
+{
+ struct renderonly_context *context = to_renderonly_context(pcontext);
+
+ context->gpu->stream_output_target_destroy(context->gpu, target);
+}
+
+static void
+renderonly_set_stream_output_targets(struct pipe_context *pcontext,
+ unsigned num_targets,
+ struct pipe_stream_output_target **targets,
+ const unsigned *offsets)
+{
+ struct renderonly_context *context = to_renderonly_context(pcontext);
+
+ context->gpu->set_stream_output_targets(context->gpu, num_targets,
+ targets, offsets);
+}
+
+static void
+renderonly_blit(struct pipe_context *pcontext,
+ const struct pipe_blit_info *pinfo)
+{
+ struct renderonly_context *context = to_renderonly_context(pcontext);
+ struct pipe_blit_info info;
+
+ if (pinfo) {
+ memcpy(&info, pinfo, sizeof(info));
+ info.dst.resource = renderonly_resource_unwrap(info.dst.resource);
+ info.src.resource = renderonly_resource_unwrap(info.src.resource);
+ pinfo = &info;
+ }
+
+ context->gpu->blit(context->gpu, pinfo);
+}
+
+static void
+renderonly_clear(struct pipe_context *pcontext,
+ unsigned buffers,
+ const union pipe_color_union *color,
+ double depth,
+ unsigned stencil)
+{
+ struct renderonly_context *context = to_renderonly_context(pcontext);
+
+ context->gpu->clear(context->gpu, buffers, color, depth, stencil);
+}
+
+static void
+renderonly_flush(struct pipe_context *pcontext,
+ struct pipe_fence_handle **fence,
+ unsigned flags)
+{
+ struct renderonly_context *context = to_renderonly_context(pcontext);
+
+ context->gpu->flush(context->gpu, fence, flags);
+}
+
+static struct pipe_sampler_view *
+renderonly_create_sampler_view(struct pipe_context *pcontext,
+ struct pipe_resource *ptexture,
+ const struct pipe_sampler_view *template)
+{
+ struct renderonly_resource *texture = to_renderonly_resource(ptexture);
+ struct renderonly_context *context = to_renderonly_context(pcontext);
+ struct renderonly_sampler_view *view;
+
+ view = calloc(1, sizeof(*view));
+ if (!view)
+ return NULL;
+
+ view->gpu = context->gpu->create_sampler_view(context->gpu,
+ texture->gpu,
+ template);
+ if (!view->gpu) {
+ free(view);
+ return NULL;
+ }
+
+ memcpy(&view->base, view->gpu, sizeof(*view->gpu));
+ /* overwrite to prevent reference from being released */
+ view->base.texture = NULL;
+
+ pipe_reference_init(&view->base.reference, 1);
+ pipe_resource_reference(&view->base.texture, ptexture);
+ view->base.context = pcontext;
+
+ return &view->base;
+}
+
+static void
+renderonly_sampler_view_destroy(struct pipe_context *pcontext,
+ struct pipe_sampler_view *pview)
+{
+ struct renderonly_sampler_view *view = to_renderonly_sampler_view(pview);
+
+ pipe_resource_reference(&view->base.texture, NULL);
+ pipe_sampler_view_reference(&view->gpu, NULL);
+ free(view);
+}
+
+static void
+renderonly_flush_resource(struct pipe_context *pcontext,
+ struct pipe_resource *presource)
+{
+ struct renderonly_resource *resource = to_renderonly_resource(presource);
+ struct renderonly_context *context = to_renderonly_context(pcontext);
+ struct renderonly_screen *screen = to_renderonly_screen(presource->screen);
+ struct pipe_blit_info blit;
+
+ if (context->gpu->flush_resource)
+ context->gpu->flush_resource(context->gpu, resource->gpu);
+
+ if (!resource->scanout || !screen->ops->intermediate_rendering)
+ return;
+
+ /* blit the GPU render result into the imported dumb scanout buffer */
+ memset(&blit, 0, sizeof(blit));
+ blit.mask = PIPE_MASK_RGBA;
+ blit.filter = PIPE_TEX_FILTER_LINEAR;
+ blit.src.resource = resource->gpu;
+ blit.src.format = resource->gpu->format;
+ blit.src.level = 0;
+ blit.src.box.width = resource->gpu->width0;
+ blit.src.box.height = resource->gpu->height0;
+ blit.dst.resource = resource->prime;
+ blit.dst.format = resource->prime->format;
+ blit.dst.level = 0;
+ blit.dst.box.width = resource->prime->width0;
+ blit.dst.box.height = resource->prime->height0;
+
+ context->gpu->blit(context->gpu, &blit);
+}
+
+static void *
+renderonly_transfer_map(struct pipe_context *pcontext,
+ struct pipe_resource *presource,
+ unsigned level,
+ unsigned usage,
+ const struct pipe_box *box,
+ struct pipe_transfer **ptransfer)
+{
+ struct renderonly_resource *resource = to_renderonly_resource(presource);
+ struct renderonly_context *context = to_renderonly_context(pcontext);
+ struct renderonly_transfer *transfer;
+
+ transfer = calloc(1, sizeof(*transfer));
+ if (!transfer)
+ return NULL;
+
+ transfer->map = context->gpu->transfer_map(context->gpu,
+ resource->gpu,
+ level,
+ usage,
+ box,
+ &transfer->gpu);
+ if (!transfer->map) {
+ free(transfer);
+ return NULL;
+ }
+
+ memcpy(&transfer->base, transfer->gpu, sizeof(*transfer->gpu));
+ transfer->base.resource = NULL;
+ pipe_resource_reference(&transfer->base.resource, presource);
+
+ *ptransfer = &transfer->base;
+
+ return transfer->map;
+}
+
+static void
+renderonly_transfer_unmap(struct pipe_context *pcontext,
+ struct pipe_transfer *ptransfer)
+{
+ struct renderonly_transfer *transfer = to_renderonly_transfer(ptransfer);
+ struct renderonly_context *context = to_renderonly_context(pcontext);
+
+ context->gpu->transfer_unmap(context->gpu, transfer->gpu);
+ pipe_resource_reference(&transfer->base.resource, NULL);
+ free(transfer);
+}
+
+static void
+renderonly_transfer_inline_write(struct pipe_context *pcontext,
+ struct pipe_resource *presource,
+ unsigned level,
+ unsigned usage,
+ const struct pipe_box *box,
+ const void *data,
+ unsigned stride,
+ unsigned layer_stride)
+{
+ struct renderonly_resource *resource = to_renderonly_resource(presource);
+ struct renderonly_context *context = to_renderonly_context(pcontext);
+
+ context->gpu->transfer_inline_write(context->gpu, resource->gpu,
+ level, usage, box, data, stride,
+ layer_stride);
+}
+
+struct pipe_context *
+renderonly_context_create(struct pipe_screen *pscreen, void *priv, unsigned flags)
+{
+ struct renderonly_screen *screen = to_renderonly_screen(pscreen);
+ struct renderonly_context *context;
+
+ context = calloc(1, sizeof(*context));
+ if (!context)
+ return NULL;
+
+ context->gpu = screen->gpu->context_create(screen->gpu, priv, flags);
+ if (!context->gpu) {
+ debug_error("failed to create GPU context\n");
+ free(context);
+ return NULL;
+ }
+
+ context->base.screen = &screen->base;
+ context->base.priv = priv;
+
+ context->base.destroy = renderonly_destroy;
+
+ context->base.draw_vbo = renderonly_draw_vbo;
+
+ context->base.create_blend_state = renderonly_create_blend_state;
+ context->base.bind_blend_state = renderonly_bind_blend_state;
+ context->base.delete_blend_state = renderonly_delete_blend_state;
+
+ context->base.create_sampler_state = renderonly_create_sampler_state;
+ context->base.bind_sampler_states = renderonly_bind_sampler_states;
+ context->base.delete_sampler_state = renderonly_delete_sampler_state;
+
+ context->base.create_rasterizer_state = renderonly_create_rasterizer_state;
+ context->base.bind_rasterizer_state = renderonly_bind_rasterizer_state;
+ context->base.delete_rasterizer_state = renderonly_delete_rasterizer_state;
+
+ context->base.create_depth_stencil_alpha_state = renderonly_create_depth_stencil_alpha_state;
+ context->base.bind_depth_stencil_alpha_state = renderonly_bind_depth_stencil_alpha_state;
+ context->base.delete_depth_stencil_alpha_state = renderonly_delete_depth_stencil_alpha_state;
+
+ context->base.create_fs_state = renderonly_create_fs_state;
+ context->base.bind_fs_state = renderonly_bind_fs_state;
+ context->base.delete_fs_state = renderonly_delete_fs_state;
+
+ context->base.create_vs_state = renderonly_create_vs_state;
+ context->base.bind_vs_state = renderonly_bind_vs_state;
+ context->base.delete_vs_state = renderonly_delete_vs_state;
+
+ context->base.create_gs_state = renderonly_create_gs_state;
+ context->base.bind_gs_state = renderonly_bind_gs_state;
+ context->base.delete_gs_state = renderonly_delete_gs_state;
+
+ context->base.create_vertex_elements_state = renderonly_create_vertex_elements_state;
+ context->base.bind_vertex_elements_state = renderonly_bind_vertex_elements_state;
+ context->base.delete_vertex_elements_state = renderonly_delete_vertex_elements_state;
+
+ context->base.set_constant_buffer = renderonly_set_constant_buffer;
+ context->base.set_framebuffer_state = renderonly_set_framebuffer_state;
+ context->base.set_polygon_stipple = renderonly_set_polygon_stipple;
+ context->base.set_scissor_states = renderonly_set_scissor_states;
+ context->base.set_viewport_states = renderonly_set_viewport_states;
+ context->base.set_sampler_views = renderonly_set_sampler_views;
+
+ context->base.set_shader_images = renderonly_set_shader_images;
+ context->base.set_vertex_buffers = renderonly_set_vertex_buffers;
+ context->base.set_index_buffer = renderonly_set_index_buffer;
+
+ context->base.create_stream_output_target = renderonly_create_stream_output_target;
+ context->base.stream_output_target_destroy = renderonly_stream_output_target_destroy;
+ context->base.set_stream_output_targets = renderonly_set_stream_output_targets;
+
+ context->base.blit = renderonly_blit;
+ context->base.clear = renderonly_clear;
+ context->base.flush = renderonly_flush;
+
+ context->base.create_sampler_view = renderonly_create_sampler_view;
+ context->base.sampler_view_destroy = renderonly_sampler_view_destroy;
+
+ context->base.flush_resource = renderonly_flush_resource;
+
+ context->base.create_surface = renderonly_create_surface;
+ context->base.surface_destroy = renderonly_surface_destroy;
+
+ context->base.transfer_map = renderonly_transfer_map;
+ context->base.transfer_unmap = renderonly_transfer_unmap;
+ context->base.transfer_inline_write = renderonly_transfer_inline_write;
+
+ return &context->base;
+}
diff --git a/src/gallium/drivers/renderonly/renderonly_context.h b/src/gallium/drivers/renderonly/renderonly_context.h
new file mode 100644
--- /dev/null
+++ b/src/gallium/drivers/renderonly/renderonly_context.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright © 2014 NVIDIA Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef RENDERONLY_CONTEXT_H
+#define RENDERONLY_CONTEXT_H
+
+#include "pipe/p_context.h"
+#include "pipe/p_state.h"
+
+struct renderonly_screen;
+
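+/*
+ * A renderonly context wraps the renderonly GPU driver's context and
+ * forwards all pipe_context calls to it, unwrapping wrapped resources,
+ * surfaces and sampler views along the way.
+ */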
+struct renderonly_context {
+ struct pipe_context base;
+ struct pipe_context *gpu;
+};
+
+static inline struct renderonly_context *
+to_renderonly_context(struct pipe_context *context)
+{
+ return (struct renderonly_context *)context;
+}
+
+struct pipe_context *renderonly_context_create(struct pipe_screen *pscreen,
+ void *priv, unsigned flags);
+
+struct renderonly_sampler_view {
+ struct pipe_sampler_view base;
+ struct pipe_sampler_view *gpu;
+};
+
+static inline struct renderonly_sampler_view *
+to_renderonly_sampler_view(struct pipe_sampler_view *view)
+{
+ return (struct renderonly_sampler_view *)view;
+}
+
+static inline struct pipe_sampler_view *
+renderonly_sampler_view_unwrap(struct pipe_sampler_view *view)
+{
+ if (!view)
+ return NULL;
+
+ return to_renderonly_sampler_view(view)->gpu;
+}
+
+struct renderonly_transfer {
+ struct pipe_transfer base;
+ struct pipe_transfer *gpu;
+
+ unsigned int count;
+ void *map;
+};
+
+static inline struct renderonly_transfer *
+to_renderonly_transfer(struct pipe_transfer *transfer)
+{
+ return (struct renderonly_transfer *)transfer;
+}
+
+#endif /* RENDERONLY_CONTEXT_H */
diff --git a/src/gallium/drivers/renderonly/renderonly_resource.c b/src/gallium/drivers/renderonly/renderonly_resource.c
new file mode 100644
--- /dev/null
+++ b/src/gallium/drivers/renderonly/renderonly_resource.c
@@ -0,0 +1,296 @@
+/*
+ * Copyright © 2014 NVIDIA Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <errno.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+#include <xf86drm.h>
+
+#include "pipe/p_state.h"
+#include "util/u_debug.h"
+#include "util/u_format.h"
+#include "util/u_inlines.h"
+
+#include "state_tracker/drm_driver.h"
+
+#include "renderonly_context.h"
+#include "renderonly_resource.h"
+#include "renderonly_screen.h"
+
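+/*
+ * Create the scanout resource on the renderonly GPU, export it as a dma-buf
+ * and import it into the scanout device. If the driver provides a tiling
+ * hook, it is called so the scanout side matches the GPU's tiling.
+ */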
+static bool resource_import_scanout(struct renderonly_screen *screen,
+ struct renderonly_resource *resource,
+ const struct pipe_resource *template)
+{
+ struct winsys_handle handle;
+ boolean status;
+ int fd, err;
+
+ resource->gpu = screen->gpu->resource_create(screen->gpu,
+ template);
+ if (!resource->gpu)
+ return false;
+
+ memset(&handle, 0, sizeof(handle));
+ handle.type = DRM_API_HANDLE_TYPE_FD;
+
+ status = screen->gpu->resource_get_handle(screen->gpu,
+ resource->gpu,
+ &handle);
+ if (!status)
+ return false;
+
+ resource->stride = handle.stride;
+ fd = handle.handle;
+
+ err = drmPrimeFDToHandle(screen->fd, fd, &resource->handle);
+ if (err < 0) {
+ fprintf(stderr, "drmPrimeFDToHandle() failed: %s\n",
+ strerror(errno));
+ close(fd);
+ return false;
+ }
+
+ close(fd);
+
+ if (screen->ops->tiling) {
+ err = screen->ops->tiling(screen->fd, resource->handle);
+ if (err < 0) {
+ fprintf(stderr, "failed to set tiling parameters: %s\n",
+ strerror(errno));
+ return false;
+ }
+ }
+
+ return true;
+}
+
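+/*
+ * Create a dumb buffer on the scanout device and import it into the
+ * renderonly GPU as an extra copy; rendering goes to a native GPU resource
+ * and is blitted into the dumb buffer in flush_resource.
+ */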
+static bool resource_dumb(struct renderonly_screen *screen,
+ struct renderonly_resource *resource,
+ const struct pipe_resource *template)
+{
+ struct drm_mode_create_dumb create_dumb = { 0 };
+ struct winsys_handle handle;
+ int prime_fd, err;
+
+ /* create dumb buffer at scanout GPU */
+ create_dumb.width = template->width0;
+ create_dumb.height = template->height0;
+ create_dumb.bpp = 32;
+ create_dumb.flags = 0;
+ create_dumb.pitch = 0;
+ create_dumb.size = 0;
+ create_dumb.handle = 0;
+
+ err = ioctl(screen->fd, DRM_IOCTL_MODE_CREATE_DUMB, &create_dumb);
+ if (err < 0) {
+ fprintf(stderr, "DRM_IOCTL_MODE_CREATE_DUMB failed: %s\n",
+ strerror(errno));
+ return false;
+ }
+
+ resource->handle = create_dumb.handle;
+ resource->stride = create_dumb.pitch;
+
+ /* create resource at renderonly GPU */
+ resource->gpu = screen->gpu->resource_create(screen->gpu, template);
+ if (!resource->gpu)
+ return false;
+
+ /* export dumb buffer */
+ err = drmPrimeHandleToFD(screen->fd, create_dumb.handle, O_CLOEXEC, &prime_fd);
+ if (err < 0) {
+ fprintf(stderr, "failed to export dumb buffer: %s\n",
+ strerror(errno));
+ return false;
+ }
+
+ /* import dumb buffer */
+ memset(&handle, 0, sizeof(handle));
+ handle.type = DRM_API_HANDLE_TYPE_FD;
+ handle.handle = prime_fd;
+ handle.stride = create_dumb.pitch;
+
+ resource->prime = screen->gpu->resource_from_handle(screen->gpu, template, &handle);
+ close(prime_fd);
+ if (!resource->prime) {
+ fprintf(stderr, "failed to import dumb buffer into GPU screen\n");
+ return false;
+ }
+
+ return true;
+}
+
+struct pipe_resource *
+renderonly_resource_create(struct pipe_screen *pscreen,
+ const struct pipe_resource *template)
+{
+ struct renderonly_screen *screen = to_renderonly_screen(pscreen);
+ struct renderonly_resource *resource;
+
+ resource = calloc(1, sizeof(*resource));
+ if (!resource)
+ return NULL;
+
+ if (template->bind & PIPE_BIND_SCANOUT) {
+
+ bool success = false;
+
+ if (!screen->ops->intermediate_rendering) {
+ /* Create the scanout resource on the renderonly GPU, export
+ * it and import it into the scanout hardware. If a tiling
+ * hook is defined, tiling is set up for the created
+ * resource. */
+ success = resource_import_scanout(screen, resource, template);
+ } else {
+ /* Create a dumb buffer on the scanout hardware, export it
+ * and import it into the renderonly GPU. */
+ success = resource_dumb(screen, resource, template);
+ }
+
+ if (!success)
+ goto destroy;
+
+ resource->scanout = true;
+
+ } else {
+ resource->gpu = screen->gpu->resource_create(screen->gpu,
+ template);
+ if (!resource->gpu)
+ goto destroy;
+ }
+
+ memcpy(&resource->base, resource->gpu, sizeof(*resource->gpu));
+ pipe_reference_init(&resource->base.reference, 1);
+ resource->base.screen = &screen->base;
+
+ return &resource->base;
+
+destroy:
+ if (resource->gpu)
+ screen->gpu->resource_destroy(screen->gpu, resource->gpu);
+ free(resource);
+ return NULL;
+}
+
+struct pipe_resource *
+renderonly_resource_from_handle(struct pipe_screen *pscreen,
+ const struct pipe_resource *template,
+ struct winsys_handle *handle)
+{
+ struct renderonly_screen *screen = to_renderonly_screen(pscreen);
+ struct renderonly_resource *resource;
+
+ resource = calloc(1, sizeof(*resource));
+ if (!resource)
+ return NULL;
+
+ resource->gpu = screen->gpu->resource_from_handle(screen->gpu,
+ template,
+ handle);
+ if (!resource->gpu) {
+ free(resource);
+ return NULL;
+ }
+
+ memcpy(&resource->base, resource->gpu, sizeof(*resource->gpu));
+ pipe_reference_init(&resource->base.reference, 1);
+ resource->base.screen = &screen->base;
+
+ return &resource->base;
+}
+
+boolean
+renderonly_resource_get_handle(struct pipe_screen *pscreen,
+ struct pipe_resource *presource,
+ struct winsys_handle *handle)
+{
+ struct renderonly_resource *resource = to_renderonly_resource(presource);
+ struct renderonly_screen *screen = to_renderonly_screen(pscreen);
+ boolean ret = TRUE;
+
+ if (presource->bind & PIPE_BIND_SCANOUT) {
+ handle->handle = resource->handle;
+ handle->stride = resource->stride;
+ } else {
+ ret = screen->gpu->resource_get_handle(screen->gpu,
+ resource->gpu,
+ handle);
+ }
+
+ return ret;
+}
+
+void
+renderonly_resource_destroy(struct pipe_screen *pscreen,
+ struct pipe_resource *presource)
+{
+ struct renderonly_resource *resource = to_renderonly_resource(presource);
+
+ if (resource->prime) {
+ struct renderonly_screen *screen = to_renderonly_screen(pscreen);
+ struct drm_mode_destroy_dumb destroy_dumb = { 0 };
+
+ /* drop the imported copy and free the dumb buffer backing it */
+ pipe_resource_reference(&resource->prime, NULL);
+ destroy_dumb.handle = resource->handle;
+ ioctl(screen->fd, DRM_IOCTL_MODE_DESTROY_DUMB, &destroy_dumb);
+ }
+
+ pipe_resource_reference(&resource->gpu, NULL);
+ free(resource);
+}
+
+struct pipe_surface *
+renderonly_create_surface(struct pipe_context *pcontext,
+ struct pipe_resource *presource,
+ const struct pipe_surface *template)
+{
+ struct renderonly_resource *resource = to_renderonly_resource(presource);
+ struct renderonly_context *context = to_renderonly_context(pcontext);
+ struct renderonly_surface *surface;
+
+ surface = calloc(1, sizeof(*surface));
+ if (!surface)
+ return NULL;
+
+ surface->gpu = context->gpu->create_surface(context->gpu,
+ resource->gpu,
+ template);
+ if (!surface->gpu) {
+ free(surface);
+ return NULL;
+ }
+
+ memcpy(&surface->base, surface->gpu, sizeof(*surface->gpu));
+ /* overwrite to prevent reference from being released */
+ surface->base.texture = NULL;
+
+ pipe_reference_init(&surface->base.reference, 1);
+ pipe_resource_reference(&surface->base.texture, presource);
+ surface->base.context = &context->base;
+
+ return &surface->base;
+}
+
+void
+renderonly_surface_destroy(struct pipe_context *pcontext,
+ struct pipe_surface *psurface)
+{
+ struct renderonly_surface *surface = to_renderonly_surface(psurface);
+
+ pipe_resource_reference(&surface->base.texture, NULL);
+ pipe_surface_reference(&surface->gpu, NULL);
+ free(surface);
+}
diff --git a/src/gallium/drivers/renderonly/renderonly_resource.h b/src/gallium/drivers/renderonly/renderonly_resource.h
new file mode 100644
--- /dev/null
+++ b/src/gallium/drivers/renderonly/renderonly_resource.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright © 2014 NVIDIA Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef RENDERONLY_RESOURCE_H
+#define RENDERONLY_RESOURCE_H
+
+#include "pipe/p_state.h"
+
+struct winsys_handle;
+
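+/*
+ * base is the resource handed to the state tracker; gpu is the backing
+ * resource on the renderonly GPU. For scanout resources that need an
+ * intermediate copy, prime is the dumb buffer imported from the scanout
+ * device, while handle/stride describe the buffer on the scanout side.
+ */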
+struct renderonly_resource {
+ struct pipe_resource base;
+ struct pipe_resource *gpu;
+
+ bool scanout;
+ struct pipe_resource *prime;
+
+ uint32_t stride;
+ uint32_t handle;
+ size_t size;
+};
+
+static inline struct renderonly_resource *
+to_renderonly_resource(struct pipe_resource *resource)
+{
+ return (struct renderonly_resource *)resource;
+}
+
+static inline struct pipe_resource *
+renderonly_resource_unwrap(struct pipe_resource *resource)
+{
+ if (!resource)
+ return NULL;
+
+ return to_renderonly_resource(resource)->gpu;
+}
+
+struct pipe_resource *
+renderonly_resource_create(struct pipe_screen *pscreen,
+ const struct pipe_resource *template);
+struct pipe_resource *
+renderonly_resource_from_handle(struct pipe_screen *pscreen,
+ const struct pipe_resource *template,
+ struct winsys_handle *handle);
+boolean
+renderonly_resource_get_handle(struct pipe_screen *pscreen,
+ struct pipe_resource *resource,
+ struct winsys_handle *handle);
+void
+renderonly_resource_destroy(struct pipe_screen *pscreen,
+ struct pipe_resource *resource);
+
+struct renderonly_surface {
+ struct pipe_surface base;
+ struct pipe_surface *gpu;
+};
+
+static inline struct renderonly_surface *
+to_renderonly_surface(struct pipe_surface *surface)
+{
+ return (struct renderonly_surface *)surface;
+}
+
+static inline struct pipe_surface *
+renderonly_surface_unwrap(struct pipe_surface *surface)
+{
+ if (!surface)
+ return NULL;
+
+ return to_renderonly_surface(surface)->gpu;
+}
+
+struct pipe_surface *
+renderonly_create_surface(struct pipe_context *pcontext,
+ struct pipe_resource *presource,
+ const struct pipe_surface *template);
+void
+renderonly_surface_destroy(struct pipe_context *pcontext,
+ struct pipe_surface *psurface);
+
+#endif /* RENDERONLY_RESOURCE_H */
diff --git a/src/gallium/drivers/renderonly/renderonly_screen.c b/src/gallium/drivers/renderonly/renderonly_screen.c
new file mode 100644
--- /dev/null
+++ b/src/gallium/drivers/renderonly/renderonly_screen.c
@@ -0,0 +1,178 @@
+/*
+ * Copyright © 2014 NVIDIA Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <xf86drm.h>
+
+#include "util/u_string.h"
+#include "util/u_debug.h"
+
+#include "renderonly_context.h"
+#include "renderonly_resource.h"
+#include "renderonly_screen.h"
+
+static const char *
+renderonly_get_name(struct pipe_screen *pscreen)
+{
+ struct renderonly_screen *screen = to_renderonly_screen(pscreen);
+ static char buffer[256];
+ char *name;
+
+ name = drmGetDeviceNameFromFd(screen->fd);
+ util_snprintf(buffer, sizeof(buffer), "%s-%s",
+ name ? name : "unknown",
+ screen->gpu->get_name(screen->gpu));
+ free(name);
+
+ return buffer;
+}
+
+static const char *
+renderonly_get_vendor(struct pipe_screen *pscreen)
+{
+ return "renderonly";
+}
+
+static void renderonly_screen_destroy(struct pipe_screen *pscreen)
+{
+ struct renderonly_screen *screen = to_renderonly_screen(pscreen);
+
+ screen->gpu->destroy(screen->gpu);
+ close(screen->gpu_fd);
+ free(screen);
+}
+
+static int
+renderonly_screen_get_param(struct pipe_screen *pscreen, enum pipe_cap param)
+{
+ struct renderonly_screen *screen = to_renderonly_screen(pscreen);
+
+ return screen->gpu->get_param(screen->gpu, param);
+}
+
+static float
+renderonly_screen_get_paramf(struct pipe_screen *pscreen, enum pipe_capf param)
+{
+ struct renderonly_screen *screen = to_renderonly_screen(pscreen);
+
+ return screen->gpu->get_paramf(screen->gpu, param);
+}
+
+static int
+renderonly_screen_get_shader_param(struct pipe_screen *pscreen,
+ unsigned shader,
+ enum pipe_shader_cap param)
+{
+ struct renderonly_screen *screen = to_renderonly_screen(pscreen);
+
+ return screen->gpu->get_shader_param(screen->gpu, shader, param);
+}
+
+static boolean
+renderonly_screen_is_format_supported(struct pipe_screen *pscreen,
+ enum pipe_format format,
+ enum pipe_texture_target target,
+ unsigned sample_count,
+ unsigned usage)
+{
+ struct renderonly_screen *screen = to_renderonly_screen(pscreen);
+
+ return screen->gpu->is_format_supported(screen->gpu, format, target,
+ sample_count, usage);
+}
+
+static void
+renderonly_fence_reference(struct pipe_screen *pscreen,
+ struct pipe_fence_handle **ptr,
+ struct pipe_fence_handle *fence)
+{
+ struct renderonly_screen *screen = to_renderonly_screen(pscreen);
+
+ screen->gpu->fence_reference(screen->gpu, ptr, fence);
+}
+
+static boolean
+renderonly_fence_finish(struct pipe_screen *pscreen,
+ struct pipe_fence_handle *fence,
+ uint64_t timeout)
+{
+ struct renderonly_screen *screen = to_renderonly_screen(pscreen);
+
+ return screen->gpu->fence_finish(screen->gpu, fence, timeout);
+}
+
+static int renderonly_open_render_node(int fd)
+{
+ /* FIXME: hardcoded render node; the GPU device should be looked up
+ * instead of assuming renderD128. The scanout fd is unused for now. */
+ return open("/dev/dri/renderD128", O_RDWR | O_CLOEXEC);
+}
+
+struct pipe_screen *
+renderonly_screen_create(int fd, const struct renderonly_ops *ops)
+{
+ struct renderonly_screen *screen;
+
+ screen = calloc(1, sizeof(*screen));
+ if (!screen)
+ return NULL;
+
+ screen->fd = fd;
+ screen->ops = ops;
+ assert(screen->ops);
+
+ screen->gpu_fd = renderonly_open_render_node(screen->fd);
+ if (screen->gpu_fd < 0) {
+ fprintf(stderr, "failed to open GPU device: %s\n",
+ strerror(errno));
+ free(screen);
+ return NULL;
+ }
+
+ assert(screen->ops->open);
+ screen->gpu = screen->ops->open(screen->gpu_fd);
+ if (!screen->gpu) {
+ fprintf(stderr, "failed to create GPU screen\n");
+ close(screen->gpu_fd);
+ free(screen);
+ return NULL;
+ }
+
+ screen->base.get_name = renderonly_get_name;
+ screen->base.get_vendor = renderonly_get_vendor;
+ screen->base.destroy = renderonly_screen_destroy;
+ screen->base.get_param = renderonly_screen_get_param;
+ screen->base.get_paramf = renderonly_screen_get_paramf;
+ screen->base.get_shader_param = renderonly_screen_get_shader_param;
+ screen->base.context_create = renderonly_context_create;
+ screen->base.is_format_supported = renderonly_screen_is_format_supported;
+
+ screen->base.resource_create = renderonly_resource_create;
+ screen->base.resource_from_handle = renderonly_resource_from_handle;
+ screen->base.resource_get_handle = renderonly_resource_get_handle;
+ screen->base.resource_destroy = renderonly_resource_destroy;
+
+ screen->base.fence_reference = renderonly_fence_reference;
+ screen->base.fence_finish = renderonly_fence_finish;
+
+ if (ops->intermediate_rendering)
+ screen->base.flush_frontbuffer = NULL; /* TODO */
+
+ return &screen->base;
+}
diff --git a/src/gallium/drivers/renderonly/renderonly_screen.h b/src/gallium/drivers/renderonly/renderonly_screen.h
new file mode 100644
--- /dev/null
+++ b/src/gallium/drivers/renderonly/renderonly_screen.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright © 2014 NVIDIA Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef RENDERONLY_SCREEN_H
+#define RENDERONLY_SCREEN_H
+
+#include "pipe/p_screen.h"
+
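+/*
+ * Driver hooks: open() creates the pipe_screen of the renderonly GPU from a
+ * render-node fd; tiling(), if set, configures tiling on a buffer imported
+ * into the scanout device; intermediate_rendering selects the dumb-buffer
+ * blit path for scanout resources.
+ */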
+struct renderonly_ops {
+ struct pipe_screen *(*open)(int fd);
+ int (*tiling)(int fd, uint32_t handle);
+
+ bool intermediate_rendering;
+};
+
+struct renderonly_screen {
+ struct pipe_screen base;
+ int fd;
+
+ struct pipe_screen *gpu;
+ int gpu_fd;
+
+ const struct renderonly_ops *ops;
+};
+
+static inline struct renderonly_screen *
+to_renderonly_screen(struct pipe_screen *pscreen)
+{
+ return (struct renderonly_screen *)pscreen;
+}
+
+struct pipe_screen *renderonly_screen_create(int fd,
+ const struct renderonly_ops *ops);
+
+#endif /* RENDERONLY_SCREEN_H */
This commit adds a generic renderonly driver library, which fulfills
the requirements for tegra and etnaviv. As a result it is possible to
run unmodified EGL software directly (without any compositor) on
supported devices.

A render target is always shared between the scanout device and the
renderonly GPU via PRIME. If the scanout hardware can handle the tiling
format used by the renderonly GPU, the buffer is created on the
renderonly GPU, exported, and imported into the scanout device; a
driver can define a function which is used to set up the needed tiling
on the imported buffer. This function gets called during render-target
resource creation.

If the scanout hardware does not support the used tiling format, we
create an extra render target for the renderonly GPU and a dumb buffer
on the scanout device. During flush_resource we blit the renderonly
render target onto the imported dumb buffer. We assume that the
renderonly driver provides a blit function that is capable of resolving
the tiled format into an untiled one.

Signed-off-by: Christian Gmeiner <christian.gmeiner@gmail.com>
---
 configure.ac                                          |   1 +
 src/gallium/drivers/renderonly/Makefile.am            |  11 +
 src/gallium/drivers/renderonly/Makefile.sources       |   4 +
 src/gallium/drivers/renderonly/renderonly_context.c   | 721 ++++++++++++++++++
 src/gallium/drivers/renderonly/renderonly_context.h   |  80 ++
 src/gallium/drivers/renderonly/renderonly_resource.c  | 296 +++++++
 src/gallium/drivers/renderonly/renderonly_resource.h  | 101 +++
 src/gallium/drivers/renderonly/renderonly_screen.c    | 178 +++++
 src/gallium/drivers/renderonly/renderonly_screen.h    |  55 ++
 9 files changed, 1447 insertions(+)
 create mode 100644 src/gallium/drivers/renderonly/Makefile.am
 create mode 100644 src/gallium/drivers/renderonly/Makefile.sources
 create mode 100644 src/gallium/drivers/renderonly/renderonly_context.c
 create mode 100644 src/gallium/drivers/renderonly/renderonly_context.h
 create mode 100644 src/gallium/drivers/renderonly/renderonly_resource.c
 create mode 100644 src/gallium/drivers/renderonly/renderonly_resource.h
 create mode 100644 src/gallium/drivers/renderonly/renderonly_screen.c
 create mode 100644 src/gallium/drivers/renderonly/renderonly_screen.h
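
For reference, a minimal sketch of how a driver winsys could hook into
this library. This is not part of the patch; etna_drm_screen_create()
and the example_* names are placeholders for a driver's real entry
points:

    #include "renderonly/renderonly_screen.h"

    /* placeholder: creates the GPU driver's pipe_screen from a render-node fd */
    extern struct pipe_screen *etna_drm_screen_create(int fd);

    static const struct renderonly_ops example_ops = {
            .open = etna_drm_screen_create,
            /* no tiling hook: the scanout engine cannot consume the GPU's
             * tiling format, so the intermediate dumb-buffer blit path is
             * selected instead */
            .tiling = NULL,
            .intermediate_rendering = true,
    };

    struct pipe_screen *example_drm_screen_create(int fd)
    {
            /* fd refers to the scanout-only DRM device */
            return renderonly_screen_create(fd, &example_ops);
    }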