@@ -110,4 +110,6 @@ source "drivers/staging/ccree/Kconfig"
source "drivers/staging/typec/Kconfig"
+source "drivers/staging/vboxvideo/Kconfig"
+
endif # STAGING
@@ -44,3 +44,4 @@ obj-$(CONFIG_KS7010) += ks7010/
obj-$(CONFIG_GREYBUS) += greybus/
obj-$(CONFIG_BCM2835_VCHIQ) += vc04_services/
obj-$(CONFIG_CRYPTO_DEV_CCREE) += ccree/
+obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo/
new file mode 100644
@@ -0,0 +1,12 @@
+config DRM_VBOXVIDEO
+ tristate "Virtual Box Graphics Card"
+ depends on DRM && X86 && PCI
+ select DRM_KMS_HELPER
+ help
+ This is a KMS driver for the virtual graphics card used in
+ Virtual Box virtual machines.
+
+ Although it is possible to build this driver into the kernel, it is
+ advised to build it as a module, so that it can be updated
+ independently of the kernel. Select M to build this driver as a
+ module and add support for these devices via drm/kms interfaces.
new file mode 100644
@@ -0,0 +1,7 @@
+ccflags-y := -Iinclude/drm -I$(src)/osindependent
+
+vboxvideo-y := osindependent/HGSMIBase.o osindependent/Modesetting.o \
+ osindependent/VBVABase.o vbox_drv.o vbox_fb.o vbox_hgsmi.o \
+ vbox_irq.o vbox_main.o vbox_mode.o vbox_prime.o vbox_ttm.o
+
+obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo.o
new file mode 100644
@@ -0,0 +1,7 @@
+TODO:
+-Get this driver reviewed by the drm maintainers on dri-devel
+-Fill this TODO with the results of that review
+
+Please send any patches to Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
+Hans de Goede <hdegoede@redhat.com> and
+Michael Thayer <michael.thayer@oracle.com>.
new file mode 100644
@@ -0,0 +1,371 @@
+/* $Id: HGSMIBase.cpp 67145 2017-05-30 15:11:29Z vboxsync $ */
+/** @file
+ * VirtualBox Video driver, common code - HGSMI guest-to-host communication.
+ */
+
+/*
+ * Copyright (C) 2006-2017 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <VBoxVideoGuest.h>
+#include <VBoxVideoVBE.h>
+#include <VBoxVideoIPRT.h>
+
+/** Detect whether HGSMI is supported by the host. */
+DECLHIDDEN(bool) VBoxHGSMIIsSupported(void)
+{
+ uint16_t DispiId;
+
+ VBVO_PORT_WRITE_U16(VBE_DISPI_IOPORT_INDEX, VBE_DISPI_INDEX_ID);
+ VBVO_PORT_WRITE_U16(VBE_DISPI_IOPORT_DATA, VBE_DISPI_ID_HGSMI);
+
+ DispiId = VBVO_PORT_READ_U16(VBE_DISPI_IOPORT_DATA);
+
+ return (DispiId == VBE_DISPI_ID_HGSMI);
+}
+
+
+/**
+ * Inform the host of the location of the host flags in VRAM via an HGSMI
+ * command.
+ * @returns IPRT status value.
+ * @returns VERR_NOT_IMPLEMENTED if the host does not support the command.
+ * @returns VERR_NO_MEMORY if a heap allocation fails.
+ * @param pCtx the context of the guest heap to use.
+ * @param offLocation the offset chosen for the flags within guest
+ * VRAM.
+ */
+DECLHIDDEN(int) VBoxHGSMIReportFlagsLocation(PHGSMIGUESTCOMMANDCONTEXT pCtx,
+ HGSMIOFFSET offLocation)
+{
+ HGSMIBUFFERLOCATION *p;
+ int rc = VINF_SUCCESS;
+
+ /* Allocate the IO buffer. */
+ p = (HGSMIBUFFERLOCATION *)VBoxHGSMIBufferAlloc(pCtx,
+ sizeof(HGSMIBUFFERLOCATION),
+ HGSMI_CH_HGSMI,
+ HGSMI_CC_HOST_FLAGS_LOCATION);
+ if (p)
+ {
+ /* Prepare data to be sent to the host. */
+ p->offLocation = offLocation;
+ p->cbLocation = sizeof(HGSMIHOSTFLAGS);
+ rc = VBoxHGSMIBufferSubmit(pCtx, p);
+ /* Free the IO buffer. */
+ VBoxHGSMIBufferFree(pCtx, p);
+ }
+ else
+ rc = VERR_NO_MEMORY;
+ return rc;
+}
+
+
+/**
+ * Notify the host of HGSMI-related guest capabilities via an HGSMI command.
+ * @returns IPRT status value.
+ * @returns VERR_NOT_IMPLEMENTED if the host does not support the command.
+ * @returns VERR_NO_MEMORY if a heap allocation fails.
+ * @param pCtx the context of the guest heap to use.
+ * @param fCaps the capabilities to report, see VBVACAPS.
+ */
+DECLHIDDEN(int) VBoxHGSMISendCapsInfo(PHGSMIGUESTCOMMANDCONTEXT pCtx,
+ uint32_t fCaps)
+{
+ VBVACAPS *pCaps;
+ int rc = VINF_SUCCESS;
+
+ /* Allocate the IO buffer. */
+ pCaps = (VBVACAPS *)VBoxHGSMIBufferAlloc(pCtx,
+ sizeof(VBVACAPS), HGSMI_CH_VBVA,
+ VBVA_INFO_CAPS);
+
+ if (pCaps)
+ {
+ /* Prepare data to be sent to the host. */
+ pCaps->rc = VERR_NOT_IMPLEMENTED;
+ pCaps->fCaps = fCaps;
+ rc = VBoxHGSMIBufferSubmit(pCtx, pCaps);
+ if (RT_SUCCESS(rc))
+ {
+ AssertRC(pCaps->rc);
+ rc = pCaps->rc;
+ }
+ /* Free the IO buffer. */
+ VBoxHGSMIBufferFree(pCtx, pCaps);
+ }
+ else
+ rc = VERR_NO_MEMORY;
+ return rc;
+}
+
+
+/**
+ * Get the information needed to map the basic communication structures in
+ * device memory into our address space. All pointer parameters are optional.
+ *
+ * @param cbVRAM how much video RAM is allocated to the device
+ * @param poffVRAMBaseMapping where to save the offset from the start of the
+ * device VRAM of the whole area to map
+ * @param pcbMapping where to save the mapping size
+ * @param poffGuestHeapMemory where to save the offset into the mapped area
+ * of the guest heap backing memory
+ * @param pcbGuestHeapMemory where to save the size of the guest heap
+ * backing memory
+ * @param poffHostFlags where to save the offset into the mapped area
+ * of the host flags
+ */
+DECLHIDDEN(void) VBoxHGSMIGetBaseMappingInfo(uint32_t cbVRAM,
+ uint32_t *poffVRAMBaseMapping,
+ uint32_t *pcbMapping,
+ uint32_t *poffGuestHeapMemory,
+ uint32_t *pcbGuestHeapMemory,
+ uint32_t *poffHostFlags)
+{
+ AssertPtrNullReturnVoid(poffVRAMBaseMapping);
+ AssertPtrNullReturnVoid(pcbMapping);
+ AssertPtrNullReturnVoid(poffGuestHeapMemory);
+ AssertPtrNullReturnVoid(pcbGuestHeapMemory);
+ AssertPtrNullReturnVoid(poffHostFlags);
+ if (poffVRAMBaseMapping)
+ *poffVRAMBaseMapping = cbVRAM - VBVA_ADAPTER_INFORMATION_SIZE;
+ if (pcbMapping)
+ *pcbMapping = VBVA_ADAPTER_INFORMATION_SIZE;
+ if (poffGuestHeapMemory)
+ *poffGuestHeapMemory = 0;
+ if (pcbGuestHeapMemory)
+ *pcbGuestHeapMemory = VBVA_ADAPTER_INFORMATION_SIZE
+ - sizeof(HGSMIHOSTFLAGS);
+ if (poffHostFlags)
+ *poffHostFlags = VBVA_ADAPTER_INFORMATION_SIZE
+ - sizeof(HGSMIHOSTFLAGS);
+}
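+
+/*
+ * Worked example (illustrative, assuming the non-XPDM
+ * VBVA_ADAPTER_INFORMATION_SIZE of 64K and sizeof(HGSMIHOSTFLAGS) == 16):
+ * with cbVRAM = 8 MiB the mapped area starts at offset 8M - 64K and is 64K
+ * long, the guest heap backing memory is the first 64K - 16 bytes of it,
+ * and the host flags live in the final 16 bytes.
+ */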
+
+
+/** Sanity test on first call. We do not worry about concurrency issues. */
+static int testQueryConf(PHGSMIGUESTCOMMANDCONTEXT pCtx)
+{
+ static bool cOnce = false;
+ uint32_t ulValue = 0;
+ int rc;
+
+ if (cOnce)
+ return VINF_SUCCESS;
+ cOnce = true;
+ rc = VBoxQueryConfHGSMI(pCtx, UINT32_MAX, &ulValue);
+ if (RT_SUCCESS(rc) && ulValue == UINT32_MAX)
+ return VINF_SUCCESS;
+ cOnce = false;
+ if (RT_FAILURE(rc))
+ return rc;
+ return VERR_INTERNAL_ERROR;
+}
+
+
+/**
+ * Query the host for an HGSMI configuration parameter via an HGSMI command.
+ * @returns iprt status value
+ * @param pCtx the context containing the heap used
+ * @param u32Index the index of the parameter to query,
+ * @see VBVACONF32::u32Index
+ * @param u32DefValue default value
+ * @param pulValue where to store the value of the parameter on success
+ */
+DECLHIDDEN(int) VBoxQueryConfHGSMIDef(PHGSMIGUESTCOMMANDCONTEXT pCtx,
+ uint32_t u32Index, uint32_t u32DefValue, uint32_t *pulValue)
+{
+ int rc = VINF_SUCCESS;
+ VBVACONF32 *p;
+ // LogFunc(("u32Index = %d\n", u32Index));
+
+ rc = testQueryConf(pCtx);
+ if (RT_FAILURE(rc))
+ return rc;
+ /* Allocate the IO buffer. */
+ p = (VBVACONF32 *)VBoxHGSMIBufferAlloc(pCtx,
+ sizeof(VBVACONF32), HGSMI_CH_VBVA,
+ VBVA_QUERY_CONF32);
+ if (p)
+ {
+ /* Prepare data to be sent to the host. */
+ p->u32Index = u32Index;
+ p->u32Value = u32DefValue;
+ rc = VBoxHGSMIBufferSubmit(pCtx, p);
+ if (RT_SUCCESS(rc))
+ {
+ *pulValue = p->u32Value;
+ // LogFunc(("u32Value = %d\n", p->u32Value));
+ }
+ /* Free the IO buffer. */
+ VBoxHGSMIBufferFree(pCtx, p);
+ }
+ else
+ rc = VERR_NO_MEMORY;
+ // LogFunc(("rc = %d\n", rc));
+ return rc;
+}
+
+DECLHIDDEN(int) VBoxQueryConfHGSMI(PHGSMIGUESTCOMMANDCONTEXT pCtx,
+ uint32_t u32Index, uint32_t *pulValue)
+{
+ return VBoxQueryConfHGSMIDef(pCtx, u32Index, UINT32_MAX, pulValue);
+}
+
+/**
+ * Pass the host a new mouse pointer shape via an HGSMI command.
+ *
+ * @returns success or failure
+ * @param pCtx the context containing the heap to be used
+ * @param fFlags cursor flags, @see VMMDevReqMousePointer::fFlags
+ * @param cHotX horizontal position of the hot spot
+ * @param cHotY vertical position of the hot spot
+ * @param cWidth width in pixels of the cursor
+ * @param cHeight height in pixels of the cursor
+ * @param pPixels pixel data, @see VMMDevReqMousePointer for the format
+ * @param cbLength size in bytes of the pixel data
+ */
+DECLHIDDEN(int) VBoxHGSMIUpdatePointerShape(PHGSMIGUESTCOMMANDCONTEXT pCtx,
+ uint32_t fFlags,
+ uint32_t cHotX,
+ uint32_t cHotY,
+ uint32_t cWidth,
+ uint32_t cHeight,
+ uint8_t *pPixels,
+ uint32_t cbLength)
+{
+ VBVAMOUSEPOINTERSHAPE *p;
+ uint32_t cbData = 0;
+ int rc = VINF_SUCCESS;
+
+ if (fFlags & VBOX_MOUSE_POINTER_SHAPE)
+ {
+ /* Size of the pointer data: sizeof(AND mask) + sizeof(XOR mask) */
+ cbData = ((((cWidth + 7) / 8) * cHeight + 3) & ~3)
+ + cWidth * 4 * cHeight;
+ /* If a shape is supplied, then always make the pointer visible.
+ * See comments in 'vboxUpdatePointerShape'
+ */
+ fFlags |= VBOX_MOUSE_POINTER_VISIBLE;
+ }
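+ /*
+ * Illustrative example (numbers are not from the original sources): for a
+ * 32x32 pointer the 1bpp AND mask takes ((32 + 7) / 8) * 32 = 128 bytes
+ * (already a multiple of 4) and the 32bpp XOR mask takes
+ * 32 * 4 * 32 = 4096 bytes, giving cbData = 4224.
+ */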
+ // LogFlowFunc(("cbData %d, %dx%d\n", cbData, cWidth, cHeight));
+ if (cbData > cbLength)
+ {
+ // LogFunc(("calculated pointer data size is too big (%d bytes, limit %d)\n",
+ // cbData, cbLength));
+ return VERR_INVALID_PARAMETER;
+ }
+ /* Allocate the IO buffer. */
+ p = (VBVAMOUSEPOINTERSHAPE *)VBoxHGSMIBufferAlloc(pCtx,
+ sizeof(VBVAMOUSEPOINTERSHAPE)
+ + cbData,
+ HGSMI_CH_VBVA,
+ VBVA_MOUSE_POINTER_SHAPE);
+ if (p)
+ {
+ /* Prepare data to be sent to the host. */
+ /* Will be updated by the host. */
+ p->i32Result = VINF_SUCCESS;
+ /* We have our custom flags in the field */
+ p->fu32Flags = fFlags;
+ p->u32HotX = cHotX;
+ p->u32HotY = cHotY;
+ p->u32Width = cWidth;
+ p->u32Height = cHeight;
+ if (p->fu32Flags & VBOX_MOUSE_POINTER_SHAPE)
+ /* Copy the actual pointer data. */
+ memcpy (p->au8Data, pPixels, cbData);
+ rc = VBoxHGSMIBufferSubmit(pCtx, p);
+ if (RT_SUCCESS(rc))
+ rc = p->i32Result;
+ /* Free the IO buffer. */
+ VBoxHGSMIBufferFree(pCtx, p);
+ }
+ else
+ rc = VERR_NO_MEMORY;
+ // LogFlowFunc(("rc %d\n", rc));
+ return rc;
+}
+
+
+/**
+ * Report the guest cursor position. The host may wish to use this information
+ * to re-position its own cursor (though this is currently unlikely). The
+ * current host cursor position is returned.
+ * @param pCtx The context containing the heap used.
+ * @param fReportPosition Are we reporting a position?
+ * @param x Guest cursor X position.
+ * @param y Guest cursor Y position.
+ * @param pxHost Host cursor X position is stored here. Optional.
+ * @param pyHost Host cursor Y position is stored here. Optional.
+ * @returns iprt status code.
+ * @returns VERR_NO_MEMORY HGSMI heap allocation failed.
+ */
+DECLHIDDEN(int) VBoxHGSMICursorPosition(PHGSMIGUESTCOMMANDCONTEXT pCtx, bool fReportPosition, uint32_t x, uint32_t y,
+ uint32_t *pxHost, uint32_t *pyHost)
+{
+ int rc = VINF_SUCCESS;
+ VBVACURSORPOSITION *p;
+ // Log(("%s: x=%u, y=%u\n", __PRETTY_FUNCTION__, (unsigned)x, (unsigned)y));
+
+ /* Allocate the IO buffer. */
+ p = (VBVACURSORPOSITION *)VBoxHGSMIBufferAlloc(pCtx, sizeof(VBVACURSORPOSITION), HGSMI_CH_VBVA, VBVA_CURSOR_POSITION);
+ if (p)
+ {
+ /* Prepare data to be sent to the host. */
+ p->fReportPosition = fReportPosition ? 1 : 0;
+ p->x = x;
+ p->y = y;
+ rc = VBoxHGSMIBufferSubmit(pCtx, p);
+ if (RT_SUCCESS(rc))
+ {
+ if (pxHost)
+ *pxHost = p->x;
+ if (pyHost)
+ *pyHost = p->y;
+ // Log(("%s: return: x=%u, y=%u\n", __PRETTY_FUNCTION__, (unsigned)p->x, (unsigned)p->y));
+ }
+ /* Free the IO buffer. */
+ VBoxHGSMIBufferFree(pCtx, p);
+ }
+ else
+ rc = VERR_NO_MEMORY;
+ // LogFunc(("rc = %d\n", rc));
+ return rc;
+}
+
+
+/** @todo Mouse pointer position to be read from VMMDev memory, address of the memory region
+ * can be queried from VMMDev via an IOCTL. This VMMDev memory region will contain
+ * host information which is needed by the guest.
+ *
+ * Reading will not cause a switch to the host.
+ *
+ * Have to take into account:
+ * * synchronization: the host must write to the memory only from EMT;
+ * large structures must be read under a flag which tells the host
+ * that the guest is currently reading the memory (OWNER flag?).
+ * * guest writes: maybe allocate a page for the host info and make
+ * the page read-only for the guest.
+ * * the information should be available only to additions drivers.
+ * * the VMMDev additions driver will inform the host which version of the info it
+ * expects; the host must support all versions.
+ *
+ */
new file mode 100644
@@ -0,0 +1,52 @@
+/** @file
+ * VBox Host Guest Shared Memory Interface (HGSMI) buffer management.
+ */
+
+/*
+ * Copyright (C) 2017 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef ___VBox_Graphics_HGSMIBase_h___
+#define ___VBox_Graphics_HGSMIBase_h___
+
+#include <VBoxVideoIPRT.h>
+#include <HGSMIChannels.h>
+#include <HGSMIChSetup.h>
+#include <HGSMIDefs.h>
+#include <linux/genalloc.h>
+
+typedef struct gen_pool *PHGSMIGUESTCOMMANDCONTEXT;
+
+void *hgsmi_buffer_alloc(struct gen_pool *guest_pool, size_t size,
+ u8 channel, u16 channel_info);
+void hgsmi_buffer_free(struct gen_pool *guest_pool, void *buf);
+int hgsmi_buffer_submit(struct gen_pool *guest_pool, void *buf);
+
+/*
+ * Translate CamelCase names used in osindependent code to standard
+ * kernel style names.
+ */
+#define VBoxHGSMIBufferAlloc(ctx, size, ch, ch_info) \
+ hgsmi_buffer_alloc(ctx, size, ch, ch_info)
+#define VBoxHGSMIBufferFree(ctx, buf) hgsmi_buffer_free(ctx, buf)
+#define VBoxHGSMIBufferSubmit(ctx, buf) hgsmi_buffer_submit(ctx, buf)
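+
+/*
+ * A minimal usage sketch (hypothetical caller, not part of this header):
+ * osindependent code allocates, submits and frees a buffer roughly as
+ *
+ *   VBVACONF32 *p = VBoxHGSMIBufferAlloc(pCtx, sizeof(*p), HGSMI_CH_VBVA,
+ *                                        VBVA_QUERY_CONF32);
+ *   if (p) {
+ *       rc = VBoxHGSMIBufferSubmit(pCtx, p);
+ *       VBoxHGSMIBufferFree(pCtx, p);
+ *   }
+ *
+ * which the macros above turn into calls to the hgsmi_buffer_* helpers
+ * operating on the gen_pool behind PHGSMIGUESTCOMMANDCONTEXT (VBVACONF32
+ * and VBVA_QUERY_CONF32 come from VBoxVideo.h).
+ */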
+
+#endif
new file mode 100644
@@ -0,0 +1,76 @@
+/** @file
+ * VBox Host Guest Shared Memory Interface (HGSMI), Host/Guest shared part.
+ */
+
+/*
+ * Copyright (C) 2006-2017 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef ___VBox_Graphics_HGSMIChSetup_h
+#define ___VBox_Graphics_HGSMIChSetup_h
+
+/* HGSMI setup and configuration channel commands and data structures. */
+#define HGSMI_CC_HOST_FLAGS_LOCATION 0 /* Tell the host the location of HGSMIHOSTFLAGS structure,
+ * where the host can write information about pending
+ * buffers, etc, and which can be quickly polled by
+ * the guest without the need for port I/O.
+ */
+
+typedef struct HGSMIBUFFERLOCATION
+{
+ HGSMIOFFSET offLocation;
+ HGSMISIZE cbLocation;
+} HGSMIBUFFERLOCATION;
+AssertCompileSize(HGSMIBUFFERLOCATION, 8);
+
+/* HGSMI setup and configuration data structures. */
+/* host->guest commands pending, should be accessed under FIFO lock only */
+#define HGSMIHOSTFLAGS_COMMANDS_PENDING UINT32_C(0x1)
+/* IRQ is fired, should be accessed under VGAState::lock only */
+#define HGSMIHOSTFLAGS_IRQ UINT32_C(0x2)
+#ifdef VBOX_WITH_WDDM
+/* one or more guest commands is completed, should be accessed under FIFO lock only */
+# define HGSMIHOSTFLAGS_GCOMMAND_COMPLETED UINT32_C(0x4)
+/* watchdog timer interrupt flag (used for debugging), should be accessed under VGAState::lock only */
+# define HGSMIHOSTFLAGS_WATCHDOG UINT32_C(0x8)
+#endif
+/* vsync interrupt flag, should be accessed under VGAState::lock only */
+#define HGSMIHOSTFLAGS_VSYNC UINT32_C(0x10)
+/** monitor hotplug flag, should be accessed under VGAState::lock only */
+#define HGSMIHOSTFLAGS_HOTPLUG UINT32_C(0x20)
+/** Cursor capability state change flag, should be accessed under
+ * VGAState::lock only. @see VBVACONF32. */
+#define HGSMIHOSTFLAGS_CURSOR_CAPABILITIES UINT32_C(0x40)
+
+typedef struct HGSMIHOSTFLAGS
+{
+ /* Host flags can be accessed and modified by multiple threads concurrently,
+ * e.g. the CrOpenGL HGCM and GUI threads when completing HGSMI 3D and Video Accel respectively,
+ * the EMT thread when dealing with HGSMI command processing, etc.
+ * Besides being set/cleared atomically, each flag has its own special sync restrictions,
+ * see the comments on the flag definitions above */
+ volatile uint32_t u32HostFlags;
+ uint32_t au32Reserved[3];
+} HGSMIHOSTFLAGS;
+AssertCompileSize(HGSMIHOSTFLAGS, 16);
+
+#endif
+
new file mode 100644
@@ -0,0 +1,65 @@
+/** @file
+ *
+ * VBox Host Guest Shared Memory Interface (HGSMI).
+ * Host/Guest shared part.
+ * Channel identifiers.
+ */
+
+/*
+ * Copyright (C) 2006-2017 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef __HGSMIChannels_h__
+#define __HGSMIChannels_h__
+
+
+/* Each channel has an 8 bit identifier. There are a number of predefined
+ * (hardcoded) channels.
+ *
+ * HGSMI_CH_HGSMI channel can be used to map a string channel identifier
+ * to a free 16 bit numerical value. values are allocated in range
+ * [HGSMI_CH_STRING_FIRST;HGSMI_CH_STRING_LAST].
+ *
+ */
+
+
+/* Predefined channel identifiers. Used internally by VBOX to simplify the channel setup. */
+#define HGSMI_CH_RESERVED (0x00) /* A reserved channel value. */
+
+#define HGSMI_CH_HGSMI (0x01) /* HGSMI: setup and configuration channel. */
+
+#define HGSMI_CH_VBVA (0x02) /* Graphics: VBVA. */
+#define HGSMI_CH_SEAMLESS (0x03) /* Graphics: Seamless with a single guest region. */
+#define HGSMI_CH_SEAMLESS2 (0x04) /* Graphics: Seamless with separate host windows. */
+#define HGSMI_CH_OPENGL (0x05) /* Graphics: OpenGL HW acceleration. */
+
+
+/* Dynamically allocated channel identifiers. */
+#define HGSMI_CH_STRING_FIRST (0x20) /* The first channel index to be used for string mappings (inclusive). */
+#define HGSMI_CH_STRING_LAST (0xff) /* The last channel index for string mappings (inclusive). */
+
+
+/* Check whether the channel identifier is allocated for a dynamic channel. */
+#define HGSMI_IS_DYNAMIC_CHANNEL(_channel) (((uint8_t)(_channel) & 0xE0) != 0)
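+
+/*
+ * For example, HGSMI_IS_DYNAMIC_CHANNEL(HGSMI_CH_VBVA) is false
+ * (0x02 & 0xE0 == 0), while HGSMI_IS_DYNAMIC_CHANNEL(HGSMI_CH_STRING_FIRST)
+ * is true (0x20 & 0xE0 != 0).
+ */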
+
+
+#endif /* !__HGSMIChannels_h__*/
new file mode 100644
@@ -0,0 +1,123 @@
+/** @file
+ *
+ * VBox Host Guest Shared Memory Interface (HGSMI).
+ * Host/Guest shared part: types and defines.
+ */
+
+/*
+ * Copyright (C) 2006-2017 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef ___VBox_Graphics_HGSMIDefs_h
+#define ___VBox_Graphics_HGSMIDefs_h
+
+#include <VBoxVideoIPRT.h>
+
+/* HGSMI uses 32 bit offsets and sizes. */
+typedef uint32_t HGSMISIZE;
+typedef uint32_t HGSMIOFFSET;
+
+#define HGSMIOFFSET_VOID ((HGSMIOFFSET)~0)
+
+/* Describes a shared memory area buffer.
+ * Used for calculations with offsets and for buffers verification.
+ */
+typedef struct HGSMIAREA
+{
+ uint8_t *pu8Base; /* The starting address of the area. Corresponds to offset 'offBase'. */
+ HGSMIOFFSET offBase; /* The starting offset of the area. */
+ HGSMIOFFSET offLast; /* The last valid offset:
+ * offBase + cbArea - 1 - (sizeof(header) + sizeof(tail)).
+ */
+ HGSMISIZE cbArea; /* Size of the area. */
+} HGSMIAREA;
+
+
+/* The buffer description flags. */
+#define HGSMI_BUFFER_HEADER_F_SEQ_MASK 0x03 /* Buffer sequence type mask. */
+#define HGSMI_BUFFER_HEADER_F_SEQ_SINGLE 0x00 /* Single buffer, not a part of a sequence. */
+#define HGSMI_BUFFER_HEADER_F_SEQ_START 0x01 /* The first buffer in a sequence. */
+#define HGSMI_BUFFER_HEADER_F_SEQ_CONTINUE 0x02 /* A middle buffer in a sequence. */
+#define HGSMI_BUFFER_HEADER_F_SEQ_END 0x03 /* The last buffer in a sequence. */
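+
+/*
+ * Illustrative example (values invented for this sketch): a 10000 byte
+ * payload split over three buffers would be sent as
+ * HGSMI_BUFFER_HEADER_F_SEQ_START (u32SequenceSize = 10000), then
+ * ..._F_SEQ_CONTINUE (u32SequenceOffset = 4000) and finally
+ * ..._F_SEQ_END (u32SequenceOffset = 8000), all three carrying the same
+ * u32SequenceNumber; see the SequenceStart/SequenceContinue members of
+ * HGSMIBUFFERHEADER below.
+ */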
+
+
+#pragma pack(1)
+/* 16 bytes buffer header. */
+typedef struct HGSMIBUFFERHEADER
+{
+ uint32_t u32DataSize; /* Size of data that follows the header. */
+
+ uint8_t u8Flags; /* The buffer description: HGSMI_BUFFER_HEADER_F_* */
+
+ uint8_t u8Channel; /* The channel the data must be routed to. */
+ uint16_t u16ChannelInfo; /* Opaque to the HGSMI, used by the channel. */
+
+ union {
+ uint8_t au8Union[8]; /* Opaque placeholder to make the union 8 bytes. */
+
+ struct
+ { /* HGSMI_BUFFER_HEADER_F_SEQ_SINGLE */
+ uint32_t u32Reserved1; /* A reserved field, initialize to 0. */
+ uint32_t u32Reserved2; /* A reserved field, initialize to 0. */
+ } Buffer;
+
+ struct
+ { /* HGSMI_BUFFER_HEADER_F_SEQ_START */
+ uint32_t u32SequenceNumber; /* The sequence number, the same for all buffers in the sequence. */
+ uint32_t u32SequenceSize; /* The total size of the sequence. */
+ } SequenceStart;
+
+ struct
+ { /* HGSMI_BUFFER_HEADER_F_SEQ_CONTINUE and HGSMI_BUFFER_HEADER_F_SEQ_END */
+ uint32_t u32SequenceNumber; /* The sequence number, the same for all buffers in the sequence. */
+ uint32_t u32SequenceOffset; /* Data offset in the entire sequence. */
+ } SequenceContinue;
+ } u;
+} HGSMIBUFFERHEADER;
+
+/* 8 bytes buffer tail. */
+typedef struct HGSMIBUFFERTAIL
+{
+ uint32_t u32Reserved; /* Reserved, must be initialized to 0. */
+ uint32_t u32Checksum; /* Verifier for the buffer header and offset and for the first 4 bytes of the tail. */
+} HGSMIBUFFERTAIL;
+#pragma pack()
+
+AssertCompileSize(HGSMIBUFFERHEADER, 16);
+AssertCompileSize(HGSMIBUFFERTAIL, 8);
+
+/* The size of the array of channels. Array indexes are uint8_t. Note: the value must not be changed. */
+#define HGSMI_NUMBER_OF_CHANNELS 0x100
+
+typedef struct HGSMIENV
+{
+ /* Environment context pointer. */
+ void *pvEnv;
+
+ /* Allocate system memory. */
+ DECLCALLBACKMEMBER(void *, pfnAlloc)(void *pvEnv, HGSMISIZE cb);
+
+ /* Free system memory. */
+ DECLCALLBACKMEMBER(void, pfnFree)(void *pvEnv, void *pv);
+} HGSMIENV;
+
+#endif /* !___VBox_Graphics_HGSMIDefs_h */
new file mode 100644
@@ -0,0 +1,391 @@
+/* $Id: Modesetting.cpp 66544 2017-04-12 17:02:30Z vboxsync $ */
+/** @file
+ * VirtualBox Video driver, common code - HGSMI initialisation and helper
+ * functions.
+ */
+
+/*
+ * Copyright (C) 2006-2017 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <VBoxVideoGuest.h>
+#include <VBoxVideoVBE.h>
+
+#ifndef VBOX_GUESTR3XF86MOD
+# include <VBoxVideoIPRT.h>
+#endif
+
+/**
+ * Gets the count of virtual monitors attached to the guest via an HGSMI
+ * command.
+ *
+ * @returns the right count on success or 1 on failure.
+ * @param pCtx the context containing the heap to use
+ */
+DECLHIDDEN(uint32_t) VBoxHGSMIGetMonitorCount(PHGSMIGUESTCOMMANDCONTEXT pCtx)
+{
+ /* Query the configured number of displays. */
+ uint32_t cDisplays = 0;
+ VBoxQueryConfHGSMI(pCtx, VBOX_VBVA_CONF32_MONITOR_COUNT, &cDisplays);
+ // LogFunc(("cDisplays = %d\n", cDisplays));
+ if (cDisplays == 0 || cDisplays > VBOX_VIDEO_MAX_SCREENS)
+ /* Host reported some bad value. Continue in the 1 screen mode. */
+ cDisplays = 1;
+ return cDisplays;
+}
+
+
+/**
+ * Returns the size of the video RAM in bytes.
+ *
+ * @returns the size
+ */
+DECLHIDDEN(uint32_t) VBoxVideoGetVRAMSize(void)
+{
+ /** @note A 32bit read on this port returns the VRAM size. */
+ return VBVO_PORT_READ_U32(VBE_DISPI_IOPORT_DATA);
+}
+
+
+/**
+ * Check whether this hardware allows the display width to have non-multiple-
+ * of-eight values.
+ *
+ * @returns true if any width is allowed, false otherwise.
+ */
+DECLHIDDEN(bool) VBoxVideoAnyWidthAllowed(void)
+{
+ unsigned DispiId;
+ VBVO_PORT_WRITE_U16(VBE_DISPI_IOPORT_INDEX, VBE_DISPI_INDEX_ID);
+ VBVO_PORT_WRITE_U16(VBE_DISPI_IOPORT_DATA, VBE_DISPI_ID_ANYX);
+ DispiId = VBVO_PORT_READ_U16(VBE_DISPI_IOPORT_DATA);
+ return (DispiId == VBE_DISPI_ID_ANYX);
+}
+
+
+/**
+ * Tell the host how VRAM is divided up between each screen via an HGSMI
+ * command. It is acceptable to specify identical data for each screen if
+ * they share a single framebuffer.
+ *
+ * @returns iprt status code, either VERR_NO_MEMORY or the status returned by
+ * @a pfnFill
+ * @todo What was I thinking of with that callback function? It
+ * would be much simpler to just pass in a structure in normal
+ * memory and copy it.
+ * @param pCtx the context containing the heap to use
+ * @param u32Count the number of screens we are activating
+ * @param pfnFill a callback which initialises the VBVAINFOVIEW structures
+ * for all screens
+ * @param pvData context data for @a pfnFill
+ */
+DECLHIDDEN(int) VBoxHGSMISendViewInfo(PHGSMIGUESTCOMMANDCONTEXT pCtx,
+ uint32_t u32Count,
+ PFNHGSMIFILLVIEWINFO pfnFill,
+ void *pvData)
+{
+ int rc;
+ /* Issue the screen info command. */
+ void *p = VBoxHGSMIBufferAlloc(pCtx, sizeof(VBVAINFOVIEW) * u32Count,
+ HGSMI_CH_VBVA, VBVA_INFO_VIEW);
+ if (p)
+ {
+ VBVAINFOVIEW *pInfo = (VBVAINFOVIEW *)p;
+ rc = pfnFill(pvData, pInfo, u32Count);
+ if (RT_SUCCESS(rc))
+ VBoxHGSMIBufferSubmit (pCtx, p);
+ VBoxHGSMIBufferFree(pCtx, p);
+ }
+ else
+ rc = VERR_NO_MEMORY;
+ return rc;
+}
+
+
+/**
+ * Set a video mode using port registers. This must be done for the first
+ * screen before every HGSMI modeset and also works when HGSMI is not enabled.
+ * @param cWidth the mode width
+ * @param cHeight the mode height
+ * @param cVirtWidth the mode pitch
+ * @param cBPP the colour depth of the mode
+ * @param fFlags flags for the mode. These will be or-ed with the
+ * default _ENABLED flag, so unless you are restoring
+ * a saved mode or have special requirements you can pass
+ * zero here.
+ * @param cx the horizontal panning offset
+ * @param cy the vertical panning offset
+ */
+DECLHIDDEN(void) VBoxVideoSetModeRegisters(uint16_t cWidth, uint16_t cHeight,
+ uint16_t cVirtWidth, uint16_t cBPP,
+ uint16_t fFlags, uint16_t cx,
+ uint16_t cy)
+{
+ /* set the mode characteristics */
+ VBVO_PORT_WRITE_U16(VBE_DISPI_IOPORT_INDEX, VBE_DISPI_INDEX_XRES);
+ VBVO_PORT_WRITE_U16(VBE_DISPI_IOPORT_DATA, cWidth);
+ VBVO_PORT_WRITE_U16(VBE_DISPI_IOPORT_INDEX, VBE_DISPI_INDEX_YRES);
+ VBVO_PORT_WRITE_U16(VBE_DISPI_IOPORT_DATA, cHeight);
+ VBVO_PORT_WRITE_U16(VBE_DISPI_IOPORT_INDEX,
+ VBE_DISPI_INDEX_VIRT_WIDTH);
+ VBVO_PORT_WRITE_U16(VBE_DISPI_IOPORT_DATA, cVirtWidth);
+ VBVO_PORT_WRITE_U16(VBE_DISPI_IOPORT_INDEX, VBE_DISPI_INDEX_BPP);
+ VBVO_PORT_WRITE_U16(VBE_DISPI_IOPORT_DATA, cBPP);
+ /* enable the mode */
+ VBVO_PORT_WRITE_U16(VBE_DISPI_IOPORT_INDEX,
+ VBE_DISPI_INDEX_ENABLE);
+ VBVO_PORT_WRITE_U16(VBE_DISPI_IOPORT_DATA,
+ fFlags | VBE_DISPI_ENABLED);
+ /* Panning registers */
+ VBVO_PORT_WRITE_U16(VBE_DISPI_IOPORT_INDEX,
+ VBE_DISPI_INDEX_X_OFFSET);
+ VBVO_PORT_WRITE_U16(VBE_DISPI_IOPORT_DATA, cx);
+ VBVO_PORT_WRITE_U16(VBE_DISPI_IOPORT_INDEX,
+ VBE_DISPI_INDEX_Y_OFFSET);
+ VBVO_PORT_WRITE_U16(VBE_DISPI_IOPORT_DATA, cy);
+ /** @todo read from the port to see if the mode switch was successful */
+}
+
+
+/**
+ * Get the video mode for the first screen using the port registers. All
+ * parameters are optional
+ * @returns true if the VBE mode returned is active, false if we are in VGA
+ * mode
+ * @note If anyone else needs additional register values just extend the
+ * function with additional parameters and fix any existing callers.
+ * @param pcWidth where to store the mode width
+ * @param pcHeight where to store the mode height
+ * @param pcVirtWidth where to store the mode pitch
+ * @param pcBPP where to store the colour depth of the mode
+ * @param pfFlags where to store the flags for the mode
+ */
+DECLHIDDEN(bool) VBoxVideoGetModeRegisters(uint16_t *pcWidth, uint16_t *pcHeight,
+ uint16_t *pcVirtWidth, uint16_t *pcBPP,
+ uint16_t *pfFlags)
+{
+ uint16_t fFlags;
+
+ VBVO_PORT_WRITE_U16(VBE_DISPI_IOPORT_INDEX,
+ VBE_DISPI_INDEX_ENABLE);
+ fFlags = VBVO_PORT_READ_U16(VBE_DISPI_IOPORT_DATA);
+ if (pcWidth)
+ {
+ VBVO_PORT_WRITE_U16(VBE_DISPI_IOPORT_INDEX,
+ VBE_DISPI_INDEX_XRES);
+ *pcWidth = VBVO_PORT_READ_U16(VBE_DISPI_IOPORT_DATA);
+ }
+ if (pcHeight)
+ {
+ VBVO_PORT_WRITE_U16(VBE_DISPI_IOPORT_INDEX,
+ VBE_DISPI_INDEX_YRES);
+ *pcHeight = VBVO_PORT_READ_U16(VBE_DISPI_IOPORT_DATA);
+ }
+ if (pcVirtWidth)
+ {
+ VBVO_PORT_WRITE_U16(VBE_DISPI_IOPORT_INDEX,
+ VBE_DISPI_INDEX_VIRT_WIDTH);
+ *pcVirtWidth = VBVO_PORT_READ_U16(VBE_DISPI_IOPORT_DATA);
+ }
+ if (pcBPP)
+ {
+ VBVO_PORT_WRITE_U16(VBE_DISPI_IOPORT_INDEX,
+ VBE_DISPI_INDEX_BPP);
+ *pcBPP = VBVO_PORT_READ_U16(VBE_DISPI_IOPORT_DATA);
+ }
+ if (pfFlags)
+ *pfFlags = fFlags;
+ return RT_BOOL(fFlags & VBE_DISPI_ENABLED);
+}
+
+
+/**
+ * Disable our extended graphics mode and go back to VGA mode.
+ */
+DECLHIDDEN(void) VBoxVideoDisableVBE(void)
+{
+ VBVO_PORT_WRITE_U16(VBE_DISPI_IOPORT_INDEX,
+ VBE_DISPI_INDEX_ENABLE);
+ VBVO_PORT_WRITE_U16(VBE_DISPI_IOPORT_DATA, 0);
+}
+
+
+/**
+ * Set a video mode via an HGSMI request. The views must have been
+ * initialised first using @a VBoxHGSMISendViewInfo and if the mode is being
+ * set on the first display then it must be set first using registers.
+ * @param pCtx The context containing the heap to use.
+ * @param cDisplay the screen number
+ * @param cOriginX the horizontal displacement relative to the first screen
+ * @param cOriginY the vertical displacement relative to the first screen
+ * @param offStart the offset of the visible area of the framebuffer
+ * relative to the framebuffer start
+ * @param cbPitch the offset in bytes between the starts of two adjacent
+ * scan lines in video RAM
+ * @param cWidth the mode width
+ * @param cHeight the mode height
+ * @param cBPP the colour depth of the mode
+ * @param fFlags flags
+ */
+DECLHIDDEN(void) VBoxHGSMIProcessDisplayInfo(PHGSMIGUESTCOMMANDCONTEXT pCtx,
+ uint32_t cDisplay,
+ int32_t cOriginX,
+ int32_t cOriginY,
+ uint32_t offStart,
+ uint32_t cbPitch,
+ uint32_t cWidth,
+ uint32_t cHeight,
+ uint16_t cBPP,
+ uint16_t fFlags)
+{
+ /* Issue the screen info command. */
+ void *p = VBoxHGSMIBufferAlloc(pCtx,
+ sizeof (VBVAINFOSCREEN),
+ HGSMI_CH_VBVA,
+ VBVA_INFO_SCREEN);
+ if (!p)
+ {
+ // LogFunc(("HGSMIHeapAlloc failed\n"));
+ }
+ else
+ {
+ VBVAINFOSCREEN *pScreen = (VBVAINFOSCREEN *)p;
+
+ pScreen->u32ViewIndex = cDisplay;
+ pScreen->i32OriginX = cOriginX;
+ pScreen->i32OriginY = cOriginY;
+ pScreen->u32StartOffset = offStart;
+ pScreen->u32LineSize = cbPitch;
+ pScreen->u32Width = cWidth;
+ pScreen->u32Height = cHeight;
+ pScreen->u16BitsPerPixel = cBPP;
+ pScreen->u16Flags = fFlags;
+
+ VBoxHGSMIBufferSubmit(pCtx, p);
+
+ VBoxHGSMIBufferFree(pCtx, p);
+ }
+}
+
+
+/** Report the rectangle relative to which absolute pointer events should be
+ * expressed. This information remains valid until the next VBVA resize event
+ * for any screen, at which time it is reset to the bounding rectangle of all
+ * virtual screens.
+ * @param pCtx The context containing the heap to use.
+ * @param cOriginX Upper left X co-ordinate relative to the first screen.
+ * @param cOriginY Upper left Y co-ordinate relative to the first screen.
+ * @param cWidth Rectangle width.
+ * @param cHeight Rectangle height.
+ * @returns iprt status code.
+ * @returns VERR_NO_MEMORY HGSMI heap allocation failed.
+ */
+DECLHIDDEN(int) VBoxHGSMIUpdateInputMapping(PHGSMIGUESTCOMMANDCONTEXT pCtx, int32_t cOriginX, int32_t cOriginY,
+ uint32_t cWidth, uint32_t cHeight)
+{
+ int rc = VINF_SUCCESS;
+ VBVAREPORTINPUTMAPPING *p;
+ // Log(("%s: cOriginX=%d, cOriginY=%d, cWidth=%u, cHeight=%u\n", __PRETTY_FUNCTION__, (int)cOriginX, (int)cOriginX,
+ // (unsigned)cWidth, (unsigned)cHeight));
+
+ /* Allocate the IO buffer. */
+ p = (VBVAREPORTINPUTMAPPING *)VBoxHGSMIBufferAlloc(pCtx, sizeof(VBVAREPORTINPUTMAPPING), HGSMI_CH_VBVA,
+ VBVA_REPORT_INPUT_MAPPING);
+ if (p)
+ {
+ /* Prepare data to be sent to the host. */
+ p->x = cOriginX;
+ p->y = cOriginY;
+ p->cx = cWidth;
+ p->cy = cHeight;
+ rc = VBoxHGSMIBufferSubmit(pCtx, p);
+ /* Free the IO buffer. */
+ VBoxHGSMIBufferFree(pCtx, p);
+ }
+ else
+ rc = VERR_NO_MEMORY;
+ // LogFunc(("rc = %d\n", rc));
+ return rc;
+}
+
+
+/**
+ * Get most recent video mode hints.
+ * @param pCtx the context containing the heap to use
+ * @param cScreens the number of screens to query hints for, starting at 0.
+ * @param paHints array of VBVAMODEHINT structures for receiving the hints.
+ * @returns iprt status code
+ * @returns VERR_NO_MEMORY HGSMI heap allocation failed.
+ * @returns VERR_NOT_SUPPORTED Host does not support this command.
+ */
+DECLHIDDEN(int) VBoxHGSMIGetModeHints(PHGSMIGUESTCOMMANDCONTEXT pCtx,
+ unsigned cScreens, VBVAMODEHINT *paHints)
+{
+ int rc;
+ void *p;
+
+ AssertPtr(paHints);
+ if (!VALID_PTR(paHints))
+ return VERR_INVALID_POINTER;
+
+ p = VBoxHGSMIBufferAlloc(pCtx, sizeof(VBVAQUERYMODEHINTS)
+ + cScreens * sizeof(VBVAMODEHINT),
+ HGSMI_CH_VBVA, VBVA_QUERY_MODE_HINTS);
+ if (!p)
+ {
+ // LogFunc(("HGSMIHeapAlloc failed\n"));
+ return VERR_NO_MEMORY;
+ }
+ else
+ {
+ VBVAQUERYMODEHINTS *pQuery = (VBVAQUERYMODEHINTS *)p;
+
+ pQuery->cHintsQueried = cScreens;
+ pQuery->cbHintStructureGuest = sizeof(VBVAMODEHINT);
+ pQuery->rc = VERR_NOT_SUPPORTED;
+
+ VBoxHGSMIBufferSubmit(pCtx, p);
+ rc = pQuery->rc;
+ if (RT_SUCCESS(rc))
+ memcpy(paHints, ((uint8_t *)p) + sizeof(VBVAQUERYMODEHINTS),
+ cScreens * sizeof(VBVAMODEHINT));
+
+ VBoxHGSMIBufferFree(pCtx, p);
+ }
+ return rc;
+}
+
+
+/**
+ * Query the supported flags in VBVAINFOSCREEN::u16Flags.
+ *
+ * @returns The mask of VBVA_SCREEN_F_* flags or 0 if host does not support the request.
+ * @param pCtx the context containing the heap to use
+ */
+DECLHIDDEN(uint16_t) VBoxHGSMIGetScreenFlags(PHGSMIGUESTCOMMANDCONTEXT pCtx)
+{
+ uint32_t u32Flags = 0;
+ int rc = VBoxQueryConfHGSMIDef(pCtx, VBOX_VBVA_CONF32_SCREEN_FLAGS, 0, &u32Flags);
+ // LogFunc(("u32Flags = 0x%x rc %Rrc\n", u32Flags, rc));
+ if (RT_FAILURE(rc))
+ u32Flags = 0;
+ return (uint16_t)u32Flags;
+}
new file mode 100644
@@ -0,0 +1,395 @@
+/* $Id: VBVABase.cpp 66544 2017-04-12 17:02:30Z vboxsync $ */
+/** @file
+ * VirtualBox Video driver, common code - VBVA initialisation and helper
+ * functions.
+ */
+
+/*
+ * Copyright (C) 2006-2017 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <VBoxVideoGuest.h>
+#include <VBoxVideoIPRT.h>
+
+/*
+ * There is a hardware ring buffer in the graphics device video RAM, formerly
+ * in the VBox VMMDev PCI memory space.
+ * All graphics commands go there serialized by VBoxVBVABufferBeginUpdate
+ * and VBoxVBVABufferEndUpdate.
+ *
+ * off32Free is the write position; off32Data is the read position.
+ * off32Free == off32Data means the buffer is empty.
+ * There must always be a gap between off32Data and off32Free when data
+ * is in the buffer.
+ * The guest only changes off32Free; the host only changes off32Data.
+ */
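+
+/*
+ * Worked example (illustrative numbers only): with cbData = 100,
+ * off32Data = 10 and off32Free = 30, the host still has to process the
+ * 20 bytes at offsets 10..29, so the guest may place at most
+ * 100 + (10 - 30) = 80 more bytes before catching up with off32Data;
+ * this is exactly what vboxHwBufferAvail() below computes.
+ */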
+
+/* Forward declarations of internal functions. */
+static void vboxHwBufferFlush(PHGSMIGUESTCOMMANDCONTEXT pCtx);
+static void vboxHwBufferPlaceDataAt(PVBVABUFFERCONTEXT pCtx, const void *p,
+ uint32_t cb, uint32_t offset);
+static bool vboxHwBufferWrite(PVBVABUFFERCONTEXT pCtx,
+ PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
+ const void *p, uint32_t cb);
+
+
+static bool vboxVBVAInformHost(PVBVABUFFERCONTEXT pCtx,
+ PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
+ int32_t cScreen, bool bEnable)
+{
+ bool bRc = false;
+
+#if 0 /* All callers check this */
+ if (ppdev->bHGSMISupported)
+#endif
+ {
+ void *p = VBoxHGSMIBufferAlloc(pHGSMICtx,
+ sizeof (VBVAENABLE_EX),
+ HGSMI_CH_VBVA,
+ VBVA_ENABLE);
+ if (!p)
+ {
+ // LogFunc(("HGSMIHeapAlloc failed\n"));
+ }
+ else
+ {
+ VBVAENABLE_EX *pEnable = (VBVAENABLE_EX *)p;
+
+ pEnable->Base.u32Flags = bEnable? VBVA_F_ENABLE: VBVA_F_DISABLE;
+ pEnable->Base.u32Offset = pCtx->offVRAMBuffer;
+ pEnable->Base.i32Result = VERR_NOT_SUPPORTED;
+ if (cScreen >= 0)
+ {
+ pEnable->Base.u32Flags |= VBVA_F_EXTENDED | VBVA_F_ABSOFFSET;
+ pEnable->u32ScreenId = cScreen;
+ }
+
+ VBoxHGSMIBufferSubmit(pHGSMICtx, p);
+
+ if (bEnable)
+ {
+ bRc = RT_SUCCESS(pEnable->Base.i32Result);
+ }
+ else
+ {
+ bRc = true;
+ }
+
+ VBoxHGSMIBufferFree(pHGSMICtx, p);
+ }
+ }
+
+ return bRc;
+}
+
+/*
+ * Public hardware buffer methods.
+ */
+DECLHIDDEN(bool) VBoxVBVAEnable(PVBVABUFFERCONTEXT pCtx,
+ PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
+ VBVABUFFER *pVBVA, int32_t cScreen)
+{
+ bool bRc = false;
+
+ // LogFlowFunc(("pVBVA %p\n", pVBVA));
+
+#if 0 /* All callers check this */
+ if (ppdev->bHGSMISupported)
+#endif
+ {
+ // LogFunc(("pVBVA %p vbva off 0x%x\n", pVBVA, pCtx->offVRAMBuffer));
+
+ pVBVA->hostFlags.u32HostEvents = 0;
+ pVBVA->hostFlags.u32SupportedOrders = 0;
+ pVBVA->off32Data = 0;
+ pVBVA->off32Free = 0;
+ memset(pVBVA->aRecords, 0, sizeof (pVBVA->aRecords));
+ pVBVA->indexRecordFirst = 0;
+ pVBVA->indexRecordFree = 0;
+ pVBVA->cbPartialWriteThreshold = 256;
+ pVBVA->cbData = pCtx->cbBuffer - sizeof (VBVABUFFER) + sizeof (pVBVA->au8Data);
+
+ pCtx->fHwBufferOverflow = false;
+ pCtx->pRecord = NULL;
+ pCtx->pVBVA = pVBVA;
+
+ bRc = vboxVBVAInformHost(pCtx, pHGSMICtx, cScreen, true);
+ }
+
+ if (!bRc)
+ {
+ VBoxVBVADisable(pCtx, pHGSMICtx, cScreen);
+ }
+
+ return bRc;
+}
+
+DECLHIDDEN(void) VBoxVBVADisable(PVBVABUFFERCONTEXT pCtx,
+ PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
+ int32_t cScreen)
+{
+ // LogFlowFunc(("\n"));
+
+ pCtx->fHwBufferOverflow = false;
+ pCtx->pRecord = NULL;
+ pCtx->pVBVA = NULL;
+
+ vboxVBVAInformHost(pCtx, pHGSMICtx, cScreen, false);
+
+ return;
+}
+
+DECLHIDDEN(bool) VBoxVBVABufferBeginUpdate(PVBVABUFFERCONTEXT pCtx,
+ PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx)
+{
+ bool bRc = false;
+
+ // LogFunc(("flags = 0x%08X\n", pCtx->pVBVA? pCtx->pVBVA->u32HostEvents: -1));
+
+ if ( pCtx->pVBVA
+ && (pCtx->pVBVA->hostFlags.u32HostEvents & VBVA_F_MODE_ENABLED))
+ {
+ uint32_t indexRecordNext;
+
+ Assert(!pCtx->fHwBufferOverflow);
+ Assert(pCtx->pRecord == NULL);
+
+ indexRecordNext = (pCtx->pVBVA->indexRecordFree + 1) % VBVA_MAX_RECORDS;
+
+ if (indexRecordNext == pCtx->pVBVA->indexRecordFirst)
+ {
+ /* All slots in the records queue are used. */
+ vboxHwBufferFlush (pHGSMICtx);
+ }
+
+ if (indexRecordNext == pCtx->pVBVA->indexRecordFirst)
+ {
+ /* Even after flush there is no place. Fail the request. */
+ // LogFunc(("no space in the queue of records!!! first %d, last %d\n",
+ // pCtx->pVBVA->indexRecordFirst, pCtx->pVBVA->indexRecordFree));
+ }
+ else
+ {
+ /* Initialize the record. */
+ VBVARECORD *pRecord = &pCtx->pVBVA->aRecords[pCtx->pVBVA->indexRecordFree];
+
+ pRecord->cbRecord = VBVA_F_RECORD_PARTIAL;
+
+ pCtx->pVBVA->indexRecordFree = indexRecordNext;
+
+ // LogFunc(("indexRecordNext = %d\n", indexRecordNext));
+
+ /* Remember which record we are using. */
+ pCtx->pRecord = pRecord;
+
+ bRc = true;
+ }
+ }
+
+ return bRc;
+}
+
+DECLHIDDEN(void) VBoxVBVABufferEndUpdate(PVBVABUFFERCONTEXT pCtx)
+{
+ VBVARECORD *pRecord;
+
+ // LogFunc(("\n"));
+
+ Assert(pCtx->pVBVA);
+
+ pRecord = pCtx->pRecord;
+ Assert(pRecord && (pRecord->cbRecord & VBVA_F_RECORD_PARTIAL));
+
+ /* Mark the record completed. */
+ pRecord->cbRecord &= ~VBVA_F_RECORD_PARTIAL;
+
+ pCtx->fHwBufferOverflow = false;
+ pCtx->pRecord = NULL;
+
+ return;
+}
+
+/*
+ * Private operations.
+ */
+static uint32_t vboxHwBufferAvail (const VBVABUFFER *pVBVA)
+{
+ int32_t i32Diff = pVBVA->off32Data - pVBVA->off32Free;
+
+ return i32Diff > 0? i32Diff: pVBVA->cbData + i32Diff;
+}
+
+static void vboxHwBufferFlush(PHGSMIGUESTCOMMANDCONTEXT pCtx)
+{
+ /* Issue the flush command. */
+ void *p = VBoxHGSMIBufferAlloc(pCtx,
+ sizeof (VBVAFLUSH),
+ HGSMI_CH_VBVA,
+ VBVA_FLUSH);
+ if (!p)
+ {
+ // LogFunc(("HGSMIHeapAlloc failed\n"));
+ }
+ else
+ {
+ VBVAFLUSH *pFlush = (VBVAFLUSH *)p;
+
+ pFlush->u32Reserved = 0;
+
+ VBoxHGSMIBufferSubmit(pCtx, p);
+
+ VBoxHGSMIBufferFree(pCtx, p);
+ }
+
+ return;
+}
+
+static void vboxHwBufferPlaceDataAt(PVBVABUFFERCONTEXT pCtx, const void *p,
+ uint32_t cb, uint32_t offset)
+{
+ VBVABUFFER *pVBVA = pCtx->pVBVA;
+ uint32_t u32BytesTillBoundary = pVBVA->cbData - offset;
+ uint8_t *dst = &pVBVA->au8Data[offset];
+ int32_t i32Diff = cb - u32BytesTillBoundary;
+
+ if (i32Diff <= 0)
+ {
+ /* Chunk will not cross buffer boundary. */
+ memcpy (dst, p, cb);
+ }
+ else
+ {
+ /* Chunk crosses buffer boundary. */
+ memcpy (dst, p, u32BytesTillBoundary);
+ memcpy (&pVBVA->au8Data[0], (uint8_t *)p + u32BytesTillBoundary, i32Diff);
+ }
+
+ return;
+}
+
+static bool vboxHwBufferWrite(PVBVABUFFERCONTEXT pCtx,
+ PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
+ const void *p, uint32_t cb)
+{
+ VBVARECORD *pRecord;
+ uint32_t cbHwBufferAvail;
+
+ uint32_t cbWritten = 0;
+
+ VBVABUFFER *pVBVA = pCtx->pVBVA;
+ Assert(pVBVA);
+
+ if (!pVBVA || pCtx->fHwBufferOverflow)
+ {
+ return false;
+ }
+
+ Assert(pVBVA->indexRecordFirst != pVBVA->indexRecordFree);
+
+ pRecord = pCtx->pRecord;
+ Assert(pRecord && (pRecord->cbRecord & VBVA_F_RECORD_PARTIAL));
+
+ // LogFunc(("%d\n", cb));
+
+ cbHwBufferAvail = vboxHwBufferAvail (pVBVA);
+
+ while (cb > 0)
+ {
+ uint32_t cbChunk = cb;
+
+ // LogFunc(("pVBVA->off32Free %d, pRecord->cbRecord 0x%08X, cbHwBufferAvail %d, cb %d, cbWritten %d\n",
+ // pVBVA->off32Free, pRecord->cbRecord, cbHwBufferAvail, cb, cbWritten));
+
+ if (cbChunk >= cbHwBufferAvail)
+ {
+ // LogFunc(("1) avail %d, chunk %d\n", cbHwBufferAvail, cbChunk));
+
+ vboxHwBufferFlush (pHGSMICtx);
+
+ cbHwBufferAvail = vboxHwBufferAvail (pVBVA);
+
+ if (cbChunk >= cbHwBufferAvail)
+ {
+ // LogFunc(("no place for %d bytes. Only %d bytes available after flush. Going to partial writes.\n",
+ // cb, cbHwBufferAvail));
+
+ if (cbHwBufferAvail <= pVBVA->cbPartialWriteThreshold)
+ {
+ // LogFunc(("Buffer overflow!!!\n"));
+ pCtx->fHwBufferOverflow = true;
+ Assert(false);
+ return false;
+ }
+
+ cbChunk = cbHwBufferAvail - pVBVA->cbPartialWriteThreshold;
+ }
+ }
+
+ Assert(cbChunk <= cb);
+ Assert(cbChunk <= vboxHwBufferAvail (pVBVA));
+
+ vboxHwBufferPlaceDataAt (pCtx, (uint8_t *)p + cbWritten, cbChunk, pVBVA->off32Free);
+
+ pVBVA->off32Free = (pVBVA->off32Free + cbChunk) % pVBVA->cbData;
+ pRecord->cbRecord += cbChunk;
+ cbHwBufferAvail -= cbChunk;
+
+ cb -= cbChunk;
+ cbWritten += cbChunk;
+ }
+
+ return true;
+}
+
+/*
+ * Public writer to the hardware buffer.
+ */
+DECLHIDDEN(bool) VBoxVBVAWrite(PVBVABUFFERCONTEXT pCtx,
+ PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
+ const void *pv, uint32_t cb)
+{
+ return vboxHwBufferWrite (pCtx, pHGSMICtx, pv, cb);
+}
+
+DECLHIDDEN(bool) VBoxVBVAOrderSupported(PVBVABUFFERCONTEXT pCtx, unsigned code)
+{
+ VBVABUFFER *pVBVA = pCtx->pVBVA;
+
+ if (!pVBVA)
+ {
+ return false;
+ }
+
+ if (pVBVA->hostFlags.u32SupportedOrders & (1 << code))
+ {
+ return true;
+ }
+
+ return false;
+}
+
+DECLHIDDEN(void) VBoxVBVASetupBufferContext(PVBVABUFFERCONTEXT pCtx,
+ uint32_t offVRAMBuffer,
+ uint32_t cbBuffer)
+{
+ pCtx->offVRAMBuffer = offVRAMBuffer;
+ pCtx->cbBuffer = cbBuffer;
+}
new file mode 100644
@@ -0,0 +1,2105 @@
+/** @file
+ * VirtualBox Video interface.
+ */
+
+/*
+ * Copyright (C) 2006-2016 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ */
+
+#ifndef ___VBox_Graphics_VBoxVideo_h
+#define ___VBox_Graphics_VBoxVideo_h
+
+#include <VBoxVideoIPRT.h>
+
+/* this should be in sync with monitorCount <xsd:maxInclusive value="64"/> in src/VBox/Main/xml/VirtualBox-settings-common.xsd */
+#define VBOX_VIDEO_MAX_SCREENS 64
+
+/*
+ * The last 4096 bytes of the guest VRAM contains the generic info for all
+ * DualView chunks: sizes and offsets of chunks. This is filled by miniport.
+ *
+ * Last 4096 bytes of each chunk contain chunk specific data: framebuffer info,
+ * etc. This is used exclusively by the corresponding instance of a display driver.
+ *
+ * The VRAM layout:
+ * Last 4096 bytes - Adapter information area.
+ * 4096 bytes aligned miniport heap (value specified in the config rounded up).
+ * Slack - what is left after dividing the VRAM.
+ * 4096 bytes aligned framebuffers:
+ * last 4096 bytes of each framebuffer is the display information area.
+ *
+ * The Virtual Graphics Adapter information in the guest VRAM is stored by the
+ * guest video driver using structures prepended by VBOXVIDEOINFOHDR.
+ *
+ * When the guest driver writes dword 0 to the VBE_DISPI_INDEX_VBOX_VIDEO
+ * the host starts to process the info. The first element at the start of
+ * the 4096 bytes region should normally be a LINK that points to the
+ * actual information chain. That way the guest driver can have some
+ * fixed layout of the information memory block and just rewrite
+ * the link to point to relevant memory chain.
+ *
+ * The processing stops at the END element.
+ *
+ * The host can access the memory only when the port IO is processed.
+ * All data that will be needed later must be copied from these 4096 bytes.
+ * But other VRAM can be used by host until the mode is disabled.
+ *
+ * The guest driver writes dword 0xffffffff to the VBE_DISPI_INDEX_VBOX_VIDEO
+ * to disable the mode.
+ *
+ * VBE_DISPI_INDEX_VBOX_VIDEO is used to read the configuration information
+ * from the host and issue commands to the host.
+ *
+ * After the guest writes the VBE_DISPI_INDEX_VBOX_VIDEO index register, the
+ * following operations with the VBE data register can be performed:
+ *
+ * Operation Result
+ * write 16 bit value NOP
+ * read 16 bit value count of monitors
+ * write 32 bit value sets the vbox command value and the command processed by the host
+ * read 32 bit value result of the last vbox command is returned
+ */
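+
+/*
+ * A minimal sketch of the legacy hand-over described above (assumptions:
+ * a 32-bit port write helper analogous to the 16-bit one used elsewhere in
+ * this patch, and VBE_DISPI_INDEX_VBOX_VIDEO coming from VBoxVideoVBE.h):
+ *
+ *   VBVO_PORT_WRITE_U16(VBE_DISPI_IOPORT_INDEX, VBE_DISPI_INDEX_VBOX_VIDEO);
+ *   VBVO_PORT_WRITE_U32(VBE_DISPI_IOPORT_DATA,
+ *                       VBOX_VIDEO_INTERPRET_ADAPTER_MEMORY);
+ *   ...
+ *   VBVO_PORT_WRITE_U32(VBE_DISPI_IOPORT_DATA,
+ *                       VBOX_VIDEO_DISABLE_ADAPTER_MEMORY);
+ */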
+
+#define VBOX_VIDEO_PRIMARY_SCREEN 0
+#define VBOX_VIDEO_NO_SCREEN ~0
+
+/**
+ * VBVA command header.
+ *
+ * @todo Where does this fit in?
+ */
+typedef struct VBVACMDHDR
+{
+ /** Coordinates of affected rectangle. */
+ int16_t x;
+ int16_t y;
+ uint16_t w;
+ uint16_t h;
+} VBVACMDHDR;
+AssertCompileSize(VBVACMDHDR, 8);
+
+/** @name VBVA ring defines.
+ *
+ * The VBVA ring buffer is suitable for transferring large (< 2GB) amounts of
+ * data, for example big bitmaps which do not fit in the buffer.
+ *
+ * Guest starts writing to the buffer by initializing a record entry in the
+ * aRecords queue. VBVA_F_RECORD_PARTIAL indicates that the record is being
+ * written. As data is written to the ring buffer, the guest increases off32End
+ * for the record.
+ *
+ * The host reads the aRecords on flushes and processes all completed records.
+ * When the host encounters a situation where only a partial record is present
+ * and cbRecord & ~VBVA_F_RECORD_PARTIAL >= VBVA_RING_BUFFER_SIZE -
+ * VBVA_RING_BUFFER_THRESHOLD, the host fetches all record data and updates
+ * off32Head. After that on each flush the host continues fetching the data
+ * until the record is completed.
+ *
+ */
+#define VBVA_RING_BUFFER_SIZE (_4M - _1K)
+#define VBVA_RING_BUFFER_THRESHOLD (4 * _1K)
+
+#define VBVA_MAX_RECORDS (64)
+
+#define VBVA_F_MODE_ENABLED UINT32_C(0x00000001)
+#define VBVA_F_MODE_VRDP UINT32_C(0x00000002)
+#define VBVA_F_MODE_VRDP_RESET UINT32_C(0x00000004)
+#define VBVA_F_MODE_VRDP_ORDER_MASK UINT32_C(0x00000008)
+
+#define VBVA_F_STATE_PROCESSING UINT32_C(0x00010000)
+
+#define VBVA_F_RECORD_PARTIAL UINT32_C(0x80000000)
+
+/**
+ * VBVA record.
+ */
+typedef struct VBVARECORD
+{
+ /** The length of the record. Changed by guest. */
+ uint32_t cbRecord;
+} VBVARECORD;
+AssertCompileSize(VBVARECORD, 4);
+
+/* The size of the information. */
+/*
+ * The minimum HGSMI heap size is PAGE_SIZE (4096 bytes) and is a restriction of the
+ * runtime heapsimple API. Use minimum 2 pages here, because the info area also may
+ * contain other data (for example HGSMIHOSTFLAGS structure).
+ */
+#ifndef VBOX_XPDM_MINIPORT
+# define VBVA_ADAPTER_INFORMATION_SIZE (64*_1K)
+#else
+#define VBVA_ADAPTER_INFORMATION_SIZE (16*_1K)
+#define VBVA_DISPLAY_INFORMATION_SIZE (64*_1K)
+#endif
+#define VBVA_MIN_BUFFER_SIZE (64*_1K)
+
+
+/* The value for port IO to tell the adapter to stop interpreting the adapter memory. */
+#define VBOX_VIDEO_DISABLE_ADAPTER_MEMORY 0xFFFFFFFF
+
+/* The value for port IO to let the adapter interpret the adapter memory. */
+#define VBOX_VIDEO_INTERPRET_ADAPTER_MEMORY 0x00000000
+
+/* The value for port IO to let the adapter interpret the display memory.
+ * The display number is encoded in the low 16 bits.
+ */
+#define VBOX_VIDEO_INTERPRET_DISPLAY_MEMORY_BASE 0x00010000
+
+
+/* The end of the information. */
+#define VBOX_VIDEO_INFO_TYPE_END 0
+/* Instructs the host to fetch the next VBOXVIDEOINFOHDR at the given offset of VRAM. */
+#define VBOX_VIDEO_INFO_TYPE_LINK 1
+/* Information about a display memory position. */
+#define VBOX_VIDEO_INFO_TYPE_DISPLAY 2
+/* Information about a screen. */
+#define VBOX_VIDEO_INFO_TYPE_SCREEN 3
+/* Information about host notifications for the driver. */
+#define VBOX_VIDEO_INFO_TYPE_HOST_EVENTS 4
+/* Information about non-volatile guest VRAM heap. */
+#define VBOX_VIDEO_INFO_TYPE_NV_HEAP 5
+/* VBVA enable/disable. */
+#define VBOX_VIDEO_INFO_TYPE_VBVA_STATUS 6
+/* VBVA flush. */
+#define VBOX_VIDEO_INFO_TYPE_VBVA_FLUSH 7
+/* Query configuration value. */
+#define VBOX_VIDEO_INFO_TYPE_QUERY_CONF32 8
+
+
+#pragma pack(1)
+typedef struct VBOXVIDEOINFOHDR
+{
+ uint8_t u8Type;
+ uint8_t u8Reserved;
+ uint16_t u16Length;
+} VBOXVIDEOINFOHDR;
+
+
+typedef struct VBOXVIDEOINFOLINK
+{
+ /* Relative offset in VRAM */
+ int32_t i32Offset;
+} VBOXVIDEOINFOLINK;
+
+
+/* Resides in adapter info memory. Describes a display VRAM chunk. */
+typedef struct VBOXVIDEOINFODISPLAY
+{
+ /* Index of the framebuffer assigned by guest. */
+ uint32_t u32Index;
+
+ /* Absolute offset in VRAM of the framebuffer to be displayed on the monitor. */
+ uint32_t u32Offset;
+
+ /* The size of the memory that can be used for the screen. */
+ uint32_t u32FramebufferSize;
+
+ /* The size of the memory that is used for the Display information.
+ * The information is at u32Offset + u32FramebufferSize
+ */
+ uint32_t u32InformationSize;
+
+} VBOXVIDEOINFODISPLAY;
+
+
+/* Resides in display info area, describes the current video mode. */
+#define VBOX_VIDEO_INFO_SCREEN_F_NONE 0x00
+#define VBOX_VIDEO_INFO_SCREEN_F_ACTIVE 0x01
+
+typedef struct VBOXVIDEOINFOSCREEN
+{
+ /* Physical X origin relative to the primary screen. */
+ int32_t xOrigin;
+
+ /* Physical Y origin relative to the primary screen. */
+ int32_t yOrigin;
+
+ /* The scan line size in bytes. */
+ uint32_t u32LineSize;
+
+ /* Width of the screen. */
+ uint16_t u16Width;
+
+ /* Height of the screen. */
+ uint16_t u16Height;
+
+ /* Color depth. */
+ uint8_t bitsPerPixel;
+
+ /* VBOX_VIDEO_INFO_SCREEN_F_* */
+ uint8_t u8Flags;
+} VBOXVIDEOINFOSCREEN;
+
+/* The guest initializes the structure to 0. The position of the structure in the
+ * display info area must not be changed; the host will update the structure. The
+ * guest checks the events and modifies the structure as a response to the host.
+ */
+#define VBOX_VIDEO_INFO_HOST_EVENTS_F_NONE 0x00000000
+#define VBOX_VIDEO_INFO_HOST_EVENTS_F_VRDP_RESET 0x00000080
+
+typedef struct VBOXVIDEOINFOHOSTEVENTS
+{
+ /* Host events. */
+ uint32_t fu32Events;
+} VBOXVIDEOINFOHOSTEVENTS;
+
+/* Resides in adapter info memory. Describes the non-volatile VRAM heap. */
+typedef struct VBOXVIDEOINFONVHEAP
+{
+ /* Absolute offset in VRAM of the start of the heap. */
+ uint32_t u32HeapOffset;
+
+ /* The size of the heap. */
+ uint32_t u32HeapSize;
+
+} VBOXVIDEOINFONVHEAP;
+
+/* Display information area. */
+typedef struct VBOXVIDEOINFOVBVASTATUS
+{
+ /* Absolute offset in VRAM of the start of the VBVA QUEUE. 0 to disable VBVA. */
+ uint32_t u32QueueOffset;
+
+ /* The size of the VBVA QUEUE. 0 to disable VBVA. */
+ uint32_t u32QueueSize;
+
+} VBOXVIDEOINFOVBVASTATUS;
+
+typedef struct VBOXVIDEOINFOVBVAFLUSH
+{
+ uint32_t u32DataStart;
+
+ uint32_t u32DataEnd;
+
+} VBOXVIDEOINFOVBVAFLUSH;
+
+#define VBOX_VIDEO_QCI32_MONITOR_COUNT 0
+#define VBOX_VIDEO_QCI32_OFFSCREEN_HEAP_SIZE 1
+
+typedef struct VBOXVIDEOINFOQUERYCONF32
+{
+ uint32_t u32Index;
+
+ uint32_t u32Value;
+
+} VBOXVIDEOINFOQUERYCONF32;
+#pragma pack()
+
+#ifdef VBOX_WITH_VIDEOHWACCEL
+#pragma pack(1)
+
+#define VBOXVHWA_VERSION_MAJ 0
+#define VBOXVHWA_VERSION_MIN 0
+#define VBOXVHWA_VERSION_BLD 6
+#define VBOXVHWA_VERSION_RSV 0
+
+typedef enum
+{
+ VBOXVHWACMD_TYPE_SURF_CANCREATE = 1,
+ VBOXVHWACMD_TYPE_SURF_CREATE,
+ VBOXVHWACMD_TYPE_SURF_DESTROY,
+ VBOXVHWACMD_TYPE_SURF_LOCK,
+ VBOXVHWACMD_TYPE_SURF_UNLOCK,
+ VBOXVHWACMD_TYPE_SURF_BLT,
+ VBOXVHWACMD_TYPE_SURF_FLIP,
+ VBOXVHWACMD_TYPE_SURF_OVERLAY_UPDATE,
+ VBOXVHWACMD_TYPE_SURF_OVERLAY_SETPOSITION,
+ VBOXVHWACMD_TYPE_SURF_COLORKEY_SET,
+ VBOXVHWACMD_TYPE_QUERY_INFO1,
+ VBOXVHWACMD_TYPE_QUERY_INFO2,
+ VBOXVHWACMD_TYPE_ENABLE,
+ VBOXVHWACMD_TYPE_DISABLE,
+ VBOXVHWACMD_TYPE_HH_CONSTRUCT,
+ VBOXVHWACMD_TYPE_HH_RESET
+#ifdef VBOX_WITH_WDDM
+ , VBOXVHWACMD_TYPE_SURF_GETINFO
+ , VBOXVHWACMD_TYPE_SURF_COLORFILL
+#endif
+ , VBOXVHWACMD_TYPE_HH_DISABLE
+ , VBOXVHWACMD_TYPE_HH_ENABLE
+ , VBOXVHWACMD_TYPE_HH_SAVESTATE_SAVEBEGIN
+ , VBOXVHWACMD_TYPE_HH_SAVESTATE_SAVEEND
+ , VBOXVHWACMD_TYPE_HH_SAVESTATE_SAVEPERFORM
+ , VBOXVHWACMD_TYPE_HH_SAVESTATE_LOADPERFORM
+} VBOXVHWACMD_TYPE;
+
+/* The command processing was asynchronous. Set by the host to indicate
+ * asynchronous command completion. Must not be cleared once set; command
+ * completion is performed by issuing a host->guest completion command while
+ * keeping this flag unchanged. */
+#define VBOXVHWACMD_FLAG_HG_ASYNCH 0x00010000
+/* Asynchronous completion is performed by issuing the event. */
+#define VBOXVHWACMD_FLAG_GH_ASYNCH_EVENT 0x00000001
+/* Issue an interrupt on asynchronous completion. */
+#define VBOXVHWACMD_FLAG_GH_ASYNCH_IRQ 0x00000002
+/* The guest does not do any operation on completion of this command. The host
+ * may copy the command and indicate that it does not need the command anymore
+ * by setting the VBOXVHWACMD_FLAG_HG_ASYNCH_RETURNED flag. */
+#define VBOXVHWACMD_FLAG_GH_ASYNCH_NOCOMPLETION 0x00000004
+/* The host has copied the VBOXVHWACMD_FLAG_GH_ASYNCH_NOCOMPLETION command and
+ * returned it to the guest. */
+#define VBOXVHWACMD_FLAG_HG_ASYNCH_RETURNED 0x00020000
+/* This is a host->host command, i.e. a configuration command posted by the host
+ * to the framebuffer. */
+#define VBOXVHWACMD_FLAG_HH_CMD 0x10000000
+
+typedef struct VBOXVHWACMD
+{
+ VBOXVHWACMD_TYPE enmCmd; /* command type */
+ volatile int32_t rc; /* command result */
+ int32_t iDisplay; /* display index */
+ volatile int32_t Flags; /* ored VBOXVHWACMD_FLAG_xxx values */
+ uint64_t GuestVBVAReserved1; /* field internally used by the guest VBVA cmd handling, must NOT be modified by clients */
+ uint64_t GuestVBVAReserved2; /* field internally used by the guest VBVA cmd handling, must NOT be modified by clients */
+ volatile uint32_t cRefs;
+ int32_t Reserved;
+ union
+ {
+ struct VBOXVHWACMD *pNext;
+ uint32_t offNext;
+ uint64_t Data; /* the body is 64-bit aligned */
+ } u;
+ char body[1];
+} VBOXVHWACMD;
+
+#define VBOXVHWACMD_HEADSIZE() (RT_OFFSETOF(VBOXVHWACMD, body))
+#define VBOXVHWACMD_SIZE_FROMBODYSIZE(_s) (VBOXVHWACMD_HEADSIZE() + (_s))
+#define VBOXVHWACMD_SIZE(_tCmd) (VBOXVHWACMD_SIZE_FROMBODYSIZE(sizeof(_tCmd)))
+typedef unsigned int VBOXVHWACMD_LENGTH;
+typedef uint64_t VBOXVHWA_SURFHANDLE;
+#define VBOXVHWA_SURFHANDLE_INVALID 0ULL
+#define VBOXVHWACMD_BODY(_p, _t) ((_t*)(_p)->body)
+#define VBOXVHWACMD_HEAD(_pb) ((VBOXVHWACMD*)((uint8_t *)(_pb) - RT_OFFSETOF(VBOXVHWACMD, body)))
+
+typedef struct VBOXVHWA_RECTL
+{
+ int32_t left;
+ int32_t top;
+ int32_t right;
+ int32_t bottom;
+} VBOXVHWA_RECTL;
+
+typedef struct VBOXVHWA_COLORKEY
+{
+ uint32_t low;
+ uint32_t high;
+} VBOXVHWA_COLORKEY;
+
+typedef struct VBOXVHWA_PIXELFORMAT
+{
+ uint32_t flags;
+ uint32_t fourCC;
+ union
+ {
+ uint32_t rgbBitCount;
+ uint32_t yuvBitCount;
+ } c;
+
+ union
+ {
+ uint32_t rgbRBitMask;
+ uint32_t yuvYBitMask;
+ } m1;
+
+ union
+ {
+ uint32_t rgbGBitMask;
+ uint32_t yuvUBitMask;
+ } m2;
+
+ union
+ {
+ uint32_t rgbBBitMask;
+ uint32_t yuvVBitMask;
+ } m3;
+
+ union
+ {
+ uint32_t rgbABitMask;
+ } m4;
+
+ uint32_t Reserved;
+} VBOXVHWA_PIXELFORMAT;
+
+typedef struct VBOXVHWA_SURFACEDESC
+{
+ uint32_t flags;
+ uint32_t height;
+ uint32_t width;
+ uint32_t pitch;
+ uint32_t sizeX;
+ uint32_t sizeY;
+ uint32_t cBackBuffers;
+ uint32_t Reserved;
+ VBOXVHWA_COLORKEY DstOverlayCK;
+ VBOXVHWA_COLORKEY DstBltCK;
+ VBOXVHWA_COLORKEY SrcOverlayCK;
+ VBOXVHWA_COLORKEY SrcBltCK;
+ VBOXVHWA_PIXELFORMAT PixelFormat;
+ uint32_t surfCaps;
+ uint32_t Reserved2;
+ VBOXVHWA_SURFHANDLE hSurf;
+ uint64_t offSurface;
+} VBOXVHWA_SURFACEDESC;
+
+typedef struct VBOXVHWA_BLTFX
+{
+ uint32_t flags;
+ uint32_t rop;
+ uint32_t rotationOp;
+ uint32_t rotation;
+ uint32_t fillColor;
+ uint32_t Reserved;
+ VBOXVHWA_COLORKEY DstCK;
+ VBOXVHWA_COLORKEY SrcCK;
+} VBOXVHWA_BLTFX;
+
+typedef struct VBOXVHWA_OVERLAYFX
+{
+ uint32_t flags;
+ uint32_t Reserved1;
+ uint32_t fxFlags;
+ uint32_t Reserved2;
+ VBOXVHWA_COLORKEY DstCK;
+ VBOXVHWA_COLORKEY SrcCK;
+} VBOXVHWA_OVERLAYFX;
+
+#define VBOXVHWA_CAPS_BLT 0x00000040
+#define VBOXVHWA_CAPS_BLTCOLORFILL 0x04000000
+#define VBOXVHWA_CAPS_BLTFOURCC 0x00000100
+#define VBOXVHWA_CAPS_BLTSTRETCH 0x00000200
+#define VBOXVHWA_CAPS_BLTQUEUE 0x00000080
+
+#define VBOXVHWA_CAPS_OVERLAY 0x00000800
+#define VBOXVHWA_CAPS_OVERLAYFOURCC 0x00002000
+#define VBOXVHWA_CAPS_OVERLAYSTRETCH 0x00004000
+#define VBOXVHWA_CAPS_OVERLAYCANTCLIP 0x00001000
+
+#define VBOXVHWA_CAPS_COLORKEY 0x00400000
+#define VBOXVHWA_CAPS_COLORKEYHWASSIST 0x01000000
+
+#define VBOXVHWA_SCAPS_BACKBUFFER 0x00000004
+#define VBOXVHWA_SCAPS_COMPLEX 0x00000008
+#define VBOXVHWA_SCAPS_FLIP 0x00000010
+#define VBOXVHWA_SCAPS_FRONTBUFFER 0x00000020
+#define VBOXVHWA_SCAPS_OFFSCREENPLAIN 0x00000040
+#define VBOXVHWA_SCAPS_OVERLAY 0x00000080
+#define VBOXVHWA_SCAPS_PRIMARYSURFACE 0x00000200
+#define VBOXVHWA_SCAPS_SYSTEMMEMORY 0x00000800
+#define VBOXVHWA_SCAPS_VIDEOMEMORY 0x00004000
+#define VBOXVHWA_SCAPS_VISIBLE 0x00008000
+#define VBOXVHWA_SCAPS_LOCALVIDMEM 0x10000000
+
+#define VBOXVHWA_PF_PALETTEINDEXED8 0x00000020
+#define VBOXVHWA_PF_RGB 0x00000040
+#define VBOXVHWA_PF_RGBTOYUV 0x00000100
+#define VBOXVHWA_PF_YUV 0x00000200
+#define VBOXVHWA_PF_FOURCC 0x00000004
+
+#define VBOXVHWA_LOCK_DISCARDCONTENTS 0x00002000
+
+#define VBOXVHWA_CFG_ENABLED 0x00000001
+
+#define VBOXVHWA_SD_BACKBUFFERCOUNT 0x00000020
+#define VBOXVHWA_SD_CAPS 0x00000001
+#define VBOXVHWA_SD_CKDESTBLT 0x00004000
+#define VBOXVHWA_SD_CKDESTOVERLAY 0x00002000
+#define VBOXVHWA_SD_CKSRCBLT 0x00010000
+#define VBOXVHWA_SD_CKSRCOVERLAY 0x00008000
+#define VBOXVHWA_SD_HEIGHT 0x00000002
+#define VBOXVHWA_SD_PITCH 0x00000008
+#define VBOXVHWA_SD_PIXELFORMAT 0x00001000
+/*#define VBOXVHWA_SD_REFRESHRATE 0x00040000*/
+#define VBOXVHWA_SD_WIDTH 0x00000004
+
+#define VBOXVHWA_CKEYCAPS_DESTBLT 0x00000001
+#define VBOXVHWA_CKEYCAPS_DESTBLTCLRSPACE 0x00000002
+#define VBOXVHWA_CKEYCAPS_DESTBLTCLRSPACEYUV 0x00000004
+#define VBOXVHWA_CKEYCAPS_DESTBLTYUV 0x00000008
+#define VBOXVHWA_CKEYCAPS_DESTOVERLAY 0x00000010
+#define VBOXVHWA_CKEYCAPS_DESTOVERLAYCLRSPACE 0x00000020
+#define VBOXVHWA_CKEYCAPS_DESTOVERLAYCLRSPACEYUV 0x00000040
+#define VBOXVHWA_CKEYCAPS_DESTOVERLAYONEACTIVE 0x00000080
+#define VBOXVHWA_CKEYCAPS_DESTOVERLAYYUV 0x00000100
+#define VBOXVHWA_CKEYCAPS_SRCBLT 0x00000200
+#define VBOXVHWA_CKEYCAPS_SRCBLTCLRSPACE 0x00000400
+#define VBOXVHWA_CKEYCAPS_SRCBLTCLRSPACEYUV 0x00000800
+#define VBOXVHWA_CKEYCAPS_SRCBLTYUV 0x00001000
+#define VBOXVHWA_CKEYCAPS_SRCOVERLAY 0x00002000
+#define VBOXVHWA_CKEYCAPS_SRCOVERLAYCLRSPACE 0x00004000
+#define VBOXVHWA_CKEYCAPS_SRCOVERLAYCLRSPACEYUV 0x00008000
+#define VBOXVHWA_CKEYCAPS_SRCOVERLAYONEACTIVE 0x00010000
+#define VBOXVHWA_CKEYCAPS_SRCOVERLAYYUV 0x00020000
+#define VBOXVHWA_CKEYCAPS_NOCOSTOVERLAY 0x00040000
+
+#define VBOXVHWA_BLT_COLORFILL 0x00000400
+#define VBOXVHWA_BLT_DDFX 0x00000800
+#define VBOXVHWA_BLT_EXTENDED_FLAGS 0x40000000
+#define VBOXVHWA_BLT_EXTENDED_LINEAR_CONTENT 0x00000004
+#define VBOXVHWA_BLT_EXTENDED_PRESENTATION_STRETCHFACTOR 0x00000010
+#define VBOXVHWA_BLT_KEYDESTOVERRIDE 0x00004000
+#define VBOXVHWA_BLT_KEYSRCOVERRIDE 0x00010000
+#define VBOXVHWA_BLT_LAST_PRESENTATION 0x20000000
+#define VBOXVHWA_BLT_PRESENTATION 0x10000000
+#define VBOXVHWA_BLT_ROP 0x00020000
+
+
+#define VBOXVHWA_OVER_DDFX 0x00080000
+#define VBOXVHWA_OVER_HIDE 0x00000200
+#define VBOXVHWA_OVER_KEYDEST 0x00000400
+#define VBOXVHWA_OVER_KEYDESTOVERRIDE 0x00000800
+#define VBOXVHWA_OVER_KEYSRC 0x00001000
+#define VBOXVHWA_OVER_KEYSRCOVERRIDE 0x00002000
+#define VBOXVHWA_OVER_SHOW 0x00004000
+
+#define VBOXVHWA_CKEY_COLORSPACE 0x00000001
+#define VBOXVHWA_CKEY_DESTBLT 0x00000002
+#define VBOXVHWA_CKEY_DESTOVERLAY 0x00000004
+#define VBOXVHWA_CKEY_SRCBLT 0x00000008
+#define VBOXVHWA_CKEY_SRCOVERLAY 0x00000010
+
+#define VBOXVHWA_BLT_ARITHSTRETCHY 0x00000001
+#define VBOXVHWA_BLT_MIRRORLEFTRIGHT 0x00000002
+#define VBOXVHWA_BLT_MIRRORUPDOWN 0x00000004
+
+#define VBOXVHWA_OVERFX_ARITHSTRETCHY 0x00000001
+#define VBOXVHWA_OVERFX_MIRRORLEFTRIGHT 0x00000002
+#define VBOXVHWA_OVERFX_MIRRORUPDOWN 0x00000004
+
+#define VBOXVHWA_CAPS2_CANRENDERWINDOWED 0x00080000
+#define VBOXVHWA_CAPS2_WIDESURFACES 0x00001000
+#define VBOXVHWA_CAPS2_COPYFOURCC 0x00008000
+/*#define VBOXVHWA_CAPS2_FLIPINTERVAL 0x00200000*/
+/*#define VBOXVHWA_CAPS2_FLIPNOVSYNC 0x00400000*/
+
+
+#define VBOXVHWA_OFFSET64_VOID (UINT64_MAX)
+
+typedef struct VBOXVHWA_VERSION
+{
+ uint32_t maj;
+ uint32_t min;
+ uint32_t bld;
+ uint32_t reserved;
+} VBOXVHWA_VERSION;
+
+#define VBOXVHWA_VERSION_INIT(_pv) do { \
+ (_pv)->maj = VBOXVHWA_VERSION_MAJ; \
+ (_pv)->min = VBOXVHWA_VERSION_MIN; \
+ (_pv)->bld = VBOXVHWA_VERSION_BLD; \
+ (_pv)->reserved = VBOXVHWA_VERSION_RSV; \
+ } while(0)
+
+typedef struct VBOXVHWACMD_QUERYINFO1
+{
+ union
+ {
+ struct
+ {
+ VBOXVHWA_VERSION guestVersion;
+ } in;
+
+ struct
+ {
+ uint32_t cfgFlags;
+ uint32_t caps;
+
+ uint32_t caps2;
+ uint32_t colorKeyCaps;
+
+ uint32_t stretchCaps;
+ uint32_t surfaceCaps;
+
+ uint32_t numOverlays;
+ uint32_t curOverlays;
+
+ uint32_t numFourCC;
+ uint32_t reserved;
+ } out;
+ } u;
+} VBOXVHWACMD_QUERYINFO1;
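+
+#if 0
+/* Usage sketch (illustrative only): filling in a QUERY_INFO1 command header
+ * and body with the VBOXVHWACMD_* helpers above. The caller is assumed to
+ * have allocated at least VBOXVHWACMD_SIZE(VBOXVHWACMD_QUERYINFO1) bytes;
+ * the helper name is hypothetical. */
+DECLINLINE(void) VBoxVHWAExamplePrepareQueryInfo1(VBOXVHWACMD *pCmd, int32_t iDisplay)
+{
+    VBOXVHWACMD_QUERYINFO1 *pBody = VBOXVHWACMD_BODY(pCmd, VBOXVHWACMD_QUERYINFO1);
+
+    pCmd->enmCmd   = VBOXVHWACMD_TYPE_QUERY_INFO1;
+    pCmd->iDisplay = iDisplay;
+    pCmd->Flags    = 0;
+    VBOXVHWA_VERSION_INIT(&pBody->u.in.guestVersion);
+}
+#endif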
+
+typedef struct VBOXVHWACMD_QUERYINFO2
+{
+ uint32_t numFourCC;
+ uint32_t FourCC[1];
+} VBOXVHWACMD_QUERYINFO2;
+
+#define VBOXVHWAINFO2_SIZE(_cFourCC) RT_OFFSETOF(VBOXVHWACMD_QUERYINFO2, FourCC[_cFourCC])
+
+typedef struct VBOXVHWACMD_SURF_CANCREATE
+{
+ VBOXVHWA_SURFACEDESC SurfInfo;
+ union
+ {
+ struct
+ {
+ uint32_t bIsDifferentPixelFormat;
+ uint32_t Reserved;
+ } in;
+
+ struct
+ {
+ int32_t ErrInfo;
+ } out;
+ } u;
+} VBOXVHWACMD_SURF_CANCREATE;
+
+typedef struct VBOXVHWACMD_SURF_CREATE
+{
+ VBOXVHWA_SURFACEDESC SurfInfo;
+} VBOXVHWACMD_SURF_CREATE;
+
+#ifdef VBOX_WITH_WDDM
+typedef struct VBOXVHWACMD_SURF_GETINFO
+{
+ VBOXVHWA_SURFACEDESC SurfInfo;
+} VBOXVHWACMD_SURF_GETINFO;
+#endif
+
+typedef struct VBOXVHWACMD_SURF_DESTROY
+{
+ union
+ {
+ struct
+ {
+ VBOXVHWA_SURFHANDLE hSurf;
+ } in;
+ } u;
+} VBOXVHWACMD_SURF_DESTROY;
+
+typedef struct VBOXVHWACMD_SURF_LOCK
+{
+ union
+ {
+ struct
+ {
+ VBOXVHWA_SURFHANDLE hSurf;
+ uint64_t offSurface;
+ uint32_t flags;
+ uint32_t rectValid;
+ VBOXVHWA_RECTL rect;
+ } in;
+ } u;
+} VBOXVHWACMD_SURF_LOCK;
+
+typedef struct VBOXVHWACMD_SURF_UNLOCK
+{
+ union
+ {
+ struct
+ {
+ VBOXVHWA_SURFHANDLE hSurf;
+ uint32_t xUpdatedMemValid;
+ uint32_t reserved;
+ VBOXVHWA_RECTL xUpdatedMemRect;
+ } in;
+ } u;
+} VBOXVHWACMD_SURF_UNLOCK;
+
+typedef struct VBOXVHWACMD_SURF_BLT
+{
+ uint64_t DstGuestSurfInfo;
+ uint64_t SrcGuestSurfInfo;
+ union
+ {
+ struct
+ {
+ VBOXVHWA_SURFHANDLE hDstSurf;
+ uint64_t offDstSurface;
+ VBOXVHWA_RECTL dstRect;
+ VBOXVHWA_SURFHANDLE hSrcSurf;
+ uint64_t offSrcSurface;
+ VBOXVHWA_RECTL srcRect;
+ uint32_t flags;
+ uint32_t xUpdatedSrcMemValid;
+ VBOXVHWA_BLTFX desc;
+ VBOXVHWA_RECTL xUpdatedSrcMemRect;
+ } in;
+ } u;
+} VBOXVHWACMD_SURF_BLT;
+
+#ifdef VBOX_WITH_WDDM
+typedef struct VBOXVHWACMD_SURF_COLORFILL
+{
+ union
+ {
+ struct
+ {
+ VBOXVHWA_SURFHANDLE hSurf;
+ uint64_t offSurface;
+ uint32_t u32Reserved;
+ uint32_t cRects;
+ VBOXVHWA_RECTL aRects[1];
+ } in;
+ } u;
+} VBOXVHWACMD_SURF_COLORFILL;
+#endif
+
+typedef struct VBOXVHWACMD_SURF_FLIP
+{
+ uint64_t TargGuestSurfInfo;
+ uint64_t CurrGuestSurfInfo;
+ union
+ {
+ struct
+ {
+ VBOXVHWA_SURFHANDLE hTargSurf;
+ uint64_t offTargSurface;
+ VBOXVHWA_SURFHANDLE hCurrSurf;
+ uint64_t offCurrSurface;
+ uint32_t flags;
+ uint32_t xUpdatedTargMemValid;
+ VBOXVHWA_RECTL xUpdatedTargMemRect;
+ } in;
+ } u;
+} VBOXVHWACMD_SURF_FLIP;
+
+typedef struct VBOXVHWACMD_SURF_COLORKEY_SET
+{
+ union
+ {
+ struct
+ {
+ VBOXVHWA_SURFHANDLE hSurf;
+ uint64_t offSurface;
+ VBOXVHWA_COLORKEY CKey;
+ uint32_t flags;
+ uint32_t reserved;
+ } in;
+ } u;
+} VBOXVHWACMD_SURF_COLORKEY_SET;
+
+#define VBOXVHWACMD_SURF_OVERLAY_UPDATE_F_SRCMEMRECT 0x00000001
+#define VBOXVHWACMD_SURF_OVERLAY_UPDATE_F_DSTMEMRECT 0x00000002
+
+typedef struct VBOXVHWACMD_SURF_OVERLAY_UPDATE
+{
+ union
+ {
+ struct
+ {
+ VBOXVHWA_SURFHANDLE hDstSurf;
+ uint64_t offDstSurface;
+ VBOXVHWA_RECTL dstRect;
+ VBOXVHWA_SURFHANDLE hSrcSurf;
+ uint64_t offSrcSurface;
+ VBOXVHWA_RECTL srcRect;
+ uint32_t flags;
+ uint32_t xFlags;
+ VBOXVHWA_OVERLAYFX desc;
+ VBOXVHWA_RECTL xUpdatedSrcMemRect;
+ VBOXVHWA_RECTL xUpdatedDstMemRect;
+ } in;
+ } u;
+} VBOXVHWACMD_SURF_OVERLAY_UPDATE;
+
+typedef struct VBOXVHWACMD_SURF_OVERLAY_SETPOSITION
+{
+ union
+ {
+ struct
+ {
+ VBOXVHWA_SURFHANDLE hDstSurf;
+ uint64_t offDstSurface;
+ VBOXVHWA_SURFHANDLE hSrcSurf;
+ uint64_t offSrcSurface;
+ uint32_t xPos;
+ uint32_t yPos;
+ uint32_t flags;
+ uint32_t reserved;
+ } in;
+ } u;
+} VBOXVHWACMD_SURF_OVERLAY_SETPOSITION;
+
+typedef struct VBOXVHWACMD_HH_CONSTRUCT
+{
+ void *pVM;
+ /* VRAM info for the backend to be able to properly translate VRAM offsets */
+ void *pvVRAM;
+ uint32_t cbVRAM;
+} VBOXVHWACMD_HH_CONSTRUCT;
+
+typedef struct VBOXVHWACMD_HH_SAVESTATE_SAVEPERFORM
+{
+ struct SSMHANDLE * pSSM;
+} VBOXVHWACMD_HH_SAVESTATE_SAVEPERFORM;
+
+typedef struct VBOXVHWACMD_HH_SAVESTATE_LOADPERFORM
+{
+ struct SSMHANDLE * pSSM;
+} VBOXVHWACMD_HH_SAVESTATE_LOADPERFORM;
+
+typedef DECLCALLBACK(void) FNVBOXVHWA_HH_CALLBACK(void*);
+typedef FNVBOXVHWA_HH_CALLBACK *PFNVBOXVHWA_HH_CALLBACK;
+
+#define VBOXVHWA_HH_CALLBACK_SET(_pCmd, _pfn, _parg) \
+ do { \
+ (_pCmd)->GuestVBVAReserved1 = (uint64_t)(uintptr_t)(_pfn); \
+ (_pCmd)->GuestVBVAReserved2 = (uint64_t)(uintptr_t)(_parg); \
+ } while (0)
+
+#define VBOXVHWA_HH_CALLBACK_GET(_pCmd) ((PFNVBOXVHWA_HH_CALLBACK)(_pCmd)->GuestVBVAReserved1)
+#define VBOXVHWA_HH_CALLBACK_GET_ARG(_pCmd) ((void*)(_pCmd)->GuestVBVAReserved2)
+
+#pragma pack()
+#endif /* #ifdef VBOX_WITH_VIDEOHWACCEL */
+
+/* All structures are without alignment. */
+#pragma pack(1)
+
+typedef struct VBVAHOSTFLAGS
+{
+ uint32_t u32HostEvents;
+ uint32_t u32SupportedOrders;
+} VBVAHOSTFLAGS;
+
+typedef struct VBVABUFFER
+{
+ VBVAHOSTFLAGS hostFlags;
+
+ /* The offset where the data start in the buffer. */
+ uint32_t off32Data;
+ /* The offset where next data must be placed in the buffer. */
+ uint32_t off32Free;
+
+ /* The queue of record descriptions. */
+ VBVARECORD aRecords[VBVA_MAX_RECORDS];
+ uint32_t indexRecordFirst;
+ uint32_t indexRecordFree;
+
+ /* Space to leave free in the buffer when large partial records are transferred. */
+ uint32_t cbPartialWriteThreshold;
+
+ uint32_t cbData;
+ uint8_t au8Data[1]; /* variable size for the rest of the VBVABUFFER area in VRAM. */
+} VBVABUFFER;
+
+#define VBVA_MAX_RECORD_SIZE (128*_1M)
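+
+#if 0
+/* Usage sketch (illustrative only): one way to compute how many bytes are
+ * free in the ring described by off32Data/off32Free; the helper name is
+ * hypothetical. */
+DECLINLINE(uint32_t) VBVAExampleBufferBytesFree(const VBVABUFFER *pVBVA)
+{
+    uint32_t off32Data = pVBVA->off32Data; /* Start of data not yet read by the host. */
+    uint32_t off32Free = pVBVA->off32Free; /* Where the guest writes next. */
+
+    if (off32Free >= off32Data)
+        return pVBVA->cbData - (off32Free - off32Data);
+    return off32Data - off32Free;
+}
+#endif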
+
+/* guest->host commands */
+#define VBVA_QUERY_CONF32 1
+#define VBVA_SET_CONF32 2
+#define VBVA_INFO_VIEW 3
+#define VBVA_INFO_HEAP 4
+#define VBVA_FLUSH 5
+#define VBVA_INFO_SCREEN 6
+#define VBVA_ENABLE 7
+#define VBVA_MOUSE_POINTER_SHAPE 8
+#ifdef VBOX_WITH_VIDEOHWACCEL
+# define VBVA_VHWA_CMD 9
+#endif /* # ifdef VBOX_WITH_VIDEOHWACCEL */
+#ifdef VBOX_WITH_VDMA
+# define VBVA_VDMA_CTL 10 /* setup G<->H DMA channel info */
+# define VBVA_VDMA_CMD 11 /* G->H DMA command */
+#endif
+#define VBVA_INFO_CAPS 12 /* informs host about HGSMI caps. see VBVACAPS below */
+#define VBVA_SCANLINE_CFG 13 /* configures scanline, see VBVASCANLINECFG below */
+#define VBVA_SCANLINE_INFO 14 /* requests scanline info, see VBVASCANLINEINFO below */
+#define VBVA_CMDVBVA_SUBMIT 16 /* inform host about VBVA command submission */
+#define VBVA_CMDVBVA_FLUSH 17 /* inform host about a VBVA command buffer flush */
+#define VBVA_CMDVBVA_CTL 18 /* G->H DMA command */
+#define VBVA_QUERY_MODE_HINTS 19 /* Query most recent mode hints sent. */
+/** Report the guest virtual desktop position and size for mapping host and
+ * guest pointer positions. */
+#define VBVA_REPORT_INPUT_MAPPING 20
+/** Report the guest cursor position and query the host position. */
+#define VBVA_CURSOR_POSITION 21
+
+/* host->guest commands */
+#define VBVAHG_EVENT 1
+#define VBVAHG_DISPLAY_CUSTOM 2
+#ifdef VBOX_WITH_VDMA
+#define VBVAHG_SHGSMI_COMPLETION 3
+#endif
+
+#ifdef VBOX_WITH_VIDEOHWACCEL
+#define VBVAHG_DCUSTOM_VHWA_CMDCOMPLETE 1
+#pragma pack(1)
+typedef struct VBVAHOSTCMDVHWACMDCOMPLETE
+{
+ uint32_t offCmd;
+} VBVAHOSTCMDVHWACMDCOMPLETE;
+#pragma pack()
+#endif /* # ifdef VBOX_WITH_VIDEOHWACCEL */
+
+#pragma pack(1)
+typedef enum
+{
+ VBVAHOSTCMD_OP_EVENT = 1,
+ VBVAHOSTCMD_OP_CUSTOM
+} VBVAHOSTCMD_OP_TYPE;
+
+typedef struct VBVAHOSTCMDEVENT
+{
+ uint64_t pEvent;
+} VBVAHOSTCMDEVENT;
+
+
+typedef struct VBVAHOSTCMD
+{
+ /* Destination ID: if >= 0 it specifies a display index, otherwise the command is directed to the miniport. */
+ int32_t iDstID;
+ int32_t customOpCode;
+ union
+ {
+ struct VBVAHOSTCMD *pNext;
+ uint32_t offNext;
+ uint64_t Data; /* the body is 64-bit aligned */
+ } u;
+ char body[1];
+} VBVAHOSTCMD;
+
+#define VBVAHOSTCMD_SIZE(_size) (sizeof(VBVAHOSTCMD) + (_size))
+#define VBVAHOSTCMD_BODY(_pCmd, _tBody) ((_tBody*)(_pCmd)->body)
+#define VBVAHOSTCMD_HDR(_pBody) ((VBVAHOSTCMD*)(((uint8_t*)_pBody) - RT_OFFSETOF(VBVAHOSTCMD, body)))
+#define VBVAHOSTCMD_HDRSIZE (RT_OFFSETOF(VBVAHOSTCMD, body))
+
+#pragma pack()
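+
+#if 0
+/* Usage sketch (illustrative only): unpacking a host->guest event command
+ * with the VBVAHOSTCMD_* helpers above; the function name is hypothetical. */
+DECLINLINE(uint64_t) VBVAExampleHostCmdGetEvent(VBVAHOSTCMD *pCmd)
+{
+    /* The event payload lives in the command body right after the header. */
+    VBVAHOSTCMDEVENT *pEvent = VBVAHOSTCMD_BODY(pCmd, VBVAHOSTCMDEVENT);
+    return pEvent->pEvent;
+}
+#endif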
+
+/* VBVACONF32::u32Index */
+#define VBOX_VBVA_CONF32_MONITOR_COUNT 0
+#define VBOX_VBVA_CONF32_HOST_HEAP_SIZE 1
+/** Returns VINF_SUCCESS if the host can report mode hints via VBVA.
+ * Set value to VERR_NOT_SUPPORTED before calling. */
+#define VBOX_VBVA_CONF32_MODE_HINT_REPORTING 2
+/** Returns VINF_SUCCESS if the host can report guest cursor enabled status via
+ * VBVA. Set value to VERR_NOT_SUPPORTED before calling. */
+#define VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING 3
+/** Returns the currently available host cursor capabilities. Available if
+ * VBVACONF32::VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING returns success.
+ * @see VMMDevReqMouseStatus::mouseFeatures. */
+#define VBOX_VBVA_CONF32_CURSOR_CAPABILITIES 4
+/** Returns the supported flags in VBVAINFOSCREEN::u8Flags. */
+#define VBOX_VBVA_CONF32_SCREEN_FLAGS 5
+/** Returns the max size of VBVA record. */
+#define VBOX_VBVA_CONF32_MAX_RECORD_SIZE 6
+
+typedef struct VBVACONF32
+{
+ uint32_t u32Index;
+ uint32_t u32Value;
+} VBVACONF32;
+
+/** Reserved for historical reasons. */
+#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED0 RT_BIT(0)
+/** Guest cursor capability: can the host show a hardware cursor at the host
+ * pointer location? */
+#define VBOX_VBVA_CURSOR_CAPABILITY_HARDWARE RT_BIT(1)
+/** Reserved for historical reasons. */
+#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED2 RT_BIT(2)
+/** Reserved for historical reasons. Must always be unset. */
+#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED3 RT_BIT(3)
+/** Reserved for historical reasons. */
+#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED4 RT_BIT(4)
+/** Reserved for historical reasons. */
+#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED5 RT_BIT(5)
+
+typedef struct VBVAINFOVIEW
+{
+ /* Index of the screen, assigned by the guest. */
+ uint32_t u32ViewIndex;
+
+ /* The screen offset in VRAM, the framebuffer starts here. */
+ uint32_t u32ViewOffset;
+
+ /* The size of the VRAM memory that can be used for the view. */
+ uint32_t u32ViewSize;
+
+ /* The recommended maximum size of the VRAM memory for the screen. */
+ uint32_t u32MaxScreenSize;
+} VBVAINFOVIEW;
+
+typedef struct VBVAINFOHEAP
+{
+ /* Absolute offset in VRAM of the start of the heap. */
+ uint32_t u32HeapOffset;
+
+ /* The size of the heap. */
+ uint32_t u32HeapSize;
+
+} VBVAINFOHEAP;
+
+typedef struct VBVAFLUSH
+{
+ uint32_t u32Reserved;
+
+} VBVAFLUSH;
+
+typedef struct VBVACMDVBVASUBMIT
+{
+ uint32_t u32Reserved;
+} VBVACMDVBVASUBMIT;
+
+/* flush is requested due to guest command buffer overflow */
+#define VBVACMDVBVAFLUSH_F_GUEST_BUFFER_OVERFLOW 1
+
+typedef struct VBVACMDVBVAFLUSH
+{
+ uint32_t u32Flags;
+} VBVACMDVBVAFLUSH;
+
+
+/* VBVAINFOSCREEN::u8Flags */
+#define VBVA_SCREEN_F_NONE 0x0000
+#define VBVA_SCREEN_F_ACTIVE 0x0001
+/** The virtual monitor has been disabled by the guest and should be removed
+ * by the host and ignored for purposes of pointer position calculation. */
+#define VBVA_SCREEN_F_DISABLED 0x0002
+/** The virtual monitor has been blanked by the guest and should be blacked
+ * out by the host using the width, height, etc. values from the VBVAINFOSCREEN request. */
+#define VBVA_SCREEN_F_BLANK 0x0004
+/** The virtual monitor has been blanked by the guest and should be blacked
+ * out by the host using the previous mode values for width, height, etc. */
+#define VBVA_SCREEN_F_BLANK2 0x0008
+
+typedef struct VBVAINFOSCREEN
+{
+ /* Which view contains the screen. */
+ uint32_t u32ViewIndex;
+
+ /* Physical X origin relative to the primary screen. */
+ int32_t i32OriginX;
+
+ /* Physical Y origin relative to the primary screen. */
+ int32_t i32OriginY;
+
+ /* Offset of visible framebuffer relative to the framebuffer start. */
+ uint32_t u32StartOffset;
+
+ /* The scan line size in bytes. */
+ uint32_t u32LineSize;
+
+ /* Width of the screen. */
+ uint32_t u32Width;
+
+ /* Height of the screen. */
+ uint32_t u32Height;
+
+ /* Color depth. */
+ uint16_t u16BitsPerPixel;
+
+ /* VBVA_SCREEN_F_* */
+ uint16_t u16Flags;
+} VBVAINFOSCREEN;
+
+
+/* VBVAENABLE::u32Flags */
+#define VBVA_F_NONE 0x00000000
+#define VBVA_F_ENABLE 0x00000001
+#define VBVA_F_DISABLE 0x00000002
+/* extended VBVA to be used with WDDM */
+#define VBVA_F_EXTENDED 0x00000004
+/* vbva offset is absolute VRAM offset */
+#define VBVA_F_ABSOFFSET 0x00000008
+
+typedef struct VBVAENABLE
+{
+ uint32_t u32Flags;
+ uint32_t u32Offset;
+ int32_t i32Result;
+} VBVAENABLE;
+
+typedef struct VBVAENABLE_EX
+{
+ VBVAENABLE Base;
+ uint32_t u32ScreenId;
+} VBVAENABLE_EX;
+
+
+typedef struct VBVAMOUSEPOINTERSHAPE
+{
+ /* The host result. */
+ int32_t i32Result;
+
+ /* VBOX_MOUSE_POINTER_* bit flags. */
+ uint32_t fu32Flags;
+
+ /* X coordinate of the hot spot. */
+ uint32_t u32HotX;
+
+ /* Y coordinate of the hot spot. */
+ uint32_t u32HotY;
+
+ /* Width of the pointer in pixels. */
+ uint32_t u32Width;
+
+ /* Height of the pointer in scanlines. */
+ uint32_t u32Height;
+
+ /* Pointer data.
+ *
+ * The data consists of a 1 bpp AND mask followed by a 32 bpp XOR (color) mask.
+ *
+ * For pointers without an alpha channel the XOR mask pixels are 32 bit values: (lsb)BGR0(msb).
+ * For pointers with an alpha channel the XOR mask consists of (lsb)BGRA(msb) 32 bit values.
+ *
+ * The guest driver must create the AND mask for pointers with an alpha channel, so that if
+ * the host does not support alpha, the pointer can be displayed as a normal color pointer.
+ * The AND mask can be constructed from the alpha values. For example, an alpha value
+ * >= 0xf0 means bit 0 in the AND mask.
+ *
+ * The AND mask is a 1 bpp bitmap with byte aligned scanlines. The size of the AND mask,
+ * therefore, is cbAnd = (width + 7) / 8 * height. The padding bits at the
+ * end of any scanline are undefined.
+ *
+ * The XOR mask follows the AND mask at the next 4 byte aligned offset:
+ * uint8_t *pXor = pAnd + ((cbAnd + 3) & ~3)
+ * Bytes in the gap between the AND and the XOR mask are undefined.
+ * XOR mask scanlines have no gap between them and the size of the XOR mask is:
+ * cXor = width * 4 * height.
+ *
+ * Preallocate 4 bytes for accessing the actual data as p->au8Data.
+ */
+ uint8_t au8Data[4];
+
+} VBVAMOUSEPOINTERSHAPE;
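+
+#if 0
+/* Usage sketch (illustrative only): computing the size of the pointer data
+ * from the layout described above (1 bpp AND mask with byte aligned
+ * scanlines, padded to a 4 byte boundary, followed by a 32 bpp XOR mask).
+ * The helper name is hypothetical. */
+DECLINLINE(uint32_t) VBVAExamplePointerDataSize(uint32_t u32Width, uint32_t u32Height)
+{
+    uint32_t cbAnd = ((u32Width + 7) / 8) * u32Height; /* 1 bpp AND mask. */
+    uint32_t cbXor = u32Width * 4 * u32Height;         /* 32 bpp XOR mask. */
+
+    return ((cbAnd + 3) & ~(uint32_t)3) + cbXor;
+}
+#endif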
+
+/** @name VBVAMOUSEPOINTERSHAPE::fu32Flags
+ * @note The VBOX_MOUSE_POINTER_* flags are used in the guest video driver;
+ * the values must be <= 0x8000 and must not be changed. (Try to make more
+ * sense of this, please.)
+ * @{
+ */
+/** pointer is visible */
+#define VBOX_MOUSE_POINTER_VISIBLE (0x0001)
+/** pointer has alpha channel */
+#define VBOX_MOUSE_POINTER_ALPHA (0x0002)
+/** pointerData contains new pointer shape */
+#define VBOX_MOUSE_POINTER_SHAPE (0x0004)
+/** @} */
+
+/* the guest driver can handle asynchronous guest command completion by reading the command offset from an I/O port */
+#define VBVACAPS_COMPLETEGCMD_BY_IOREAD 0x00000001
+/* the guest driver can handle video adapter IRQs */
+#define VBVACAPS_IRQ 0x00000002
+/** The guest can read video mode hints sent via VBVA. */
+#define VBVACAPS_VIDEO_MODE_HINTS 0x00000004
+/** The guest can switch to a software cursor on demand. */
+#define VBVACAPS_DISABLE_CURSOR_INTEGRATION 0x00000008
+/** The guest does not depend on host handling the VBE registers. */
+#define VBVACAPS_USE_VBVA_ONLY 0x00000010
+typedef struct VBVACAPS
+{
+ int32_t rc;
+ uint32_t fCaps;
+} VBVACAPS;
+
+/* makes graphics device generate IRQ on VSYNC */
+#define VBVASCANLINECFG_ENABLE_VSYNC_IRQ 0x00000001
+/* guest driver may request the current scanline */
+#define VBVASCANLINECFG_ENABLE_SCANLINE_INFO 0x00000002
+/* request the current refresh period, returned in u32RefreshPeriodMs */
+#define VBVASCANLINECFG_QUERY_REFRESH_PERIOD 0x00000004
+/* set a new refresh period, specified in u32RefreshPeriodMs.
+ * If used with VBVASCANLINECFG_QUERY_REFRESH_PERIOD,
+ * u32RefreshPeriodMs is set to the previous refresh period on return. */
+#define VBVASCANLINECFG_SET_REFRESH_PERIOD 0x00000008
+
+typedef struct VBVASCANLINECFG
+{
+ int32_t rc;
+ uint32_t fFlags;
+ uint32_t u32RefreshPeriodMs;
+ uint32_t u32Reserved;
+} VBVASCANLINECFG;
+
+typedef struct VBVASCANLINEINFO
+{
+ int32_t rc;
+ uint32_t u32ScreenId;
+ uint32_t u32InVBlank;
+ uint32_t u32ScanLine;
+} VBVASCANLINEINFO;
+
+/** Query the most recent mode hints received from the host. */
+typedef struct VBVAQUERYMODEHINTS
+{
+ /** The maximum number of screens to return hints for. */
+ uint16_t cHintsQueried;
+ /** The size of the mode hint structures directly following this one. */
+ uint16_t cbHintStructureGuest;
+ /** The return code for the operation. Initialise to VERR_NOT_SUPPORTED. */
+ int32_t rc;
+} VBVAQUERYMODEHINTS;
+
+/** Structure in which a mode hint is returned. The guest allocates an array
+ * of these immediately after the VBVAQUERYMODEHINTS structure. To accommodate
+ * future extensions, the VBVAQUERYMODEHINTS structure specifies the size of
+ * the VBVAMODEHINT structures allocated by the guest, and the host only fills
+ * out structure elements which fit into that size. The host should fill any
+ * unused members (e.g. dx, dy) or structure space on the end with ~0. The
+ * whole structure can legally be set to ~0 to skip a screen. */
+typedef struct VBVAMODEHINT
+{
+ uint32_t magic;
+ uint32_t cx;
+ uint32_t cy;
+ uint32_t cBPP; /* Which has never been used... */
+ uint32_t cDisplay;
+ uint32_t dx; /**< X offset into the virtual frame-buffer. */
+ uint32_t dy; /**< Y offset into the virtual frame-buffer. */
+ uint32_t fEnabled; /* Not fFlags. Add new members for new flags. */
+} VBVAMODEHINT;
+
+#define VBVAMODEHINT_MAGIC UINT32_C(0x0801add9)
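+
+#if 0
+/* Usage sketch (illustrative only): preparing a mode hint query as described
+ * above. The guest allocates the VBVAQUERYMODEHINTS header followed directly
+ * by an array of the hint structures it understands; the helper names are
+ * hypothetical. */
+DECLINLINE(uint32_t) VBVAExampleQueryModeHintsSize(uint16_t cScreens)
+{
+    return (uint32_t)sizeof(VBVAQUERYMODEHINTS) + cScreens * (uint32_t)sizeof(VBVAMODEHINT);
+}
+
+DECLINLINE(void) VBVAExampleQueryModeHintsInit(VBVAQUERYMODEHINTS *pQuery, uint16_t cScreens)
+{
+    pQuery->cHintsQueried        = cScreens;
+    pQuery->cbHintStructureGuest = (uint16_t)sizeof(VBVAMODEHINT);
+    pQuery->rc                   = VERR_NOT_SUPPORTED; /* Host overwrites this. */
+}
+#endif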
+
+/** Report the rectangle relative to which absolute pointer events should be
+ * expressed. This information remains valid until the next VBVA resize event
+ * for any screen, at which time it is reset to the bounding rectangle of all
+ * virtual screens and must be re-set.
+ * @see VBVA_REPORT_INPUT_MAPPING. */
+typedef struct VBVAREPORTINPUTMAPPING
+{
+ int32_t x; /**< Upper left X co-ordinate relative to the first screen. */
+ int32_t y; /**< Upper left Y co-ordinate relative to the first screen. */
+ uint32_t cx; /**< Rectangle width. */
+ uint32_t cy; /**< Rectangle height. */
+} VBVAREPORTINPUTMAPPING;
+
+/** Report the guest cursor position and query the host one. The host may wish
+ * to use the guest information to re-position its own cursor (though this is
+ * currently unlikely).
+ * @see VBVA_CURSOR_POSITION */
+typedef struct VBVACURSORPOSITION
+{
+ uint32_t fReportPosition; /**< Are we reporting a position? */
+ uint32_t x; /**< Guest cursor X position */
+ uint32_t y; /**< Guest cursor Y position */
+} VBVACURSORPOSITION;
+
+#pragma pack()
+
+typedef uint64_t VBOXVIDEOOFFSET;
+
+#define VBOXVIDEOOFFSET_VOID ((VBOXVIDEOOFFSET)~0)
+
+#pragma pack(1)
+
+/*
+ * VBOXSHGSMI is built on top of HGSMI and allows receiving notifications
+ * about G->H command completion.
+ */
+/* SHGSMI command header */
+typedef struct VBOXSHGSMIHEADER
+{
+ uint64_t pvNext; /*<- completion processing queue */
+ uint32_t fFlags; /*<- see VBOXSHGSMI_FLAG_XXX Flags */
+ uint32_t cRefs; /*<- command reference count */
+ uint64_t u64Info1; /*<- contents depends on the fFlags value */
+ uint64_t u64Info2; /*<- contents depends on the fFlags value */
+} VBOXSHGSMIHEADER, *PVBOXSHGSMIHEADER;
+
+typedef enum
+{
+ VBOXVDMACMD_TYPE_UNDEFINED = 0,
+ VBOXVDMACMD_TYPE_DMA_PRESENT_BLT = 1,
+ VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER,
+ VBOXVDMACMD_TYPE_DMA_BPB_FILL,
+ VBOXVDMACMD_TYPE_DMA_PRESENT_SHADOW2PRIMARY,
+ VBOXVDMACMD_TYPE_DMA_PRESENT_CLRFILL,
+ VBOXVDMACMD_TYPE_DMA_PRESENT_FLIP,
+ VBOXVDMACMD_TYPE_DMA_NOP,
+ VBOXVDMACMD_TYPE_CHROMIUM_CMD, /* chromium cmd */
+ VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER_VRAMSYS,
+ VBOXVDMACMD_TYPE_CHILD_STATUS_IRQ /* make the device notify child (monitor) state change IRQ */
+} VBOXVDMACMD_TYPE;
+
+#pragma pack()
+
+/* The command processing was asynchronous. Set by the host to indicate
+ * asynchronous command completion. Must not be cleared once set; the command
+ * completion is performed by issuing a host->guest completion command while
+ * keeping this flag unchanged. */
+#define VBOXSHGSMI_FLAG_HG_ASYNCH 0x00010000
+#if 0
+/* if set - asynchronous completion is performed by issuing the event,
+ * if cleared - asynchronous completion is performed by calling a callback */
+#define VBOXSHGSMI_FLAG_GH_ASYNCH_EVENT 0x00000001
+#endif
+/* Issue an interrupt on asynchronous completion. Used for critical G->H commands,
+ * i.e. commands whose completion the guest is waiting for. */
+#define VBOXSHGSMI_FLAG_GH_ASYNCH_IRQ 0x00000002
+/* The guest does not do any operation on completion of this command.
+ * The host may copy the command and indicate that it does not need the command
+ * anymore by not setting VBOXSHGSMI_FLAG_HG_ASYNCH. */
+#define VBOXSHGSMI_FLAG_GH_ASYNCH_NOCOMPLETION 0x00000004
+/* The guest requires the command to be processed asynchronously. If the host
+ * does not set VBOXSHGSMI_FLAG_HG_ASYNCH in this case, it is treated as a
+ * command failure. */
+#define VBOXSHGSMI_FLAG_GH_ASYNCH_FORCE 0x00000008
+/* force IRQ on cmd completion */
+#define VBOXSHGSMI_FLAG_GH_ASYNCH_IRQ_FORCE 0x00000010
+/* an IRQ-level callback is associated with the command */
+#define VBOXSHGSMI_FLAG_GH_ASYNCH_CALLBACK_IRQ 0x00000020
+/* guest expects this command to be completed synchronously */
+#define VBOXSHGSMI_FLAG_GH_SYNCH 0x00000040
+
+
+DECLINLINE(uint8_t *) VBoxSHGSMIBufferData (const VBOXSHGSMIHEADER* pHeader)
+{
+ return (uint8_t *)pHeader + sizeof (VBOXSHGSMIHEADER);
+}
+
+#define VBoxSHGSMIBufferHeaderSize() (sizeof (VBOXSHGSMIHEADER))
+
+DECLINLINE(PVBOXSHGSMIHEADER) VBoxSHGSMIBufferHeader (const void *pvData)
+{
+ return (PVBOXSHGSMIHEADER)((uint8_t *)pvData - sizeof (VBOXSHGSMIHEADER));
+}
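+
+#if 0
+/* Usage sketch (illustrative only): the two helpers above are inverses of
+ * each other, since the buffer data immediately follows the SHGSMI header. */
+DECLINLINE(PVBOXSHGSMIHEADER) VBoxSHGSMIExampleRoundTrip(PVBOXSHGSMIHEADER pHeader)
+{
+    uint8_t *pvData = VBoxSHGSMIBufferData(pHeader);
+    return VBoxSHGSMIBufferHeader(pvData); /* == pHeader */
+}
+#endif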
+
+#ifdef VBOX_WITH_VDMA
+# pragma pack(1)
+
+/* VDMA - Video DMA */
+
+/* VDMA Control API */
+/* VBOXVDMA_CTL::u32Flags */
+typedef enum
+{
+ VBOXVDMA_CTL_TYPE_NONE = 0,
+ VBOXVDMA_CTL_TYPE_ENABLE,
+ VBOXVDMA_CTL_TYPE_DISABLE,
+ VBOXVDMA_CTL_TYPE_FLUSH,
+ VBOXVDMA_CTL_TYPE_WATCHDOG
+} VBOXVDMA_CTL_TYPE;
+
+typedef struct VBOXVDMA_CTL
+{
+ VBOXVDMA_CTL_TYPE enmCtl;
+ uint32_t u32Offset;
+ int32_t i32Result;
+} VBOXVDMA_CTL, *PVBOXVDMA_CTL;
+
+typedef struct VBOXVDMA_RECTL
+{
+ int16_t left;
+ int16_t top;
+ uint16_t width;
+ uint16_t height;
+} VBOXVDMA_RECTL, *PVBOXVDMA_RECTL;
+
+typedef enum
+{
+ VBOXVDMA_PIXEL_FORMAT_UNKNOWN = 0,
+ VBOXVDMA_PIXEL_FORMAT_R8G8B8 = 20,
+ VBOXVDMA_PIXEL_FORMAT_A8R8G8B8 = 21,
+ VBOXVDMA_PIXEL_FORMAT_X8R8G8B8 = 22,
+ VBOXVDMA_PIXEL_FORMAT_R5G6B5 = 23,
+ VBOXVDMA_PIXEL_FORMAT_X1R5G5B5 = 24,
+ VBOXVDMA_PIXEL_FORMAT_A1R5G5B5 = 25,
+ VBOXVDMA_PIXEL_FORMAT_A4R4G4B4 = 26,
+ VBOXVDMA_PIXEL_FORMAT_R3G3B2 = 27,
+ VBOXVDMA_PIXEL_FORMAT_A8 = 28,
+ VBOXVDMA_PIXEL_FORMAT_A8R3G3B2 = 29,
+ VBOXVDMA_PIXEL_FORMAT_X4R4G4B4 = 30,
+ VBOXVDMA_PIXEL_FORMAT_A2B10G10R10 = 31,
+ VBOXVDMA_PIXEL_FORMAT_A8B8G8R8 = 32,
+ VBOXVDMA_PIXEL_FORMAT_X8B8G8R8 = 33,
+ VBOXVDMA_PIXEL_FORMAT_G16R16 = 34,
+ VBOXVDMA_PIXEL_FORMAT_A2R10G10B10 = 35,
+ VBOXVDMA_PIXEL_FORMAT_A16B16G16R16 = 36,
+ VBOXVDMA_PIXEL_FORMAT_A8P8 = 40,
+ VBOXVDMA_PIXEL_FORMAT_P8 = 41,
+ VBOXVDMA_PIXEL_FORMAT_L8 = 50,
+ VBOXVDMA_PIXEL_FORMAT_A8L8 = 51,
+ VBOXVDMA_PIXEL_FORMAT_A4L4 = 52,
+ VBOXVDMA_PIXEL_FORMAT_V8U8 = 60,
+ VBOXVDMA_PIXEL_FORMAT_L6V5U5 = 61,
+ VBOXVDMA_PIXEL_FORMAT_X8L8V8U8 = 62,
+ VBOXVDMA_PIXEL_FORMAT_Q8W8V8U8 = 63,
+ VBOXVDMA_PIXEL_FORMAT_V16U16 = 64,
+ VBOXVDMA_PIXEL_FORMAT_W11V11U10 = 65,
+ VBOXVDMA_PIXEL_FORMAT_A2W10V10U10 = 67
+} VBOXVDMA_PIXEL_FORMAT;
+
+typedef struct VBOXVDMA_SURF_DESC
+{
+ uint32_t width;
+ uint32_t height;
+ VBOXVDMA_PIXEL_FORMAT format;
+ uint32_t bpp;
+ uint32_t pitch;
+ uint32_t fFlags;
+} VBOXVDMA_SURF_DESC, *PVBOXVDMA_SURF_DESC;
+
+/*typedef uint64_t VBOXVDMAPHADDRESS;*/
+typedef uint64_t VBOXVDMASURFHANDLE;
+
+/* region specified as a rectangle, otherwise it is the size of the memory pointed to by the physical address */
+#define VBOXVDMAOPERAND_FLAGS_RECTL 0x1
+/* Surface handle is valid */
+#define VBOXVDMAOPERAND_FLAGS_PRIMARY 0x2
+/* address is offset in VRAM */
+#define VBOXVDMAOPERAND_FLAGS_VRAMOFFSET 0x4
+
+
+/* VBOXVDMACBUF_DR::phBuf specifies offset in VRAM */
+#define VBOXVDMACBUF_FLAG_BUF_VRAM_OFFSET 0x00000001
+/* command buffer follows the VBOXVDMACBUF_DR in VRAM, VBOXVDMACBUF_DR::phBuf is ignored */
+#define VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR 0x00000002
+
+/*
+ * We cannot submit the DMA command via VRAM since we do not have control over
+ * DMA command buffer [de]allocation, i.e. we only control the buffer contents.
+ * In other words, the system may call one of our callbacks to fill a command buffer
+ * with the necessary commands and then discard the buffer without any notification.
+ *
+ * We only have the DMA command buffer physical address at submission time.
+ *
+ * So the only way is to ... */
+typedef struct VBOXVDMACBUF_DR
+{
+ uint16_t fFlags;
+ uint16_t cbBuf;
+ /* RT_SUCCESS() - on success
+ * VERR_INTERRUPTED - on preemption
+ * VERR_xxx - on error */
+ int32_t rc;
+ union
+ {
+ uint64_t phBuf;
+ VBOXVIDEOOFFSET offVramBuf;
+ } Location;
+ uint64_t aGuestData[7];
+} VBOXVDMACBUF_DR, *PVBOXVDMACBUF_DR;
+
+#define VBOXVDMACBUF_DR_TAIL(_pCmd, _t) ( (_t*)(((uint8_t*)(_pCmd)) + sizeof (VBOXVDMACBUF_DR)) )
+#define VBOXVDMACBUF_DR_FROM_TAIL(_pCmd) ( (VBOXVDMACBUF_DR*)(((uint8_t*)(_pCmd)) - sizeof (VBOXVDMACBUF_DR)) )
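+
+#if 0
+/* Usage sketch (illustrative only): locating the command buffer described by
+ * a VBOXVDMACBUF_DR, based on the flag comments above. Translation of a
+ * physical phBuf address is not shown; the helper name is hypothetical. */
+DECLINLINE(void *) VBoxVDMAExampleCmdBuffer(PVBOXVDMACBUF_DR pDr, uint8_t *pu8VRAM)
+{
+    if (pDr->fFlags & VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR)
+        return VBOXVDMACBUF_DR_TAIL(pDr, void); /* Buffer follows the descriptor. */
+    if (pDr->fFlags & VBOXVDMACBUF_FLAG_BUF_VRAM_OFFSET)
+        return pu8VRAM + pDr->Location.offVramBuf; /* Offset into VRAM. */
+    return NULL; /* Location.phBuf holds a physical address. */
+}
+#endif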
+
+typedef struct VBOXVDMACMD
+{
+ VBOXVDMACMD_TYPE enmType;
+ uint32_t u32CmdSpecific;
+} VBOXVDMACMD, *PVBOXVDMACMD;
+
+#define VBOXVDMACMD_HEADER_SIZE() sizeof (VBOXVDMACMD)
+#define VBOXVDMACMD_SIZE_FROMBODYSIZE(_s) (VBOXVDMACMD_HEADER_SIZE() + (_s))
+#define VBOXVDMACMD_SIZE(_t) (VBOXVDMACMD_SIZE_FROMBODYSIZE(sizeof (_t)))
+#define VBOXVDMACMD_BODY(_pCmd, _t) ( (_t*)(((uint8_t*)(_pCmd)) + VBOXVDMACMD_HEADER_SIZE()) )
+#define VBOXVDMACMD_BODY_SIZE(_s) ( (_s) - VBOXVDMACMD_HEADER_SIZE() )
+#define VBOXVDMACMD_FROM_BODY(_pCmd) ( (VBOXVDMACMD*)(((uint8_t*)(_pCmd)) - VBOXVDMACMD_HEADER_SIZE()) )
+#define VBOXVDMACMD_BODY_FIELD_OFFSET(_ot, _t, _f) ( (_ot)(uintptr_t)( VBOXVDMACMD_BODY(0, uint8_t) + RT_OFFSETOF(_t, _f) ) )
+
+typedef struct VBOXVDMACMD_DMA_PRESENT_BLT
+{
+ VBOXVIDEOOFFSET offSrc;
+ VBOXVIDEOOFFSET offDst;
+ VBOXVDMA_SURF_DESC srcDesc;
+ VBOXVDMA_SURF_DESC dstDesc;
+ VBOXVDMA_RECTL srcRectl;
+ VBOXVDMA_RECTL dstRectl;
+ uint32_t u32Reserved;
+ uint32_t cDstSubRects;
+ VBOXVDMA_RECTL aDstSubRects[1];
+} VBOXVDMACMD_DMA_PRESENT_BLT, *PVBOXVDMACMD_DMA_PRESENT_BLT;
+
+typedef struct VBOXVDMACMD_DMA_PRESENT_SHADOW2PRIMARY
+{
+ VBOXVDMA_RECTL Rect;
+} VBOXVDMACMD_DMA_PRESENT_SHADOW2PRIMARY, *PVBOXVDMACMD_DMA_PRESENT_SHADOW2PRIMARY;
+
+
+#define VBOXVDMACMD_DMA_BPB_TRANSFER_F_SRC_VRAMOFFSET 0x00000001
+#define VBOXVDMACMD_DMA_BPB_TRANSFER_F_DST_VRAMOFFSET 0x00000002
+
+typedef struct VBOXVDMACMD_DMA_BPB_TRANSFER
+{
+ uint32_t cbTransferSize;
+ uint32_t fFlags;
+ union
+ {
+ uint64_t phBuf;
+ VBOXVIDEOOFFSET offVramBuf;
+ } Src;
+ union
+ {
+ uint64_t phBuf;
+ VBOXVIDEOOFFSET offVramBuf;
+ } Dst;
+} VBOXVDMACMD_DMA_BPB_TRANSFER, *PVBOXVDMACMD_DMA_BPB_TRANSFER;
+
+#define VBOXVDMACMD_SYSMEMEL_F_PAGELIST 0x00000001
+
+typedef struct VBOXVDMACMD_SYSMEMEL
+{
+ uint32_t cPages;
+ uint32_t fFlags;
+ uint64_t phBuf[1];
+} VBOXVDMACMD_SYSMEMEL, *PVBOXVDMACMD_SYSMEMEL;
+
+#define VBOXVDMACMD_SYSMEMEL_NEXT(_pEl) (((_pEl)->fFlags & VBOXVDMACMD_SYSMEMEL_F_PAGELIST) ? \
+ ((PVBOXVDMACMD_SYSMEMEL)(((uint8_t*)(_pEl))+RT_OFFSETOF(VBOXVDMACMD_SYSMEMEL, phBuf[(_pEl)->cPages]))) \
+ : \
+ ((_pEl)+1))
+
+#define VBOXVDMACMD_DMA_BPB_TRANSFER_VRAMSYS_SYS2VRAM 0x00000001
+
+typedef struct VBOXVDMACMD_DMA_BPB_TRANSFER_VRAMSYS
+{
+ uint32_t cTransferPages;
+ uint32_t fFlags;
+ VBOXVIDEOOFFSET offVramBuf;
+ VBOXVDMACMD_SYSMEMEL FirstEl;
+} VBOXVDMACMD_DMA_BPB_TRANSFER_VRAMSYS, *PVBOXVDMACMD_DMA_BPB_TRANSFER_VRAMSYS;
+
+typedef struct VBOXVDMACMD_DMA_BPB_FILL
+{
+ VBOXVIDEOOFFSET offSurf;
+ uint32_t cbFillSize;
+ uint32_t u32FillPattern;
+} VBOXVDMACMD_DMA_BPB_FILL, *PVBOXVDMACMD_DMA_BPB_FILL;
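+
+#if 0
+/* Usage sketch (illustrative only): laying out a BPB fill command with the
+ * VBOXVDMACMD_* helpers above. The caller is assumed to have allocated at
+ * least VBOXVDMACMD_SIZE(VBOXVDMACMD_DMA_BPB_FILL) bytes; the helper name is
+ * hypothetical. */
+DECLINLINE(void) VBoxVDMAExamplePrepareFill(PVBOXVDMACMD pCmd, VBOXVIDEOOFFSET offSurf,
+                                            uint32_t cbFillSize, uint32_t u32FillPattern)
+{
+    PVBOXVDMACMD_DMA_BPB_FILL pFill = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_DMA_BPB_FILL);
+
+    pCmd->enmType         = VBOXVDMACMD_TYPE_DMA_BPB_FILL;
+    pCmd->u32CmdSpecific  = 0;
+    pFill->offSurf        = offSurf;
+    pFill->cbFillSize     = cbFillSize;
+    pFill->u32FillPattern = u32FillPattern;
+}
+#endif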
+
+#define VBOXVDMA_CHILD_STATUS_F_CONNECTED 0x01
+#define VBOXVDMA_CHILD_STATUS_F_DISCONNECTED 0x02
+#define VBOXVDMA_CHILD_STATUS_F_ROTATED 0x04
+
+typedef struct VBOXVDMA_CHILD_STATUS
+{
+ uint32_t iChild;
+ uint8_t fFlags;
+ uint8_t u8RotationAngle;
+ uint16_t u16Reserved;
+} VBOXVDMA_CHILD_STATUS, *PVBOXVDMA_CHILD_STATUS;
+
+/* the aInfos are applied to all targets, the iTarget is ignored */
+#define VBOXVDMACMD_CHILD_STATUS_IRQ_F_APPLY_TO_ALL 0x00000001
+
+typedef struct VBOXVDMACMD_CHILD_STATUS_IRQ
+{
+ uint32_t cInfos;
+ uint32_t fFlags;
+ VBOXVDMA_CHILD_STATUS aInfos[1];
+} VBOXVDMACMD_CHILD_STATUS_IRQ, *PVBOXVDMACMD_CHILD_STATUS_IRQ;
+
+# pragma pack()
+#endif /* #ifdef VBOX_WITH_VDMA */
+
+#pragma pack(1)
+typedef struct VBOXVDMACMD_CHROMIUM_BUFFER
+{
+ VBOXVIDEOOFFSET offBuffer;
+ uint32_t cbBuffer;
+ uint32_t u32GuestData;
+ uint64_t u64GuestData;
+} VBOXVDMACMD_CHROMIUM_BUFFER, *PVBOXVDMACMD_CHROMIUM_BUFFER;
+
+typedef struct VBOXVDMACMD_CHROMIUM_CMD
+{
+ uint32_t cBuffers;
+ uint32_t u32Reserved;
+ VBOXVDMACMD_CHROMIUM_BUFFER aBuffers[1];
+} VBOXVDMACMD_CHROMIUM_CMD, *PVBOXVDMACMD_CHROMIUM_CMD;
+
+typedef enum
+{
+ VBOXVDMACMD_CHROMIUM_CTL_TYPE_UNKNOWN = 0,
+ VBOXVDMACMD_CHROMIUM_CTL_TYPE_CRHGSMI_SETUP,
+ VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_BEGIN,
+ VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_END,
+ VBOXVDMACMD_CHROMIUM_CTL_TYPE_CRHGSMI_SETUP_MAINCB,
+ VBOXVDMACMD_CHROMIUM_CTL_TYPE_CRCONNECT,
+ VBOXVDMACMD_CHROMIUM_CTL_TYPE_SIZEHACK = 0x7fffffff
+} VBOXVDMACMD_CHROMIUM_CTL_TYPE;
+
+typedef struct VBOXVDMACMD_CHROMIUM_CTL
+{
+ VBOXVDMACMD_CHROMIUM_CTL_TYPE enmType;
+ uint32_t cbCmd;
+} VBOXVDMACMD_CHROMIUM_CTL, *PVBOXVDMACMD_CHROMIUM_CTL;
+
+
+typedef struct PDMIDISPLAYVBVACALLBACKS *HCRHGSMICMDCOMPLETION;
+typedef DECLCALLBACK(int) FNCRHGSMICMDCOMPLETION(HCRHGSMICMDCOMPLETION hCompletion, PVBOXVDMACMD_CHROMIUM_CMD pCmd, int rc);
+typedef FNCRHGSMICMDCOMPLETION *PFNCRHGSMICMDCOMPLETION;
+
+/* tells whether the 3D backend has some 3D overlay data displayed */
+typedef DECLCALLBACK(bool) FNCROGLHASDATA(void);
+typedef FNCROGLHASDATA *PFNCROGLHASDATA;
+
+/* same as PFNCROGLHASDATA, but for a specific screen */
+typedef DECLCALLBACK(bool) FNCROGLHASDATAFORSCREEN(uint32_t i32ScreenID);
+typedef FNCROGLHASDATAFORSCREEN *PFNCROGLHASDATAFORSCREEN;
+
+/* callbacks chrogl gives to main */
+typedef struct CR_MAIN_INTERFACE
+{
+ PFNCROGLHASDATA pfnHasData;
+ PFNCROGLHASDATAFORSCREEN pfnHasDataForScreen;
+} CR_MAIN_INTERFACE;
+
+typedef struct VBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP_MAINCB
+{
+ VBOXVDMACMD_CHROMIUM_CTL Hdr;
+ /*in*/
+ HCRHGSMICMDCOMPLETION hCompletion;
+ PFNCRHGSMICMDCOMPLETION pfnCompletion;
+ /*out*/
+ CR_MAIN_INTERFACE MainInterface;
+} VBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP_MAINCB, *PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP_MAINCB;
+
+typedef struct VBOXCRCON_SERVER *HVBOXCRCON_SERVER;
+typedef struct PDMIDISPLAYVBVACALLBACKS* HVBOXCRCON_CLIENT;
+
+typedef struct VBOXCRCON_3DRGN_CLIENT* HVBOXCRCON_3DRGN_CLIENT;
+typedef struct VBOXCRCON_3DRGN_ASYNCCLIENT* HVBOXCRCON_3DRGN_ASYNCCLIENT;
+
+/* server callbacks */
+/* submit chromium cmd */
+typedef DECLCALLBACK(int) FNVBOXCRCON_SVR_CRCMD(HVBOXCRCON_SERVER hServer, PVBOXVDMACMD_CHROMIUM_CMD pCmd, uint32_t cbCmd);
+typedef FNVBOXCRCON_SVR_CRCMD *PFNVBOXCRCON_SVR_CRCMD;
+
+/* submit chromium control cmd */
+typedef DECLCALLBACK(int) FNVBOXCRCON_SVR_CRCTL(HVBOXCRCON_SERVER hServer, PVBOXVDMACMD_CHROMIUM_CTL pCtl, uint32_t cbCmd);
+typedef FNVBOXCRCON_SVR_CRCTL *PFNVBOXCRCON_SVR_CRCTL;
+
+/* Request 3D data.
+ * The protocol is the following:
+ * 1. if there is no 3D data displayed on the screen, return VINF_EOF immediately without calling any PFNVBOXCRCON_3DRGN_XXX callbacks
+ * 2. otherwise call PFNVBOXCRCON_3DRGN_ONSUBMIT, submit the "regions get" request to the CrOpenGL server to process it asynchronously and return VINF_SUCCESS
+ * 2.a on "regions get" request processing call PFNVBOXCRCON_3DRGN_BEGIN,
+ * 2.b then PFNVBOXCRCON_3DRGN_REPORT zero or more times, once for each 3D region,
+ * 2.c and then PFNVBOXCRCON_3DRGN_END
+ * 3. return a VERR_XXX code on failure
+ */
+typedef DECLCALLBACK(int) FNVBOXCRCON_SVR_3DRGN_GET(HVBOXCRCON_SERVER hServer, HVBOXCRCON_3DRGN_CLIENT hRgnClient, uint32_t idScreen);
+typedef FNVBOXCRCON_SVR_3DRGN_GET *PFNVBOXCRCON_SVR_3DRGN_GET;
+
+/* 3D Regions Client callbacks */
+/* called from the PFNVBOXCRCON_SVR_3DRGN_GET callback in case server has 3D data and is going to process the request asynchronously,
+ * see comments for PFNVBOXCRCON_SVR_3DRGN_GET above */
+typedef DECLCALLBACK(int) FNVBOXCRCON_3DRGN_ONSUBMIT(HVBOXCRCON_3DRGN_CLIENT hRgnClient, uint32_t idScreen, HVBOXCRCON_3DRGN_ASYNCCLIENT *phRgnAsyncClient);
+typedef FNVBOXCRCON_3DRGN_ONSUBMIT *PFNVBOXCRCON_3DRGN_ONSUBMIT;
+
+/* called from the "regions get" command processing thread, to indicate that the "regions get" is started.
+ * see comments for PFNVBOXCRCON_SVR_3DRGN_GET above */
+typedef DECLCALLBACK(int) FNVBOXCRCON_3DRGN_BEGIN(HVBOXCRCON_3DRGN_ASYNCCLIENT hRgnAsyncClient, uint32_t idScreen);
+typedef FNVBOXCRCON_3DRGN_BEGIN *PFNVBOXCRCON_3DRGN_BEGIN;
+
+/* called from the "regions get" command processing thread, to report a 3D region.
+ * see comments for PFNVBOXCRCON_SVR_3DRGN_GET above */
+typedef DECLCALLBACK(int) FNVBOXCRCON_3DRGN_REPORT(HVBOXCRCON_3DRGN_ASYNCCLIENT hRgnAsyncClient, uint32_t idScreen, void *pvData, uint32_t cbStride, const RTRECT *pRect);
+typedef FNVBOXCRCON_3DRGN_REPORT *PFNVBOXCRCON_3DRGN_REPORT;
+
+/* called from the "regions get" command processing thread, to indicate that the "regions get" is completed.
+ * see comments for PFNVBOXCRCON_SVR_3DRGN_GET above */
+typedef DECLCALLBACK(int) FNVBOXCRCON_3DRGN_END(HVBOXCRCON_3DRGN_ASYNCCLIENT hRgnAsyncClient, uint32_t idScreen);
+typedef FNVBOXCRCON_3DRGN_END *PFNVBOXCRCON_3DRGN_END;
+
+
+/* client callbacks */
+/* complete chromium cmd */
+typedef DECLCALLBACK(int) FNVBOXCRCON_CLT_CRCTL_COMPLETE(HVBOXCRCON_CLIENT hClient, PVBOXVDMACMD_CHROMIUM_CTL pCtl, int rc);
+typedef FNVBOXCRCON_CLT_CRCTL_COMPLETE *PFNVBOXCRCON_CLT_CRCTL_COMPLETE;
+
+/* complete chromium control cmd */
+typedef DECLCALLBACK(int) FNVBOXCRCON_CLT_CRCMD_COMPLETE(HVBOXCRCON_CLIENT hClient, PVBOXVDMACMD_CHROMIUM_CMD pCmd, int rc);
+typedef FNVBOXCRCON_CLT_CRCMD_COMPLETE *PFNVBOXCRCON_CLT_CRCMD_COMPLETE;
+
+typedef struct VBOXCRCON_SERVER_CALLBACKS
+{
+ HVBOXCRCON_SERVER hServer;
+ PFNVBOXCRCON_SVR_CRCMD pfnCrCmd;
+ PFNVBOXCRCON_SVR_CRCTL pfnCrCtl;
+ PFNVBOXCRCON_SVR_3DRGN_GET pfn3DRgnGet;
+} VBOXCRCON_SERVER_CALLBACKS, *PVBOXCRCON_SERVER_CALLBACKS;
+
+typedef struct VBOXCRCON_CLIENT_CALLBACKS
+{
+ HVBOXCRCON_CLIENT hClient;
+ PFNVBOXCRCON_CLT_CRCMD_COMPLETE pfnCrCmdComplete;
+ PFNVBOXCRCON_CLT_CRCTL_COMPLETE pfnCrCtlComplete;
+ PFNVBOXCRCON_3DRGN_ONSUBMIT pfn3DRgnOnSubmit;
+ PFNVBOXCRCON_3DRGN_BEGIN pfn3DRgnBegin;
+ PFNVBOXCRCON_3DRGN_REPORT pfn3DRgnReport;
+ PFNVBOXCRCON_3DRGN_END pfn3DRgnEnd;
+} VBOXCRCON_CLIENT_CALLBACKS, *PVBOXCRCON_CLIENT_CALLBACKS;
+
+/* issued by Main to establish connection between Main and CrOpenGL service */
+typedef struct VBOXVDMACMD_CHROMIUM_CTL_CRCONNECT
+{
+ VBOXVDMACMD_CHROMIUM_CTL Hdr;
+ /*input (filled by Client) :*/
+ /*class VMMDev*/void *pVMMDev;
+ VBOXCRCON_CLIENT_CALLBACKS ClientCallbacks;
+ /*output (filled by Server) :*/
+ VBOXCRCON_SERVER_CALLBACKS ServerCallbacks;
+} VBOXVDMACMD_CHROMIUM_CTL_CRCONNECT, *PVBOXVDMACMD_CHROMIUM_CTL_CRCONNECT;
+
+/* ring command buffer dr */
+#define VBOXCMDVBVA_STATE_SUBMITTED 1
+#define VBOXCMDVBVA_STATE_CANCELLED 2
+#define VBOXCMDVBVA_STATE_IN_PROGRESS 3
+/* the "completed" state is signalled via the ring buffer values */
+
+/* CrHgsmi command */
+#define VBOXCMDVBVA_OPTYPE_CRCMD 1
+/* blit command that blits allocations identified by VRAM offset or host id;
+ * for VRAM-offset ones the size and format are the same as the primary */
+#define VBOXCMDVBVA_OPTYPE_BLT 2
+/* flip */
+#define VBOXCMDVBVA_OPTYPE_FLIP 3
+/* ColorFill */
+#define VBOXCMDVBVA_OPTYPE_CLRFILL 4
+/* allocation paging transfer request */
+#define VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER 5
+/* allocation paging fill request */
+#define VBOXCMDVBVA_OPTYPE_PAGING_FILL 6
+/* same as VBOXCMDVBVA_OPTYPE_NOP, but contains VBOXCMDVBVA_HDR data */
+#define VBOXCMDVBVA_OPTYPE_NOPCMD 7
+/* actual command is stored in guest system memory */
+#define VBOXCMDVBVA_OPTYPE_SYSMEMCMD 8
+/* complex command - i.e. it can contain multiple commands:
+ * the VBOXCMDVBVA_OPTYPE_COMPLEXCMD VBOXCMDVBVA_HDR is followed
+ * by one or more VBOXCMDVBVA_HDR commands.
+ * Each command's size is specified in its VBOXCMDVBVA_HDR's u32FenceID field
+ * (see the usage sketch after the VBOXCMDVBVA_HDR definition below). */
+#define VBOXCMDVBVA_OPTYPE_COMPLEXCMD 9
+
+/* nop - is a one-bit command. The buffer size to skip is determined by VBVA buffer size */
+#define VBOXCMDVBVA_OPTYPE_NOP 0x80
+
+/* u8Flags flags */
+/* transfer from RAM to Allocation */
+#define VBOXCMDVBVA_OPF_PAGING_TRANSFER_IN 0x80
+
+#define VBOXCMDVBVA_OPF_BLT_TYPE_SAMEDIM_A8R8G8B8 0
+#define VBOXCMDVBVA_OPF_BLT_TYPE_GENERIC_A8R8G8B8 1
+#define VBOXCMDVBVA_OPF_BLT_TYPE_OFFPRIMSZFMT_OR_ID 2
+
+#define VBOXCMDVBVA_OPF_BLT_TYPE_MASK 3
+
+
+#define VBOXCMDVBVA_OPF_CLRFILL_TYPE_GENERIC_A8R8G8B8 0
+
+#define VBOXCMDVBVA_OPF_CLRFILL_TYPE_MASK 1
+
+
+/* blit direction is from first operand to second */
+#define VBOXCMDVBVA_OPF_BLT_DIR_IN_2 0x10
+/* operand 1 contains host id */
+#define VBOXCMDVBVA_OPF_OPERAND1_ISID 0x20
+/* operand 2 contains host id */
+#define VBOXCMDVBVA_OPF_OPERAND2_ISID 0x40
+/* primary hint id is src */
+#define VBOXCMDVBVA_OPF_PRIMARY_HINT_SRC 0x80
+
+/* We try to make the header as small as possible.
+ * We have pretty few op codes actually, so 8 bits is quite enough,
+ * and we will still be able to extend it. */
+typedef struct VBOXCMDVBVA_HDR
+{
+ /* one VBOXCMDVBVA_OPTYPE_XXX, except NOP, see comments above */
+ uint8_t u8OpCode;
+ /* command-specific
+ * VBOXCMDVBVA_OPTYPE_CRCMD - must be null
+ * VBOXCMDVBVA_OPTYPE_BLT - OR-ed VBOXCMDVBVA_OPF_ALLOC_XXX flags
+ * VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER - must be null
+ * VBOXCMDVBVA_OPTYPE_PAGING_FILL - must be null
+ * VBOXCMDVBVA_OPTYPE_NOPCMD - must be null
+ * VBOXCMDVBVA_OPTYPE_NOP - not applicable (as the entire VBOXCMDVBVA_HDR is not valid) */
+ uint8_t u8Flags;
+ /* one of VBOXCMDVBVA_STATE_XXX*/
+ volatile uint8_t u8State;
+ union
+ {
+ /* result, 0 on success, otherwise contains the failure code TBD */
+ int8_t i8Result;
+ uint8_t u8PrimaryID;
+ } u;
+ union
+ {
+ /* complex command (VBOXCMDVBVA_OPTYPE_COMPLEXCMD) element data */
+ struct
+ {
+ /* command length */
+ uint16_t u16CbCmdHost;
+ /* guest-specific data, host expects it to be NULL */
+ uint16_t u16CbCmdGuest;
+ } complexCmdEl;
+ /* DXGK DDI fence ID */
+ uint32_t u32FenceID;
+ } u2;
+} VBOXCMDVBVA_HDR;
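+
+#if 0
+/* Usage sketch (illustrative only): walking the sub-commands of a
+ * VBOXCMDVBVA_OPTYPE_COMPLEXCMD command as described above. Each element's
+ * size is taken from its complexCmdEl data and is assumed to include the
+ * sub-command header itself; the helper name is hypothetical and the
+ * processing of each sub-command is left out. */
+DECLINLINE(void) VBoxCmdVbvaExampleWalkComplex(const VBOXCMDVBVA_HDR *pCmd, uint32_t cbCmd)
+{
+    const uint8_t *pu8Cur = (const uint8_t *)(pCmd + 1);
+    const uint8_t *pu8End = (const uint8_t *)pCmd + cbCmd;
+
+    while (pu8Cur + sizeof(VBOXCMDVBVA_HDR) <= pu8End)
+    {
+        const VBOXCMDVBVA_HDR *pSubCmd = (const VBOXCMDVBVA_HDR *)pu8Cur;
+        uint16_t cbSubCmd = pSubCmd->u2.complexCmdEl.u16CbCmdHost;
+
+        if (cbSubCmd < sizeof(VBOXCMDVBVA_HDR) || cbSubCmd > (size_t)(pu8End - pu8Cur))
+            break; /* Malformed element, stop. */
+
+        /* ... process pSubCmd here ... */
+        pu8Cur += cbSubCmd;
+    }
+}
+#endif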
+
+typedef uint32_t VBOXCMDVBVAOFFSET;
+typedef uint64_t VBOXCMDVBVAPHADDR;
+typedef uint32_t VBOXCMDVBVAPAGEIDX;
+
+typedef struct VBOXCMDVBVA_CRCMD_BUFFER
+{
+ uint32_t cbBuffer;
+ VBOXCMDVBVAOFFSET offBuffer;
+} VBOXCMDVBVA_CRCMD_BUFFER;
+
+typedef struct VBOXCMDVBVA_CRCMD_CMD
+{
+ uint32_t cBuffers;
+ VBOXCMDVBVA_CRCMD_BUFFER aBuffers[1];
+} VBOXCMDVBVA_CRCMD_CMD;
+
+typedef struct VBOXCMDVBVA_CRCMD
+{
+ VBOXCMDVBVA_HDR Hdr;
+ VBOXCMDVBVA_CRCMD_CMD Cmd;
+} VBOXCMDVBVA_CRCMD;
+
+typedef struct VBOXCMDVBVA_ALLOCINFO
+{
+ union
+ {
+ VBOXCMDVBVAOFFSET offVRAM;
+ uint32_t id;
+ } u;
+} VBOXCMDVBVA_ALLOCINFO;
+
+typedef struct VBOXCMDVBVA_ALLOCDESC
+{
+ VBOXCMDVBVA_ALLOCINFO Info;
+ uint16_t u16Width;
+ uint16_t u16Height;
+} VBOXCMDVBVA_ALLOCDESC;
+
+typedef struct VBOXCMDVBVA_RECT
+{
+ /** Coordinates of affected rectangle. */
+ int16_t xLeft;
+ int16_t yTop;
+ int16_t xRight;
+ int16_t yBottom;
+} VBOXCMDVBVA_RECT;
+
+typedef struct VBOXCMDVBVA_POINT
+{
+ int16_t x;
+ int16_t y;
+} VBOXCMDVBVA_POINT;
+
+typedef struct VBOXCMDVBVA_BLT_HDR
+{
+ VBOXCMDVBVA_HDR Hdr;
+ VBOXCMDVBVA_POINT Pos;
+} VBOXCMDVBVA_BLT_HDR;
+
+typedef struct VBOXCMDVBVA_BLT_PRIMARY
+{
+ VBOXCMDVBVA_BLT_HDR Hdr;
+ VBOXCMDVBVA_ALLOCINFO alloc;
+ /* the rects count is determined from the command size */
+ VBOXCMDVBVA_RECT aRects[1];
+} VBOXCMDVBVA_BLT_PRIMARY;
+
+typedef struct VBOXCMDVBVA_BLT_PRIMARY_GENERIC_A8R8G8B8
+{
+ VBOXCMDVBVA_BLT_HDR Hdr;
+ VBOXCMDVBVA_ALLOCDESC alloc;
+ /* the rects count is determined from the command size */
+ VBOXCMDVBVA_RECT aRects[1];
+} VBOXCMDVBVA_BLT_PRIMARY_GENERIC_A8R8G8B8;
+
+typedef struct VBOXCMDVBVA_BLT_OFFPRIMSZFMT_OR_ID
+{
+ VBOXCMDVBVA_BLT_HDR Hdr;
+ VBOXCMDVBVA_ALLOCINFO alloc;
+ uint32_t id;
+ /* the rects count is determined from the command size */
+ VBOXCMDVBVA_RECT aRects[1];
+} VBOXCMDVBVA_BLT_OFFPRIMSZFMT_OR_ID;
+
+typedef struct VBOXCMDVBVA_BLT_SAMEDIM_A8R8G8B8
+{
+ VBOXCMDVBVA_BLT_HDR Hdr;
+ VBOXCMDVBVA_ALLOCDESC alloc1;
+ VBOXCMDVBVA_ALLOCINFO info2;
+ /* the rects count is determined from the command size */
+ VBOXCMDVBVA_RECT aRects[1];
+} VBOXCMDVBVA_BLT_SAMEDIM_A8R8G8B8;
+
+typedef struct VBOXCMDVBVA_BLT_GENERIC_A8R8G8B8
+{
+ VBOXCMDVBVA_BLT_HDR Hdr;
+ VBOXCMDVBVA_ALLOCDESC alloc1;
+ VBOXCMDVBVA_ALLOCDESC alloc2;
+ /* the rects count is determined from the command size */
+ VBOXCMDVBVA_RECT aRects[1];
+} VBOXCMDVBVA_BLT_GENERIC_A8R8G8B8;
+
+#define VBOXCMDVBVA_SIZEOF_BLTSTRUCT_MAX (sizeof (VBOXCMDVBVA_BLT_GENERIC_A8R8G8B8))
+
+typedef struct VBOXCMDVBVA_FLIP
+{
+ VBOXCMDVBVA_HDR Hdr;
+ VBOXCMDVBVA_ALLOCINFO src;
+ VBOXCMDVBVA_RECT aRects[1];
+} VBOXCMDVBVA_FLIP;
+
+#define VBOXCMDVBVA_SIZEOF_FLIPSTRUCT_MIN (RT_OFFSETOF(VBOXCMDVBVA_FLIP, aRects))
+
+typedef struct VBOXCMDVBVA_CLRFILL_HDR
+{
+ VBOXCMDVBVA_HDR Hdr;
+ uint32_t u32Color;
+} VBOXCMDVBVA_CLRFILL_HDR;
+
+typedef struct VBOXCMDVBVA_CLRFILL_PRIMARY
+{
+ VBOXCMDVBVA_CLRFILL_HDR Hdr;
+ VBOXCMDVBVA_RECT aRects[1];
+} VBOXCMDVBVA_CLRFILL_PRIMARY;
+
+typedef struct VBOXCMDVBVA_CLRFILL_GENERIC_A8R8G8B8
+{
+ VBOXCMDVBVA_CLRFILL_HDR Hdr;
+ VBOXCMDVBVA_ALLOCDESC dst;
+ VBOXCMDVBVA_RECT aRects[1];
+} VBOXCMDVBVA_CLRFILL_GENERIC_A8R8G8B8;
+
+#define VBOXCMDVBVA_SIZEOF_CLRFILLSTRUCT_MAX (sizeof (VBOXCMDVBVA_CLRFILL_GENERIC_A8R8G8B8))
+
+#if 0
+#define VBOXCMDVBVA_SYSMEMEL_CPAGES_MAX 0x1000
+
+typedef struct VBOXCMDVBVA_SYSMEMEL
+{
+ uint32_t cPagesAfterFirst : 12;
+ uint32_t iPage1 : 20;
+ uint32_t iPage2;
+} VBOXCMDVBVA_SYSMEMEL;
+#endif
+
+typedef struct VBOXCMDVBVA_PAGING_TRANSFER_DATA
+{
+ /* For now this can only contain offVRAM.
+ * A paging transfer can NOT be initiated for allocations that have a host 3D object (hostID) associated with them. */
+ VBOXCMDVBVA_ALLOCINFO Alloc;
+ VBOXCMDVBVAPAGEIDX aPageNumbers[1];
+} VBOXCMDVBVA_PAGING_TRANSFER_DATA;
+
+typedef struct VBOXCMDVBVA_PAGING_TRANSFER
+{
+ VBOXCMDVBVA_HDR Hdr;
+ VBOXCMDVBVA_PAGING_TRANSFER_DATA Data;
+} VBOXCMDVBVA_PAGING_TRANSFER;
+
+typedef struct VBOXCMDVBVA_PAGING_FILL
+{
+ VBOXCMDVBVA_HDR Hdr;
+ uint32_t u32CbFill;
+ uint32_t u32Pattern;
+ /* A paging transfer can NOT be initiated for allocations that have a host 3D object (hostID) associated with them. */
+ VBOXCMDVBVAOFFSET offVRAM;
+} VBOXCMDVBVA_PAGING_FILL;
+
+typedef struct VBOXCMDVBVA_SYSMEMCMD
+{
+ VBOXCMDVBVA_HDR Hdr;
+ VBOXCMDVBVAPHADDR phCmd;
+} VBOXCMDVBVA_SYSMEMCMD;
+
+#define VBOXCMDVBVACTL_TYPE_ENABLE 1
+#define VBOXCMDVBVACTL_TYPE_3DCTL 2
+#define VBOXCMDVBVACTL_TYPE_RESIZE 3
+
+typedef struct VBOXCMDVBVA_CTL
+{
+ uint32_t u32Type;
+ int32_t i32Result;
+} VBOXCMDVBVA_CTL;
+
+typedef struct VBOXCMDVBVA_CTL_ENABLE
+{
+ VBOXCMDVBVA_CTL Hdr;
+ VBVAENABLE Enable;
+} VBOXCMDVBVA_CTL_ENABLE;
+
+#define VBOXCMDVBVA_SCREENMAP_SIZE(_elType) ((VBOX_VIDEO_MAX_SCREENS + sizeof (_elType) - 1) / sizeof (_elType))
+#define VBOXCMDVBVA_SCREENMAP_DECL(_elType, _name) _elType _name[VBOXCMDVBVA_SCREENMAP_SIZE(_elType)]
+
+typedef struct VBOXCMDVBVA_RESIZE_ENTRY
+{
+ VBVAINFOSCREEN Screen;
+ VBOXCMDVBVA_SCREENMAP_DECL(uint32_t, aTargetMap);
+} VBOXCMDVBVA_RESIZE_ENTRY;
+
+typedef struct VBOXCMDVBVA_RESIZE
+{
+ VBOXCMDVBVA_RESIZE_ENTRY aEntries[1];
+} VBOXCMDVBVA_RESIZE;
+
+typedef struct VBOXCMDVBVA_CTL_RESIZE
+{
+ VBOXCMDVBVA_CTL Hdr;
+ VBOXCMDVBVA_RESIZE Resize;
+} VBOXCMDVBVA_CTL_RESIZE;
+
+#define VBOXCMDVBVA3DCTL_TYPE_CONNECT 1
+#define VBOXCMDVBVA3DCTL_TYPE_DISCONNECT 2
+#define VBOXCMDVBVA3DCTL_TYPE_CMD 3
+
+typedef struct VBOXCMDVBVA_3DCTL
+{
+ uint32_t u32Type;
+ uint32_t u32CmdClientId;
+} VBOXCMDVBVA_3DCTL;
+
+typedef struct VBOXCMDVBVA_3DCTL_CONNECT
+{
+ VBOXCMDVBVA_3DCTL Hdr;
+ uint32_t u32MajorVersion;
+ uint32_t u32MinorVersion;
+ uint64_t u64Pid;
+} VBOXCMDVBVA_3DCTL_CONNECT;
+
+typedef struct VBOXCMDVBVA_3DCTL_CMD
+{
+ VBOXCMDVBVA_3DCTL Hdr;
+ VBOXCMDVBVA_HDR Cmd;
+} VBOXCMDVBVA_3DCTL_CMD;
+
+typedef struct VBOXCMDVBVA_CTL_3DCTL_CMD
+{
+ VBOXCMDVBVA_CTL Hdr;
+ VBOXCMDVBVA_3DCTL_CMD Cmd;
+} VBOXCMDVBVA_CTL_3DCTL_CMD;
+
+typedef struct VBOXCMDVBVA_CTL_3DCTL_CONNECT
+{
+ VBOXCMDVBVA_CTL Hdr;
+ VBOXCMDVBVA_3DCTL_CONNECT Connect;
+} VBOXCMDVBVA_CTL_3DCTL_CONNECT;
+
+typedef struct VBOXCMDVBVA_CTL_3DCTL
+{
+ VBOXCMDVBVA_CTL Hdr;
+ VBOXCMDVBVA_3DCTL Ctl;
+} VBOXCMDVBVA_CTL_3DCTL;
+
+#pragma pack()
+
+
+#ifdef VBOXVDMA_WITH_VBVA
+# pragma pack(1)
+
+typedef struct VBOXVDMAVBVACMD
+{
+ HGSMIOFFSET offCmd;
+} VBOXVDMAVBVACMD;
+
+#pragma pack()
+#endif
+
+#endif
new file mode 100644
@@ -0,0 +1,180 @@
+/** @file
+ * VBox Host Guest Shared Memory Interface (HGSMI).
+ * OS-independent guest structures.
+ */
+
+/*
+ * Copyright (C) 2006-2017 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef ___VBox_Graphics_VBoxVideoGuest_h___
+#define ___VBox_Graphics_VBoxVideoGuest_h___
+
+#include <HGSMIBase.h>
+#include <VBoxVideo.h>
+#include <VBoxVideoIPRT.h>
+
+RT_C_DECLS_BEGIN
+
+/**
+ * Structure grouping the context needed for sending graphics acceleration
+ * information to the host via VBVA. Each screen has its own VBVA buffer.
+ */
+typedef struct VBVABUFFERCONTEXT
+{
+ /** Offset of the buffer in the VRAM section for the screen */
+ uint32_t offVRAMBuffer;
+ /** Length of the buffer in bytes */
+ uint32_t cbBuffer;
+ /** This flag is set if we wrote to the buffer faster than the host could
+ * read it. */
+ bool fHwBufferOverflow;
+ /** The VBVA record that we are currently preparing for the host, NULL if
+ * none. */
+ struct VBVARECORD *pRecord;
+ /** Pointer to the VBVA buffer mapped into the current address space. Will
+ * be NULL if VBVA is not enabled. */
+ struct VBVABUFFER *pVBVA;
+} VBVABUFFERCONTEXT, *PVBVABUFFERCONTEXT;
+
+/** @name Base HGSMI APIs
+ * @{ */
+
+DECLHIDDEN(bool) VBoxHGSMIIsSupported(void);
+DECLHIDDEN(void) VBoxHGSMIGetBaseMappingInfo(uint32_t cbVRAM,
+ uint32_t *poffVRAMBaseMapping,
+ uint32_t *pcbMapping,
+ uint32_t *poffGuestHeapMemory,
+ uint32_t *pcbGuestHeapMemory,
+ uint32_t *poffHostFlags);
+DECLHIDDEN(int) VBoxHGSMIReportFlagsLocation(PHGSMIGUESTCOMMANDCONTEXT pCtx,
+ HGSMIOFFSET offLocation);
+DECLHIDDEN(int) VBoxHGSMISendCapsInfo(PHGSMIGUESTCOMMANDCONTEXT pCtx,
+ uint32_t fCaps);
+DECLHIDDEN(void) VBoxHGSMIGetHostAreaMapping(PHGSMIGUESTCOMMANDCONTEXT pCtx,
+ uint32_t cbVRAM,
+ uint32_t offVRAMBaseMapping,
+ uint32_t *poffVRAMHostArea,
+ uint32_t *pcbHostArea);
+DECLHIDDEN(int) VBoxHGSMISendHostCtxInfo(PHGSMIGUESTCOMMANDCONTEXT pCtx,
+ HGSMIOFFSET offVRAMFlagsLocation,
+ uint32_t fCaps,
+ uint32_t offVRAMHostArea,
+ uint32_t cbHostArea);
+DECLHIDDEN(int) VBoxQueryConfHGSMI(PHGSMIGUESTCOMMANDCONTEXT pCtx,
+ uint32_t u32Index, uint32_t *pulValue);
+DECLHIDDEN(int) VBoxQueryConfHGSMIDef(PHGSMIGUESTCOMMANDCONTEXT pCtx,
+ uint32_t u32Index, uint32_t u32DefValue, uint32_t *pulValue);
+DECLHIDDEN(int) VBoxHGSMIUpdatePointerShape(PHGSMIGUESTCOMMANDCONTEXT pCtx,
+ uint32_t fFlags,
+ uint32_t cHotX,
+ uint32_t cHotY,
+ uint32_t cWidth,
+ uint32_t cHeight,
+ uint8_t *pPixels,
+ uint32_t cbLength);
+DECLHIDDEN(int) VBoxHGSMICursorPosition(PHGSMIGUESTCOMMANDCONTEXT pCtx, bool fReportPosition, uint32_t x, uint32_t y,
+ uint32_t *pxHost, uint32_t *pyHost);
+
+/** @} */
+
+/** @name VBVA APIs
+ * @{ */
+DECLHIDDEN(bool) VBoxVBVAEnable(PVBVABUFFERCONTEXT pCtx,
+ PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
+ struct VBVABUFFER *pVBVA, int32_t cScreen);
+DECLHIDDEN(void) VBoxVBVADisable(PVBVABUFFERCONTEXT pCtx,
+ PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
+ int32_t cScreen);
+DECLHIDDEN(bool) VBoxVBVABufferBeginUpdate(PVBVABUFFERCONTEXT pCtx,
+ PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx);
+DECLHIDDEN(void) VBoxVBVABufferEndUpdate(PVBVABUFFERCONTEXT pCtx);
+DECLHIDDEN(bool) VBoxVBVAWrite(PVBVABUFFERCONTEXT pCtx,
+ PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
+ const void *pv, uint32_t cb);
+DECLHIDDEN(bool) VBoxVBVAOrderSupported(PVBVABUFFERCONTEXT pCtx, unsigned code);
+DECLHIDDEN(void) VBoxVBVASetupBufferContext(PVBVABUFFERCONTEXT pCtx,
+ uint32_t offVRAMBuffer,
+ uint32_t cbBuffer);
+
+/** @} */
+
+/** @name Modesetting APIs
+ * @{ */
+
+DECLHIDDEN(uint32_t) VBoxHGSMIGetMonitorCount(PHGSMIGUESTCOMMANDCONTEXT pCtx);
+DECLHIDDEN(uint32_t) VBoxVideoGetVRAMSize(void);
+DECLHIDDEN(bool) VBoxVideoAnyWidthAllowed(void);
+DECLHIDDEN(uint16_t) VBoxHGSMIGetScreenFlags(PHGSMIGUESTCOMMANDCONTEXT pCtx);
+
+struct VBVAINFOVIEW;
+/**
+ * Callback function called from @a VBoxHGSMISendViewInfo to initialise
+ * the @a VBVAINFOVIEW structure for each screen.
+ *
+ * @returns iprt status code
+ * @param pvData context data for the callback, passed to @a
+ * VBoxHGSMISendViewInfo along with the callback
+ * @param pInfo array of @a VBVAINFOVIEW structures to be filled in
+ * @todo explicitly pass the array size
+ */
+typedef DECLCALLBACK(int) FNHGSMIFILLVIEWINFO(void *pvData,
+ struct VBVAINFOVIEW *pInfo,
+ uint32_t cViews);
+/** Pointer to a FNHGSMIFILLVIEWINFO callback */
+typedef FNHGSMIFILLVIEWINFO *PFNHGSMIFILLVIEWINFO;
+
+DECLHIDDEN(int) VBoxHGSMISendViewInfo(PHGSMIGUESTCOMMANDCONTEXT pCtx,
+ uint32_t u32Count,
+ PFNHGSMIFILLVIEWINFO pfnFill,
+ void *pvData);
+DECLHIDDEN(void) VBoxVideoSetModeRegisters(uint16_t cWidth, uint16_t cHeight,
+ uint16_t cVirtWidth, uint16_t cBPP,
+ uint16_t fFlags,
+ uint16_t cx, uint16_t cy);
+DECLHIDDEN(bool) VBoxVideoGetModeRegisters(uint16_t *pcWidth,
+ uint16_t *pcHeight,
+ uint16_t *pcVirtWidth,
+ uint16_t *pcBPP,
+ uint16_t *pfFlags);
+DECLHIDDEN(void) VBoxVideoDisableVBE(void);
+DECLHIDDEN(void) VBoxHGSMIProcessDisplayInfo(PHGSMIGUESTCOMMANDCONTEXT pCtx,
+ uint32_t cDisplay,
+ int32_t cOriginX,
+ int32_t cOriginY,
+ uint32_t offStart,
+ uint32_t cbPitch,
+ uint32_t cWidth,
+ uint32_t cHeight,
+ uint16_t cBPP,
+ uint16_t fFlags);
+DECLHIDDEN(int) VBoxHGSMIUpdateInputMapping(PHGSMIGUESTCOMMANDCONTEXT pCtx, int32_t cOriginX, int32_t cOriginY,
+ uint32_t cWidth, uint32_t cHeight);
+DECLHIDDEN(int) VBoxHGSMIGetModeHints(PHGSMIGUESTCOMMANDCONTEXT pCtx,
+ unsigned cScreens, VBVAMODEHINT *paHints);
+
+/** @} */
+
+RT_C_DECLS_END
+
+#endif
+
new file mode 100644
@@ -0,0 +1,137 @@
+/** @file
+ * VirtualBox Video driver, common code - iprt and VirtualBox macros and
+ * definitions.
+ */
+
+/*
+ * Copyright (C) 2017 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef ___VBox_VBoxVideoIPRT_h
+#define ___VBox_VBoxVideoIPRT_h
+
+#include <asm/io.h>
+#include <linux/string.h>
+#include <linux/version.h>
+
+/** @name VirtualBox error macros
+ * @{ */
+
+#define VINF_SUCCESS 0
+#define VERR_INVALID_PARAMETER (-2)
+#define VERR_INVALID_POINTER (-6)
+#define VERR_NO_MEMORY (-8)
+#define VERR_NOT_IMPLEMENTED (-12)
+#define VERR_INVALID_FUNCTION (-36)
+#define VERR_NOT_SUPPORTED (-37)
+#define VERR_TOO_MUCH_DATA (-42)
+#define VERR_INVALID_STATE (-79)
+#define VERR_OUT_OF_RESOURCES (-80)
+#define VERR_ALREADY_EXISTS (-105)
+#define VERR_INTERNAL_ERROR (-225)
+
+#define RT_SUCCESS_NP(rc) ( (int)(rc) >= VINF_SUCCESS )
+#define RT_SUCCESS(rc) ( likely(RT_SUCCESS_NP(rc)) )
+#define RT_FAILURE(rc) ( unlikely(!RT_SUCCESS_NP(rc)) )
+
+/** @} */
+
+/** @name VirtualBox helper functions
+ * @{ */
+
+#define ASMCompilerBarrier() mb()
+
+/** @} */
+
+/** @name VirtualBox assertions
+ * @{ */
+
+#define Assert(a) WARN_ON_ONCE(!(a))
+#define AssertPtr(a) WARN_ON_ONCE(!(a))
+#define AssertReturnVoid(a) do { if (WARN_ON_ONCE(!(a))) return; } while(0)
+#define AssertRC(a) WARN_ON_ONCE(RT_FAILURE(a))
+#define AssertPtrNullReturnVoid(a) do {} while(0)
+#define AssertPtrReturnVoid(a) do { if (WARN_ON_ONCE(!(a))) return; } while(0)
+
+extern int RTASSERTVAR[1];
+#define AssertCompile(expr) \
+ extern int RTASSERTVAR[1] __attribute__((__unused__)), \
+ RTASSERTVAR[(expr) ? 1 : 0] __attribute__((__unused__))
+#define AssertCompileSize(type, size) \
+ AssertCompile(sizeof(type) == (size))
+
+#define VALID_PTR(p) (p != NULL)
+
+/** @} */
+
+/** @name Port I/O helpers
+ * @{ */
+/** Write an 8-bit value to an I/O port. */
+#define VBVO_PORT_WRITE_U8(Port, Value) \
+ outb(Value, Port)
+/** Write a 16-bit value to an I/O port. */
+#define VBVO_PORT_WRITE_U16(Port, Value) \
+ outw(Value, Port)
+/** Write a 32-bit value to an I/O port. */
+#define VBVO_PORT_WRITE_U32(Port, Value) \
+ outl(Value, Port)
+/** Read an 8-bit value from an I/O port. */
+#define VBVO_PORT_READ_U8(Port) \
+ inb(Port)
+/** Read a 16-bit value from an I/O port. */
+#define VBVO_PORT_READ_U16(Port) \
+ inw(Port)
+/** Read a 32-bit value from an I/O port. */
+#define VBVO_PORT_READ_U32(Port) \
+ inl(Port)
+
+/** @} */
+
+/** @name types for VirtualBox OS-independent code
+ * @{ */
+
+/* HGSMI uses 32 bit offsets and sizes. */
+typedef uint32_t HGSMISIZE;
+typedef uint32_t HGSMIOFFSET;
+
+typedef void RTRECT;
+
+#define UINT32_C(val) (val ## U)
+#define UINT32_MAX UINT32_C(0xffffffff)
+
+/** @} */
+
+/** @name iprt/desc.h replacement macros
+ * @{ */
+
+#define RT_C_DECLS_BEGIN
+#define RT_C_DECLS_END
+#define DECLCALLBACK(type) type
+#define DECLCALLBACKMEMBER(type, name) type (*name)
+#define DECLHIDDEN(type) __attribute__((visibility("hidden"))) type
+#define DECLINLINE(type) static __inline__ type
+#define RT_BOOL(val) (!!(val))
+#define RT_BIT BIT
+#define _1K 0x00000400
+
+/** @} */
+
+#endif /* ___VBox_VBoxVideoIPRT_h */
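[Editorial note, not part of the patch: the IPRT status convention mirrored above is that zero and positive values mean success, negative values are VERR_* failures, and callers test results with RT_SUCCESS()/RT_FAILURE() rather than comparing against individual codes. A minimal, hypothetical illustration; the function name is a placeholder.]

/* Hypothetical example only: demonstrates the status convention above. */
static int example_check(int rc)
{
	if (RT_FAILURE(rc))	/* true for e.g. VERR_NO_MEMORY (-8) */
		return rc;	/* propagate the VERR_* code */

	return VINF_SUCCESS;	/* 0 */
}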
new file mode 100644
@@ -0,0 +1,85 @@
+/** @file
+ * VirtualBox graphics card port I/O definitions
+ */
+
+/*
+ * Copyright (C) 2006-2017 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef ___VBox_Graphics_VBoxVideoVBE_h
+#define ___VBox_Graphics_VBoxVideoVBE_h
+
+/* GUEST <-> HOST Communication API */
+
+/** @todo FIXME: Either dynamically ask the host for this or put it somewhere high in
+ * physical memory like 0xE0000000. */
+
+#define VBE_DISPI_BANK_ADDRESS 0xA0000
+#define VBE_DISPI_BANK_SIZE_KB 64
+
+#define VBE_DISPI_MAX_XRES 16384
+#define VBE_DISPI_MAX_YRES 16384
+#define VBE_DISPI_MAX_BPP 32
+
+#define VBE_DISPI_IOPORT_INDEX 0x01CE
+#define VBE_DISPI_IOPORT_DATA 0x01CF
+
+#define VBE_DISPI_IOPORT_DAC_WRITE_INDEX 0x03C8
+#define VBE_DISPI_IOPORT_DAC_DATA 0x03C9
+
+#define VBE_DISPI_INDEX_ID 0x0
+#define VBE_DISPI_INDEX_XRES 0x1
+#define VBE_DISPI_INDEX_YRES 0x2
+#define VBE_DISPI_INDEX_BPP 0x3
+#define VBE_DISPI_INDEX_ENABLE 0x4
+#define VBE_DISPI_INDEX_BANK 0x5
+#define VBE_DISPI_INDEX_VIRT_WIDTH 0x6
+#define VBE_DISPI_INDEX_VIRT_HEIGHT 0x7
+#define VBE_DISPI_INDEX_X_OFFSET 0x8
+#define VBE_DISPI_INDEX_Y_OFFSET 0x9
+#define VBE_DISPI_INDEX_VBOX_VIDEO 0xa
+#define VBE_DISPI_INDEX_FB_BASE_HI 0xb
+
+#define VBE_DISPI_ID0 0xB0C0
+#define VBE_DISPI_ID1 0xB0C1
+#define VBE_DISPI_ID2 0xB0C2
+#define VBE_DISPI_ID3 0xB0C3
+#define VBE_DISPI_ID4 0xB0C4
+
+#define VBE_DISPI_ID_VBOX_VIDEO 0xBE00
+/* The VBOX interface id. Indicates support for VBVA shared memory interface. */
+#define VBE_DISPI_ID_HGSMI 0xBE01
+#define VBE_DISPI_ID_ANYX 0xBE02
+
+#define VBE_DISPI_DISABLED 0x00
+#define VBE_DISPI_ENABLED 0x01
+#define VBE_DISPI_GETCAPS 0x02
+#define VBE_DISPI_8BIT_DAC 0x20
+/** @note this definition is a BOCHS legacy, used only in the video BIOS
+ * code and ignored by the emulated hardware. */
+#define VBE_DISPI_LFB_ENABLED 0x40
+#define VBE_DISPI_NOCLEARMEM 0x80
+
+#define VGA_PORT_HGSMI_HOST 0x3b0
+#define VGA_PORT_HGSMI_GUEST 0x3d0
+
+#endif /* !___VBox_Graphics_VBoxVideoVBE_h */
+
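[Editorial note, not part of the patch: the DISPI registers above are programmed through an index/data port pair — the register index is written to VBE_DISPI_IOPORT_INDEX and its value to VBE_DISPI_IOPORT_DATA, using the port helpers from VBoxVideoIPRT.h. The sketch below is a hypothetical illustration only; the real sequence lives in the osindependent Modesetting code and programs more registers.]

/* Hypothetical sketch: program a simple mode through the DISPI
 * index/data port pair defined above. */
static void example_set_mode(u16 width, u16 height, u16 bpp)
{
	VBVO_PORT_WRITE_U16(VBE_DISPI_IOPORT_INDEX, VBE_DISPI_INDEX_XRES);
	VBVO_PORT_WRITE_U16(VBE_DISPI_IOPORT_DATA, width);
	VBVO_PORT_WRITE_U16(VBE_DISPI_IOPORT_INDEX, VBE_DISPI_INDEX_YRES);
	VBVO_PORT_WRITE_U16(VBE_DISPI_IOPORT_DATA, height);
	VBVO_PORT_WRITE_U16(VBE_DISPI_IOPORT_INDEX, VBE_DISPI_INDEX_BPP);
	VBVO_PORT_WRITE_U16(VBE_DISPI_IOPORT_DATA, bpp);
	VBVO_PORT_WRITE_U16(VBE_DISPI_IOPORT_INDEX, VBE_DISPI_INDEX_ENABLE);
	VBVO_PORT_WRITE_U16(VBE_DISPI_IOPORT_DATA, VBE_DISPI_ENABLED);
}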
new file mode 100644
@@ -0,0 +1,287 @@
+/*
+ * Copyright (C) 2013-2017 Oracle Corporation
+ * This file is based on ast_drv.c
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * Authors: Dave Airlie <airlied@redhat.com>
+ * Michael Thayer <michael.thayer@oracle.com>,
+ * Hans de Goede <hdegoede@redhat.com>
+ */
+#include "vbox_drv.h"
+
+#include <linux/module.h>
+#include <linux/console.h>
+#include <linux/vt_kern.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+
+int vbox_modeset = -1;
+
+MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
+module_param_named(modeset, vbox_modeset, int, 0400);
+
+static struct drm_driver driver;
+
+static const struct pci_device_id pciidlist[] = {
+ { 0x80ee, 0xbeef, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { 0, 0, 0},
+};
+MODULE_DEVICE_TABLE(pci, pciidlist);
+
+static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ return drm_get_pci_dev(pdev, ent, &driver);
+}
+
+static void vbox_pci_remove(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ drm_put_dev(dev);
+}
+
+static int vbox_drm_freeze(struct drm_device *dev)
+{
+ drm_kms_helper_poll_disable(dev);
+
+ pci_save_state(dev->pdev);
+
+ console_lock();
+ vbox_fbdev_set_suspend(dev, 1);
+ console_unlock();
+
+ return 0;
+}
+
+static int vbox_drm_thaw(struct drm_device *dev)
+{
+ drm_mode_config_reset(dev);
+ drm_helper_resume_force_mode(dev);
+
+ console_lock();
+ vbox_fbdev_set_suspend(dev, 0);
+ console_unlock();
+
+ return 0;
+}
+
+static int vbox_drm_resume(struct drm_device *dev)
+{
+ int ret;
+
+ if (pci_enable_device(dev->pdev))
+ return -EIO;
+
+ ret = vbox_drm_thaw(dev);
+ if (ret)
+ return ret;
+
+ drm_kms_helper_poll_enable(dev);
+
+ return 0;
+}
+
+static int vbox_pm_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *ddev = pci_get_drvdata(pdev);
+ int error;
+
+ error = vbox_drm_freeze(ddev);
+ if (error)
+ return error;
+
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ return 0;
+}
+
+static int vbox_pm_resume(struct device *dev)
+{
+ struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+
+ return vbox_drm_resume(ddev);
+}
+
+static int vbox_pm_freeze(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *ddev = pci_get_drvdata(pdev);
+
+ if (!ddev || !ddev->dev_private)
+ return -ENODEV;
+
+ return vbox_drm_freeze(ddev);
+}
+
+static int vbox_pm_thaw(struct device *dev)
+{
+ struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+
+ return vbox_drm_thaw(ddev);
+}
+
+static int vbox_pm_poweroff(struct device *dev)
+{
+ struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+
+ return vbox_drm_freeze(ddev);
+}
+
+static const struct dev_pm_ops vbox_pm_ops = {
+ .suspend = vbox_pm_suspend,
+ .resume = vbox_pm_resume,
+ .freeze = vbox_pm_freeze,
+ .thaw = vbox_pm_thaw,
+ .poweroff = vbox_pm_poweroff,
+ .restore = vbox_pm_resume,
+};
+
+static struct pci_driver vbox_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = vbox_pci_probe,
+ .remove = vbox_pci_remove,
+ .driver.pm = &vbox_pm_ops,
+};
+
+static const struct file_operations vbox_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = vbox_mmap,
+ .poll = drm_poll,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .read = drm_read,
+};
+
+static int vbox_master_set(struct drm_device *dev,
+ struct drm_file *file_priv, bool from_open)
+{
+ struct vbox_private *vbox = dev->dev_private;
+
+ /*
+ * We do not yet know whether the new owner can handle hotplug, so we
+ * do not advertise dynamic modes on the first query and send a
+ * tentative hotplug notification after that to see if they query again.
+ */
+ vbox->initial_mode_queried = false;
+
+ mutex_lock(&vbox->hw_mutex);
+ /*
+ * Disable VBVA when someone releases master in case the next person
+ * tries to do VESA.
+ */
+ /** @todo work out if anyone is likely to and whether it will work. */
+ /*
+ * Update: we also disable it because if the new master does not do
+ * dirty rectangle reporting (e.g. old versions of Plymouth) then at
+ * least the first screen will still be updated. We enable it as soon
+ * as we receive a dirty rectangle report.
+ */
+ vbox_disable_accel(vbox);
+ mutex_unlock(&vbox->hw_mutex);
+
+ return 0;
+}
+
+static void vbox_master_drop(struct drm_device *dev, struct drm_file *file_priv)
+{
+ struct vbox_private *vbox = dev->dev_private;
+
+ /* See vbox_master_set() */
+ vbox->initial_mode_queried = false;
+
+ mutex_lock(&vbox->hw_mutex);
+ vbox_disable_accel(vbox);
+ mutex_unlock(&vbox->hw_mutex);
+}
+
+static struct drm_driver driver = {
+ .driver_features =
+ DRIVER_MODESET | DRIVER_GEM | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
+ DRIVER_PRIME,
+ .dev_priv_size = 0,
+
+ .load = vbox_driver_load,
+ .unload = vbox_driver_unload,
+ .lastclose = vbox_driver_lastclose,
+ .master_set = vbox_master_set,
+ .master_drop = vbox_master_drop,
+ .set_busid = drm_pci_set_busid,
+
+ .fops = &vbox_fops,
+ .irq_handler = vbox_irq_handler,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+
+ .gem_free_object = vbox_gem_free_object,
+ .dumb_create = vbox_dumb_create,
+ .dumb_map_offset = vbox_dumb_mmap_offset,
+ .dumb_destroy = drm_gem_dumb_destroy,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_pin = vbox_gem_prime_pin,
+ .gem_prime_unpin = vbox_gem_prime_unpin,
+ .gem_prime_get_sg_table = vbox_gem_prime_get_sg_table,
+ .gem_prime_import_sg_table = vbox_gem_prime_import_sg_table,
+ .gem_prime_vmap = vbox_gem_prime_vmap,
+ .gem_prime_vunmap = vbox_gem_prime_vunmap,
+ .gem_prime_mmap = vbox_gem_prime_mmap,
+};
+
+static int __init vbox_init(void)
+{
+#ifdef CONFIG_VGA_CONSOLE
+ if (vgacon_text_force() && vbox_modeset == -1)
+ return -EINVAL;
+#endif
+
+ if (vbox_modeset == 0)
+ return -EINVAL;
+
+ return drm_pci_init(&driver, &vbox_pci_driver);
+}
+
+static void __exit vbox_exit(void)
+{
+ drm_pci_exit(&driver, &vbox_pci_driver);
+}
+
+module_init(vbox_init);
+module_exit(vbox_exit);
+
+MODULE_AUTHOR("Oracle Corporation");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
new file mode 100644
@@ -0,0 +1,287 @@
+/*
+ * Copyright (C) 2013-2017 Oracle Corporation
+ * This file is based on ast_drv.h
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * Authors: Dave Airlie <airlied@redhat.com>
+ * Michael Thayer <michael.thayer@oracle.com>,
+ * Hans de Goede <hdegoede@redhat.com>
+ */
+#ifndef __VBOX_DRV_H__
+#define __VBOX_DRV_H__
+
+#define LOG_GROUP LOG_GROUP_DEV_VGA
+
+#include <VBoxVideoGuest.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem.h>
+
+#include <drm/ttm/ttm_bo_api.h>
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_memory.h>
+#include <drm/ttm/ttm_module.h>
+
+#define DRIVER_NAME "vboxvideo"
+#define DRIVER_DESC "Oracle VM VirtualBox Graphics Card"
+#define DRIVER_DATE "20130823"
+
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+#define DRIVER_PATCHLEVEL 0
+
+#define VBOX_MAX_CURSOR_WIDTH 64
+#define VBOX_MAX_CURSOR_HEIGHT 64
+#define CURSOR_PIXEL_COUNT (VBOX_MAX_CURSOR_WIDTH * VBOX_MAX_CURSOR_HEIGHT)
+#define CURSOR_DATA_SIZE (CURSOR_PIXEL_COUNT * 4 + CURSOR_PIXEL_COUNT / 8)
+
+#define VBOX_MAX_SCREENS 32
+
+#define GUEST_HEAP_OFFSET(vbox) ((vbox)->full_vram_size - \
+ VBVA_ADAPTER_INFORMATION_SIZE)
+#define GUEST_HEAP_SIZE VBVA_ADAPTER_INFORMATION_SIZE
+#define GUEST_HEAP_USABLE_SIZE (VBVA_ADAPTER_INFORMATION_SIZE - \
+ sizeof(HGSMIHOSTFLAGS))
+#define HOST_FLAGS_OFFSET GUEST_HEAP_USABLE_SIZE
+
+struct vbox_fbdev;
+
+struct vbox_private {
+ struct drm_device *dev;
+
+ u8 __iomem *guest_heap;
+ u8 __iomem *vbva_buffers;
+ struct gen_pool *guest_pool;
+ struct VBVABUFFERCONTEXT *vbva_info;
+ bool any_pitch;
+ unsigned int num_crtcs;
+ /** Amount of available VRAM, including space used for buffers. */
+ u32 full_vram_size;
+ /** Amount of available VRAM, not including space used for buffers. */
+ u32 available_vram_size;
+ /** Array of structures for receiving mode hints. */
+ VBVAMODEHINT *last_mode_hints;
+
+ struct vbox_fbdev *fbdev;
+
+ int fb_mtrr;
+
+ struct {
+ struct drm_global_reference mem_global_ref;
+ struct ttm_bo_global_ref bo_global_ref;
+ struct ttm_bo_device bdev;
+ bool mm_initialised;
+ } ttm;
+
+ struct mutex hw_mutex; /* protects modeset and accel/vbva accesses */
+ bool isr_installed;
+ /**
+ * We decide whether or not user-space supports display hot-plug
+ * depending on whether they react to a hot-plug event after the initial
+ * mode query.
+ */
+ bool initial_mode_queried;
+ struct work_struct hotplug_work;
+ u32 input_mapping_width;
+ u32 input_mapping_height;
+ /**
+ * Is user-space using an X.Org-style layout of one large frame-buffer
+ * encompassing all screens, or is the fbdev console active?
+ */
+ bool single_framebuffer;
+ u32 cursor_width;
+ u32 cursor_height;
+ u32 cursor_hot_x;
+ u32 cursor_hot_y;
+ size_t cursor_data_size;
+ u8 cursor_data[CURSOR_DATA_SIZE];
+};
+
+#undef CURSOR_PIXEL_COUNT
+#undef CURSOR_DATA_SIZE
+
+int vbox_driver_load(struct drm_device *dev, unsigned long flags);
+void vbox_driver_unload(struct drm_device *dev);
+void vbox_driver_lastclose(struct drm_device *dev);
+
+struct vbox_gem_object;
+
+#ifndef VGA_PORT_HGSMI_HOST
+#define VGA_PORT_HGSMI_HOST 0x3b0
+#define VGA_PORT_HGSMI_GUEST 0x3d0
+#endif
+
+struct vbox_connector {
+ struct drm_connector base;
+ char name[32];
+ struct vbox_crtc *vbox_crtc;
+ struct {
+ u16 width;
+ u16 height;
+ bool disconnected;
+ } mode_hint;
+};
+
+struct vbox_crtc {
+ struct drm_crtc base;
+ bool blanked;
+ bool disconnected;
+ unsigned int crtc_id;
+ u32 fb_offset;
+ bool cursor_enabled;
+ u16 x_hint;
+ u16 y_hint;
+};
+
+struct vbox_encoder {
+ struct drm_encoder base;
+};
+
+struct vbox_framebuffer {
+ struct drm_framebuffer base;
+ struct drm_gem_object *obj;
+};
+
+struct vbox_fbdev {
+ struct drm_fb_helper helper;
+ struct vbox_framebuffer afb;
+ int size;
+ struct ttm_bo_kmap_obj mapping;
+ int x1, y1, x2, y2; /* dirty rect */
+ spinlock_t dirty_lock;
+};
+
+#define to_vbox_crtc(x) container_of(x, struct vbox_crtc, base)
+#define to_vbox_connector(x) container_of(x, struct vbox_connector, base)
+#define to_vbox_encoder(x) container_of(x, struct vbox_encoder, base)
+#define to_vbox_framebuffer(x) container_of(x, struct vbox_framebuffer, base)
+
+int vbox_mode_init(struct drm_device *dev);
+void vbox_mode_fini(struct drm_device *dev);
+
+#define DRM_MODE_FB_CMD drm_mode_fb_cmd2
+#define CRTC_FB(crtc) ((crtc)->primary->fb)
+
+void vbox_enable_accel(struct vbox_private *vbox);
+void vbox_disable_accel(struct vbox_private *vbox);
+void vbox_report_caps(struct vbox_private *vbox);
+
+void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb,
+ struct drm_clip_rect *rects,
+ unsigned int num_rects);
+
+int vbox_framebuffer_init(struct drm_device *dev,
+ struct vbox_framebuffer *vbox_fb,
+ const struct DRM_MODE_FB_CMD *mode_cmd,
+ struct drm_gem_object *obj);
+
+int vbox_fbdev_init(struct drm_device *dev);
+void vbox_fbdev_fini(struct drm_device *dev);
+void vbox_fbdev_set_suspend(struct drm_device *dev, int state);
+void vbox_fbdev_set_base(struct vbox_private *vbox, unsigned long gpu_addr);
+
+struct vbox_bo {
+ struct ttm_buffer_object bo;
+ struct ttm_placement placement;
+ struct ttm_bo_kmap_obj kmap;
+ struct drm_gem_object gem;
+ struct ttm_place placements[3];
+ int pin_count;
+};
+
+#define gem_to_vbox_bo(gobj) container_of((gobj), struct vbox_bo, gem)
+
+static inline struct vbox_bo *vbox_bo(struct ttm_buffer_object *bo)
+{
+ return container_of(bo, struct vbox_bo, bo);
+}
+
+#define to_vbox_obj(x) container_of(x, struct vbox_gem_object, base)
+
+int vbox_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+
+void vbox_gem_free_object(struct drm_gem_object *obj);
+int vbox_dumb_mmap_offset(struct drm_file *file,
+ struct drm_device *dev,
+ u32 handle, u64 *offset);
+
+#define DRM_FILE_PAGE_OFFSET (0x10000000ULL >> PAGE_SHIFT)
+
+int vbox_mm_init(struct vbox_private *vbox);
+void vbox_mm_fini(struct vbox_private *vbox);
+
+int vbox_bo_create(struct drm_device *dev, int size, int align,
+ u32 flags, struct vbox_bo **pvboxbo);
+
+int vbox_gem_create(struct drm_device *dev,
+ u32 size, bool iskernel, struct drm_gem_object **obj);
+
+int vbox_bo_pin(struct vbox_bo *bo, u32 pl_flag, u64 *gpu_addr);
+int vbox_bo_unpin(struct vbox_bo *bo);
+
+static inline int vbox_bo_reserve(struct vbox_bo *bo, bool no_wait)
+{
+ int ret;
+
+ ret = ttm_bo_reserve(&bo->bo, true, no_wait, NULL);
+ if (ret) {
+ if (ret != -ERESTARTSYS && ret != -EBUSY)
+ DRM_ERROR("reserve failed %p\n", bo);
+ return ret;
+ }
+ return 0;
+}
+
+static inline void vbox_bo_unreserve(struct vbox_bo *bo)
+{
+ ttm_bo_unreserve(&bo->bo);
+}
+
+void vbox_ttm_placement(struct vbox_bo *bo, int domain);
+int vbox_bo_push_sysram(struct vbox_bo *bo);
+int vbox_mmap(struct file *filp, struct vm_area_struct *vma);
+
+/* vbox_prime.c */
+int vbox_gem_prime_pin(struct drm_gem_object *obj);
+void vbox_gem_prime_unpin(struct drm_gem_object *obj);
+struct sg_table *vbox_gem_prime_get_sg_table(struct drm_gem_object *obj);
+struct drm_gem_object *vbox_gem_prime_import_sg_table(
+ struct drm_device *dev, struct dma_buf_attachment *attach,
+ struct sg_table *table);
+void *vbox_gem_prime_vmap(struct drm_gem_object *obj);
+void vbox_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+int vbox_gem_prime_mmap(struct drm_gem_object *obj,
+ struct vm_area_struct *area);
+
+/* vbox_irq.c */
+int vbox_irq_init(struct vbox_private *vbox);
+void vbox_irq_fini(struct vbox_private *vbox);
+void vbox_report_hotplug(struct vbox_private *vbox);
+irqreturn_t vbox_irq_handler(int irq, void *arg);
+
+#endif
new file mode 100644
@@ -0,0 +1,444 @@
+/*
+ * Copyright (C) 2013-2017 Oracle Corporation
+ * This file is based on ast_fb.c
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * Authors: Dave Airlie <airlied@redhat.com>
+ * Michael Thayer <michael.thayer@oracle.com>
+ */
+/* Include from most specific to most general to be able to override things. */
+#include "vbox_drv.h"
+#include <VBoxVideo.h>
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/sysrq.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include "vbox_drv.h"
+
+#define VBOX_DIRTY_DELAY (HZ / 30)
+/**
+ * Tell the host about dirty rectangles to update.
+ */
+static void vbox_dirty_update(struct vbox_fbdev *fbdev,
+ int x, int y, int width, int height)
+{
+ struct drm_gem_object *obj;
+ struct vbox_bo *bo;
+ int ret = -EBUSY;
+ bool store_for_later = false;
+ int x2, y2;
+ unsigned long flags;
+ struct drm_clip_rect rect;
+
+ obj = fbdev->afb.obj;
+ bo = gem_to_vbox_bo(obj);
+
+ /*
+ * Try to reserve the BO; if we fail with -EBUSY then the BO is
+ * being moved and we should store up the damage until later.
+ */
+ if (drm_can_sleep())
+ ret = vbox_bo_reserve(bo, true);
+ if (ret) {
+ if (ret != -EBUSY)
+ return;
+
+ store_for_later = true;
+ }
+
+ x2 = x + width - 1;
+ y2 = y + height - 1;
+ spin_lock_irqsave(&fbdev->dirty_lock, flags);
+
+ if (fbdev->y1 < y)
+ y = fbdev->y1;
+ if (fbdev->y2 > y2)
+ y2 = fbdev->y2;
+ if (fbdev->x1 < x)
+ x = fbdev->x1;
+ if (fbdev->x2 > x2)
+ x2 = fbdev->x2;
+
+ if (store_for_later) {
+ fbdev->x1 = x;
+ fbdev->x2 = x2;
+ fbdev->y1 = y;
+ fbdev->y2 = y2;
+ spin_unlock_irqrestore(&fbdev->dirty_lock, flags);
+ return;
+ }
+
+ fbdev->x1 = INT_MAX;
+ fbdev->y1 = INT_MAX;
+ fbdev->x2 = 0;
+ fbdev->y2 = 0;
+
+ spin_unlock_irqrestore(&fbdev->dirty_lock, flags);
+
+ /*
+ * Not sure why the original code subtracted 1 here, but I will keep
+ * it that way to avoid unnecessary differences.
+ */
+ rect.x1 = x;
+ rect.x2 = x2 + 1;
+ rect.y1 = y;
+ rect.y2 = y2 + 1;
+ vbox_framebuffer_dirty_rectangles(&fbdev->afb.base, &rect, 1);
+
+ vbox_bo_unreserve(bo);
+}
+
+#ifdef CONFIG_FB_DEFERRED_IO
+static void vbox_deferred_io(struct fb_info *info, struct list_head *pagelist)
+{
+ struct vbox_fbdev *fbdev = info->par;
+ unsigned long start, end, min, max;
+ struct page *page;
+ int y1, y2;
+
+ min = ULONG_MAX;
+ max = 0;
+ list_for_each_entry(page, pagelist, lru) {
+ start = page->index << PAGE_SHIFT;
+ end = start + PAGE_SIZE - 1;
+ min = min(min, start);
+ max = max(max, end);
+ }
+
+ if (min < max) {
+ y1 = min / info->fix.line_length;
+ y2 = (max / info->fix.line_length) + 1;
+ DRM_INFO("%s: Calling dirty update: 0, %d, %d, %d\n",
+ __func__, y1, info->var.xres, y2 - y1 - 1);
+ vbox_dirty_update(fbdev, 0, y1, info->var.xres, y2 - y1 - 1);
+ }
+}
+
+static struct fb_deferred_io vbox_defio = {
+ .delay = VBOX_DIRTY_DELAY,
+ .deferred_io = vbox_deferred_io,
+};
+#endif
+
+static void vbox_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+{
+ struct vbox_fbdev *fbdev = info->par;
+
+ sys_fillrect(info, rect);
+ vbox_dirty_update(fbdev, rect->dx, rect->dy, rect->width, rect->height);
+}
+
+static void vbox_copyarea(struct fb_info *info, const struct fb_copyarea *area)
+{
+ struct vbox_fbdev *fbdev = info->par;
+
+ sys_copyarea(info, area);
+ vbox_dirty_update(fbdev, area->dx, area->dy, area->width, area->height);
+}
+
+static void vbox_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+ struct vbox_fbdev *fbdev = info->par;
+
+ sys_imageblit(info, image);
+ vbox_dirty_update(fbdev, image->dx, image->dy, image->width,
+ image->height);
+}
+
+static struct fb_ops vboxfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_fillrect = vbox_fillrect,
+ .fb_copyarea = vbox_copyarea,
+ .fb_imageblit = vbox_imageblit,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
+ .fb_debug_enter = drm_fb_helper_debug_enter,
+ .fb_debug_leave = drm_fb_helper_debug_leave,
+};
+
+static int vboxfb_create_object(struct vbox_fbdev *fbdev,
+ struct DRM_MODE_FB_CMD *mode_cmd,
+ struct drm_gem_object **gobj_p)
+{
+ struct drm_device *dev = fbdev->helper.dev;
+ u32 size;
+ struct drm_gem_object *gobj;
+ u32 pitch = mode_cmd->pitches[0];
+ int ret = 0;
+
+ size = pitch * mode_cmd->height;
+ ret = vbox_gem_create(dev, size, true, &gobj);
+ if (ret)
+ return ret;
+
+ *gobj_p = gobj;
+ return ret;
+}
+
+static int vboxfb_create(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct vbox_fbdev *fbdev =
+ container_of(helper, struct vbox_fbdev, helper);
+ struct drm_device *dev = fbdev->helper.dev;
+ struct DRM_MODE_FB_CMD mode_cmd;
+ struct drm_framebuffer *fb;
+ struct fb_info *info;
+ struct device *device = &dev->pdev->dev;
+ struct drm_gem_object *gobj = NULL;
+ struct vbox_bo *bo = NULL;
+ int size, ret;
+ u32 pitch;
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+ pitch = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+ mode_cmd.pitches[0] = pitch;
+
+ size = pitch * mode_cmd.height;
+
+ ret = vboxfb_create_object(fbdev, &mode_cmd, &gobj);
+ if (ret) {
+ DRM_ERROR("failed to create fbcon backing object %d\n", ret);
+ return ret;
+ }
+
+ ret = vbox_framebuffer_init(dev, &fbdev->afb, &mode_cmd, gobj);
+ if (ret)
+ return ret;
+
+ bo = gem_to_vbox_bo(gobj);
+
+ ret = vbox_bo_reserve(bo, false);
+ if (ret)
+ return ret;
+
+ ret = vbox_bo_pin(bo, TTM_PL_FLAG_VRAM, NULL);
+ if (ret) {
+ vbox_bo_unreserve(bo);
+ return ret;
+ }
+
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+ vbox_bo_unreserve(bo);
+ if (ret) {
+ DRM_ERROR("failed to kmap fbcon\n");
+ return ret;
+ }
+
+ info = framebuffer_alloc(0, device);
+ if (!info)
+ return -ENOMEM;
+ info->par = fbdev;
+
+ fbdev->size = size;
+
+ fb = &fbdev->afb.base;
+ fbdev->helper.fb = fb;
+ fbdev->helper.fbdev = info;
+
+ strcpy(info->fix.id, "vboxdrmfb");
+
+ /*
+ * The last flag forces a mode set on VT switches even if the kernel
+ * does not think it is needed.
+ */
+ info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT |
+ FBINFO_MISC_ALWAYS_SETPAR;
+ info->fbops = &vboxfb_ops;
+
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret)
+ return -ENOMEM;
+
+ /*
+ * This seems to be done as a safety check to ensure that the
+ * framebuffer is not registered twice by different drivers.
+ */
+ info->apertures = alloc_apertures(1);
+ if (!info->apertures)
+ return -ENOMEM;
+ info->apertures->ranges[0].base = pci_resource_start(dev->pdev, 0);
+ info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
+
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
+ drm_fb_helper_fill_var(info, &fbdev->helper, sizes->fb_width,
+ sizes->fb_height);
+
+ info->screen_base = bo->kmap.virtual;
+ info->screen_size = size;
+
+#ifdef CONFIG_FB_DEFERRED_IO
+ info->fbdefio = &vbox_defio;
+ fb_deferred_io_init(info);
+#endif
+
+ info->pixmap.flags = FB_PIXMAP_SYSTEM;
+
+ DRM_DEBUG_KMS("allocated %dx%d\n", fb->width, fb->height);
+
+ return 0;
+}
+
+static void vbox_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno)
+{
+}
+
+static void vbox_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno)
+{
+ *red = regno;
+ *green = regno;
+ *blue = regno;
+}
+
+static struct drm_fb_helper_funcs vbox_fb_helper_funcs = {
+ .gamma_set = vbox_fb_gamma_set,
+ .gamma_get = vbox_fb_gamma_get,
+ .fb_probe = vboxfb_create,
+};
+
+static void vbox_fbdev_destroy(struct drm_device *dev, struct vbox_fbdev *fbdev)
+{
+ struct fb_info *info;
+ struct vbox_framebuffer *afb = &fbdev->afb;
+
+ if (fbdev->helper.fbdev) {
+ info = fbdev->helper.fbdev;
+ unregister_framebuffer(info);
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
+ }
+
+ if (afb->obj) {
+ struct vbox_bo *bo = gem_to_vbox_bo(afb->obj);
+
+ if (!vbox_bo_reserve(bo, false)) {
+ if (bo->kmap.virtual)
+ ttm_bo_kunmap(&bo->kmap);
+ /*
+ * QXL does this, but is it really needed before
+ * freeing?
+ */
+ if (bo->pin_count)
+ vbox_bo_unpin(bo);
+ vbox_bo_unreserve(bo);
+ }
+ drm_gem_object_unreference_unlocked(afb->obj);
+ afb->obj = NULL;
+ }
+ drm_fb_helper_fini(&fbdev->helper);
+
+ drm_framebuffer_unregister_private(&afb->base);
+ drm_framebuffer_cleanup(&afb->base);
+}
+
+int vbox_fbdev_init(struct drm_device *dev)
+{
+ struct vbox_private *vbox = dev->dev_private;
+ struct vbox_fbdev *fbdev;
+ int ret;
+
+ fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
+ if (!fbdev)
+ return -ENOMEM;
+
+ vbox->fbdev = fbdev;
+ spin_lock_init(&fbdev->dirty_lock);
+
+ drm_fb_helper_prepare(dev, &fbdev->helper, &vbox_fb_helper_funcs);
+ ret = drm_fb_helper_init(dev, &fbdev->helper, vbox->num_crtcs);
+ if (ret)
+ goto free;
+
+ ret = drm_fb_helper_single_add_all_connectors(&fbdev->helper);
+ if (ret)
+ goto fini;
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(dev);
+
+ ret = drm_fb_helper_initial_config(&fbdev->helper, 32);
+ if (ret)
+ goto fini;
+
+ return 0;
+
+fini:
+ drm_fb_helper_fini(&fbdev->helper);
+free:
+ kfree(fbdev);
+ vbox->fbdev = NULL;
+
+ return ret;
+}
+
+void vbox_fbdev_fini(struct drm_device *dev)
+{
+ struct vbox_private *vbox = dev->dev_private;
+
+ if (!vbox->fbdev)
+ return;
+
+ vbox_fbdev_destroy(dev, vbox->fbdev);
+ kfree(vbox->fbdev);
+ vbox->fbdev = NULL;
+}
+
+void vbox_fbdev_set_suspend(struct drm_device *dev, int state)
+{
+ struct vbox_private *vbox = dev->dev_private;
+
+ if (!vbox->fbdev)
+ return;
+
+ fb_set_suspend(vbox->fbdev->helper.fbdev, state);
+}
+
+void vbox_fbdev_set_base(struct vbox_private *vbox, unsigned long gpu_addr)
+{
+ vbox->fbdev->helper.fbdev->fix.smem_start =
+ vbox->fbdev->helper.fbdev->apertures->ranges[0].base + gpu_addr;
+ vbox->fbdev->helper.fbdev->fix.smem_len =
+ vbox->available_vram_size - gpu_addr;
+}
new file mode 100644
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2017 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * Authors: Hans de Goede <hdegoede@redhat.com>
+ */
+
+#include <HGSMIBase.h>
+#include <VBoxVideoVBE.h>
+
+/* One-at-a-Time Hash from http://www.burtleburtle.net/bob/hash/doobs.html */
+static u32 hgsmi_hash_process(u32 hash, const u8 *data, int size)
+{
+ while (size--) {
+ hash += *data++;
+ hash += (hash << 10);
+ hash ^= (hash >> 6);
+ }
+
+ return hash;
+}
+
+static u32 hgsmi_hash_end(u32 hash)
+{
+ hash += (hash << 3);
+ hash ^= (hash >> 11);
+ hash += (hash << 15);
+
+ return hash;
+}
+
+/* Not really a checksum but that is the naming used in all vbox code */
+static u32 hgsmi_checksum(u32 offset,
+ const HGSMIBUFFERHEADER *header,
+ const HGSMIBUFFERTAIL *tail)
+{
+ u32 checksum;
+
+ checksum = hgsmi_hash_process(0, (u8 *)&offset, sizeof(offset));
+ checksum = hgsmi_hash_process(checksum, (u8 *)header, sizeof(*header));
+ /* 4 -> checksum only the first four bytes of the tail, not the checksum itself */
+ checksum = hgsmi_hash_process(checksum, (u8 *)tail, 4);
+
+ return hgsmi_hash_end(checksum);
+}
+
+void *hgsmi_buffer_alloc(struct gen_pool *guest_pool, size_t size,
+ u8 channel, u16 channel_info)
+{
+ HGSMIBUFFERHEADER *h;
+ HGSMIBUFFERTAIL *t;
+ size_t total_size;
+ dma_addr_t offset;
+
+ total_size = size + sizeof(HGSMIBUFFERHEADER) + sizeof(HGSMIBUFFERTAIL);
+ h = gen_pool_dma_alloc(guest_pool, total_size, &offset);
+ if (!h)
+ return NULL;
+
+ t = (HGSMIBUFFERTAIL *)((u8 *)h + sizeof(HGSMIBUFFERHEADER) + size);
+
+ h->u8Flags = HGSMI_BUFFER_HEADER_F_SEQ_SINGLE;
+ h->u32DataSize = size;
+ h->u8Channel = channel;
+ h->u16ChannelInfo = channel_info;
+ memset(&h->u.au8Union, 0, sizeof(h->u.au8Union));
+
+ t->u32Reserved = 0;
+ t->u32Checksum = hgsmi_checksum(offset, h, t);
+
+ return (u8 *)h + sizeof(HGSMIBUFFERHEADER);
+}
+
+void hgsmi_buffer_free(struct gen_pool *guest_pool, void *buf)
+{
+ HGSMIBUFFERHEADER *h =
+ (HGSMIBUFFERHEADER *)((u8 *)buf - sizeof(HGSMIBUFFERHEADER));
+ size_t total_size = h->u32DataSize + sizeof(HGSMIBUFFERHEADER) +
+ sizeof(HGSMIBUFFERTAIL);
+
+ gen_pool_free(guest_pool, (unsigned long)h, total_size);
+}
+
+int hgsmi_buffer_submit(struct gen_pool *guest_pool, void *buf)
+{
+ phys_addr_t offset;
+
+ offset = gen_pool_virt_to_phys(guest_pool, (unsigned long)buf -
+ sizeof(HGSMIBUFFERHEADER));
+ outl(offset, VGA_PORT_HGSMI_GUEST);
+ /* Make the compiler aware that the host has changed memory. */
+ mb();
+
+ return VINF_SUCCESS;
+}
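[Editorial note, not part of the patch: the three helpers above are used by the rest of the driver in an allocate -> fill -> submit -> free pattern. Below is a minimal, hypothetical sketch of a caller; the command structure and the channel/channel-info values are placeholders, not real HGSMI definitions.]

/* Hypothetical example only: struct and channel values are placeholders. */
struct example_cmd {
	u32 param;
};

static int example_send(struct gen_pool *guest_pool)
{
	struct example_cmd *p;

	/* Reserves header + payload + tail in the guest heap. */
	p = hgsmi_buffer_alloc(guest_pool, sizeof(*p), 0, 0);
	if (!p)
		return VERR_NO_MEMORY;

	p->param = 42;				/* fill in the payload */
	hgsmi_buffer_submit(guest_pool, p);	/* pass the offset to the host */
	hgsmi_buffer_free(guest_pool, p);	/* release the heap block */

	return VINF_SUCCESS;
}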
new file mode 100644
@@ -0,0 +1,207 @@
+/*
+ * Copyright (C) 2016-2017 Oracle Corporation
+ * This file is based on qxl_irq.c
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alon Levy
+ * Michael Thayer <michael.thayer@oracle.com>,
+ * Hans de Goede <hdegoede@redhat.com>
+ */
+
+#include "vbox_drv.h"
+
+#include <VBoxVideo.h>
+
+#include <drm/drm_crtc_helper.h>
+
+static void vbox_clear_irq(void)
+{
+ outl((u32)~0, VGA_PORT_HGSMI_HOST);
+}
+
+static u32 vbox_get_flags(struct vbox_private *vbox)
+{
+ return readl(vbox->guest_heap + HOST_FLAGS_OFFSET);
+}
+
+void vbox_report_hotplug(struct vbox_private *vbox)
+{
+ schedule_work(&vbox->hotplug_work);
+}
+
+irqreturn_t vbox_irq_handler(int irq, void *arg)
+{
+ struct drm_device *dev = (struct drm_device *)arg;
+ struct vbox_private *vbox = (struct vbox_private *)dev->dev_private;
+ u32 host_flags = vbox_get_flags(vbox);
+
+ if (!(host_flags & HGSMIHOSTFLAGS_IRQ))
+ return IRQ_NONE;
+
+ /*
+ * Due to a bug in the initial host implementation of hot-plug irqs,
+ * the hot-plug and cursor capability flags were never cleared.
+ * Fortunately we can tell when they would have been set by checking
+ * that the VSYNC flag is not set.
+ */
+ if (host_flags &
+ (HGSMIHOSTFLAGS_HOTPLUG | HGSMIHOSTFLAGS_CURSOR_CAPABILITIES) &&
+ !(host_flags & HGSMIHOSTFLAGS_VSYNC))
+ vbox_report_hotplug(vbox);
+
+ vbox_clear_irq();
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * Check that the position hints provided by the host are suitable for GNOME
+ * shell (i.e. all screens disjoint and hints for all enabled screens) and if
+ * not replace them with default ones. Providing valid hints improves the
+ * chances that we will get a known screen layout for pointer mapping.
+ */
+static void validate_or_set_position_hints(struct vbox_private *vbox)
+{
+ int i, j;
+ u16 currentx = 0;
+ bool valid = true;
+
+ for (i = 0; i < vbox->num_crtcs; ++i) {
+ for (j = 0; j < i; ++j) {
+ struct VBVAMODEHINT *hintsi = &vbox->last_mode_hints[i];
+ struct VBVAMODEHINT *hintsj = &vbox->last_mode_hints[j];
+
+ if (hintsi->fEnabled && hintsj->fEnabled) {
+ if (hintsi->dx >= 0xffff ||
+ hintsi->dy >= 0xffff ||
+ hintsj->dx >= 0xffff ||
+ hintsj->dy >= 0xffff ||
+ (hintsi->dx <
+ hintsj->dx + (hintsj->cx & 0x8fff) &&
+ hintsi->dx + (hintsi->cx & 0x8fff) >
+ hintsj->dx) ||
+ (hintsi->dy <
+ hintsj->dy + (hintsj->cy & 0x8fff) &&
+ hintsi->dy + (hintsi->cy & 0x8fff) >
+ hintsj->dy))
+ valid = false;
+ }
+ }
+ }
+ if (!valid)
+ for (i = 0; i < vbox->num_crtcs; ++i) {
+ if (vbox->last_mode_hints[i].fEnabled) {
+ vbox->last_mode_hints[i].dx = currentx;
+ vbox->last_mode_hints[i].dy = 0;
+ currentx +=
+ vbox->last_mode_hints[i].cx & 0x8fff;
+ }
+ }
+}
+
+/**
+ * Query the host for the most recent video mode hints.
+ */
+static void vbox_update_mode_hints(struct vbox_private *vbox)
+{
+ struct drm_device *dev = vbox->dev;
+ struct drm_connector *connector;
+ struct vbox_connector *vbox_connector;
+ struct VBVAMODEHINT *hints;
+ u16 flags;
+ bool disconnected;
+ unsigned int crtc_id;
+ int rc;
+
+ rc = VBoxHGSMIGetModeHints(vbox->guest_pool, vbox->num_crtcs,
+ vbox->last_mode_hints);
+ if (RT_FAILURE(rc)) {
+ DRM_ERROR("vboxvideo: VBoxHGSMIGetModeHints failed, rc=%i.\n",
+ rc);
+ return;
+ }
+ validate_or_set_position_hints(vbox);
+ drm_modeset_lock_all(dev);
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ vbox_connector = to_vbox_connector(connector);
+ hints =
+ &vbox->last_mode_hints[vbox_connector->vbox_crtc->crtc_id];
+ if (hints->magic == VBVAMODEHINT_MAGIC) {
+ disconnected = !(hints->fEnabled);
+ crtc_id = vbox_connector->vbox_crtc->crtc_id;
+ flags = VBVA_SCREEN_F_ACTIVE
+ | (disconnected ? VBVA_SCREEN_F_DISABLED :
+ VBVA_SCREEN_F_BLANK);
+ vbox_connector->mode_hint.width = hints->cx & 0x8fff;
+ vbox_connector->mode_hint.height = hints->cy & 0x8fff;
+ vbox_connector->vbox_crtc->x_hint = hints->dx;
+ vbox_connector->vbox_crtc->y_hint = hints->dy;
+ vbox_connector->mode_hint.disconnected = disconnected;
+ if (vbox_connector->vbox_crtc->disconnected !=
+ disconnected) {
+ VBoxHGSMIProcessDisplayInfo(vbox->guest_pool,
+ crtc_id, 0, 0, 0,
+ hints->cx * 4,
+ hints->cx,
+ hints->cy, 0,
+ flags);
+ vbox_connector->vbox_crtc->disconnected =
+ disconnected;
+ }
+ }
+ }
+ drm_modeset_unlock_all(dev);
+}
+
+static void vbox_hotplug_worker(struct work_struct *work)
+{
+ struct vbox_private *vbox = container_of(work, struct vbox_private,
+ hotplug_work);
+
+ vbox_update_mode_hints(vbox);
+ drm_kms_helper_hotplug_event(vbox->dev);
+}
+
+int vbox_irq_init(struct vbox_private *vbox)
+{
+ int ret;
+
+ vbox_update_mode_hints(vbox);
+ ret = drm_irq_install(vbox->dev, vbox->dev->pdev->irq);
+ if (unlikely(ret != 0)) {
+ vbox_irq_fini(vbox);
+ DRM_ERROR("Failed installing irq: %d\n", ret);
+ return 1;
+ }
+ INIT_WORK(&vbox->hotplug_work, vbox_hotplug_worker);
+ vbox->isr_installed = true;
+ return 0;
+}
+
+void vbox_irq_fini(struct vbox_private *vbox)
+{
+ if (vbox->isr_installed) {
+ drm_irq_uninstall(vbox->dev);
+ flush_work(&vbox->hotplug_work);
+ vbox->isr_installed = false;
+ }
+}
new file mode 100644
@@ -0,0 +1,512 @@
+/*
+ * Copyright (C) 2013-2017 Oracle Corporation
+ * This file is based on ast_main.c
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * Authors: Dave Airlie <airlied@redhat.com>,
+ * Michael Thayer <michael.thayer@oracle.com>,
+ * Hans de Goede <hdegoede@redhat.com>
+ */
+#include "vbox_drv.h"
+
+#include <VBoxVideoGuest.h>
+#include <VBoxVideoVBE.h>
+
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_crtc_helper.h>
+
+static void vbox_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+ struct vbox_framebuffer *vbox_fb = to_vbox_framebuffer(fb);
+
+ if (vbox_fb->obj)
+ drm_gem_object_unreference_unlocked(vbox_fb->obj);
+
+ drm_framebuffer_cleanup(fb);
+ kfree(fb);
+}
+
+void vbox_enable_accel(struct vbox_private *vbox)
+{
+ unsigned int i;
+ struct VBVABUFFER *vbva;
+
+ if (!vbox->vbva_info || !vbox->vbva_buffers) {
+ /* Should never happen... */
+ DRM_ERROR("vboxvideo: failed to set up VBVA.\n");
+ return;
+ }
+
+ for (i = 0; i < vbox->num_crtcs; ++i) {
+ if (!vbox->vbva_info[i].pVBVA) {
+ vbva = (struct VBVABUFFER *)
+ ((u8 *)vbox->vbva_buffers +
+ i * VBVA_MIN_BUFFER_SIZE);
+ if (!VBoxVBVAEnable(&vbox->vbva_info[i],
+ vbox->guest_pool, vbva, i)) {
+ /* very old host or driver error. */
+ DRM_ERROR("vboxvideo: VBoxVBVAEnable failed - heap allocation error.\n");
+ return;
+ }
+ }
+ }
+}
+
+void vbox_disable_accel(struct vbox_private *vbox)
+{
+ unsigned int i;
+
+ for (i = 0; i < vbox->num_crtcs; ++i)
+ VBoxVBVADisable(&vbox->vbva_info[i], vbox->guest_pool, i);
+}
+
+void vbox_report_caps(struct vbox_private *vbox)
+{
+ u32 caps = VBVACAPS_DISABLE_CURSOR_INTEGRATION
+ | VBVACAPS_IRQ | VBVACAPS_USE_VBVA_ONLY;
+ if (vbox->initial_mode_queried)
+ caps |= VBVACAPS_VIDEO_MODE_HINTS;
+ VBoxHGSMISendCapsInfo(vbox->guest_pool, caps);
+}
+
+/**
+ * Send information about dirty rectangles to VBVA. If necessary we enable
+ * VBVA first, as this is normally disabled after a change of master in case
+ * the new master does not send dirty rectangle information (is this even
+ * allowed?)
+ */
+void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb,
+ struct drm_clip_rect *rects,
+ unsigned int num_rects)
+{
+ struct vbox_private *vbox = fb->dev->dev_private;
+ struct drm_crtc *crtc;
+ unsigned int i;
+
+ mutex_lock(&vbox->hw_mutex);
+ list_for_each_entry(crtc, &fb->dev->mode_config.crtc_list, head) {
+ if (CRTC_FB(crtc) == fb) {
+ vbox_enable_accel(vbox);
+ for (i = 0; i < num_rects; ++i) {
+ VBVACMDHDR cmd_hdr;
+ unsigned int crtc_id =
+ to_vbox_crtc(crtc)->crtc_id;
+
+ if ((rects[i].x1 >
+ crtc->x + crtc->hwmode.hdisplay) ||
+ (rects[i].y1 >
+ crtc->y + crtc->hwmode.vdisplay) ||
+ (rects[i].x2 < crtc->x) ||
+ (rects[i].y2 < crtc->y))
+ continue;
+
+ cmd_hdr.x = (s16)rects[i].x1;
+ cmd_hdr.y = (s16)rects[i].y1;
+ cmd_hdr.w = (u16)rects[i].x2 - rects[i].x1;
+ cmd_hdr.h = (u16)rects[i].y2 - rects[i].y1;
+
+ if (VBoxVBVABufferBeginUpdate(
+ &vbox->vbva_info[crtc_id],
+ vbox->guest_pool)) {
+ VBoxVBVAWrite(&vbox->vbva_info[crtc_id],
+ vbox->guest_pool,
+ &cmd_hdr,
+ sizeof(cmd_hdr));
+ VBoxVBVABufferEndUpdate(
+ &vbox->vbva_info[crtc_id]);
+ }
+ }
+ }
+ }
+ mutex_unlock(&vbox->hw_mutex);
+}
+
+static int vbox_user_framebuffer_dirty(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int flags, unsigned int color,
+ struct drm_clip_rect *rects,
+ unsigned int num_rects)
+{
+ vbox_framebuffer_dirty_rectangles(fb, rects, num_rects);
+
+ return 0;
+}
+
+static const struct drm_framebuffer_funcs vbox_fb_funcs = {
+ .destroy = vbox_user_framebuffer_destroy,
+ .dirty = vbox_user_framebuffer_dirty,
+};
+
+int vbox_framebuffer_init(struct drm_device *dev,
+ struct vbox_framebuffer *vbox_fb,
+ const struct DRM_MODE_FB_CMD *mode_cmd,
+ struct drm_gem_object *obj)
+{
+ int ret;
+
+ drm_helper_mode_fill_fb_struct(dev, &vbox_fb->base, mode_cmd);
+ vbox_fb->obj = obj;
+ ret = drm_framebuffer_init(dev, &vbox_fb->base, &vbox_fb_funcs);
+ if (ret) {
+ DRM_ERROR("framebuffer init failed %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct drm_framebuffer *vbox_user_framebuffer_create(
+ struct drm_device *dev,
+ struct drm_file *filp,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_gem_object *obj;
+ struct vbox_framebuffer *vbox_fb;
+ int ret;
+
+ obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
+ if (!obj)
+ return ERR_PTR(-ENOENT);
+
+ vbox_fb = kzalloc(sizeof(*vbox_fb), GFP_KERNEL);
+ if (!vbox_fb) {
+ drm_gem_object_unreference_unlocked(obj);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ret = vbox_framebuffer_init(dev, vbox_fb, mode_cmd, obj);
+ if (ret) {
+ drm_gem_object_unreference_unlocked(obj);
+ kfree(vbox_fb);
+ return ERR_PTR(ret);
+ }
+
+ return &vbox_fb->base;
+}
+
+static const struct drm_mode_config_funcs vbox_mode_funcs = {
+ .fb_create = vbox_user_framebuffer_create,
+};
+
+static void vbox_accel_fini(struct vbox_private *vbox)
+{
+ if (vbox->vbva_info) {
+ vbox_disable_accel(vbox);
+ kfree(vbox->vbva_info);
+ vbox->vbva_info = NULL;
+ }
+ if (vbox->vbva_buffers) {
+ pci_iounmap(vbox->dev->pdev, vbox->vbva_buffers);
+ vbox->vbva_buffers = NULL;
+ }
+}
+
+static int vbox_accel_init(struct vbox_private *vbox)
+{
+ unsigned int i;
+
+ vbox->vbva_info = kcalloc(vbox->num_crtcs, sizeof(*vbox->vbva_info),
+ GFP_KERNEL);
+ if (!vbox->vbva_info)
+ return -ENOMEM;
+
+ /* Take a command buffer for each screen from the end of usable VRAM. */
+ vbox->available_vram_size -= vbox->num_crtcs * VBVA_MIN_BUFFER_SIZE;
+
+ vbox->vbva_buffers = pci_iomap_range(vbox->dev->pdev, 0,
+ vbox->available_vram_size,
+ vbox->num_crtcs *
+ VBVA_MIN_BUFFER_SIZE);
+ if (!vbox->vbva_buffers)
+ return -ENOMEM;
+
+ for (i = 0; i < vbox->num_crtcs; ++i)
+ VBoxVBVASetupBufferContext(&vbox->vbva_info[i],
+ vbox->available_vram_size +
+ i * VBVA_MIN_BUFFER_SIZE,
+ VBVA_MIN_BUFFER_SIZE);
+
+ return 0;
+}
+
+/** Do we support the 4.3 plus mode hint reporting interface? */
+static bool have_hgsmi_mode_hints(struct vbox_private *vbox)
+{
+ u32 have_hints, have_cursor;
+ int ret;
+
+ ret = VBoxQueryConfHGSMI(vbox->guest_pool,
+ VBOX_VBVA_CONF32_MODE_HINT_REPORTING,
+ &have_hints);
+ if (RT_FAILURE(ret))
+ return false;
+
+ ret = VBoxQueryConfHGSMI(vbox->guest_pool,
+ VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING,
+ &have_cursor);
+ if (RT_FAILURE(ret))
+ return false;
+
+ return have_hints == VINF_SUCCESS && have_cursor == VINF_SUCCESS;
+}
+
+/**
+ * Set up our heaps and data exchange buffers in VRAM before handing the rest
+ * to the memory manager.
+ */
+static int vbox_hw_init(struct vbox_private *vbox)
+{
+ int ret;
+
+ vbox->full_vram_size = VBoxVideoGetVRAMSize();
+ vbox->any_pitch = VBoxVideoAnyWidthAllowed();
+
+ DRM_INFO("VRAM %08x\n", vbox->full_vram_size);
+
+ /* Map the guest-heap at the end of VRAM. */
+ vbox->guest_heap =
+ pci_iomap_range(vbox->dev->pdev, 0, GUEST_HEAP_OFFSET(vbox),
+ GUEST_HEAP_SIZE);
+ if (!vbox->guest_heap)
+ return -ENOMEM;
+
+ /* Create the guest-heap mem-pool, using 2^4 = 16 byte chunks. */
+ vbox->guest_pool = gen_pool_create(4, -1);
+ if (!vbox->guest_pool)
+ return -ENOMEM;
+
+ ret = gen_pool_add_virt(vbox->guest_pool,
+ (unsigned long)vbox->guest_heap,
+ GUEST_HEAP_OFFSET(vbox),
+ GUEST_HEAP_USABLE_SIZE, -1);
+ if (ret)
+ return ret;
+
+ /* Reduce available VRAM size to reflect the guest heap. */
+ vbox->available_vram_size = GUEST_HEAP_OFFSET(vbox);
+ /* Linux drm represents monitors as a 32-bit array; cap the screen count. */
+ vbox->num_crtcs = min_t(u32, VBoxHGSMIGetMonitorCount(vbox->guest_pool),
+ VBOX_MAX_SCREENS);
+
+ if (!have_hgsmi_mode_hints(vbox))
+ return -ENOTSUPP;
+
+ vbox->last_mode_hints =
+ kcalloc(vbox->num_crtcs, sizeof(VBVAMODEHINT), GFP_KERNEL);
+ if (!vbox->last_mode_hints)
+ return -ENOMEM;
+
+ return vbox_accel_init(vbox);
+}
+
+static void vbox_hw_fini(struct vbox_private *vbox)
+{
+ vbox_accel_fini(vbox);
+ kfree(vbox->last_mode_hints);
+ vbox->last_mode_hints = NULL;
+}
+
+int vbox_driver_load(struct drm_device *dev, unsigned long flags)
+{
+ struct vbox_private *vbox;
+ int ret = 0;
+
+ if (!VBoxHGSMIIsSupported())
+ return -ENODEV;
+
+ vbox = kzalloc(sizeof(*vbox), GFP_KERNEL);
+ if (!vbox)
+ return -ENOMEM;
+
+ dev->dev_private = vbox;
+ vbox->dev = dev;
+
+ mutex_init(&vbox->hw_mutex);
+
+ ret = vbox_hw_init(vbox);
+ if (ret)
+ goto out_free;
+
+ ret = vbox_mm_init(vbox);
+ if (ret)
+ goto out_free;
+
+ drm_mode_config_init(dev);
+
+ dev->mode_config.funcs = (void *)&vbox_mode_funcs;
+ dev->mode_config.min_width = 64;
+ dev->mode_config.min_height = 64;
+ dev->mode_config.preferred_depth = 24;
+ dev->mode_config.max_width = VBE_DISPI_MAX_XRES;
+ dev->mode_config.max_height = VBE_DISPI_MAX_YRES;
+
+ ret = vbox_mode_init(dev);
+ if (ret)
+ goto out_free;
+
+ ret = vbox_irq_init(vbox);
+ if (ret)
+ goto out_free;
+
+ ret = vbox_fbdev_init(dev);
+ if (ret)
+ goto out_free;
+
+ return 0;
+
+out_free:
+ vbox_driver_unload(dev);
+ return ret;
+}
+
+void vbox_driver_unload(struct drm_device *dev)
+{
+ struct vbox_private *vbox = dev->dev_private;
+
+ vbox_fbdev_fini(dev);
+ vbox_irq_fini(vbox);
+ vbox_mode_fini(dev);
+ if (dev->mode_config.funcs)
+ drm_mode_config_cleanup(dev);
+
+ vbox_hw_fini(vbox);
+ vbox_mm_fini(vbox);
+ if (vbox->guest_pool)
+ gen_pool_destroy(vbox->guest_pool);
+ if (vbox->guest_heap)
+ pci_iounmap(dev->pdev, vbox->guest_heap);
+ kfree(vbox);
+ dev->dev_private = NULL;
+}
+
+/**
+ * @note This is described in the DRM framework documentation. The AST driver
+ * does not have it, but we get an oops on driver unload if it is not present.
+ */
+void vbox_driver_lastclose(struct drm_device *dev)
+{
+ struct vbox_private *vbox = dev->dev_private;
+
+ if (vbox->fbdev)
+ drm_fb_helper_restore_fbdev_mode_unlocked(&vbox->fbdev->helper);
+}
+
+int vbox_gem_create(struct drm_device *dev,
+ u32 size, bool iskernel, struct drm_gem_object **obj)
+{
+ struct vbox_bo *vboxbo;
+ int ret;
+
+ *obj = NULL;
+
+ size = roundup(size, PAGE_SIZE);
+ if (size == 0)
+ return -EINVAL;
+
+ ret = vbox_bo_create(dev, size, 0, 0, &vboxbo);
+ if (ret) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("failed to allocate GEM object\n");
+ return ret;
+ }
+
+ *obj = &vboxbo->gem;
+
+ return 0;
+}
+
+int vbox_dumb_create(struct drm_file *file,
+ struct drm_device *dev, struct drm_mode_create_dumb *args)
+{
+ int ret;
+ struct drm_gem_object *gobj;
+ u32 handle;
+
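+ /* Pitch is the width times the bytes per pixel (bpp rounded up to whole bytes). */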
+ args->pitch = args->width * ((args->bpp + 7) / 8);
+ args->size = args->pitch * args->height;
+
+ ret = vbox_gem_create(dev, args->size, false, &gobj);
+ if (ret)
+ return ret;
+
+ ret = drm_gem_handle_create(file, gobj, &handle);
+ drm_gem_object_unreference_unlocked(gobj);
+ if (ret)
+ return ret;
+
+ args->handle = handle;
+
+ return 0;
+}
+
+static void vbox_bo_unref(struct vbox_bo **bo)
+{
+ struct ttm_buffer_object *tbo;
+
+ if ((*bo) == NULL)
+ return;
+
+ tbo = &((*bo)->bo);
+ ttm_bo_unref(&tbo);
+ if (!tbo)
+ *bo = NULL;
+}
+
+void vbox_gem_free_object(struct drm_gem_object *obj)
+{
+ struct vbox_bo *vbox_bo = gem_to_vbox_bo(obj);
+
+ vbox_bo_unref(&vbox_bo);
+}
+
+static inline u64 vbox_bo_mmap_offset(struct vbox_bo *bo)
+{
+ return drm_vma_node_offset_addr(&bo->bo.vma_node);
+}
+
+int
+vbox_dumb_mmap_offset(struct drm_file *file,
+ struct drm_device *dev,
+ u32 handle, u64 *offset)
+{
+ struct drm_gem_object *obj;
+ int ret;
+ struct vbox_bo *bo;
+
+ mutex_lock(&dev->struct_mutex);
+ obj = drm_gem_object_lookup(file, handle);
+ if (!obj) {
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+
+ bo = gem_to_vbox_bo(obj);
+ *offset = vbox_bo_mmap_offset(bo);
+
+ drm_gem_object_unreference(obj);
+ ret = 0;
+
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
new file mode 100644
@@ -0,0 +1,864 @@
+/*
+ * Copyright (C) 2013-2017 Oracle Corporation
+ * This file is based on ast_mode.c
+ * Copyright 2012 Red Hat Inc.
+ * Parts based on xf86-video-ast
+ * Copyright (c) 2005 ASPEED Technology Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ * Michael Thayer <michael.thayer@oracle.com>,
+ * Hans de Goede <hdegoede@redhat.com>
+ */
+#include "vbox_drv.h"
+
+#include <VBoxVideo.h>
+
+#include <linux/export.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
+
+static int vbox_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
+ u32 handle, u32 width, u32 height,
+ s32 hot_x, s32 hot_y);
+static int vbox_cursor_move(struct drm_crtc *crtc, int x, int y);
+
+/**
+ * Set a graphics mode. Poke any required values into registers, do an HGSMI
+ * mode set and tell the host we support advanced graphics functions.
+ */
+static void vbox_do_modeset(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
+ struct vbox_private *vbox;
+ int width, height, bpp, pitch;
+ unsigned int crtc_id;
+ u16 flags;
+ s32 x_offset, y_offset;
+
+ vbox = crtc->dev->dev_private;
+ width = mode->hdisplay ? mode->hdisplay : 640;
+ height = mode->vdisplay ? mode->vdisplay : 480;
+ crtc_id = vbox_crtc->crtc_id;
+ bpp = crtc->enabled ? CRTC_FB(crtc)->format->cpp[0] * 8 : 32;
+ pitch = crtc->enabled ? CRTC_FB(crtc)->pitches[0] : width * bpp / 8;
+ x_offset = vbox->single_framebuffer ? crtc->x : vbox_crtc->x_hint;
+ y_offset = vbox->single_framebuffer ? crtc->y : vbox_crtc->y_hint;
+
+ /*
+ * This is the old way of setting graphics modes. It assumed one screen
+ * and a frame-buffer at the start of video RAM. On older versions of
+ * VirtualBox, certain parts of the code still assume that the first
+ * screen is programmed this way, so try to fake it.
+ */
+ if (vbox_crtc->crtc_id == 0 && crtc->enabled &&
+ vbox_crtc->fb_offset / pitch < 0xffff - crtc->y &&
+ vbox_crtc->fb_offset % (bpp / 8) == 0)
+ VBoxVideoSetModeRegisters(
+ width, height, pitch * 8 / bpp,
+ CRTC_FB(crtc)->format->cpp[0] * 8,
+ 0,
+ vbox_crtc->fb_offset % pitch / bpp * 8 + crtc->x,
+ vbox_crtc->fb_offset / pitch + crtc->y);
+
+ flags = VBVA_SCREEN_F_ACTIVE;
+ flags |= (crtc->enabled && !vbox_crtc->blanked) ?
+ 0 : VBVA_SCREEN_F_BLANK;
+ flags |= vbox_crtc->disconnected ? VBVA_SCREEN_F_DISABLED : 0;
+ VBoxHGSMIProcessDisplayInfo(vbox->guest_pool, vbox_crtc->crtc_id,
+ x_offset, y_offset,
+ crtc->x * bpp / 8 + crtc->y * pitch,
+ pitch, width, height,
+ vbox_crtc->blanked ? 0 : bpp, flags);
+}
+
+static int vbox_set_view(struct drm_crtc *crtc)
+{
+ struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
+ struct vbox_private *vbox = crtc->dev->dev_private;
+ void *p;
+
+ /*
+ * Tell the host about the view. This design originally targeted the
+ * Windows XP driver architecture and assumed that each screen would
+ * have a dedicated frame buffer with the command buffer following it,
+ * the whole being a "view". The host works out which screen a command
+ * buffer belongs to by checking whether it is in the first view, then
+ * whether it is in the second and so on. The first match wins. We
+ * cheat around this by making the first view be the managed memory
+ * plus the first command buffer, the second the same plus the second
+ * buffer and so on.
+ */
+ p = VBoxHGSMIBufferAlloc(vbox->guest_pool, sizeof(VBVAINFOVIEW),
+ HGSMI_CH_VBVA, VBVA_INFO_VIEW);
+ if (p) {
+ VBVAINFOVIEW *pInfo = (VBVAINFOVIEW *) p;
+
+ pInfo->u32ViewIndex = vbox_crtc->crtc_id;
+ pInfo->u32ViewOffset = vbox_crtc->fb_offset;
+ pInfo->u32ViewSize =
+ vbox->available_vram_size - vbox_crtc->fb_offset +
+ vbox_crtc->crtc_id * VBVA_MIN_BUFFER_SIZE;
+ pInfo->u32MaxScreenSize =
+ vbox->available_vram_size - vbox_crtc->fb_offset;
+ VBoxHGSMIBufferSubmit(vbox->guest_pool, p);
+ VBoxHGSMIBufferFree(vbox->guest_pool, p);
+ } else {
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void vbox_crtc_load_lut(struct drm_crtc *crtc)
+{
+}
+
+static void vbox_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
+ struct vbox_private *vbox = crtc->dev->dev_private;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ vbox_crtc->blanked = false;
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ vbox_crtc->blanked = true;
+ break;
+ }
+
+ mutex_lock(&vbox->hw_mutex);
+ vbox_do_modeset(crtc, &crtc->hwmode);
+ mutex_unlock(&vbox->hw_mutex);
+}
+
+static bool vbox_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+/*
+ * Try to map the layout of virtual screens to the range of the input device.
+ * Return true if we need to re-set the crtc modes due to screen offset
+ * changes.
+ */
+static bool vbox_set_up_input_mapping(struct vbox_private *vbox)
+{
+ struct drm_crtc *crtci;
+ struct drm_connector *connectori;
+ struct drm_framebuffer *fb1 = NULL;
+ bool single_framebuffer = true;
+ bool old_single_framebuffer = vbox->single_framebuffer;
+ u16 width = 0, height = 0;
+
+ /*
+ * Are we using an X.Org-style single large frame-buffer for all crtcs?
+ * If so then screen layout can be deduced from the crtc offsets.
+ * We fall back to the same assumption if this is the fbdev frame-buffer.
+ */
+ list_for_each_entry(crtci, &vbox->dev->mode_config.crtc_list, head) {
+ if (!fb1) {
+ fb1 = CRTC_FB(crtci);
+ if (to_vbox_framebuffer(fb1) == &vbox->fbdev->afb)
+ break;
+ } else if (CRTC_FB(crtci) && fb1 != CRTC_FB(crtci)) {
+ single_framebuffer = false;
+ }
+ }
+ if (single_framebuffer) {
+ list_for_each_entry(crtci, &vbox->dev->mode_config.crtc_list,
+ head) {
+ if (to_vbox_crtc(crtci)->crtc_id == 0) {
+ vbox->single_framebuffer = true;
+ vbox->input_mapping_width =
+ CRTC_FB(crtci)->width;
+ vbox->input_mapping_height =
+ CRTC_FB(crtci)->height;
+ return old_single_framebuffer !=
+ vbox->single_framebuffer;
+ }
+ }
+ }
+ /* Otherwise calculate the total span of all screens. */
+ list_for_each_entry(connectori, &vbox->dev->mode_config.connector_list,
+ head) {
+ struct vbox_connector *vbox_connector =
+ to_vbox_connector(connectori);
+ struct vbox_crtc *vbox_crtc = vbox_connector->vbox_crtc;
+
+ width = max_t(u16, width, vbox_crtc->x_hint +
+ vbox_connector->mode_hint.width);
+ height = max_t(u16, height, vbox_crtc->y_hint +
+ vbox_connector->mode_hint.height);
+ }
+
+ vbox->single_framebuffer = false;
+ vbox->input_mapping_width = width;
+ vbox->input_mapping_height = height;
+
+ return old_single_framebuffer != vbox->single_framebuffer;
+}
+
+static int vbox_crtc_do_set_base(struct drm_crtc *crtc,
+ struct drm_framebuffer *old_fb, int x, int y)
+{
+ struct vbox_private *vbox = crtc->dev->dev_private;
+ struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
+ struct drm_gem_object *obj;
+ struct vbox_framebuffer *vbox_fb;
+ struct vbox_bo *bo;
+ int ret;
+ u64 gpu_addr;
+
+ /* Unpin the previous fb. */
+ if (old_fb) {
+ vbox_fb = to_vbox_framebuffer(old_fb);
+ obj = vbox_fb->obj;
+ bo = gem_to_vbox_bo(obj);
+ ret = vbox_bo_reserve(bo, false);
+ if (ret)
+ return ret;
+
+ vbox_bo_unpin(bo);
+ vbox_bo_unreserve(bo);
+ }
+
+ vbox_fb = to_vbox_framebuffer(CRTC_FB(crtc));
+ obj = vbox_fb->obj;
+ bo = gem_to_vbox_bo(obj);
+
+ ret = vbox_bo_reserve(bo, false);
+ if (ret)
+ return ret;
+
+ ret = vbox_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
+ if (ret) {
+ vbox_bo_unreserve(bo);
+ return ret;
+ }
+
+ if (&vbox->fbdev->afb == vbox_fb)
+ vbox_fbdev_set_base(vbox, gpu_addr);
+ vbox_bo_unreserve(bo);
+
+ /* vbox_set_start_address_crt1(crtc, (u32)gpu_addr); */
+ vbox_crtc->fb_offset = gpu_addr;
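+ /*
+ * If the input mapping layout has changed, the screen offsets may have
+ * changed too, so re-set views and modes on all crtcs (see
+ * vbox_set_up_input_mapping()).
+ */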
+ if (vbox_set_up_input_mapping(vbox)) {
+ struct drm_crtc *crtci;
+
+ list_for_each_entry(crtci, &vbox->dev->mode_config.crtc_list,
+ head) {
+ vbox_set_view(crtc);
+ vbox_do_modeset(crtci, &crtci->mode);
+ }
+ }
+
+ return 0;
+}
+
+static int vbox_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ return vbox_crtc_do_set_base(crtc, old_fb, x, y);
+}
+
+static int vbox_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y, struct drm_framebuffer *old_fb)
+{
+ struct vbox_private *vbox = crtc->dev->dev_private;
+ int rc = 0;
+
+ vbox_crtc_mode_set_base(crtc, x, y, old_fb);
+
+ mutex_lock(&vbox->hw_mutex);
+ rc = vbox_set_view(crtc);
+ if (!rc)
+ vbox_do_modeset(crtc, mode);
+ VBoxHGSMIUpdateInputMapping(vbox->guest_pool, 0, 0,
+ vbox->input_mapping_width,
+ vbox->input_mapping_height);
+ mutex_unlock(&vbox->hw_mutex);
+
+ return rc;
+}
+
+static void vbox_crtc_disable(struct drm_crtc *crtc)
+{
+}
+
+static void vbox_crtc_prepare(struct drm_crtc *crtc)
+{
+}
+
+static void vbox_crtc_commit(struct drm_crtc *crtc)
+{
+}
+
+static const struct drm_crtc_helper_funcs vbox_crtc_helper_funcs = {
+ .dpms = vbox_crtc_dpms,
+ .mode_fixup = vbox_crtc_mode_fixup,
+ .mode_set = vbox_crtc_mode_set,
+ /* .mode_set_base = vbox_crtc_mode_set_base, */
+ .disable = vbox_crtc_disable,
+ .load_lut = vbox_crtc_load_lut,
+ .prepare = vbox_crtc_prepare,
+ .commit = vbox_crtc_commit,
+};
+
+static void vbox_crtc_reset(struct drm_crtc *crtc)
+{
+}
+
+static void vbox_crtc_destroy(struct drm_crtc *crtc)
+{
+ drm_crtc_cleanup(crtc);
+ kfree(crtc);
+}
+
+static const struct drm_crtc_funcs vbox_crtc_funcs = {
+ .cursor_move = vbox_cursor_move,
+ .cursor_set2 = vbox_cursor_set2,
+ .reset = vbox_crtc_reset,
+ .set_config = drm_crtc_helper_set_config,
+ /* .gamma_set = vbox_crtc_gamma_set, */
+ .destroy = vbox_crtc_destroy,
+};
+
+static struct vbox_crtc *vbox_crtc_init(struct drm_device *dev, unsigned int i)
+{
+ struct vbox_crtc *vbox_crtc;
+
+ vbox_crtc = kzalloc(sizeof(*vbox_crtc), GFP_KERNEL);
+ if (!vbox_crtc)
+ return NULL;
+
+ vbox_crtc->crtc_id = i;
+
+ drm_crtc_init(dev, &vbox_crtc->base, &vbox_crtc_funcs);
+ drm_mode_crtc_set_gamma_size(&vbox_crtc->base, 256);
+ drm_crtc_helper_add(&vbox_crtc->base, &vbox_crtc_helper_funcs);
+
+ return vbox_crtc;
+}
+
+static void vbox_encoder_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+ kfree(encoder);
+}
+
+static struct drm_encoder *vbox_best_single_encoder(struct drm_connector
+ *connector)
+{
+ int enc_id = connector->encoder_ids[0];
+
+ /* Pick the first encoder id. */
+ if (enc_id)
+ return drm_encoder_find(connector->dev, enc_id);
+
+ return NULL;
+}
+
+static const struct drm_encoder_funcs vbox_enc_funcs = {
+ .destroy = vbox_encoder_destroy,
+};
+
+static void vbox_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+}
+
+static bool vbox_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void vbox_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+}
+
+static void vbox_encoder_prepare(struct drm_encoder *encoder)
+{
+}
+
+static void vbox_encoder_commit(struct drm_encoder *encoder)
+{
+}
+
+static const struct drm_encoder_helper_funcs vbox_enc_helper_funcs = {
+ .dpms = vbox_encoder_dpms,
+ .mode_fixup = vbox_mode_fixup,
+ .prepare = vbox_encoder_prepare,
+ .commit = vbox_encoder_commit,
+ .mode_set = vbox_encoder_mode_set,
+};
+
+static struct drm_encoder *vbox_encoder_init(struct drm_device *dev,
+ unsigned int i)
+{
+ struct vbox_encoder *vbox_encoder;
+
+ vbox_encoder = kzalloc(sizeof(*vbox_encoder), GFP_KERNEL);
+ if (!vbox_encoder)
+ return NULL;
+
+ drm_encoder_init(dev, &vbox_encoder->base, &vbox_enc_funcs,
+ DRM_MODE_ENCODER_DAC, NULL);
+ drm_encoder_helper_add(&vbox_encoder->base, &vbox_enc_helper_funcs);
+
+ vbox_encoder->base.possible_crtcs = 1 << i;
+ return &vbox_encoder->base;
+}
+
+/**
+ * Generate EDID data with a mode-unique serial number for the virtual
+ * monitor to try to persuade Unity that different modes correspond to
+ * different monitors and it should not try to force the same resolution on
+ * them.
+ */
+static void vbox_set_edid(struct drm_connector *connector, int width,
+ int height)
+{
+ enum { EDID_SIZE = 128 };
+ unsigned char edid[EDID_SIZE] = {
+ 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, /* header */
+ 0x58, 0x58, /* manufacturer (VBX) */
+ 0x00, 0x00, /* product code */
+ 0x00, 0x00, 0x00, 0x00, /* serial number goes here */
+ 0x01, /* week of manufacture */
+ 0x00, /* year of manufacture */
+ 0x01, 0x03, /* EDID version */
+ 0x80, /* capabilities - digital */
+ 0x00, /* horiz. res in cm, zero for projectors */
+ 0x00, /* vert. res in cm */
+ 0x78, /* display gamma (120 == 2.2). */
+ 0xEE, /* features (standby, suspend, off, RGB, std */
+ /* colour space, preferred timing mode) */
+ 0xEE, 0x91, 0xA3, 0x54, 0x4C, 0x99, 0x26, 0x0F, 0x50, 0x54,
+ /* chromaticity for standard colour space. */
+ 0x00, 0x00, 0x00, /* no default timings */
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, /* no standard timings */
+ 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x06, 0x00, 0x02, 0x02,
+ 0x02, 0x02,
+ /* descriptor block 1 goes below */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* descriptor block 2, monitor ranges */
+ 0x00, 0x00, 0x00, 0xFD, 0x00,
+ 0x00, 0xC8, 0x00, 0xC8, 0x64, 0x00, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x20,
+ /* 0-200Hz vertical, 0-200KHz horizontal, 1000MHz pixel clock */
+ 0x20,
+ /* descriptor block 3, monitor name */
+ 0x00, 0x00, 0x00, 0xFC, 0x00,
+ 'V', 'B', 'O', 'X', ' ', 'm', 'o', 'n', 'i', 't', 'o', 'r',
+ '\n',
+ /* descriptor block 4: dummy data */
+ 0x00, 0x00, 0x00, 0x10, 0x00,
+ 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20,
+ 0x00, /* number of extensions */
+ 0x00 /* checksum goes here */
+ };
+ int clock = (width + 6) * (height + 6) * 60 / 10000;
+ unsigned int i, sum = 0;
+
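+ /*
+ * Use the mode dimensions as the serial number (bytes 12-15), patch the
+ * pixel clock and active width/height into the first detailed timing
+ * descriptor (bytes 54-61) and recompute the checksum in the final byte.
+ */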
+ edid[12] = width & 0xff;
+ edid[13] = width >> 8;
+ edid[14] = height & 0xff;
+ edid[15] = height >> 8;
+ edid[54] = clock & 0xff;
+ edid[55] = clock >> 8;
+ edid[56] = width & 0xff;
+ edid[58] = (width >> 4) & 0xf0;
+ edid[59] = height & 0xff;
+ edid[61] = (height >> 4) & 0xf0;
+ for (i = 0; i < EDID_SIZE - 1; ++i)
+ sum += edid[i];
+ edid[EDID_SIZE - 1] = (0x100 - (sum & 0xFF)) & 0xFF;
+ drm_mode_connector_update_edid_property(connector, (struct edid *)edid);
+}
+
+static int vbox_get_modes(struct drm_connector *connector)
+{
+ struct vbox_connector *vbox_connector = NULL;
+ struct drm_display_mode *mode = NULL;
+ struct vbox_private *vbox = NULL;
+ unsigned int num_modes = 0;
+ int preferred_width, preferred_height;
+
+ vbox_connector = to_vbox_connector(connector);
+ vbox = connector->dev->dev_private;
+ /*
+ * Heuristic: we do not want to tell the host that we support dynamic
+ * resizing unless we feel confident that the user space client using
+ * the video driver can handle hot-plug events. So the first time modes
+ * are queried after a "master" switch we tell the host that we do not,
+ * and immediately after we send the client a hot-plug notification as
+ * a test to see if they will respond and query again.
+ * That is also the reason why capabilities are reported to the host at
+ * this place in the code rather than elsewhere.
+ * We need to report the flags location before reporting the IRQ
+ * capability.
+ */
+ VBoxHGSMIReportFlagsLocation(vbox->guest_pool, GUEST_HEAP_OFFSET(vbox) +
+ HOST_FLAGS_OFFSET);
+ if (vbox_connector->vbox_crtc->crtc_id == 0)
+ vbox_report_caps(vbox);
+ if (!vbox->initial_mode_queried) {
+ if (vbox_connector->vbox_crtc->crtc_id == 0) {
+ vbox->initial_mode_queried = true;
+ vbox_report_hotplug(vbox);
+ }
+ return drm_add_modes_noedid(connector, 800, 600);
+ }
+ num_modes = drm_add_modes_noedid(connector, 2560, 1600);
+ preferred_width = vbox_connector->mode_hint.width ?
+ vbox_connector->mode_hint.width : 1024;
+ preferred_height = vbox_connector->mode_hint.height ?
+ vbox_connector->mode_hint.height : 768;
+ mode = drm_cvt_mode(connector->dev, preferred_width, preferred_height,
+ 60, false, false, false);
+ if (mode) {
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, mode);
+ ++num_modes;
+ }
+ vbox_set_edid(connector, preferred_width, preferred_height);
+ drm_object_property_set_value(
+ &connector->base, vbox->dev->mode_config.suggested_x_property,
+ vbox_connector->vbox_crtc->x_hint);
+ drm_object_property_set_value(
+ &connector->base, vbox->dev->mode_config.suggested_y_property,
+ vbox_connector->vbox_crtc->y_hint);
+
+ return num_modes;
+}
+
+static int vbox_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+static void vbox_connector_destroy(struct drm_connector *connector)
+{
+ struct vbox_connector *vbox_connector = NULL;
+
+ vbox_connector = to_vbox_connector(connector);
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+static enum drm_connector_status
+vbox_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct vbox_connector *vbox_connector = NULL;
+
+ (void)force;
+ vbox_connector = to_vbox_connector(connector);
+
+ return vbox_connector->mode_hint.disconnected ?
+ connector_status_disconnected : connector_status_connected;
+}
+
+static int vbox_fill_modes(struct drm_connector *connector, u32 max_x,
+ u32 max_y)
+{
+ struct vbox_connector *vbox_connector;
+ struct drm_device *dev;
+ struct drm_display_mode *mode, *iterator;
+
+ vbox_connector = to_vbox_connector(connector);
+ dev = vbox_connector->base.dev;
+ list_for_each_entry_safe(mode, iterator, &connector->modes, head) {
+ list_del(&mode->head);
+ drm_mode_destroy(dev, mode);
+ }
+
+ return drm_helper_probe_single_connector_modes(connector, max_x, max_y);
+}
+
+static const struct drm_connector_helper_funcs vbox_connector_helper_funcs = {
+ .mode_valid = vbox_mode_valid,
+ .get_modes = vbox_get_modes,
+ .best_encoder = vbox_best_single_encoder,
+};
+
+static const struct drm_connector_funcs vbox_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = vbox_connector_detect,
+ .fill_modes = vbox_fill_modes,
+ .destroy = vbox_connector_destroy,
+};
+
+static int vbox_connector_init(struct drm_device *dev,
+ struct vbox_crtc *vbox_crtc,
+ struct drm_encoder *encoder)
+{
+ struct vbox_connector *vbox_connector;
+ struct drm_connector *connector;
+
+ vbox_connector = kzalloc(sizeof(*vbox_connector), GFP_KERNEL);
+ if (!vbox_connector)
+ return -ENOMEM;
+
+ connector = &vbox_connector->base;
+ vbox_connector->vbox_crtc = vbox_crtc;
+
+ drm_connector_init(dev, connector, &vbox_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA);
+ drm_connector_helper_add(connector, &vbox_connector_helper_funcs);
+
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+
+ drm_mode_create_suggested_offset_properties(dev);
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.suggested_x_property, -1);
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.suggested_y_property, -1);
+ drm_connector_register(connector);
+
+ drm_mode_connector_attach_encoder(connector, encoder);
+
+ return 0;
+}
+
+int vbox_mode_init(struct drm_device *dev)
+{
+ struct vbox_private *vbox = dev->dev_private;
+ struct drm_encoder *encoder;
+ struct vbox_crtc *vbox_crtc;
+ unsigned int i;
+
+ /* vbox_cursor_init(dev); */
+ for (i = 0; i < vbox->num_crtcs; ++i) {
+ vbox_crtc = vbox_crtc_init(dev, i);
+ if (!vbox_crtc)
+ return -ENOMEM;
+ encoder = vbox_encoder_init(dev, i);
+ if (!encoder)
+ return -ENOMEM;
+ vbox_connector_init(dev, vbox_crtc, encoder);
+ }
+
+ return 0;
+}
+
+void vbox_mode_fini(struct drm_device *dev)
+{
+ /* vbox_cursor_fini(dev); */
+}
+
+/**
+ * Copy the ARGB image and generate the mask, which is needed in case the host
+ * does not support ARGB cursors. The mask is a 1BPP bitmap with the bit set
+ * if the corresponding alpha value in the ARGB image is greater than 0xF0.
+ */
+static void copy_cursor_image(u8 *src, u8 *dst, u32 width, u32 height,
+ size_t mask_size)
+{
+ size_t line_size = (width + 7) / 8;
+ u32 i, j;
+
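+ /* The ARGB image data is stored after the AND mask in the destination. */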
+ memcpy(dst + mask_size, src, width * height * 4);
+ for (i = 0; i < height; ++i)
+ for (j = 0; j < width; ++j)
+ if (((u32 *)src)[i * width + j] > 0xf0000000)
+ dst[i * line_size + j / 8] |= (0x80 >> (j % 8));
+}
+
+static int vbox_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
+ u32 handle, u32 width, u32 height,
+ s32 hot_x, s32 hot_y)
+{
+ struct vbox_private *vbox = crtc->dev->dev_private;
+ struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
+ struct drm_gem_object *obj;
+ struct vbox_bo *bo;
+ int ret, rc;
+ struct ttm_bo_kmap_obj uobj_map;
+ u8 *src;
+ u8 *dst = NULL;
+ u32 caps = 0;
+ size_t data_size, mask_size;
+ bool src_isiomem;
+
+ /*
+ * Re-set this regularly, as in VirtualBox 5.0.20 and earlier the
+ * information was lost on save and restore.
+ */
+ VBoxHGSMIUpdateInputMapping(vbox->guest_pool, 0, 0,
+ vbox->input_mapping_width,
+ vbox->input_mapping_height);
+ if (!handle) {
+ bool cursor_enabled = false;
+ struct drm_crtc *crtci;
+
+ /* Hide cursor. */
+ vbox_crtc->cursor_enabled = false;
+ list_for_each_entry(crtci, &vbox->dev->mode_config.crtc_list,
+ head)
+ if (to_vbox_crtc(crtci)->cursor_enabled)
+ cursor_enabled = true;
+
+ if (!cursor_enabled)
+ VBoxHGSMIUpdatePointerShape(vbox->guest_pool, 0, 0, 0,
+ 0, 0, NULL, 0);
+ return 0;
+ }
+ vbox_crtc->cursor_enabled = true;
+ if (width > VBOX_MAX_CURSOR_WIDTH || height > VBOX_MAX_CURSOR_HEIGHT ||
+ width == 0 || height == 0)
+ return -EINVAL;
+ rc = VBoxQueryConfHGSMI(vbox->guest_pool,
+ VBOX_VBVA_CONF32_CURSOR_CAPABILITIES, &caps);
+ ret = rc == VINF_SUCCESS ? 0 : rc == VERR_NO_MEMORY ? -ENOMEM : -EINVAL;
+ if (ret)
+ return ret;
+
+ if (!(caps & VBOX_VBVA_CURSOR_CAPABILITY_HARDWARE))
+ /*
+ * Return -EBUSY: -EINVAL would mean cursor_set2() is not supported
+ * and -EAGAIN would mean retry at once.
+ */
+ return -EBUSY;
+
+ obj = drm_gem_object_lookup(file_priv, handle);
+ if (obj) {
+ bo = gem_to_vbox_bo(obj);
+ ret = vbox_bo_reserve(bo, false);
+ if (!ret) {
+ /*
+ * The mask must be calculated based on the alpha
+ * channel, one bit per ARGB word, and must be 32-bit
+ * padded.
+ */
+ mask_size = ((width + 7) / 8 * height + 3) & ~3;
+ data_size = width * height * 4 + mask_size;
+ vbox->cursor_hot_x = min_t(u32, max(hot_x, 0), width);
+ vbox->cursor_hot_y = min_t(u32, max(hot_y, 0), height);
+ vbox->cursor_width = width;
+ vbox->cursor_height = height;
+ vbox->cursor_data_size = data_size;
+ dst = vbox->cursor_data;
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages,
+ &uobj_map);
+ if (!ret) {
+ src = ttm_kmap_obj_virtual(&uobj_map,
+ &src_isiomem);
+ if (!src_isiomem) {
+ u32 flags =
+ VBOX_MOUSE_POINTER_VISIBLE |
+ VBOX_MOUSE_POINTER_SHAPE |
+ VBOX_MOUSE_POINTER_ALPHA;
+ copy_cursor_image(src, dst, width,
+ height, mask_size);
+ rc = VBoxHGSMIUpdatePointerShape(
+ vbox->guest_pool, flags,
+ vbox->cursor_hot_x,
+ vbox->cursor_hot_y,
+ width, height, dst, data_size);
+ ret = rc == VINF_SUCCESS ? 0 :
+ rc == VERR_NO_MEMORY ? -ENOMEM :
+ rc == VERR_NOT_SUPPORTED ? -EBUSY :
+ -EINVAL;
+ } else {
+ DRM_ERROR("src cursor bo should be in main memory\n");
+ }
+ ttm_bo_kunmap(&uobj_map);
+ } else {
+ vbox->cursor_data_size = 0;
+ }
+ vbox_bo_unreserve(bo);
+ }
+ drm_gem_object_unreference_unlocked(obj);
+ } else {
+ DRM_ERROR("Cannot find cursor object %x for crtc\n", handle);
+ ret = -ENOENT;
+ }
+
+ return ret;
+}
+
+static int vbox_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+ struct vbox_private *vbox = crtc->dev->dev_private;
+ u32 flags = VBOX_MOUSE_POINTER_VISIBLE |
+ VBOX_MOUSE_POINTER_SHAPE | VBOX_MOUSE_POINTER_ALPHA;
+ s32 crtc_x =
+ vbox->single_framebuffer ? crtc->x : to_vbox_crtc(crtc)->x_hint;
+ s32 crtc_y =
+ vbox->single_framebuffer ? crtc->y : to_vbox_crtc(crtc)->y_hint;
+ u32 host_x, host_y;
+ u32 hot_x = 0;
+ u32 hot_y = 0;
+ int rc;
+
+ /*
+ * These are compared against unsigned values later, so bail out here
+ * rather than handling negative coordinates.
+ */
+ if (x + crtc_x < 0 || y + crtc_y < 0 || vbox->cursor_data_size == 0)
+ return 0;
+
+ rc = VBoxHGSMICursorPosition(vbox->guest_pool, true, x + crtc_x,
+ y + crtc_y, &host_x, &host_y);
+ /* Work around a bug after save and restore in 5.0.20 and earlier. */
+ if (RT_FAILURE(rc) || (host_x == 0 && host_y == 0))
+ return rc == VINF_SUCCESS ? 0
+ : rc == VERR_NO_MEMORY ? -ENOMEM : -EINVAL;
+ if (x + crtc_x < host_x)
+ hot_x = min(host_x - x - crtc_x, vbox->cursor_width);
+ if (y + crtc_y < host_y)
+ hot_y = min(host_y - y - crtc_y, vbox->cursor_height);
+ if (hot_x == vbox->cursor_hot_x && hot_y == vbox->cursor_hot_y)
+ return 0;
+ vbox->cursor_hot_x = hot_x;
+ vbox->cursor_hot_y = hot_y;
+ rc = VBoxHGSMIUpdatePointerShape(vbox->guest_pool, flags, hot_x, hot_y,
+ vbox->cursor_width,
+ vbox->cursor_height, vbox->cursor_data,
+ vbox->cursor_data_size);
+ return rc == VINF_SUCCESS ? 0 :
+ rc == VERR_NO_MEMORY ? -ENOMEM :
+ rc == VERR_NOT_SUPPORTED ? -EBUSY : -EINVAL;
+}
new file mode 100644
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2017 Oracle Corporation
+ * Copyright 2017 Canonical
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Andreas Pokorny
+ */
+
+#include "vbox_drv.h"
+
+/*
+ * Based on qxl_prime.c:
+ * Empty Implementations as there should not be any other driver for a virtual
+ * device that might share buffers with vboxvideo
+ */
+
+int vbox_gem_prime_pin(struct drm_gem_object *obj)
+{
+ WARN_ONCE(1, "not implemented");
+ return -ENOSYS;
+}
+
+void vbox_gem_prime_unpin(struct drm_gem_object *obj)
+{
+ WARN_ONCE(1, "not implemented");
+}
+
+struct sg_table *vbox_gem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+ WARN_ONCE(1, "not implemented");
+ return ERR_PTR(-ENOSYS);
+}
+
+struct drm_gem_object *vbox_gem_prime_import_sg_table(
+ struct drm_device *dev, struct dma_buf_attachment *attach,
+ struct sg_table *table)
+{
+ WARN_ONCE(1, "not implemented");
+ return ERR_PTR(-ENOSYS);
+}
+
+void *vbox_gem_prime_vmap(struct drm_gem_object *obj)
+{
+ WARN_ONCE(1, "not implemented");
+ return ERR_PTR(-ENOSYS);
+}
+
+void vbox_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+ WARN_ONCE(1, "not implemented");
+}
+
+int vbox_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *area)
+{
+ WARN_ONCE(1, "not implemented");
+ return -ENOSYS;
+}
new file mode 100644
@@ -0,0 +1,477 @@
+/*
+ * Copyright (C) 2013-2017 Oracle Corporation
+ * This file is based on ast_ttm.c
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ * Authors: Dave Airlie <airlied@redhat.com>
+ * Michael Thayer <michael.thayer@oracle.com>
+ */
+#include "vbox_drv.h"
+#include <ttm/ttm_page_alloc.h>
+
+static inline struct vbox_private *vbox_bdev(struct ttm_bo_device *bd)
+{
+ return container_of(bd, struct vbox_private, ttm.bdev);
+}
+
+static int vbox_ttm_mem_global_init(struct drm_global_reference *ref)
+{
+ return ttm_mem_global_init(ref->object);
+}
+
+static void vbox_ttm_mem_global_release(struct drm_global_reference *ref)
+{
+ ttm_mem_global_release(ref->object);
+}
+
+/**
+ * Adds the vbox memory manager object/structures to the global memory manager.
+ */
+static int vbox_ttm_global_init(struct vbox_private *vbox)
+{
+ struct drm_global_reference *global_ref;
+ int r;
+
+ global_ref = &vbox->ttm.mem_global_ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_MEM;
+ global_ref->size = sizeof(struct ttm_mem_global);
+ global_ref->init = &vbox_ttm_mem_global_init;
+ global_ref->release = &vbox_ttm_mem_global_release;
+ r = drm_global_item_ref(global_ref);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up TTM memory accounting subsystem.\n");
+ return r;
+ }
+
+ vbox->ttm.bo_global_ref.mem_glob = vbox->ttm.mem_global_ref.object;
+ global_ref = &vbox->ttm.bo_global_ref.ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_BO;
+ global_ref->size = sizeof(struct ttm_bo_global);
+ global_ref->init = &ttm_bo_global_init;
+ global_ref->release = &ttm_bo_global_release;
+
+ r = drm_global_item_ref(global_ref);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+ drm_global_item_unref(&vbox->ttm.mem_global_ref);
+ return r;
+ }
+
+ return 0;
+}
+
+/**
+ * Removes the vbox memory manager object from the global memory manager.
+ */
+static void vbox_ttm_global_release(struct vbox_private *vbox)
+{
+ if (!vbox->ttm.mem_global_ref.release)
+ return;
+
+ drm_global_item_unref(&vbox->ttm.bo_global_ref.ref);
+ drm_global_item_unref(&vbox->ttm.mem_global_ref);
+ vbox->ttm.mem_global_ref.release = NULL;
+}
+
+static void vbox_bo_ttm_destroy(struct ttm_buffer_object *tbo)
+{
+ struct vbox_bo *bo;
+
+ bo = container_of(tbo, struct vbox_bo, bo);
+
+ drm_gem_object_release(&bo->gem);
+ kfree(bo);
+}
+
+static bool vbox_ttm_bo_is_vbox_bo(struct ttm_buffer_object *bo)
+{
+ if (bo->destroy == &vbox_bo_ttm_destroy)
+ return true;
+
+ return false;
+}
+
+static int
+vbox_bo_init_mem_type(struct ttm_bo_device *bdev, u32 type,
+ struct ttm_mem_type_manager *man)
+{
+ switch (type) {
+ case TTM_PL_SYSTEM:
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ break;
+ case TTM_PL_VRAM:
+ man->func = &ttm_bo_manager_func;
+ man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
+ man->default_caching = TTM_PL_FLAG_WC;
+ break;
+ default:
+ DRM_ERROR("Unsupported memory type %u\n", (unsigned int)type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+vbox_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
+{
+ struct vbox_bo *vboxbo = vbox_bo(bo);
+
+ if (!vbox_ttm_bo_is_vbox_bo(bo))
+ return;
+
+ vbox_ttm_placement(vboxbo, TTM_PL_FLAG_SYSTEM);
+ *pl = vboxbo->placement;
+}
+
+static int vbox_bo_verify_access(struct ttm_buffer_object *bo,
+ struct file *filp)
+{
+ return 0;
+}
+
+static int vbox_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ struct vbox_private *vbox = vbox_bdev(bdev);
+
+ mem->bus.addr = NULL;
+ mem->bus.offset = 0;
+ mem->bus.size = mem->num_pages << PAGE_SHIFT;
+ mem->bus.base = 0;
+ mem->bus.is_iomem = false;
+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+ return -EINVAL;
+ switch (mem->mem_type) {
+ case TTM_PL_SYSTEM:
+ /* system memory */
+ return 0;
+ case TTM_PL_VRAM:
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ mem->bus.base = pci_resource_start(vbox->dev->pdev, 0);
+ mem->bus.is_iomem = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void vbox_ttm_io_mem_free(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{
+}
+
+static int vbox_bo_move(struct ttm_buffer_object *bo,
+ bool evict, bool interruptible,
+ bool no_wait_gpu, struct ttm_mem_reg *new_mem)
+{
+ return ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, new_mem);
+}
+
+static void vbox_ttm_backend_destroy(struct ttm_tt *tt)
+{
+ ttm_tt_fini(tt);
+ kfree(tt);
+}
+
+static struct ttm_backend_func vbox_tt_backend_func = {
+ .destroy = &vbox_ttm_backend_destroy,
+};
+
+static struct ttm_tt *vbox_ttm_tt_create(struct ttm_bo_device *bdev,
+ unsigned long size,
+ u32 page_flags,
+ struct page *dummy_read_page)
+{
+ struct ttm_tt *tt;
+
+ tt = kzalloc(sizeof(*tt), GFP_KERNEL);
+ if (!tt)
+ return NULL;
+
+ tt->func = &vbox_tt_backend_func;
+ if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
+ kfree(tt);
+ return NULL;
+ }
+
+ return tt;
+}
+
+static int vbox_ttm_tt_populate(struct ttm_tt *ttm)
+{
+ return ttm_pool_populate(ttm);
+}
+
+static void vbox_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+ ttm_pool_unpopulate(ttm);
+}
+
+struct ttm_bo_driver vbox_bo_driver = {
+ .ttm_tt_create = vbox_ttm_tt_create,
+ .ttm_tt_populate = vbox_ttm_tt_populate,
+ .ttm_tt_unpopulate = vbox_ttm_tt_unpopulate,
+ .init_mem_type = vbox_bo_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
+ .evict_flags = vbox_bo_evict_flags,
+ .move = vbox_bo_move,
+ .verify_access = vbox_bo_verify_access,
+ .io_mem_reserve = &vbox_ttm_io_mem_reserve,
+ .io_mem_free = &vbox_ttm_io_mem_free,
+ .io_mem_pfn = ttm_bo_default_io_mem_pfn,
+};
+
+int vbox_mm_init(struct vbox_private *vbox)
+{
+ int ret;
+ struct drm_device *dev = vbox->dev;
+ struct ttm_bo_device *bdev = &vbox->ttm.bdev;
+
+ ret = vbox_ttm_global_init(vbox);
+ if (ret)
+ return ret;
+
+ ret = ttm_bo_device_init(&vbox->ttm.bdev,
+ vbox->ttm.bo_global_ref.ref.object,
+ &vbox_bo_driver,
+ dev->anon_inode->i_mapping,
+ DRM_FILE_PAGE_OFFSET, true);
+ if (ret) {
+ DRM_ERROR("Error initialising bo driver; %d\n", ret);
+ return ret;
+ }
+
+ ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
+ vbox->available_vram_size >> PAGE_SHIFT);
+ if (ret) {
+ DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
+ return ret;
+ }
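+ /* Request write-combining caching for the whole of VRAM (the PCI BAR). */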
+#ifdef DRM_MTRR_WC
+ vbox->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0),
+ DRM_MTRR_WC);
+#else
+ vbox->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
+#endif
+
+ vbox->ttm.mm_initialised = true;
+
+ return 0;
+}
+
+void vbox_mm_fini(struct vbox_private *vbox)
+{
+#ifdef DRM_MTRR_WC
+ struct drm_device *dev = vbox->dev;
+#endif
+ if (!vbox->ttm.mm_initialised)
+ return;
+ ttm_bo_device_release(&vbox->ttm.bdev);
+
+ vbox_ttm_global_release(vbox);
+
+#ifdef DRM_MTRR_WC
+ drm_mtrr_del(vbox->fb_mtrr,
+ pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0), DRM_MTRR_WC);
+#else
+ arch_phys_wc_del(vbox->fb_mtrr);
+#endif
+}
+
+void vbox_ttm_placement(struct vbox_bo *bo, int domain)
+{
+ unsigned int i;
+ u32 c = 0;
+
+ bo->placement.placement = bo->placements;
+ bo->placement.busy_placement = bo->placements;
+
+ if (domain & TTM_PL_FLAG_VRAM)
+ bo->placements[c++].flags =
+ TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
+ if (domain & TTM_PL_FLAG_SYSTEM)
+ bo->placements[c++].flags =
+ TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ if (!c)
+ bo->placements[c++].flags =
+ TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+
+ bo->placement.num_placement = c;
+ bo->placement.num_busy_placement = c;
+
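+ /* A zero fpfn/lpfn places no restriction on the allowed page range. */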
+ for (i = 0; i < c; ++i) {
+ bo->placements[i].fpfn = 0;
+ bo->placements[i].lpfn = 0;
+ }
+}
+
+int vbox_bo_create(struct drm_device *dev, int size, int align,
+ u32 flags, struct vbox_bo **pvboxbo)
+{
+ struct vbox_private *vbox = dev->dev_private;
+ struct vbox_bo *vboxbo;
+ size_t acc_size;
+ int ret;
+
+ vboxbo = kzalloc(sizeof(*vboxbo), GFP_KERNEL);
+ if (!vboxbo)
+ return -ENOMEM;
+
+ ret = drm_gem_object_init(dev, &vboxbo->gem, size);
+ if (ret) {
+ kfree(vboxbo);
+ return ret;
+ }
+
+ vboxbo->bo.bdev = &vbox->ttm.bdev;
+
+ vbox_ttm_placement(vboxbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
+
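+ /* acc_size is what TTM charges against its memory accounting for this BO. */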
+ acc_size = ttm_bo_dma_acc_size(&vbox->ttm.bdev, size,
+ sizeof(struct vbox_bo));
+
+ ret = ttm_bo_init(&vbox->ttm.bdev, &vboxbo->bo, size,
+ ttm_bo_type_device, &vboxbo->placement,
+ align >> PAGE_SHIFT, false, NULL, acc_size,
+ NULL, NULL, vbox_bo_ttm_destroy);
+ if (ret)
+ return ret;
+
+ *pvboxbo = vboxbo;
+
+ return 0;
+}
+
+static inline u64 vbox_bo_gpu_offset(struct vbox_bo *bo)
+{
+ return bo->bo.offset;
+}
+
+int vbox_bo_pin(struct vbox_bo *bo, u32 pl_flag, u64 *gpu_addr)
+{
+ int i, ret;
+
+ if (bo->pin_count) {
+ bo->pin_count++;
+ if (gpu_addr)
+ *gpu_addr = vbox_bo_gpu_offset(bo);
+
+ return 0;
+ }
+
+ vbox_ttm_placement(bo, pl_flag);
+
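+ /* Mark the buffer non-evictable while it is pinned. */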
+ for (i = 0; i < bo->placement.num_placement; i++)
+ bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
+
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+ if (ret)
+ return ret;
+
+ bo->pin_count = 1;
+
+ if (gpu_addr)
+ *gpu_addr = vbox_bo_gpu_offset(bo);
+
+ return 0;
+}
+
+int vbox_bo_unpin(struct vbox_bo *bo)
+{
+ int i, ret;
+
+ if (!bo->pin_count) {
+ DRM_ERROR("unpin bad %p\n", bo);
+ return 0;
+ }
+ bo->pin_count--;
+ if (bo->pin_count)
+ return 0;
+
+ for (i = 0; i < bo->placement.num_placement; i++)
+ bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
+
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/*
+ * Move a vbox-owned buffer object to system memory if no one else has it
+ * pinned. The caller must have pinned it previously, and this call will
+ * release the caller's pin.
+ */
+int vbox_bo_push_sysram(struct vbox_bo *bo)
+{
+ int i, ret;
+
+ if (!bo->pin_count) {
+ DRM_ERROR("unpin bad %p\n", bo);
+ return 0;
+ }
+ bo->pin_count--;
+ if (bo->pin_count)
+ return 0;
+
+ if (bo->kmap.virtual)
+ ttm_bo_kunmap(&bo->kmap);
+
+ vbox_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
+
+ for (i = 0; i < bo->placement.num_placement; i++)
+ bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
+
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+ if (ret) {
+ DRM_ERROR("pushing to VRAM failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+int vbox_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *file_priv;
+ struct vbox_private *vbox;
+
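+ /* mmap offsets below DRM_FILE_PAGE_OFFSET do not belong to TTM objects. */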
+ if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
+ return -EINVAL;
+
+ file_priv = filp->private_data;
+ vbox = file_priv->minor->dev->dev_private;
+
+ return ttm_bo_mmap(filp, vma, &vbox->ttm.bdev);
+}