@@ -15,3 +15,6 @@ config DRM_AMDGPU_USERPTR
help
This option selects CONFIG_MMU_NOTIFIER if it isn't already
selected to enabled full userptr support.
+
+source "drivers/gpu/drm/amd/dal/Kconfig"
+
@@ -3,13 +3,19 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
FULL_AMD_PATH=$(src)/..
+DAL_FOLDER_NAME=dal
+FULL_AMD_DAL_PATH = $(FULL_AMD_PATH)/$(DAL_FOLDER_NAME)
ccflags-y := -Iinclude/drm -I$(FULL_AMD_PATH)/include/asic_reg \
-I$(FULL_AMD_PATH)/include \
-I$(FULL_AMD_PATH)/amdgpu \
-I$(FULL_AMD_PATH)/scheduler \
-I$(FULL_AMD_PATH)/powerplay/inc \
- -I$(FULL_AMD_PATH)/acp/include
+ -I$(FULL_AMD_PATH)/acp/include \
+ -I$(FULL_AMD_DAL_PATH) \
+ -I$(FULL_AMD_DAL_PATH)/include \
+ -I$(FULL_AMD_DAL_PATH)/dc \
+ -I$(FULL_AMD_DAL_PATH)/amdgpu_dm
amdgpu-y := amdgpu_drv.o
@@ -118,6 +124,15 @@ amdgpu-y += $(AMD_POWERPLAY_FILES)
endif
+ifneq ($(CONFIG_DRM_AMD_DAL),)
+
+RELATIVE_AMD_DAL_PATH = ../$(DAL_FOLDER_NAME)
+include $(FULL_AMD_DAL_PATH)/Makefile
+
+amdgpu-y += $(AMD_DAL_FILES)
+
+endif
+
obj-$(CONFIG_DRM_AMDGPU)+= amdgpu.o
CFLAGS_amdgpu_trace_points.o := -I$(src)
@@ -54,6 +54,7 @@
#include "amdgpu_gds.h"
#include "amd_powerplay.h"
#include "amdgpu_acp.h"
+#include "amdgpu_dm.h"
#include "gpu_scheduler.h"
@@ -85,6 +86,7 @@ extern int amdgpu_vm_debug;
extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
extern int amdgpu_powerplay;
+extern int amdgpu_dal;
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
@@ -2017,6 +2019,7 @@ struct amdgpu_device {
/* display */
struct amdgpu_mode_info mode_info;
+ /* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */
struct work_struct hotplug_work;
struct amdgpu_irq_src crtc_irq;
struct amdgpu_irq_src pageflip_irq;
@@ -2064,6 +2067,9 @@ struct amdgpu_device {
/* GDS */
struct amdgpu_gds gds;
+ /* display related functionality */
+ struct amdgpu_display_manager dm;
+
const struct amdgpu_ip_block_version *ip_blocks;
int num_ip_blocks;
struct amdgpu_ip_block_status *ip_block_status;
@@ -2100,7 +2106,7 @@ void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v);
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index);
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
-
+bool amdgpu_device_has_dal_support(struct amdgpu_device *adev);
/*
* Cast helper
*/
@@ -2345,6 +2351,8 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a))
+#define amdgpu_has_dal_support(adev) (amdgpu_dal && amdgpu_device_has_dal_support(adev))
+
/* Common functions */
int amdgpu_gpu_reset(struct amdgpu_device *adev);
void amdgpu_pci_config_reset(struct amdgpu_device *adev);
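
For reference, the two new symbols above combine into a single gate that the rest of the patch tests; a worked summary of what amdgpu_has_dal_support() evaluates to (an illustration only, not part of the patch):

/*
 * amdgpu_has_dal_support(adev) is true only when both conditions hold:
 *
 *   amdgpu_dal != 0                      - module parameter, default 1
 *                                          (amdgpu.dal=0 forces the legacy
 *                                          DCE code paths)
 *   amdgpu_device_has_dal_support(adev)  - ASIC is Carrizo (DCE 11.0) or
 *                                          Tonga/Fiji (DCE 10.x) *and* the
 *                                          matching CONFIG_DRM_AMD_DAL_DCE*
 *                                          option is built in
 *
 * Examples:
 *   Carrizo, CONFIG_DRM_AMD_DAL_DCE11_0=y, dal=1  -> true  (DM drives display)
 *   Carrizo booted with amdgpu.dal=0              -> false (dce_v11 path)
 *   Tonga without CONFIG_DRM_AMD_DAL_DCE10_0      -> false (dce_v10 path)
 */
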
@@ -1369,6 +1369,32 @@ static int amdgpu_resume(struct amdgpu_device *adev)
return 0;
}
+
+/**
+ * amdgpu_device_has_dal_support - check if dal is supported
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Returns true for supported, false for not supported
+ */
+bool amdgpu_device_has_dal_support(struct amdgpu_device *adev)
+{
+	switch (adev->asic_type) {
+#if defined(CONFIG_DRM_AMD_DAL) && defined(CONFIG_DRM_AMD_DAL_DCE11_0)
+ case CHIP_CARRIZO:
+ return true;
+#endif
+#if defined(CONFIG_DRM_AMD_DAL) && defined(CONFIG_DRM_AMD_DAL_DCE10_0)
+ case CHIP_TONGA:
+ case CHIP_FIJI:
+ return true;
+#endif
+ default:
+ return false;
+ }
+}
+
+
/**
* amdgpu_device_init - initialize the driver
*
@@ -1522,7 +1548,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
return r;
}
/* init i2c buses */
- amdgpu_atombios_i2c_init(adev);
+ if (!amdgpu_has_dal_support(adev))
+ amdgpu_atombios_i2c_init(adev);
/* Fence driver */
r = amdgpu_fence_driver_init(adev);
@@ -1628,7 +1655,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
adev->ip_block_status = NULL;
adev->accel_working = false;
/* free i2c buses */
- amdgpu_i2c_fini(adev);
+ if (!amdgpu_has_dal_support(adev))
+ amdgpu_i2c_fini(adev);
amdgpu_atombios_fini(adev);
kfree(adev->bios);
adev->bios = NULL;
@@ -1676,12 +1704,14 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
drm_kms_helper_poll_disable(dev);
- /* turn off display hw */
- drm_modeset_lock_all(dev);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ if (!amdgpu_has_dal_support(adev)) {
+ /* turn off display hw */
+ drm_modeset_lock_all(dev);
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ }
+ drm_modeset_unlock_all(dev);
}
- drm_modeset_unlock_all(dev);
/* unpin the front buffers and cursors */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -1773,6 +1803,9 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
r = amdgpu_resume(adev);
+ if (r)
+ DRM_ERROR("amdgpu_resume failed (%d).\n", r);
+
amdgpu_fence_driver_resume(adev);
r = amdgpu_ib_ring_tests(adev);
@@ -1803,17 +1836,25 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
/* blat the mode back in */
if (fbcon) {
- drm_helper_resume_force_mode(dev);
- /* turn on display hw */
- drm_modeset_lock_all(dev);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+ if (!amdgpu_has_dal_support(adev)) {
+ /* pre DCE11 */
+ drm_helper_resume_force_mode(dev);
+
+ /* turn on display hw */
+ drm_modeset_lock_all(dev);
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+ }
+ drm_modeset_unlock_all(dev);
}
- drm_modeset_unlock_all(dev);
}
drm_kms_helper_poll_enable(dev);
- drm_helper_hpd_irq_event(dev);
+
+ if (!amdgpu_has_dal_support(adev))
+ drm_helper_hpd_irq_event(dev);
+ else
+ drm_kms_helper_hotplug_event(dev);
if (fbcon) {
amdgpu_fbdev_set_suspend(adev, 0);
@@ -80,6 +80,7 @@ int amdgpu_exp_hw_support = 0;
int amdgpu_sched_jobs = 32;
int amdgpu_sched_hw_submission = 2;
int amdgpu_powerplay = -1;
+int amdgpu_dal = 1;
MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -158,6 +159,9 @@ MODULE_PARM_DESC(powerplay, "Powerplay component (1 = enable, 0 = disable, -1 =
module_param_named(powerplay, amdgpu_powerplay, int, 0444);
#endif
+MODULE_PARM_DESC(dal, "DAL display driver (1 = enable (default), 0 = disable)");
+module_param_named(dal, amdgpu_dal, int, 0444);
+
static struct pci_device_id pciidlist[] = {
#ifdef CONFIG_DRM_AMDGPU_CIK
/* Kaveri */
@@ -42,11 +42,6 @@
this contains a helper + a amdgpu fb
the helper contains a pointer to amdgpu framebuffer baseclass.
*/
-struct amdgpu_fbdev {
- struct drm_fb_helper helper;
- struct amdgpu_framebuffer rfb;
- struct amdgpu_device *adev;
-};
static struct fb_ops amdgpufb_ops = {
.owner = THIS_MODULE,
@@ -36,6 +36,10 @@
#include <linux/pm_runtime.h>
+#ifdef CONFIG_DRM_AMD_DAL
+#include "amdgpu_dm_irq.h"
+#endif
+
#define AMDGPU_WAIT_IDLE_TIMEOUT 200
/*
@@ -215,10 +219,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
int r = 0;
spin_lock_init(&adev->irq.lock);
- r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc);
- if (r) {
- return r;
- }
+
/* enable msi */
adev->irq.msi_enabled = false;
@@ -230,7 +231,16 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
}
}
- INIT_WORK(&adev->hotplug_work, amdgpu_hotplug_work_func);
+ if (!amdgpu_has_dal_support(adev)) {
+ r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc);
+ if (r)
+ return r;
+
+ /* pre DCE11 */
+ INIT_WORK(&adev->hotplug_work,
+ amdgpu_hotplug_work_func);
+ }
+
INIT_WORK(&adev->reset_work, amdgpu_irq_reset_work_func);
adev->irq.installed = true;
@@ -35,11 +35,13 @@
#include <drm/drm_dp_helper.h>
#include <drm/drm_fixed.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_fb_helper.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
+#include <drm/drm_dp_mst_helper.h>
+
struct amdgpu_bo;
struct amdgpu_device;
struct amdgpu_encoder;
@@ -305,6 +307,18 @@ struct amdgpu_display_funcs {
struct amdgpu_mode_mc_save *save);
};
+struct amdgpu_framebuffer {
+ struct drm_framebuffer base;
+ struct drm_gem_object *obj;
+};
+
+struct amdgpu_fbdev {
+ struct drm_fb_helper helper;
+ struct amdgpu_framebuffer rfb;
+ struct list_head fbdev_list;
+ struct amdgpu_device *adev;
+};
+
struct amdgpu_mode_info {
struct atom_context *atom_context;
struct card_info *atom_card_info;
@@ -410,6 +424,9 @@ struct amdgpu_crtc {
u32 wm_high;
u32 lb_vblank_lead_lines;
struct drm_display_mode hw_mode;
+
+	/* After Set Mode, target will be non-NULL */
+ struct dc_target *target;
};
struct amdgpu_encoder_atom_dig {
@@ -499,6 +516,13 @@ enum amdgpu_connector_dither {
AMDGPU_FMT_DITHER_ENABLE = 1,
};
+struct amdgpu_dm_dp_aux {
+ struct drm_dp_aux aux;
+ uint32_t link_index;
+};
+
+#define TO_DM_AUX(x) container_of((x), struct amdgpu_dm_dp_aux, aux)
+
struct amdgpu_connector {
struct drm_connector base;
uint32_t connector_id;
@@ -510,6 +534,13 @@ struct amdgpu_connector {
/* we need to mind the EDID between detect
and get modes due to analog/digital/tvencoder */
struct edid *edid;
+ /* number of modes generated from EDID at 'dc_sink' */
+ int num_modes;
+ /* The 'old' sink - before an HPD.
+ * The 'current' sink is in dc_link->sink. */
+ const struct dc_sink *dc_sink;
+ const struct dc_link *dc_link;
+ const struct dc_target *target;
void *con_priv;
bool dac_load_detect;
bool detected_by_load; /* if the connection status was determined by load */
@@ -520,11 +551,26 @@ struct amdgpu_connector {
enum amdgpu_connector_audio audio;
enum amdgpu_connector_dither dither;
unsigned pixelclock_for_modeset;
+
+ struct drm_dp_mst_topology_mgr mst_mgr;
+ struct amdgpu_dm_dp_aux dm_dp_aux;
+ struct drm_dp_mst_port *port;
+ struct amdgpu_connector *mst_port;
+ bool is_mst_connector;
+ struct amdgpu_encoder *mst_encoder;
+ struct semaphore mst_sem;
};
-struct amdgpu_framebuffer {
- struct drm_framebuffer base;
- struct drm_gem_object *obj;
+/* TODO: start using this struct and remove the same fields from the base one */
+struct amdgpu_mst_connector {
+ struct amdgpu_connector base;
+
+ struct drm_dp_mst_topology_mgr mst_mgr;
+ struct amdgpu_dm_dp_aux dm_dp_aux;
+ struct drm_dp_mst_port *port;
+ struct amdgpu_connector *mst_port;
+ bool is_mst_connector;
+ struct amdgpu_encoder *mst_encoder;
};
#define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \
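
The TO_DM_AUX() container_of() helper added above is how the DM side gets back to its wrapper from a bare struct drm_dp_aux; a minimal sketch of an AUX transfer hook using it (the function name and body are hypothetical, shown only to illustrate the pattern — the real hook lives in amdgpu_dm):

/* Hypothetical sketch, not part of this patch: recover the amdgpu_dm_dp_aux
 * wrapper from the embedded drm_dp_aux passed to a DP AUX transfer hook. */
static ssize_t dm_dp_aux_transfer_sketch(struct drm_dp_aux *aux,
					 struct drm_dp_aux_msg *msg)
{
	struct amdgpu_dm_dp_aux *dm_aux = TO_DM_AUX(aux);

	/* dm_aux->link_index identifies the DC link behind this connector;
	 * a real implementation would hand 'msg' to DC here. */
	pr_debug("AUX transfer on DC link %u, %zu bytes\n",
		 dm_aux->link_index, msg->size);
	return msg->size;
}
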
@@ -77,6 +77,7 @@
#if defined(CONFIG_DRM_AMD_ACP)
#include "amdgpu_acp.h"
#endif
+#include "amdgpu_dm.h"
/*
* Indirect registers accessor
@@ -984,6 +985,225 @@ static const struct amdgpu_ip_block_version cz_ip_blocks[] =
#endif
};
+/*
+ * This is temporary. After we've gone through full testing with
+ * DAL, we want to remove dce_v11.
+ */
+#if defined(CONFIG_DRM_AMD_DAL)
+static const struct amdgpu_ip_block_version cz_ip_blocks_dal[] =
+{
+ /* ORDER MATTERS! */
+ {
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vi_common_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gmc_v8_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cz_ih_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &amdgpu_pp_ip_funcs
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 11,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &amdgpu_dm_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gfx_v8_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &sdma_v3_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 6,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &uvd_v6_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vce_v3_0_ip_funcs,
+ },
+#if defined(CONFIG_DRM_AMD_ACP)
+ {
+ .type = AMD_IP_BLOCK_TYPE_ACP,
+ .major = 2,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &acp_ip_funcs,
+ },
+#endif
+};
+
+static const struct amdgpu_ip_block_version tonga_ip_blocks_dal[] =
+{
+ /* ORDER MATTERS! */
+ {
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vi_common_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gmc_v8_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &tonga_ih_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 7,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &amdgpu_pp_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 10,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &amdgpu_dm_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gfx_v8_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &sdma_v3_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 5,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &uvd_v5_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vce_v3_0_ip_funcs,
+ },
+};
+
+static const struct amdgpu_ip_block_version fiji_ip_blocks_dal[] =
+{
+ /* ORDER MATTERS! */
+ {
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vi_common_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 8,
+ .minor = 5,
+ .rev = 0,
+ .funcs = &gmc_v8_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &tonga_ih_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 7,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &amdgpu_pp_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 10,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &amdgpu_dm_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gfx_v8_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &sdma_v3_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 6,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &uvd_v6_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vce_v3_0_ip_funcs,
+ },
+};
+#endif
+
int vi_set_ip_blocks(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
@@ -992,17 +1212,47 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks);
break;
case CHIP_FIJI:
+#if defined(CONFIG_DRM_AMD_DAL)
+ if (amdgpu_dal && amdgpu_device_has_dal_support(adev)) {
+ adev->ip_blocks = fiji_ip_blocks_dal;
+ adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks_dal);
+ } else {
+ adev->ip_blocks = fiji_ip_blocks;
+ adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks);
+ }
+#else
adev->ip_blocks = fiji_ip_blocks;
adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks);
+#endif
break;
case CHIP_TONGA:
+#if defined(CONFIG_DRM_AMD_DAL)
+ if (amdgpu_dal && amdgpu_device_has_dal_support(adev)) {
+ adev->ip_blocks = tonga_ip_blocks_dal;
+ adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks_dal);
+ } else {
+ adev->ip_blocks = tonga_ip_blocks;
+ adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks);
+ }
+#else
adev->ip_blocks = tonga_ip_blocks;
adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks);
+#endif
break;
case CHIP_CARRIZO:
case CHIP_STONEY:
+#if defined(CONFIG_DRM_AMD_DAL)
+ if (amdgpu_dal && amdgpu_device_has_dal_support(adev)) {
+ adev->ip_blocks = cz_ip_blocks_dal;
+ adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks_dal);
+ } else {
+ adev->ip_blocks = cz_ip_blocks;
+ adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks);
+ }
+#else
adev->ip_blocks = cz_ip_blocks;
adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks);
+#endif
break;
default:
/* FIXME: not supported yet */