From dda5b59f3adc61900e9fa5846e844be406343b86 Mon Sep 17 00:00:00 2001
From: Alex Deucher <alexdeucher@gmail.com>
Date: Wed, 24 Mar 2010 13:36:43 -0400
Subject: [PATCH] drm/radeon/kms/evergreen: setup and enable the CP

The command processor (CP) fetches command buffers and feeds them to
the GPU. This patch requires the evergreen-family me and pfp ucode
files.

Signed-off-by: Alex Deucher <alexdeucher@gmail.com>
---
 drivers/gpu/drm/radeon/evergreen.c   |  117 ++++++++++++++++++++++++++++------
 drivers/gpu/drm/radeon/evergreend.h  |    2 +
 drivers/gpu/drm/radeon/r600.c        |   47 ++++++++++++--
 drivers/gpu/drm/radeon/radeon.h      |    4 +
 drivers/gpu/drm/radeon/radeon_asic.c |    6 +-
 drivers/gpu/drm/radeon/rv770.c       |   10 ++-
 6 files changed, 154 insertions(+), 32 deletions(-)
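
Note (below the cut, not intended for the commit message): the new ucode
images follow the existing "radeon/<CHIP>_pfp.bin" / "radeon/<CHIP>_me.bin"
naming. As a minimal sketch of the request_firmware() pattern that
r600_init_microcode() uses in this patch -- the helper name and the dev
pointer are placeholders, and error handling is trimmed to the size check:

#include <linux/firmware.h>
#include "radeon.h"	/* struct radeon_device */

#define EVERGREEN_PFP_UCODE_SIZE 1120	/* dwords, as defined in this patch */

/* Sketch only: fetch one evergreen PFP image and size-check it,
 * mirroring what r600_init_microcode() does per chip family.
 */
static int example_load_evergreen_pfp(struct radeon_device *rdev,
				      struct device *dev)
{
	const struct firmware *fw;
	char fw_name[30];
	int err;

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", "CEDAR");
	err = request_firmware(&fw, fw_name, dev);
	if (err)
		return err;
	if (fw->size != EVERGREEN_PFP_UCODE_SIZE * 4) {
		release_firmware(fw);
		return -EINVAL;
	}
	rdev->pfp_fw = fw;
	return 0;
}
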
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -32,6 +32,9 @@
#include "avivod.h"
#include "evergreen_reg.h"
+#define EVERGREEN_PFP_UCODE_SIZE 1120
+#define EVERGREEN_PM4_UCODE_SIZE 1376
+
static void evergreen_gpu_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
@@ -418,23 +421,91 @@ static void evergreen_mc_program(struct radeon_device *rdev)
rv515_vga_render_disable(rdev);
}
-#if 0
/*
* CP.
*/
-static void evergreen_cp_stop(struct radeon_device *rdev)
-{
- /* XXX */
-}
-
static int evergreen_cp_load_microcode(struct radeon_device *rdev)
{
- /* XXX */
+ const __be32 *fw_data;
+ int i;
+
+ if (!rdev->me_fw || !rdev->pfp_fw)
+ return -EINVAL;
+ r700_cp_stop(rdev);
+ WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0));
+
+ fw_data = (const __be32 *)rdev->pfp_fw->data;
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+ for (i = 0; i < EVERGREEN_PFP_UCODE_SIZE; i++)
+ WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+
+ fw_data = (const __be32 *)rdev->me_fw->data;
+ WREG32(CP_ME_RAM_WADDR, 0);
+ for (i = 0; i < EVERGREEN_PM4_UCODE_SIZE; i++)
+ WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
+
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+ WREG32(CP_ME_RAM_WADDR, 0);
+ WREG32(CP_ME_RAM_RADDR, 0);
return 0;
}
+
+int evergreen_cp_resume(struct radeon_device *rdev)
+{
+ u32 tmp;
+ u32 rb_bufsz;
+ int r;
+
+ /* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
+ WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
+ SOFT_RESET_PA |
+ SOFT_RESET_SH |
+ SOFT_RESET_VGT |
+ SOFT_RESET_SX));
+ RREG32(GRBM_SOFT_RESET);
+ mdelay(15);
+ WREG32(GRBM_SOFT_RESET, 0);
+ RREG32(GRBM_SOFT_RESET);
+
+ /* Set ring buffer size */
+ rb_bufsz = drm_order(rdev->cp.ring_size / 8);
+ tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+#ifdef __BIG_ENDIAN
+ tmp |= BUF_SWAP_32BIT;
#endif
+ WREG32(CP_RB_CNTL, tmp);
+ WREG32(CP_SEM_WAIT_TIMER, 0x4);
+
+ /* Set the write pointer delay */
+ WREG32(CP_RB_WPTR_DELAY, 0);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
+ WREG32(CP_RB_RPTR_WR, 0);
+ WREG32(CP_RB_WPTR, 0);
+ WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF);
+ WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr));
+ mdelay(1);
+ WREG32(CP_RB_CNTL, tmp);
+
+ WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
+ WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
+
+ rdev->cp.rptr = RREG32(CP_RB_RPTR);
+ rdev->cp.wptr = RREG32(CP_RB_WPTR);
+
+ r600_cp_start(rdev);
+ rdev->cp.ready = true;
+ r = radeon_ring_test(rdev);
+ if (r) {
+ rdev->cp.ready = false;
+ return r;
+ }
+ return 0;
+}
/*
* Core functions
@@ -1133,15 +1204,15 @@ static int evergreen_startup(struct radeon_device *rdev)
{
int r;
-#if 0
- if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+ /* XXX until interrupts are supported */
+ if (!rdev->me_fw || !rdev->pfp_fw /*|| !rdev->rlc_fw*/) {
r = r600_init_microcode(rdev);
if (r) {
DRM_ERROR("Failed to load firmware!\n");
return r;
}
}
-#endif
+
evergreen_mc_program(rdev);
if (rdev->flags & RADEON_IS_AGP) {
evergreen_agp_enable(rdev);
@@ -1179,6 +1250,7 @@ static int evergreen_startup(struct radeon_device *rdev)
return r;
}
r600_irq_set(rdev);
+#endif
r = radeon_ring_init(rdev, rdev->cp.ring_size);
if (r)
@@ -1186,12 +1258,12 @@ static int evergreen_startup(struct radeon_device *rdev)
r = evergreen_cp_load_microcode(rdev);
if (r)
return r;
- r = r600_cp_resume(rdev);
+ r = evergreen_cp_resume(rdev);
if (r)
return r;
/* write back buffer are not vital so don't worry about failure */
r600_wb_enable(rdev);
-#endif
+
return 0;
}
@@ -1216,13 +1288,13 @@ int evergreen_resume(struct radeon_device *rdev)
DRM_ERROR("r600 startup failed on resume\n");
return r;
}
-#if 0
+
r = r600_ib_test(rdev);
if (r) {
DRM_ERROR("radeon: failled testing IB (%d).\n", r);
return r;
}
-#endif
+
return r;
}
@@ -1231,12 +1303,11 @@ int evergreen_suspend(struct radeon_device *rdev)
{
#if 0
int r;
-
+#endif
/* FIXME: we should wait for ring to be empty */
r700_cp_stop(rdev);
rdev->cp.ready = false;
r600_wb_disable(rdev);
-#endif
evergreen_pcie_gart_disable(rdev);
#if 0
@@ -1343,10 +1414,10 @@ int evergreen_init(struct radeon_device *rdev)
r = radeon_irq_kms_init(rdev);
if (r)
return r;
-
+#endif
rdev->cp.ring_obj = NULL;
r600_ring_init(rdev, 1024 * 1024);
-
+#if 0
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
#endif
@@ -1357,9 +1428,13 @@ int evergreen_init(struct radeon_device *rdev)
rdev->accel_working = false;
r = evergreen_startup(rdev);
if (r) {
- evergreen_suspend(rdev);
- /*r600_wb_fini(rdev);*/
- /*radeon_ring_fini(rdev);*/
+ dev_err(rdev->dev, "disabling GPU acceleration\n");
+ r700_cp_fini(rdev);
+ r600_wb_fini(rdev);
+#if 0
+ r600_irq_fini(rdev);
+ radeon_irq_kms_fini(rdev);
+#endif
evergreen_pcie_gart_fini(rdev);
rdev->accel_working = false;
}
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -89,6 +89,7 @@
#define CP_QUEUE_THRESHOLDS 0x8760
#define ROQ_IB1_START(x) ((x) << 0)
#define ROQ_IB2_START(x) ((x) << 8)
+#define CP_RB_BASE 0xC100
#define CP_RB_CNTL 0xC104
#define RB_BUFSZ(x) ((x) << 0)
#define RB_BLKSZ(x) ((x) << 8)
@@ -104,6 +105,7 @@
#define CP_RB_WPTR_ADDR_HI 0xC11C
#define CP_RB_WPTR_DELAY 0x8704
#define CP_SEM_WAIT_TIMER 0x85BC
+#define CP_DEBUG 0xC1FC
#define GC_USER_SHADER_PIPE_CONFIG 0x8954
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -43,6 +43,8 @@
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360
#define R700_RLC_UCODE_SIZE 1024
+#define EVERGREEN_PFP_UCODE_SIZE 1120
+#define EVERGREEN_PM4_UCODE_SIZE 1376
/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
@@ -67,6 +69,14 @@ MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
MODULE_FIRMWARE("radeon/R600_rlc.bin");
MODULE_FIRMWARE("radeon/R700_rlc.bin");
+MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
+MODULE_FIRMWARE("radeon/CEDAR_me.bin");
+MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
+MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
+MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
+MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
+MODULE_FIRMWARE("radeon/CYRPESS_pfp.bin");
+MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
int r600_debugfs_mc_info_init(struct radeon_device *rdev);
@@ -1444,10 +1454,31 @@ int r600_init_microcode(struct radeon_device *rdev)
chip_name = "RV710";
rlc_chip_name = "R700";
break;
+ case CHIP_CEDAR:
+ chip_name = "CEDAR";
+ rlc_chip_name = "";
+ break;
+ case CHIP_REDWOOD:
+ chip_name = "REDWOOD";
+ rlc_chip_name = "";
+ break;
+ case CHIP_JUNIPER:
+ chip_name = "JUNIPER";
+ rlc_chip_name = "";
+ break;
+ case CHIP_CYPRESS:
+ case CHIP_HEMLOCK:
+ chip_name = "CYPRESS";
+ rlc_chip_name = "";
+ break;
default: BUG();
}
- if (rdev->family >= CHIP_RV770) {
+ if (rdev->family >= CHIP_CEDAR) {
+ pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
+ me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
+ rlc_req_size = 0;
+ } else if (rdev->family >= CHIP_RV770) {
pfp_req_size = R700_PFP_UCODE_SIZE * 4;
me_req_size = R700_PM4_UCODE_SIZE * 4;
rlc_req_size = R700_RLC_UCODE_SIZE * 4;
@@ -1482,6 +1513,8 @@ int r600_init_microcode(struct radeon_device *rdev)
err = -EINVAL;
}
+ /* XXX until evergreen interrupts are supported */
+ if (rdev->family < CHIP_CEDAR) {
snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
if (err)
@@ -1492,6 +1525,7 @@ int r600_init_microcode(struct radeon_device *rdev)
rdev->rlc_fw->size, fw_name);
err = -EINVAL;
}
+ }
out:
platform_device_unregister(pdev);
@@ -1561,12 +1595,15 @@ int r600_cp_start(struct radeon_device *rdev)
}
radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
radeon_ring_write(rdev, 0x1);
- if (rdev->family < CHIP_RV770) {
- radeon_ring_write(rdev, 0x3);
- radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
- } else {
+ if (rdev->family >= CHIP_CEDAR) {
+ radeon_ring_write(rdev, 0x0);
+ radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
+ } else if (rdev->family >= CHIP_RV770) {
radeon_ring_write(rdev, 0x0);
radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
+ } else {
+ radeon_ring_write(rdev, 0x3);
+ radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
}
radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
radeon_ring_write(rdev, 0);
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1297,6 +1297,7 @@ extern void rs690_line_buffer_adjust(struct radeon_device *rdev,
extern void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
extern bool r600_card_posted(struct radeon_device *rdev);
extern void r600_cp_stop(struct radeon_device *rdev);
+extern int r600_cp_start(struct radeon_device *rdev);
extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
extern int r600_cp_resume(struct radeon_device *rdev);
extern void r600_cp_fini(struct radeon_device *rdev);
@@ -1337,6 +1338,9 @@ extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
uint8_t status_bits,
uint8_t category_code);
+extern void r700_cp_stop(struct radeon_device *rdev);
+extern void r700_cp_fini(struct radeon_device *rdev);
+
/* evergreen */
struct evergreen_mc_save {
u32 vga_control[6];
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -635,14 +635,14 @@ static struct radeon_asic evergreen_asic = {
.fini = &evergreen_fini,
.suspend = &evergreen_suspend,
.resume = &evergreen_resume,
- .cp_commit = NULL,
+ .cp_commit = &r600_cp_commit,
.gpu_is_lockup = &evergreen_gpu_is_lockup,
.asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
.gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
- .ring_test = NULL,
- .ring_ib_execute = NULL,
+ .ring_test = &r600_ring_test,
+ .ring_ib_execute = &r600_ring_ib_execute,
.irq_set = NULL,
.irq_process = NULL,
.get_vblank_counter = NULL,
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -236,7 +236,6 @@ void r700_cp_stop(struct radeon_device *rdev)
WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
}
-
static int rv770_cp_load_microcode(struct radeon_device *rdev)
{
const __be32 *fw_data;
@@ -271,6 +270,11 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev)
return 0;
}
+void r700_cp_fini(struct radeon_device *rdev)
+{
+ r700_cp_stop(rdev);
+ radeon_ring_fini(rdev);
+}
/*
* Core functions
@@ -1120,7 +1124,7 @@ int rv770_init(struct radeon_device *rdev)
r = rv770_startup(rdev);
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
- r600_cp_fini(rdev);
+ r700_cp_fini(rdev);
r600_wb_fini(rdev);
r600_irq_fini(rdev);
radeon_irq_kms_fini(rdev);
@@ -1154,7 +1158,7 @@ void rv770_fini(struct radeon_device *rdev)
{
radeon_pm_fini(rdev);
r600_blit_fini(rdev);
- r600_cp_fini(rdev);
+ r700_cp_fini(rdev);
r600_wb_fini(rdev);
r600_irq_fini(rdev);
radeon_irq_kms_fini(rdev);
--
1.5.6.3