@@ -1364,17 +1364,25 @@ void evergreen_mc_program(struct radeon_device *rdev)
void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
struct radeon_ring *ring = &rdev->ring[ib->ring];
+ u32 next_rptr;
/* set to DX10/11 mode */
radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
radeon_ring_write(ring, 1);
if (ring->rptr_save_reg) {
- uint32_t next_rptr = ring->wptr + 3 + 4;
+ next_rptr = ring->wptr + 3 + 4;
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, ((ring->rptr_save_reg -
PACKET3_SET_CONFIG_REG_START) >> 2));
radeon_ring_write(ring, next_rptr);
+ } else if (rdev->wb.enabled) {
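+ /* no scratch reg: predict where the rptr lands once the CP has
+ * fetched this IB (5 dw MEM_WRITE + 4 dw INDIRECT_BUFFER) and
+ * store it in the ring's writeback slot */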
+ next_rptr = ring->wptr + 5 + 4;
+ radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3));
+ radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18));
+ radeon_ring_write(ring, next_rptr);
+ radeon_ring_write(ring, 0);
}
radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
@@ -2163,10 +2163,12 @@ void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsign
ring->ring_size = ring_size;
ring->align_mask = 16 - 1;
- r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
- if (r) {
- DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r);
- ring->rptr_save_reg = 0;
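+ /* only CP rings can use a scratch reg to save the rptr */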
+ if (radeon_ring_supports_scratch_reg(rdev, ring)) {
+ r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
+ if (r) {
+ DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r);
+ ring->rptr_save_reg = 0;
+ }
}
}
@@ -2576,13 +2578,21 @@ void r600_fini(struct radeon_device *rdev)
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
struct radeon_ring *ring = &rdev->ring[ib->ring];
+ u32 next_rptr;
if (ring->rptr_save_reg) {
- uint32_t next_rptr = ring->wptr + 3 + 4;
+ next_rptr = ring->wptr + 3 + 4;
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, ((ring->rptr_save_reg -
PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
radeon_ring_write(ring, next_rptr);
+ } else if (rdev->wb.enabled) {
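+ /* same fallback as evergreen: store the predicted rptr in the
+ * writeback slot (5 dw MEM_WRITE + 4 dw INDIRECT_BUFFER) */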
+ next_rptr = ring->wptr + 5 + 4;
+ radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3));
+ radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18));
+ radeon_ring_write(ring, next_rptr);
+ radeon_ring_write(ring, 0);
}
radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
@@ -623,6 +623,8 @@ struct radeon_ring {
unsigned rptr_offs;
unsigned rptr_reg;
unsigned rptr_save_reg;
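+ /* writeback slot (GPU address / CPU pointer) for the predicted next rptr */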
+ u64 next_rptr_gpu_addr;
+ volatile u32 *next_rptr_cpu_addr;
unsigned wptr;
unsigned wptr_old;
unsigned wptr_reg;
@@ -758,6 +760,8 @@ int radeon_ib_pool_init(struct radeon_device *rdev);
void radeon_ib_pool_fini(struct radeon_device *rdev);
int radeon_ib_ring_tests(struct radeon_device *rdev);
/* Ring access between begin & end cannot sleep */
+bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,
+ struct radeon_ring *ring);
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp);
int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
@@ -871,6 +875,7 @@ struct radeon_wb {
};
#define RADEON_WB_SCRATCH_OFFSET 0
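+/* one dword per ring, indexed by ring->idx */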
+#define RADEON_WB_RING0_NEXT_RPTR 256
#define RADEON_WB_CP_RPTR_OFFSET 1024
#define RADEON_WB_CP1_RPTR_OFFSET 1280
#define RADEON_WB_CP2_RPTR_OFFSET 1536
@@ -207,6 +207,19 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
ring->ring_free_dw--;
}
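+/* only the CP rings (gfx plus the two cayman compute rings) can save
+ * their rptr with a SET_CONFIG_REG packet */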
+bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ switch (ring->idx) {
+ case RADEON_RING_TYPE_GFX_INDEX:
+ case CAYMAN_RING_TYPE_CP1_INDEX:
+ case CAYMAN_RING_TYPE_CP2_INDEX:
+ return true;
+ default:
+ return false;
+ }
+}
+
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
{
u32 rptr;
@@ -372,7 +385,7 @@ unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring
mutex_lock(&rdev->ring_lock);
*data = NULL;
- if (ring->ring_obj == NULL || !ring->rptr_save_reg) {
+ if (ring->ring_obj == NULL) {
mutex_unlock(&rdev->ring_lock);
return 0;
}
@@ -384,7 +397,16 @@ unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring
}
/* calculate the number of dw on the ring */
- ptr = RREG32(ring->rptr_save_reg);
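+ /* prefer the scratch reg, fall back to the next_rptr writeback slot */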
+ if (ring->rptr_save_reg) {
+ ptr = RREG32(ring->rptr_save_reg);
+ } else if (rdev->wb.enabled) {
+ ptr = le32_to_cpu(*ring->next_rptr_cpu_addr);
+ } else {
+ /* no way to read back the next rptr */
+ mutex_unlock(&rdev->ring_lock);
+ return 0;
+ }
+
size = ring->wptr + (ring->ring_size / 4);
size -= ptr;
size &= ring->ptr_mask;
@@ -478,6 +500,11 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig
}
ring->ptr_mask = (ring->ring_size / 4) - 1;
ring->ring_free_dw = ring->ring_size / 4;
+ if (rdev->wb.enabled) {
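+ /* each ring gets one dword in the writeback page for its next rptr */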
+ u32 index = RADEON_WB_RING0_NEXT_RPTR + (ring->idx * 4);
+ ring->next_rptr_gpu_addr = rdev->wb.gpu_addr + index;
+ ring->next_rptr_cpu_addr = &rdev->wb.wb[index/4];
+ }
if (radeon_debugfs_ring_init(rdev, ring)) {
DRM_ERROR("Failed to register debugfs file for rings !\n");
}
@@ -1772,12 +1772,20 @@ void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
} else {
+ u32 next_rptr;
if (ring->rptr_save_reg) {
- uint32_t next_rptr = ring->wptr + 3 + 4 + 8;
+ next_rptr = ring->wptr + 3 + 4 + 8;
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, ((ring->rptr_save_reg -
PACKET3_SET_CONFIG_REG_START) >> 2));
radeon_ring_write(ring, next_rptr);
+ } else if (rdev->wb.enabled) {
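+ /* no scratch reg: WRITE_DATA the predicted rptr to the writeback
+ * slot (5 dw packet + 4 dw IB + 8 dw emitted after the IB) */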
+ next_rptr = ring->wptr + 5 + 4 + 8;
+ radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ radeon_ring_write(ring, (1 << 8)); /* DST_SEL = memory */
+ radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
+ radeon_ring_write(ring, next_rptr);
}
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);