@@ -1338,13 +1338,14 @@ int drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo)
 
 static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
 {
-	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+	drm_intel_bufmgr_gem *bufmgr_gem;
 	int ret = 0;
 
 	if (bo == NULL)
 		return 0;
 
+	bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
 	pthread_mutex_lock(&bufmgr_gem->lock);
 
 	if (bo_gem->map_count <= 0) {
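The hunk above fixes a classic dereference-before-NULL-check defect: the old initializer read bo->bufmgr before the bo == NULL guard could reject a NULL pointer, so the guard never protected the path it was written for. Note that casting bo to drm_intel_bo_gem * remains safe before the check, since a cast does not dereference anything. Below is a minimal standalone sketch of the corrected pattern; struct object, struct manager, and object_release are hypothetical names chosen for illustration, not libdrm types.

#include <stddef.h>

struct manager {
	int refcount;
};

struct object {
	struct manager *mgr;
};

static int object_release(struct object *obj)
{
	struct manager *mgr;	/* declared uninitialized: no obj dereference yet */

	if (obj == NULL)	/* reject NULL before any member access */
		return 0;

	mgr = obj->mgr;		/* safe: obj is known non-NULL here */
	mgr->refcount--;
	return 0;
}

Ordering matters beyond the obvious crash: once a pointer has been dereferenced, an optimizing compiler may assume it is non-NULL and delete a later guard outright, so the check has to come before the first use.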
@@ -3893,7 +3893,7 @@ drm_intel_decode(struct drm_intel_decode *ctx)
 	int ret;
 	unsigned int index = 0;
 	uint32_t devid;
-	int size = ctx->base_count * 4;
+	int size;
 	void *temp;
 
 	if (!ctx)
@@ -3903,6 +3903,7 @@ drm_intel_decode(struct drm_intel_decode *ctx)
 	 * the batchbuffer. This lets us avoid a bunch of length
 	 * checking in statically sized packets.
 	 */
+	size = ctx->base_count * 4;
 	temp = malloc(size + 4096);
 	memcpy(temp, ctx->base_data, size);
 	memset((char *)temp + size, 0xd0, 4096);
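The two decode hunks are the same reordering split across a declaration and a use: size is derived from ctx, so its computation moves from the initializer down past the !ctx guard, leaving the declaration bare. Here is a sketch of the resulting shape, assuming a hypothetical struct decode_ctx in place of struct drm_intel_decode; the malloc return check is an addition of the sketch, not something the patch introduces.

#include <stdlib.h>
#include <string.h>

struct decode_ctx {
	unsigned int base_count;	/* batch length in dwords */
	const void *base_data;		/* batch payload */
};

static int decode(struct decode_ctx *ctx)
{
	int size;	/* bare declaration: computing it here would touch ctx */
	void *temp;

	if (!ctx)
		return -1;

	size = ctx->base_count * 4;	/* now guarded by the !ctx check */
	temp = malloc(size + 4096);
	if (!temp)	/* sketch-only check; the excerpt leaves malloc untested */
		return -1;
	memcpy(temp, ctx->base_data, size);
	memset((char *)temp + size, 0xd0, 4096);	/* pad the tail, as in the excerpt */
	free(temp);
	return 0;
}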