path: root/drivers/gpu/drm/radeon/radeon_gem.c
author	Jerome Glisse <jglisse@redhat.com>	2010-01-07 12:39:21 +0100
committer	Dave Airlie <airlied@redhat.com>	2010-01-08 13:09:59 +1000
commit	cafe6609d6dc0a6a278f9fdbb59ce4d761a35ddd (patch)
tree	a3e15eabffd6e10bed1ef639fc2f2e087c67b047 /drivers/gpu/drm/radeon/radeon_gem.c
parent	62cdc0c20663ef840a94850892517b2b7f584904 (diff)
drm/radeon/kms: Schedule host path read cache flush through the ring V2
The R300 family will hard lockup if the host path read cache flush is done through an MMIO write to HOST_PATH_CNTL, but scheduling the same flush through the ring appears to be harmless. This patch removes the hdp_flush callback and instead adds a flush after each fence emission, which means a flush after each IB schedule. We should therefore get the same behavior without the hard lockup. Tested on the R100, R200, R300, R400, R500, R600 and R700 families.

V2: Adjust fence counts in r600_blit_prepare_copy()

Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Reviewed-by: Alex Deucher <alexdeucher@gmail.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
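For illustration, a minimal sketch of the idea follows: rather than poking HOST_PATH_CNTL over MMIO from the CPU, the HDP read cache invalidate is written into the command ring as part of fence emission, so the flush is serialized with the GPU's own work. This is not a verbatim hunk of the commit; the helper name is hypothetical and the register/bit names (RADEON_HOST_PATH_CNTL, RADEON_HDP_READ_BUFFER_INVALIDATE, the saved hdp_cntl value) are assumptions based on the radeon driver's conventions of this era.

/* Sketch only: schedule the HDP read cache flush through the ring during
 * fence emission instead of writing HOST_PATH_CNTL via MMIO.  Names are
 * assumptions, not copied from this commit's hunks. */
static void example_fence_emit_hdp_flush(struct radeon_device *rdev)
{
	/* Ask the CP to set the read buffer invalidate bit ... */
	radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
	radeon_ring_write(rdev, rdev->config.r100.hdp_cntl |
				RADEON_HDP_READ_BUFFER_INVALIDATE);
	/* ... then restore HOST_PATH_CNTL to its saved value. */
	radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
	radeon_ring_write(rdev, rdev->config.r100.hdp_cntl);
}

Because this runs on the ring right before the fence sequence is emitted, every IB schedule picks up the flush without any host-side MMIO access, which is what lets the per-ASIC hdp_flush callback (and the calls removed in the diff below) go away.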
Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_gem.c')
-rw-r--r--	drivers/gpu/drm/radeon/radeon_gem.c	2
1 file changed, 0 insertions, 2 deletions
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 60df2d7e7e4..0e1325e1853 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -131,7 +131,6 @@ int radeon_gem_set_domain(struct drm_gem_object *gobj,
printk(KERN_ERR "Failed to wait for object !\n");
return r;
}
- radeon_hdp_flush(robj->rdev);
}
return 0;
}
@@ -312,7 +311,6 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
mutex_lock(&dev->struct_mutex);
drm_gem_object_unreference(gobj);
mutex_unlock(&dev->struct_mutex);
- radeon_hdp_flush(robj->rdev);
return r;
}