From 804852e4ffc983f9ee7600f78218698546fdc58d Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Wed, 11 Feb 2009 23:42:59 +0530 Subject: drm_proc.c fix compilation warning MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit drivers/gpu/drm/drm_proc.c: In function ‘drm__vma_info’: drivers/gpu/drm/drm_proc.c:681: warning: format ‘%08lx’ expects type ‘long unsigned int’, but argument 5 has type ‘phys_addr_t’ Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- drivers/gpu/drm/drm_proc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c index 8df849f6683..b756f043a5f 100644 --- a/drivers/gpu/drm/drm_proc.c +++ b/drivers/gpu/drm/drm_proc.c @@ -678,9 +678,9 @@ static int drm__vma_info(char *buf, char **start, off_t offset, int request, *start = &buf[offset]; *eof = 0; - DRM_PROC_PRINT("vma use count: %d, high_memory = %p, 0x%08lx\n", + DRM_PROC_PRINT("vma use count: %d, high_memory = %p, 0x%llx\n", atomic_read(&dev->vma_count), - high_memory, virt_to_phys(high_memory)); + high_memory, (u64)virt_to_phys(high_memory)); list_for_each_entry(pt, &dev->vmalist, head) { if (!(vma = pt->vma)) continue; -- cgit v1.2.3 From 3255aa2eb636a508fc82a73fabbb8aaf2ff23c0f Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 25 Feb 2009 08:21:52 +0100 Subject: x86, mm: pass in 'total' to __copy_from_user_*nocache() Impact: cleanup, enable future change Add a 'total bytes copied' parameter to __copy_from_user_*nocache(), and update all the callsites. The parameter is not used yet - architecture code can use it to more intelligently decide whether the copy should be cached or non-temporal. Cc: Salman Qazi Cc: Nick Piggin Cc: Linus Torvalds Signed-off-by: Ingo Molnar --- drivers/gpu/drm/i915/i915_gem.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 81857665409..6b209db8370 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -215,7 +215,7 @@ fast_user_write(struct io_mapping *mapping, vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base); unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset, - user_data, length); + user_data, length, length); io_mapping_unmap_atomic(vaddr_atomic); if (unwritten) return -EFAULT; -- cgit v1.2.3 From f180053694b43d5714bf56cb95499a3c32ff155c Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 2 Mar 2009 11:00:57 +0100 Subject: x86, mm: dont use non-temporal stores in pagecache accesses Impact: standardize IO on cached ops On modern CPUs it is almost always a bad idea to use non-temporal stores, as the regression in this commit has shown it: 30d697f: x86: fix performance regression in write() syscall The kernel simply has no good information about whether using non-temporal stores is a good idea or not - and trying to add heuristics only increases complexity and inserts fragility. The regression on cached write()s took very long to be found - over two years. So dont take any chances and let the hardware decide how it makes use of its caches. The only exception is drivers/gpu/drm/i915/i915_gem.c: there were we are absolutely sure that another entity (the GPU) will pick up the dirty data immediately and that the CPU will not touch that data before the GPU will. 
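Annotation (not part of the original commit message): the 'total' parameter introduced two commits earlier was meant to enable exactly the kind of heuristic this commit ultimately decides against. As a rough illustration only — the threshold and the arch_nt_copy_from_user() helper are hypothetical, not real kernel interfaces — such an architecture-level decision could have looked like:

	/* Hypothetical sketch: prefer cached copies for small writes and
	 * reserve non-temporal stores for streams large enough to evict
	 * most of the cache anyway.  Returns bytes left uncopied, like
	 * the real __copy_from_user_*() primitives. */
	#define NT_COPY_THRESHOLD	(4 * PAGE_SIZE)		/* made-up cutoff */

	static unsigned long arch_copy_from_user_hint(void *dst,
						      const void __user *src,
						      unsigned long len,
						      unsigned long total)
	{
		if (total < NT_COPY_THRESHOLD)
			return __copy_from_user_inatomic(dst, src, len);
		return arch_nt_copy_from_user(dst, src, len);	/* hypothetical */
	}

As this commit argues, the kernel lacks the information to get such a threshold right, which is why cached operations were standardized on instead.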
Also, keep the _nocache() primitives to make it easier for people to experiment with these details. There may be more clear-cut cases where non-cached copies can be used, outside of filemap.c. Cc: Salman Qazi Cc: Nick Piggin Cc: Linus Torvalds Signed-off-by: Ingo Molnar --- drivers/gpu/drm/i915/i915_gem.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 6b209db8370..81857665409 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -215,7 +215,7 @@ fast_user_write(struct io_mapping *mapping, vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base); unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset, - user_data, length, length); + user_data, length); io_mapping_unmap_atomic(vaddr_atomic); if (unwritten) return -EFAULT; -- cgit v1.2.3 From 60aa49243d09afc873f082567d2e3c16634ced84 Mon Sep 17 00:00:00 2001 From: Jonathan Corbet Date: Sun, 1 Feb 2009 14:52:56 -0700 Subject: Rationalize fasync return values Most fasync implementations do something like: return fasync_helper(...); But fasync_helper() will return a positive value at times - a feature used in at least one place. Thus, a number of other drivers do: err = fasync_helper(...); if (err < 0) return err; return 0; In the interests of consistency and more concise code, it makes sense to map positive return values onto zero where ->fasync() is called. Cc: Al Viro Signed-off-by: Jonathan Corbet --- drivers/gpu/drm/drm_fops.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index f52663ebe01..e13cb62bbae 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -337,14 +337,10 @@ int drm_fasync(int fd, struct file *filp, int on) { struct drm_file *priv = filp->private_data; struct drm_device *dev = priv->minor->dev; - int retcode; DRM_DEBUG("fd = %d, device = 0x%lx\n", fd, (long)old_encode_dev(priv->minor->device)); - retcode = fasync_helper(fd, filp, on, &dev->buf_async); - if (retcode < 0) - return retcode; - return 0; + return fasync_helper(fd, filp, on, &dev->buf_async); } EXPORT_SYMBOL(drm_fasync); -- cgit v1.2.3 From 2ead054cd26752c7ce47dfbf320dd021ef70682d Mon Sep 17 00:00:00 2001 From: Kay Sievers Date: Tue, 24 Mar 2009 16:38:22 -0700 Subject: drm: struct device - replace bus_id with dev_name(), dev_set_name() Cc: airlied@linux.ie Acked-by: Greg Kroah-Hartman Signed-off-by: Kay Sievers --- drivers/gpu/drm/drm_sysfs.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c index 5aa6780652a..186d08159d4 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c @@ -359,8 +359,8 @@ int drm_sysfs_connector_add(struct drm_connector *connector) DRM_DEBUG("adding \"%s\" to sysfs\n", drm_get_connector_name(connector)); - snprintf(connector->kdev.bus_id, BUS_ID_SIZE, "card%d-%s", - dev->primary->index, drm_get_connector_name(connector)); + dev_set_name(&connector->kdev, "card%d-%s", + dev->primary->index, drm_get_connector_name(connector)); ret = device_register(&connector->kdev); if (ret) { -- cgit v1.2.3 From 568d9a8f6d4bf81e0672c74573dc02981d31e3ea Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Thu, 12 Mar 2009 16:27:11 -0700 Subject: drm/i915: Change DCC tiling detection case to cover only mobile parts. 
Later spec investigation has revealed that every 9xx mobile part has had this register in this format. Also, no non-mobile parts have been shown to have this register. So make all mobile use the same code, and all non-mobile use the hack 965 detection. Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/i915_gem_tiling.c | 31 +++++++++++++++---------------- 1 file changed, 15 insertions(+), 16 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 7fb4191ef93..4cce1aef438 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -96,16 +96,16 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) */ swizzle_x = I915_BIT_6_SWIZZLE_NONE; swizzle_y = I915_BIT_6_SWIZZLE_NONE; - } else if ((!IS_I965G(dev) && !IS_G33(dev)) || IS_I965GM(dev) || - IS_GM45(dev)) { + } else if (IS_MOBILE(dev)) { uint32_t dcc; - /* On 915-945 and GM965, channel interleave by the CPU is - * determined by DCC. The CPU will alternate based on bit 6 - * in interleaved mode, and the GPU will then also alternate - * on bit 6, 9, and 10 for X, but the CPU may also optionally - * alternate based on bit 17 (XOR not disabled and XOR - * bit == 17). + /* On mobile 9xx chipsets, channel interleave by the CPU is + * determined by DCC. For single-channel, neither the CPU + * nor the GPU do swizzling. For dual channel interleaved, + * the GPU's interleave is bit 9 and 10 for X tiled, and bit + * 9 for Y tiled. The CPU's interleave is independent, and + * can be based on either bit 11 (haven't seen this yet) or + * bit 17 (common). */ dcc = I915_READ(DCC); switch (dcc & DCC_ADDRESSING_MODE_MASK) { @@ -115,19 +115,18 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) swizzle_y = I915_BIT_6_SWIZZLE_NONE; break; case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED: - if (IS_I915G(dev) || IS_I915GM(dev) || - dcc & DCC_CHANNEL_XOR_DISABLE) { + if (dcc & DCC_CHANNEL_XOR_DISABLE) { + /* This is the base swizzling by the GPU for + * tiled buffers. + */ swizzle_x = I915_BIT_6_SWIZZLE_9_10; swizzle_y = I915_BIT_6_SWIZZLE_9; - } else if ((IS_I965GM(dev) || IS_GM45(dev)) && - (dcc & DCC_CHANNEL_XOR_BIT_17) == 0) { - /* GM965/GM45 does either bit 11 or bit 17 - * swizzling. - */ + } else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) { + /* Bit 11 swizzling by the CPU in addition. */ swizzle_x = I915_BIT_6_SWIZZLE_9_10_11; swizzle_y = I915_BIT_6_SWIZZLE_9_11; } else { - /* Bit 17 or perhaps other swizzling */ + /* Bit 17 swizzling by the CPU in addition. */ swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; } -- cgit v1.2.3 From 044c7c415a68077b7c444c753aa03a35149e881a Mon Sep 17 00:00:00 2001 From: Ma Ling Date: Wed, 18 Mar 2009 20:13:23 +0800 Subject: drm/i915: Use documented PLL timing limits for G4X platform The values come from the internal reference spreadsheet on PLL timing limits for the G4X chipsets. 
Part of fixing fd.o bug #17508 Signed-off-by: Ma Ling [anholt: Cleaned up some whitespace] Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/intel_display.c | 187 ++++++++++++++++++++++++++++++++++- 1 file changed, 186 insertions(+), 1 deletion(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index a2834276cb3..89f7af0bdb1 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -115,6 +115,89 @@ typedef struct { #define INTEL_LIMIT_I8XX_LVDS 1 #define INTEL_LIMIT_I9XX_SDVO_DAC 2 #define INTEL_LIMIT_I9XX_LVDS 3 +#define INTEL_LIMIT_G4X_SDVO 4 +#define INTEL_LIMIT_G4X_HDMI_DAC 5 +#define INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS 6 +#define INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS 7 + +/*The parameter is for SDVO on G4x platform*/ +#define G4X_DOT_SDVO_MIN 25000 +#define G4X_DOT_SDVO_MAX 270000 +#define G4X_VCO_MIN 1750000 +#define G4X_VCO_MAX 3500000 +#define G4X_N_SDVO_MIN 1 +#define G4X_N_SDVO_MAX 4 +#define G4X_M_SDVO_MIN 104 +#define G4X_M_SDVO_MAX 138 +#define G4X_M1_SDVO_MIN 17 +#define G4X_M1_SDVO_MAX 23 +#define G4X_M2_SDVO_MIN 5 +#define G4X_M2_SDVO_MAX 11 +#define G4X_P_SDVO_MIN 10 +#define G4X_P_SDVO_MAX 30 +#define G4X_P1_SDVO_MIN 1 +#define G4X_P1_SDVO_MAX 3 +#define G4X_P2_SDVO_SLOW 10 +#define G4X_P2_SDVO_FAST 10 +#define G4X_P2_SDVO_LIMIT 270000 + +/*The parameter is for HDMI_DAC on G4x platform*/ +#define G4X_DOT_HDMI_DAC_MIN 22000 +#define G4X_DOT_HDMI_DAC_MAX 400000 +#define G4X_N_HDMI_DAC_MIN 1 +#define G4X_N_HDMI_DAC_MAX 4 +#define G4X_M_HDMI_DAC_MIN 104 +#define G4X_M_HDMI_DAC_MAX 138 +#define G4X_M1_HDMI_DAC_MIN 16 +#define G4X_M1_HDMI_DAC_MAX 23 +#define G4X_M2_HDMI_DAC_MIN 5 +#define G4X_M2_HDMI_DAC_MAX 11 +#define G4X_P_HDMI_DAC_MIN 5 +#define G4X_P_HDMI_DAC_MAX 80 +#define G4X_P1_HDMI_DAC_MIN 1 +#define G4X_P1_HDMI_DAC_MAX 8 +#define G4X_P2_HDMI_DAC_SLOW 10 +#define G4X_P2_HDMI_DAC_FAST 5 +#define G4X_P2_HDMI_DAC_LIMIT 165000 + +/*The parameter is for SINGLE_CHANNEL_LVDS on G4x platform*/ +#define G4X_DOT_SINGLE_CHANNEL_LVDS_MIN 20000 +#define G4X_DOT_SINGLE_CHANNEL_LVDS_MAX 115000 +#define G4X_N_SINGLE_CHANNEL_LVDS_MIN 1 +#define G4X_N_SINGLE_CHANNEL_LVDS_MAX 3 +#define G4X_M_SINGLE_CHANNEL_LVDS_MIN 104 +#define G4X_M_SINGLE_CHANNEL_LVDS_MAX 138 +#define G4X_M1_SINGLE_CHANNEL_LVDS_MIN 17 +#define G4X_M1_SINGLE_CHANNEL_LVDS_MAX 23 +#define G4X_M2_SINGLE_CHANNEL_LVDS_MIN 5 +#define G4X_M2_SINGLE_CHANNEL_LVDS_MAX 11 +#define G4X_P_SINGLE_CHANNEL_LVDS_MIN 28 +#define G4X_P_SINGLE_CHANNEL_LVDS_MAX 112 +#define G4X_P1_SINGLE_CHANNEL_LVDS_MIN 2 +#define G4X_P1_SINGLE_CHANNEL_LVDS_MAX 8 +#define G4X_P2_SINGLE_CHANNEL_LVDS_SLOW 14 +#define G4X_P2_SINGLE_CHANNEL_LVDS_FAST 14 +#define G4X_P2_SINGLE_CHANNEL_LVDS_LIMIT 0 + +/*The parameter is for DUAL_CHANNEL_LVDS on G4x platform*/ +#define G4X_DOT_DUAL_CHANNEL_LVDS_MIN 80000 +#define G4X_DOT_DUAL_CHANNEL_LVDS_MAX 224000 +#define G4X_N_DUAL_CHANNEL_LVDS_MIN 1 +#define G4X_N_DUAL_CHANNEL_LVDS_MAX 3 +#define G4X_M_DUAL_CHANNEL_LVDS_MIN 104 +#define G4X_M_DUAL_CHANNEL_LVDS_MAX 138 +#define G4X_M1_DUAL_CHANNEL_LVDS_MIN 17 +#define G4X_M1_DUAL_CHANNEL_LVDS_MAX 23 +#define G4X_M2_DUAL_CHANNEL_LVDS_MIN 5 +#define G4X_M2_DUAL_CHANNEL_LVDS_MAX 11 +#define G4X_P_DUAL_CHANNEL_LVDS_MIN 14 +#define G4X_P_DUAL_CHANNEL_LVDS_MAX 42 +#define G4X_P1_DUAL_CHANNEL_LVDS_MIN 2 +#define G4X_P1_DUAL_CHANNEL_LVDS_MAX 6 +#define G4X_P2_DUAL_CHANNEL_LVDS_SLOW 7 +#define G4X_P2_DUAL_CHANNEL_LVDS_FAST 7 +#define G4X_P2_DUAL_CHANNEL_LVDS_LIMIT 0 + static 
const intel_limit_t intel_limits[] = { { /* INTEL_LIMIT_I8XX_DVO_DAC */ @@ -168,14 +251,116 @@ static const intel_limit_t intel_limits[] = { .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST }, }, + /* below parameter and function is for G4X Chipset Family*/ + { /* INTEL_LIMIT_G4X_SDVO */ + .dot = { .min = G4X_DOT_SDVO_MIN, .max = G4X_DOT_SDVO_MAX }, + .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX}, + .n = { .min = G4X_N_SDVO_MIN, .max = G4X_N_SDVO_MAX }, + .m = { .min = G4X_M_SDVO_MIN, .max = G4X_M_SDVO_MAX }, + .m1 = { .min = G4X_M1_SDVO_MIN, .max = G4X_M1_SDVO_MAX }, + .m2 = { .min = G4X_M2_SDVO_MIN, .max = G4X_M2_SDVO_MAX }, + .p = { .min = G4X_P_SDVO_MIN, .max = G4X_P_SDVO_MAX }, + .p1 = { .min = G4X_P1_SDVO_MIN, .max = G4X_P1_SDVO_MAX}, + .p2 = { .dot_limit = G4X_P2_SDVO_LIMIT, + .p2_slow = G4X_P2_SDVO_SLOW, + .p2_fast = G4X_P2_SDVO_FAST + }, + }, + { /* INTEL_LIMIT_G4X_HDMI_DAC */ + .dot = { .min = G4X_DOT_HDMI_DAC_MIN, .max = G4X_DOT_HDMI_DAC_MAX }, + .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX}, + .n = { .min = G4X_N_HDMI_DAC_MIN, .max = G4X_N_HDMI_DAC_MAX }, + .m = { .min = G4X_M_HDMI_DAC_MIN, .max = G4X_M_HDMI_DAC_MAX }, + .m1 = { .min = G4X_M1_HDMI_DAC_MIN, .max = G4X_M1_HDMI_DAC_MAX }, + .m2 = { .min = G4X_M2_HDMI_DAC_MIN, .max = G4X_M2_HDMI_DAC_MAX }, + .p = { .min = G4X_P_HDMI_DAC_MIN, .max = G4X_P_HDMI_DAC_MAX }, + .p1 = { .min = G4X_P1_HDMI_DAC_MIN, .max = G4X_P1_HDMI_DAC_MAX}, + .p2 = { .dot_limit = G4X_P2_HDMI_DAC_LIMIT, + .p2_slow = G4X_P2_HDMI_DAC_SLOW, + .p2_fast = G4X_P2_HDMI_DAC_FAST + }, + }, + { /* INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS */ + .dot = { .min = G4X_DOT_SINGLE_CHANNEL_LVDS_MIN, + .max = G4X_DOT_SINGLE_CHANNEL_LVDS_MAX }, + .vco = { .min = G4X_VCO_MIN, + .max = G4X_VCO_MAX }, + .n = { .min = G4X_N_SINGLE_CHANNEL_LVDS_MIN, + .max = G4X_N_SINGLE_CHANNEL_LVDS_MAX }, + .m = { .min = G4X_M_SINGLE_CHANNEL_LVDS_MIN, + .max = G4X_M_SINGLE_CHANNEL_LVDS_MAX }, + .m1 = { .min = G4X_M1_SINGLE_CHANNEL_LVDS_MIN, + .max = G4X_M1_SINGLE_CHANNEL_LVDS_MAX }, + .m2 = { .min = G4X_M2_SINGLE_CHANNEL_LVDS_MIN, + .max = G4X_M2_SINGLE_CHANNEL_LVDS_MAX }, + .p = { .min = G4X_P_SINGLE_CHANNEL_LVDS_MIN, + .max = G4X_P_SINGLE_CHANNEL_LVDS_MAX }, + .p1 = { .min = G4X_P1_SINGLE_CHANNEL_LVDS_MIN, + .max = G4X_P1_SINGLE_CHANNEL_LVDS_MAX }, + .p2 = { .dot_limit = G4X_P2_SINGLE_CHANNEL_LVDS_LIMIT, + .p2_slow = G4X_P2_SINGLE_CHANNEL_LVDS_SLOW, + .p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST + }, + }, + { /* INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS */ + .dot = { .min = G4X_DOT_DUAL_CHANNEL_LVDS_MIN, + .max = G4X_DOT_DUAL_CHANNEL_LVDS_MAX }, + .vco = { .min = G4X_VCO_MIN, + .max = G4X_VCO_MAX }, + .n = { .min = G4X_N_DUAL_CHANNEL_LVDS_MIN, + .max = G4X_N_DUAL_CHANNEL_LVDS_MAX }, + .m = { .min = G4X_M_DUAL_CHANNEL_LVDS_MIN, + .max = G4X_M_DUAL_CHANNEL_LVDS_MAX }, + .m1 = { .min = G4X_M1_DUAL_CHANNEL_LVDS_MIN, + .max = G4X_M1_DUAL_CHANNEL_LVDS_MAX }, + .m2 = { .min = G4X_M2_DUAL_CHANNEL_LVDS_MIN, + .max = G4X_M2_DUAL_CHANNEL_LVDS_MAX }, + .p = { .min = G4X_P_DUAL_CHANNEL_LVDS_MIN, + .max = G4X_P_DUAL_CHANNEL_LVDS_MAX }, + .p1 = { .min = G4X_P1_DUAL_CHANNEL_LVDS_MIN, + .max = G4X_P1_DUAL_CHANNEL_LVDS_MAX }, + .p2 = { .dot_limit = G4X_P2_DUAL_CHANNEL_LVDS_LIMIT, + .p2_slow = G4X_P2_DUAL_CHANNEL_LVDS_SLOW, + .p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST + }, + }, }; +static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + const intel_limit_t 
*limit; + + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { + if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == + LVDS_CLKB_POWER_UP) + /* LVDS with dual channel */ + limit = &intel_limits + [INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS]; + else + /* LVDS with dual channel */ + limit = &intel_limits + [INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS]; + } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) || + intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) { + limit = &intel_limits[INTEL_LIMIT_G4X_HDMI_DAC]; + } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) { + limit = &intel_limits[INTEL_LIMIT_G4X_SDVO]; + } else /* The option is for other outputs */ + limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC]; + + return limit; +} + static const intel_limit_t *intel_limit(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; const intel_limit_t *limit; - if (IS_I9XX(dev)) { + if (IS_G4X(dev)) { + limit = intel_g4x_limit(crtc); + } else if (IS_I9XX(dev)) { if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) limit = &intel_limits[INTEL_LIMIT_I9XX_LVDS]; else -- cgit v1.2.3 From d490609321828c62e8dfa6220f0acd82e5cb3756 Mon Sep 17 00:00:00 2001 From: Ma Ling Date: Wed, 18 Mar 2009 20:13:27 +0800 Subject: drm/i915: Use a different PLL timing search function on G4X. This improves the PLL timings according to the suggestion of the hardware engineers. This results in some outputs being able to sync that weren't able to before. This is part of fixing fd.o bug #17508. Signed-off-by: Ma Ling [anholt: cleaned up a couple of redundant comments] Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/intel_display.c | 100 ++++++++++++++++++++++++++++++----- 1 file changed, 88 insertions(+), 12 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 89f7af0bdb1..8e29545273b 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -56,11 +56,13 @@ typedef struct { } intel_p2_t; #define INTEL_P2_NUM 2 - -typedef struct { +typedef struct intel_limit intel_limit_t; +struct intel_limit { intel_range_t dot, vco, n, m, m1, m2, p, p1; intel_p2_t p2; -} intel_limit_t; + bool (* find_pll)(const intel_limit_t *, struct drm_crtc *, + int, int, intel_clock_t *); +}; #define I8XX_DOT_MIN 25000 #define I8XX_DOT_MAX 350000 @@ -198,6 +200,12 @@ typedef struct { #define G4X_P2_DUAL_CHANNEL_LVDS_FAST 7 #define G4X_P2_DUAL_CHANNEL_LVDS_LIMIT 0 +static bool +intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, + int target, int refclk, intel_clock_t *best_clock); +static bool +intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, + int target, int refclk, intel_clock_t *best_clock); static const intel_limit_t intel_limits[] = { { /* INTEL_LIMIT_I8XX_DVO_DAC */ @@ -211,6 +219,7 @@ static const intel_limit_t intel_limits[] = { .p1 = { .min = I8XX_P1_MIN, .max = I8XX_P1_MAX }, .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST }, + .find_pll = intel_find_best_PLL, }, { /* INTEL_LIMIT_I8XX_LVDS */ .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX }, @@ -223,6 +232,7 @@ static const intel_limit_t intel_limits[] = { .p1 = { .min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX }, .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST }, + .find_pll = intel_find_best_PLL, }, { /* INTEL_LIMIT_I9XX_SDVO_DAC */ .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, @@ -235,6 +245,7 @@ static const intel_limit_t 
intel_limits[] = { .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, + .find_pll = intel_find_best_PLL, }, { /* INTEL_LIMIT_I9XX_LVDS */ .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, @@ -250,6 +261,7 @@ static const intel_limit_t intel_limits[] = { */ .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST }, + .find_pll = intel_find_best_PLL, }, /* below parameter and function is for G4X Chipset Family*/ { /* INTEL_LIMIT_G4X_SDVO */ @@ -265,6 +277,7 @@ static const intel_limit_t intel_limits[] = { .p2_slow = G4X_P2_SDVO_SLOW, .p2_fast = G4X_P2_SDVO_FAST }, + .find_pll = intel_g4x_find_best_PLL, }, { /* INTEL_LIMIT_G4X_HDMI_DAC */ .dot = { .min = G4X_DOT_HDMI_DAC_MIN, .max = G4X_DOT_HDMI_DAC_MAX }, @@ -279,6 +292,7 @@ static const intel_limit_t intel_limits[] = { .p2_slow = G4X_P2_HDMI_DAC_SLOW, .p2_fast = G4X_P2_HDMI_DAC_FAST }, + .find_pll = intel_g4x_find_best_PLL, }, { /* INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS */ .dot = { .min = G4X_DOT_SINGLE_CHANNEL_LVDS_MIN, @@ -301,6 +315,7 @@ static const intel_limit_t intel_limits[] = { .p2_slow = G4X_P2_SINGLE_CHANNEL_LVDS_SLOW, .p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST }, + .find_pll = intel_g4x_find_best_PLL, }, { /* INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS */ .dot = { .min = G4X_DOT_DUAL_CHANNEL_LVDS_MIN, @@ -323,6 +338,7 @@ static const intel_limit_t intel_limits[] = { .p2_slow = G4X_P2_DUAL_CHANNEL_LVDS_SLOW, .p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST }, + .find_pll = intel_g4x_find_best_PLL, }, }; @@ -437,18 +453,14 @@ static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock) return true; } -/** - * Returns a set of divisors for the desired target clock with the given - * refclk, or FALSE. The returned values represent the clock equation: - * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 
- */ -static bool intel_find_best_PLL(struct drm_crtc *crtc, int target, - int refclk, intel_clock_t *best_clock) +static bool +intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, + int target, int refclk, intel_clock_t *best_clock) + { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; intel_clock_t clock; - const intel_limit_t *limit = intel_limit(crtc); int err = target; if (IS_I9XX(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && @@ -500,6 +512,63 @@ static bool intel_find_best_PLL(struct drm_crtc *crtc, int target, return (err != target); } +static bool +intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, + int target, int refclk, intel_clock_t *best_clock) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + intel_clock_t clock; + int max_n; + bool found; + /* approximately equals target * 0.00488 */ + int err_most = (target >> 8) + (target >> 10); + found = false; + + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { + if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == + LVDS_CLKB_POWER_UP) + clock.p2 = limit->p2.p2_fast; + else + clock.p2 = limit->p2.p2_slow; + } else { + if (target < limit->p2.dot_limit) + clock.p2 = limit->p2.p2_slow; + else + clock.p2 = limit->p2.p2_fast; + } + + memset(best_clock, 0, sizeof(*best_clock)); + max_n = limit->n.max; + /* based on hardware requriment prefer smaller n to precision */ + for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) { + /* based on hardware requirment prefere larger m1,m2, p1 */ + for (clock.m1 = limit->m1.max; + clock.m1 >= limit->m1.min; clock.m1--) { + for (clock.m2 = limit->m2.max; + clock.m2 >= limit->m2.min; clock.m2--) { + for (clock.p1 = limit->p1.max; + clock.p1 >= limit->p1.min; clock.p1--) { + int this_err; + + intel_clock(refclk, &clock); + if (!intel_PLL_is_valid(crtc, &clock)) + continue; + this_err = abs(clock.dot - target) ; + if (this_err < err_most) { + *best_clock = clock; + err_most = this_err; + max_n = clock.n; + found = true; + } + } + } + } + } + + return found; +} + void intel_wait_for_vblank(struct drm_device *dev) { @@ -918,6 +987,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, bool is_crt = false, is_lvds = false, is_tv = false; struct drm_mode_config *mode_config = &dev->mode_config; struct drm_connector *connector; + const intel_limit_t *limit; int ret; drm_vblank_pre_modeset(dev, pipe); @@ -961,7 +1031,13 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, refclk = 48000; } - ok = intel_find_best_PLL(crtc, adjusted_mode->clock, refclk, &clock); + /* + * Returns a set of divisors for the desired target clock with the given + * refclk, or FALSE. The returned values represent the clock equation: + * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. + */ + limit = intel_limit(crtc); + ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock); if (!ok) { DRM_ERROR("Couldn't find PLL settings for mode!\n"); return -EINVAL; -- cgit v1.2.3 From 13520b051e8888dd3af9bda639d83e7df76613d1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kristian=20H=C3=B8gsberg?= Date: Fri, 13 Mar 2009 15:42:14 -0400 Subject: drm/i915: Read the right SDVO register when detecting SVDO/HDMI. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This fixes incorrect detection of the second SDVO/HDMI output on G4X, and extra boot time on pre-G4X. 
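Annotation (not part of the original commit): condensed from the diff below, the probe logic this patch arrives at reads as follows; register and helper names are the driver's own, only the diff markers are stripped:

	if (I915_READ(SDVOB) & SDVO_DETECTED) {
		found = intel_sdvo_init(dev, SDVOB);
		if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
			intel_hdmi_init(dev, SDVOB);
	}

	/* Only G4X gives SDVOC its own detect bit; before G4X the
	 * SDVOB bit has to stand in for it. */
	reg = IS_G4X(dev) ? SDVOC : SDVOB;

	if (I915_READ(reg) & SDVO_DETECTED) {
		found = intel_sdvo_init(dev, SDVOC);
		if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
			intel_hdmi_init(dev, SDVOC);
	}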
Signed-off-by: Kristian Høgsberg Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/intel_display.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 8e29545273b..0d40b4b6979 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1735,13 +1735,21 @@ static void intel_setup_outputs(struct drm_device *dev) if (IS_I9XX(dev)) { int found; + u32 reg; if (I915_READ(SDVOB) & SDVO_DETECTED) { found = intel_sdvo_init(dev, SDVOB); if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) intel_hdmi_init(dev, SDVOB); } - if (!IS_G4X(dev) || (I915_READ(SDVOB) & SDVO_DETECTED)) { + + /* Before G4X SDVOC doesn't have its own detect register */ + if (IS_G4X(dev)) + reg = SDVOC; + else + reg = SDVOB; + + if (I915_READ(reg) & SDVO_DETECTED) { found = intel_sdvo_init(dev, SDVOC); if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) intel_hdmi_init(dev, SDVOC); -- cgit v1.2.3 From 3de09aa3b38910d366f4710ffdf430c9d387d1a3 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Mon, 9 Mar 2009 09:42:23 -0700 Subject: drm/i915: Fix lock order reversal in GTT pwrite path. Since the pagefault path determines that the lock order we use has to be mmap_sem -> struct_mutex, we can't allow page faults to occur while the struct_mutex is held. To fix this in pwrite, we first try optimistically to see if we can copy from user without faulting. If it fails, fall back to using get_user_pages to pin the user's memory, and map those pages atomically when copying it to the GPU. Signed-off-by: Eric Anholt Reviewed-by: Jesse Barnes --- drivers/gpu/drm/i915/i915_gem.c | 166 +++++++++++++++++++++++++++++++++------- 1 file changed, 139 insertions(+), 27 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 37427e4016c..35f8c7bd0d3 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -223,29 +223,34 @@ fast_user_write(struct io_mapping *mapping, */ static inline int -slow_user_write(struct io_mapping *mapping, - loff_t page_base, int page_offset, - char __user *user_data, - int length) +slow_kernel_write(struct io_mapping *mapping, + loff_t gtt_base, int gtt_offset, + struct page *user_page, int user_offset, + int length) { - char __iomem *vaddr; + char *src_vaddr, *dst_vaddr; unsigned long unwritten; - vaddr = io_mapping_map_wc(mapping, page_base); - if (vaddr == NULL) - return -EFAULT; - unwritten = __copy_from_user(vaddr + page_offset, - user_data, length); - io_mapping_unmap(vaddr); + dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base); + src_vaddr = kmap_atomic(user_page, KM_USER1); + unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset, + src_vaddr + user_offset, + length); + kunmap_atomic(src_vaddr, KM_USER1); + io_mapping_unmap_atomic(dst_vaddr); if (unwritten) return -EFAULT; return 0; } +/** + * This is the fast pwrite path, where we copy the data directly from the + * user into the GTT, uncached. 
+ */ static int -i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj, - struct drm_i915_gem_pwrite *args, - struct drm_file *file_priv) +i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, + struct drm_i915_gem_pwrite *args, + struct drm_file *file_priv) { struct drm_i915_gem_object *obj_priv = obj->driver_private; drm_i915_private_t *dev_priv = dev->dev_private; @@ -273,7 +278,6 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj, obj_priv = obj->driver_private; offset = obj_priv->gtt_offset + args->offset; - obj_priv->dirty = 1; while (remain > 0) { /* Operation in this page @@ -292,16 +296,11 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj, page_offset, user_data, page_length); /* If we get a fault while copying data, then (presumably) our - * source page isn't available. In this case, use the - * non-atomic function + * source page isn't available. Return the error and we'll + * retry in the slow path. */ - if (ret) { - ret = slow_user_write (dev_priv->mm.gtt_mapping, - page_base, page_offset, - user_data, page_length); - if (ret) - goto fail; - } + if (ret) + goto fail; remain -= page_length; user_data += page_length; @@ -315,6 +314,115 @@ fail: return ret; } +/** + * This is the fallback GTT pwrite path, which uses get_user_pages to pin + * the memory and maps it using kmap_atomic for copying. + * + * This code resulted in x11perf -rgb10text consuming about 10% more CPU + * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit). + */ +static int +i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, + struct drm_i915_gem_pwrite *args, + struct drm_file *file_priv) +{ + struct drm_i915_gem_object *obj_priv = obj->driver_private; + drm_i915_private_t *dev_priv = dev->dev_private; + ssize_t remain; + loff_t gtt_page_base, offset; + loff_t first_data_page, last_data_page, num_pages; + loff_t pinned_pages, i; + struct page **user_pages; + struct mm_struct *mm = current->mm; + int gtt_page_offset, data_page_offset, data_page_index, page_length; + int ret; + uint64_t data_ptr = args->data_ptr; + + remain = args->size; + + /* Pin the user pages containing the data. We can't fault while + * holding the struct mutex, and all of the pwrite implementations + * want to hold it while dereferencing the user data. + */ + first_data_page = data_ptr / PAGE_SIZE; + last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE; + num_pages = last_data_page - first_data_page + 1; + + user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL); + if (user_pages == NULL) + return -ENOMEM; + + down_read(&mm->mmap_sem); + pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr, + num_pages, 0, 0, user_pages, NULL); + up_read(&mm->mmap_sem); + if (pinned_pages < num_pages) { + ret = -EFAULT; + goto out_unpin_pages; + } + + mutex_lock(&dev->struct_mutex); + ret = i915_gem_object_pin(obj, 0); + if (ret) + goto out_unlock; + + ret = i915_gem_object_set_to_gtt_domain(obj, 1); + if (ret) + goto out_unpin_object; + + obj_priv = obj->driver_private; + offset = obj_priv->gtt_offset + args->offset; + + while (remain > 0) { + /* Operation in this page + * + * gtt_page_base = page offset within aperture + * gtt_page_offset = offset within page in aperture + * data_page_index = page number in get_user_pages return + * data_page_offset = offset with data_page_index page. 
+ * page_length = bytes to copy for this page + */ + gtt_page_base = offset & PAGE_MASK; + gtt_page_offset = offset & ~PAGE_MASK; + data_page_index = data_ptr / PAGE_SIZE - first_data_page; + data_page_offset = data_ptr & ~PAGE_MASK; + + page_length = remain; + if ((gtt_page_offset + page_length) > PAGE_SIZE) + page_length = PAGE_SIZE - gtt_page_offset; + if ((data_page_offset + page_length) > PAGE_SIZE) + page_length = PAGE_SIZE - data_page_offset; + + ret = slow_kernel_write(dev_priv->mm.gtt_mapping, + gtt_page_base, gtt_page_offset, + user_pages[data_page_index], + data_page_offset, + page_length); + + /* If we get a fault while copying data, then (presumably) our + * source page isn't available. Return the error and we'll + * retry in the slow path. + */ + if (ret) + goto out_unpin_object; + + remain -= page_length; + offset += page_length; + data_ptr += page_length; + } + +out_unpin_object: + i915_gem_object_unpin(obj); +out_unlock: + mutex_unlock(&dev->struct_mutex); +out_unpin_pages: + for (i = 0; i < pinned_pages; i++) + page_cache_release(user_pages[i]); + kfree(user_pages); + + return ret; +} + static int i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj, struct drm_i915_gem_pwrite *args, @@ -388,9 +496,13 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, if (obj_priv->phys_obj) ret = i915_gem_phys_pwrite(dev, obj, args, file_priv); else if (obj_priv->tiling_mode == I915_TILING_NONE && - dev->gtt_total != 0) - ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv); - else + dev->gtt_total != 0) { + ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv); + if (ret == -EFAULT) { + ret = i915_gem_gtt_pwrite_slow(dev, obj, args, + file_priv); + } + } else ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv); #if WATCH_PWRITE -- cgit v1.2.3 From 856fa1988ea483fc2dab84a16681dcfde821b740 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Thu, 19 Mar 2009 14:10:50 -0700 Subject: drm/i915: Make GEM object's page lists refcounted instead of get/free. We've wanted this for a few consumers that touch the pages directly (such as the following commit), which have been doing the refcounting outside of get/put pages. Signed-off-by: Eric Anholt Reviewed-by: Jesse Barnes --- drivers/gpu/drm/i915/i915_drv.h | 3 +- drivers/gpu/drm/i915/i915_gem.c | 70 +++++++++++++++++++++-------------------- 2 files changed, 38 insertions(+), 35 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index d6cc9861e0a..75e33844146 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -404,7 +404,8 @@ struct drm_i915_gem_object { /** AGP memory structure for our GTT binding. */ DRM_AGP_MEM *agp_mem; - struct page **page_list; + struct page **pages; + int pages_refcount; /** * Current offset of the object in GTT space. 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 35f8c7bd0d3..b998d659fd9 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -43,8 +43,8 @@ static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, uint64_t offset, uint64_t size); static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj); -static int i915_gem_object_get_page_list(struct drm_gem_object *obj); -static void i915_gem_object_free_page_list(struct drm_gem_object *obj); +static int i915_gem_object_get_pages(struct drm_gem_object *obj); +static void i915_gem_object_put_pages(struct drm_gem_object *obj); static int i915_gem_object_wait_rendering(struct drm_gem_object *obj); static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment); @@ -928,29 +928,30 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, } static void -i915_gem_object_free_page_list(struct drm_gem_object *obj) +i915_gem_object_put_pages(struct drm_gem_object *obj) { struct drm_i915_gem_object *obj_priv = obj->driver_private; int page_count = obj->size / PAGE_SIZE; int i; - if (obj_priv->page_list == NULL) - return; + BUG_ON(obj_priv->pages_refcount == 0); + if (--obj_priv->pages_refcount != 0) + return; for (i = 0; i < page_count; i++) - if (obj_priv->page_list[i] != NULL) { + if (obj_priv->pages[i] != NULL) { if (obj_priv->dirty) - set_page_dirty(obj_priv->page_list[i]); - mark_page_accessed(obj_priv->page_list[i]); - page_cache_release(obj_priv->page_list[i]); + set_page_dirty(obj_priv->pages[i]); + mark_page_accessed(obj_priv->pages[i]); + page_cache_release(obj_priv->pages[i]); } obj_priv->dirty = 0; - drm_free(obj_priv->page_list, + drm_free(obj_priv->pages, page_count * sizeof(struct page *), DRM_MEM_DRIVER); - obj_priv->page_list = NULL; + obj_priv->pages = NULL; } static void @@ -1402,7 +1403,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj) if (obj_priv->fence_reg != I915_FENCE_REG_NONE) i915_gem_clear_fence_reg(obj); - i915_gem_object_free_page_list(obj); + i915_gem_object_put_pages(obj); if (obj_priv->gtt_space) { atomic_dec(&dev->gtt_count); @@ -1521,7 +1522,7 @@ i915_gem_evict_everything(struct drm_device *dev) } static int -i915_gem_object_get_page_list(struct drm_gem_object *obj) +i915_gem_object_get_pages(struct drm_gem_object *obj) { struct drm_i915_gem_object *obj_priv = obj->driver_private; int page_count, i; @@ -1530,18 +1531,19 @@ i915_gem_object_get_page_list(struct drm_gem_object *obj) struct page *page; int ret; - if (obj_priv->page_list) + if (obj_priv->pages_refcount++ != 0) return 0; /* Get the list of pages out of our struct file. They'll be pinned * at this point until we release them. 
*/ page_count = obj->size / PAGE_SIZE; - BUG_ON(obj_priv->page_list != NULL); - obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *), - DRM_MEM_DRIVER); - if (obj_priv->page_list == NULL) { + BUG_ON(obj_priv->pages != NULL); + obj_priv->pages = drm_calloc(page_count, sizeof(struct page *), + DRM_MEM_DRIVER); + if (obj_priv->pages == NULL) { DRM_ERROR("Faled to allocate page list\n"); + obj_priv->pages_refcount--; return -ENOMEM; } @@ -1552,10 +1554,10 @@ i915_gem_object_get_page_list(struct drm_gem_object *obj) if (IS_ERR(page)) { ret = PTR_ERR(page); DRM_ERROR("read_mapping_page failed: %d\n", ret); - i915_gem_object_free_page_list(obj); + i915_gem_object_put_pages(obj); return ret; } - obj_priv->page_list[i] = page; + obj_priv->pages[i] = page; } return 0; } @@ -1878,7 +1880,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) DRM_INFO("Binding object of size %d at 0x%08x\n", obj->size, obj_priv->gtt_offset); #endif - ret = i915_gem_object_get_page_list(obj); + ret = i915_gem_object_get_pages(obj); if (ret) { drm_mm_put_block(obj_priv->gtt_space); obj_priv->gtt_space = NULL; @@ -1890,12 +1892,12 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) * into the GTT. */ obj_priv->agp_mem = drm_agp_bind_pages(dev, - obj_priv->page_list, + obj_priv->pages, page_count, obj_priv->gtt_offset, obj_priv->agp_type); if (obj_priv->agp_mem == NULL) { - i915_gem_object_free_page_list(obj); + i915_gem_object_put_pages(obj); drm_mm_put_block(obj_priv->gtt_space); obj_priv->gtt_space = NULL; return -ENOMEM; @@ -1922,10 +1924,10 @@ i915_gem_clflush_object(struct drm_gem_object *obj) * to GPU, and we can ignore the cache flush because it'll happen * again at bind time. */ - if (obj_priv->page_list == NULL) + if (obj_priv->pages == NULL) return; - drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE); + drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE); } /** Flushes any GPU write domain for the object if it's dirty. 
*/ @@ -2270,7 +2272,7 @@ i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj) for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) { if (obj_priv->page_cpu_valid[i]) continue; - drm_clflush_pages(obj_priv->page_list + i, 1); + drm_clflush_pages(obj_priv->pages + i, 1); } drm_agp_chipset_flush(dev); } @@ -2336,7 +2338,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, if (obj_priv->page_cpu_valid[i]) continue; - drm_clflush_pages(obj_priv->page_list + i, 1); + drm_clflush_pages(obj_priv->pages + i, 1); obj_priv->page_cpu_valid[i] = 1; } @@ -3304,7 +3306,7 @@ i915_gem_init_hws(struct drm_device *dev) dev_priv->status_gfx_addr = obj_priv->gtt_offset; - dev_priv->hw_status_page = kmap(obj_priv->page_list[0]); + dev_priv->hw_status_page = kmap(obj_priv->pages[0]); if (dev_priv->hw_status_page == NULL) { DRM_ERROR("Failed to map status page.\n"); memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); @@ -3334,7 +3336,7 @@ i915_gem_cleanup_hws(struct drm_device *dev) obj = dev_priv->hws_obj; obj_priv = obj->driver_private; - kunmap(obj_priv->page_list[0]); + kunmap(obj_priv->pages[0]); i915_gem_object_unpin(obj); drm_gem_object_unreference(obj); dev_priv->hws_obj = NULL; @@ -3637,20 +3639,20 @@ void i915_gem_detach_phys_object(struct drm_device *dev, if (!obj_priv->phys_obj) return; - ret = i915_gem_object_get_page_list(obj); + ret = i915_gem_object_get_pages(obj); if (ret) goto out; page_count = obj->size / PAGE_SIZE; for (i = 0; i < page_count; i++) { - char *dst = kmap_atomic(obj_priv->page_list[i], KM_USER0); + char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0); char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE); memcpy(dst, src, PAGE_SIZE); kunmap_atomic(dst, KM_USER0); } - drm_clflush_pages(obj_priv->page_list, page_count); + drm_clflush_pages(obj_priv->pages, page_count); drm_agp_chipset_flush(dev); out: obj_priv->phys_obj->cur_obj = NULL; @@ -3693,7 +3695,7 @@ i915_gem_attach_phys_object(struct drm_device *dev, obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1]; obj_priv->phys_obj->cur_obj = obj; - ret = i915_gem_object_get_page_list(obj); + ret = i915_gem_object_get_pages(obj); if (ret) { DRM_ERROR("failed to get page list\n"); goto out; @@ -3702,7 +3704,7 @@ i915_gem_attach_phys_object(struct drm_device *dev, page_count = obj->size / PAGE_SIZE; for (i = 0; i < page_count; i++) { - char *src = kmap_atomic(obj_priv->page_list[i], KM_USER0); + char *src = kmap_atomic(obj_priv->pages[i], KM_USER0); char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE); memcpy(dst, src, PAGE_SIZE); -- cgit v1.2.3 From 40123c1f8dd920dcff7a42cde5b351d7d0b0422e Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Mon, 9 Mar 2009 13:42:30 -0700 Subject: drm/i915: Fix lock order reversal in shmem pwrite path. Like the GTT pwrite path fix, this uses an optimistic path and a fallback to get_user_pages. Note that this means we have to stop using vfs_write and roll it ourselves. 
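Annotation (not part of the original commit): the dispatch that results from this change (near the end of the diff below) has the same two-step shape as the GTT pwrite fix earlier in the series — try an atomic, non-faulting copy under struct_mutex, and only fall back to pinning the user pages when that copy faults:

	ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
	if (ret == -EFAULT) {
		/* The fast path faulted.  Pin the source pages with
		 * get_user_pages() before taking struct_mutex, then copy
		 * page by page via kmap_atomic(). */
		ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
	}

The mmap_sem -> struct_mutex lock order is preserved because the only faulting step, get_user_pages(), runs before struct_mutex is taken.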
Signed-off-by: Eric Anholt Reviewed-by: Jesse Barnes --- drivers/gpu/drm/i915/i915_gem.c | 225 ++++++++++++++++++++++++++++++++++++---- 1 file changed, 205 insertions(+), 20 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index b998d659fd9..bdc7326052d 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -136,6 +136,33 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data, return 0; } +static inline int +slow_shmem_copy(struct page *dst_page, + int dst_offset, + struct page *src_page, + int src_offset, + int length) +{ + char *dst_vaddr, *src_vaddr; + + dst_vaddr = kmap_atomic(dst_page, KM_USER0); + if (dst_vaddr == NULL) + return -ENOMEM; + + src_vaddr = kmap_atomic(src_page, KM_USER1); + if (src_vaddr == NULL) { + kunmap_atomic(dst_vaddr, KM_USER0); + return -ENOMEM; + } + + memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length); + + kunmap_atomic(src_vaddr, KM_USER1); + kunmap_atomic(dst_vaddr, KM_USER0); + + return 0; +} + /** * Reads data from the object referenced by handle. * @@ -243,6 +270,23 @@ slow_kernel_write(struct io_mapping *mapping, return 0; } +static inline int +fast_shmem_write(struct page **pages, + loff_t page_base, int page_offset, + char __user *data, + int length) +{ + char __iomem *vaddr; + + vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0); + if (vaddr == NULL) + return -ENOMEM; + __copy_from_user_inatomic(vaddr + page_offset, data, length); + kunmap_atomic(vaddr, KM_USER0); + + return 0; +} + /** * This is the fast pwrite path, where we copy the data directly from the * user into the GTT, uncached. @@ -423,39 +467,175 @@ out_unpin_pages: return ret; } +/** + * This is the fast shmem pwrite path, which attempts to directly + * copy_from_user into the kmapped pages backing the object. 
+ */ static int -i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj, - struct drm_i915_gem_pwrite *args, - struct drm_file *file_priv) +i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, + struct drm_i915_gem_pwrite *args, + struct drm_file *file_priv) { + struct drm_i915_gem_object *obj_priv = obj->driver_private; + ssize_t remain; + loff_t offset, page_base; + char __user *user_data; + int page_offset, page_length; int ret; - loff_t offset; - ssize_t written; + + user_data = (char __user *) (uintptr_t) args->data_ptr; + remain = args->size; mutex_lock(&dev->struct_mutex); + ret = i915_gem_object_get_pages(obj); + if (ret != 0) + goto fail_unlock; + ret = i915_gem_object_set_to_cpu_domain(obj, 1); - if (ret) { - mutex_unlock(&dev->struct_mutex); - return ret; + if (ret != 0) + goto fail_put_pages; + + obj_priv = obj->driver_private; + offset = args->offset; + obj_priv->dirty = 1; + + while (remain > 0) { + /* Operation in this page + * + * page_base = page offset within aperture + * page_offset = offset within page + * page_length = bytes to copy for this page + */ + page_base = (offset & ~(PAGE_SIZE-1)); + page_offset = offset & (PAGE_SIZE-1); + page_length = remain; + if ((page_offset + remain) > PAGE_SIZE) + page_length = PAGE_SIZE - page_offset; + + ret = fast_shmem_write(obj_priv->pages, + page_base, page_offset, + user_data, page_length); + if (ret) + goto fail_put_pages; + + remain -= page_length; + user_data += page_length; + offset += page_length; } +fail_put_pages: + i915_gem_object_put_pages(obj); +fail_unlock: + mutex_unlock(&dev->struct_mutex); + + return ret; +} + +/** + * This is the fallback shmem pwrite path, which uses get_user_pages to pin + * the memory and maps it using kmap_atomic for copying. + * + * This avoids taking mmap_sem for faulting on the user's address while the + * struct_mutex is held. + */ +static int +i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, + struct drm_i915_gem_pwrite *args, + struct drm_file *file_priv) +{ + struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct mm_struct *mm = current->mm; + struct page **user_pages; + ssize_t remain; + loff_t offset, pinned_pages, i; + loff_t first_data_page, last_data_page, num_pages; + int shmem_page_index, shmem_page_offset; + int data_page_index, data_page_offset; + int page_length; + int ret; + uint64_t data_ptr = args->data_ptr; + + remain = args->size; + + /* Pin the user pages containing the data. We can't fault while + * holding the struct mutex, and all of the pwrite implementations + * want to hold it while dereferencing the user data. 
+ */ + first_data_page = data_ptr / PAGE_SIZE; + last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE; + num_pages = last_data_page - first_data_page + 1; + + user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL); + if (user_pages == NULL) + return -ENOMEM; + + down_read(&mm->mmap_sem); + pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr, + num_pages, 0, 0, user_pages, NULL); + up_read(&mm->mmap_sem); + if (pinned_pages < num_pages) { + ret = -EFAULT; + goto fail_put_user_pages; + } + + mutex_lock(&dev->struct_mutex); + + ret = i915_gem_object_get_pages(obj); + if (ret != 0) + goto fail_unlock; + + ret = i915_gem_object_set_to_cpu_domain(obj, 1); + if (ret != 0) + goto fail_put_pages; + + obj_priv = obj->driver_private; offset = args->offset; + obj_priv->dirty = 1; - written = vfs_write(obj->filp, - (char __user *)(uintptr_t) args->data_ptr, - args->size, &offset); - if (written != args->size) { - mutex_unlock(&dev->struct_mutex); - if (written < 0) - return written; - else - return -EINVAL; + while (remain > 0) { + /* Operation in this page + * + * shmem_page_index = page number within shmem file + * shmem_page_offset = offset within page in shmem file + * data_page_index = page number in get_user_pages return + * data_page_offset = offset with data_page_index page. + * page_length = bytes to copy for this page + */ + shmem_page_index = offset / PAGE_SIZE; + shmem_page_offset = offset & ~PAGE_MASK; + data_page_index = data_ptr / PAGE_SIZE - first_data_page; + data_page_offset = data_ptr & ~PAGE_MASK; + + page_length = remain; + if ((shmem_page_offset + page_length) > PAGE_SIZE) + page_length = PAGE_SIZE - shmem_page_offset; + if ((data_page_offset + page_length) > PAGE_SIZE) + page_length = PAGE_SIZE - data_page_offset; + + ret = slow_shmem_copy(obj_priv->pages[shmem_page_index], + shmem_page_offset, + user_pages[data_page_index], + data_page_offset, + page_length); + if (ret) + goto fail_put_pages; + + remain -= page_length; + data_ptr += page_length; + offset += page_length; } +fail_put_pages: + i915_gem_object_put_pages(obj); +fail_unlock: mutex_unlock(&dev->struct_mutex); +fail_put_user_pages: + for (i = 0; i < pinned_pages; i++) + page_cache_release(user_pages[i]); + kfree(user_pages); - return 0; + return ret; } /** @@ -502,8 +682,13 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file_priv); } - } else - ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv); + } else { + ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv); + if (ret == -EFAULT) { + ret = i915_gem_shmem_pwrite_slow(dev, obj, args, + file_priv); + } + } #if WATCH_PWRITE if (ret) -- cgit v1.2.3 From eb01459fbbccb4ca0b879cbfc97e33ac6eabf975 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Tue, 10 Mar 2009 11:44:52 -0700 Subject: drm/i915: Fix lock order reversal in shmem pread path. 
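Annotation (not part of the original commit): the read side gets the same fast/slow split as the pwrite fix above. One detail specific to reads, visible in the slow path's cleanup below: the kernel has written the object's contents into the pinned user pages, so they are marked dirty before being released:

	for (i = 0; i < pinned_pages; i++) {
		SetPageDirty(user_pages[i]);	/* kernel wrote into these pages */
		page_cache_release(user_pages[i]);
	}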
Signed-off-by: Eric Anholt Reviewed-by: Jesse Barnes --- drivers/gpu/drm/i915/i915_gem.c | 221 +++++++++++++++++++++++++++++++++++----- 1 file changed, 195 insertions(+), 26 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index bdc7326052d..010af908bdb 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -136,6 +136,24 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data, return 0; } +static inline int +fast_shmem_read(struct page **pages, + loff_t page_base, int page_offset, + char __user *data, + int length) +{ + char __iomem *vaddr; + int ret; + + vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0); + if (vaddr == NULL) + return -ENOMEM; + ret = __copy_to_user_inatomic(data, vaddr + page_offset, length); + kunmap_atomic(vaddr, KM_USER0); + + return ret; +} + static inline int slow_shmem_copy(struct page *dst_page, int dst_offset, @@ -163,6 +181,179 @@ slow_shmem_copy(struct page *dst_page, return 0; } +/** + * This is the fast shmem pread path, which attempts to copy_from_user directly + * from the backing pages of the object to the user's address space. On a + * fault, it fails so we can fall back to i915_gem_shmem_pwrite_slow(). + */ +static int +i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, + struct drm_i915_gem_pread *args, + struct drm_file *file_priv) +{ + struct drm_i915_gem_object *obj_priv = obj->driver_private; + ssize_t remain; + loff_t offset, page_base; + char __user *user_data; + int page_offset, page_length; + int ret; + + user_data = (char __user *) (uintptr_t) args->data_ptr; + remain = args->size; + + mutex_lock(&dev->struct_mutex); + + ret = i915_gem_object_get_pages(obj); + if (ret != 0) + goto fail_unlock; + + ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset, + args->size); + if (ret != 0) + goto fail_put_pages; + + obj_priv = obj->driver_private; + offset = args->offset; + + while (remain > 0) { + /* Operation in this page + * + * page_base = page offset within aperture + * page_offset = offset within page + * page_length = bytes to copy for this page + */ + page_base = (offset & ~(PAGE_SIZE-1)); + page_offset = offset & (PAGE_SIZE-1); + page_length = remain; + if ((page_offset + remain) > PAGE_SIZE) + page_length = PAGE_SIZE - page_offset; + + ret = fast_shmem_read(obj_priv->pages, + page_base, page_offset, + user_data, page_length); + if (ret) + goto fail_put_pages; + + remain -= page_length; + user_data += page_length; + offset += page_length; + } + +fail_put_pages: + i915_gem_object_put_pages(obj); +fail_unlock: + mutex_unlock(&dev->struct_mutex); + + return ret; +} + +/** + * This is the fallback shmem pread path, which allocates temporary storage + * in kernel space to copy_to_user into outside of the struct_mutex, so we + * can copy out of the object's backing pages while holding the struct mutex + * and not take page faults. 
+ */ +static int +i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, + struct drm_i915_gem_pread *args, + struct drm_file *file_priv) +{ + struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct mm_struct *mm = current->mm; + struct page **user_pages; + ssize_t remain; + loff_t offset, pinned_pages, i; + loff_t first_data_page, last_data_page, num_pages; + int shmem_page_index, shmem_page_offset; + int data_page_index, data_page_offset; + int page_length; + int ret; + uint64_t data_ptr = args->data_ptr; + + remain = args->size; + + /* Pin the user pages containing the data. We can't fault while + * holding the struct mutex, yet we want to hold it while + * dereferencing the user data. + */ + first_data_page = data_ptr / PAGE_SIZE; + last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE; + num_pages = last_data_page - first_data_page + 1; + + user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL); + if (user_pages == NULL) + return -ENOMEM; + + down_read(&mm->mmap_sem); + pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr, + num_pages, 0, 0, user_pages, NULL); + up_read(&mm->mmap_sem); + if (pinned_pages < num_pages) { + ret = -EFAULT; + goto fail_put_user_pages; + } + + mutex_lock(&dev->struct_mutex); + + ret = i915_gem_object_get_pages(obj); + if (ret != 0) + goto fail_unlock; + + ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset, + args->size); + if (ret != 0) + goto fail_put_pages; + + obj_priv = obj->driver_private; + offset = args->offset; + + while (remain > 0) { + /* Operation in this page + * + * shmem_page_index = page number within shmem file + * shmem_page_offset = offset within page in shmem file + * data_page_index = page number in get_user_pages return + * data_page_offset = offset with data_page_index page. + * page_length = bytes to copy for this page + */ + shmem_page_index = offset / PAGE_SIZE; + shmem_page_offset = offset & ~PAGE_MASK; + data_page_index = data_ptr / PAGE_SIZE - first_data_page; + data_page_offset = data_ptr & ~PAGE_MASK; + + page_length = remain; + if ((shmem_page_offset + page_length) > PAGE_SIZE) + page_length = PAGE_SIZE - shmem_page_offset; + if ((data_page_offset + page_length) > PAGE_SIZE) + page_length = PAGE_SIZE - data_page_offset; + + ret = slow_shmem_copy(user_pages[data_page_index], + data_page_offset, + obj_priv->pages[shmem_page_index], + shmem_page_offset, + page_length); + if (ret) + goto fail_put_pages; + + remain -= page_length; + data_ptr += page_length; + offset += page_length; + } + +fail_put_pages: + i915_gem_object_put_pages(obj); +fail_unlock: + mutex_unlock(&dev->struct_mutex); +fail_put_user_pages: + for (i = 0; i < pinned_pages; i++) { + SetPageDirty(user_pages[i]); + page_cache_release(user_pages[i]); + } + kfree(user_pages); + + return ret; +} + /** * Reads data from the object referenced by handle. 
* @@ -175,8 +366,6 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, struct drm_i915_gem_pread *args = data; struct drm_gem_object *obj; struct drm_i915_gem_object *obj_priv; - ssize_t read; - loff_t offset; int ret; obj = drm_gem_object_lookup(dev, file_priv, args->handle); @@ -194,33 +383,13 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, return -EINVAL; } - mutex_lock(&dev->struct_mutex); - - ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset, - args->size); - if (ret != 0) { - drm_gem_object_unreference(obj); - mutex_unlock(&dev->struct_mutex); - return ret; - } - - offset = args->offset; - - read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr, - args->size, &offset); - if (read != args->size) { - drm_gem_object_unreference(obj); - mutex_unlock(&dev->struct_mutex); - if (read < 0) - return read; - else - return -EINVAL; - } + ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv); + if (ret != 0) + ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv); drm_gem_object_unreference(obj); - mutex_unlock(&dev->struct_mutex); - return 0; + return ret; } /* This is the fast write path which cannot handle -- cgit v1.2.3 From 201361a54ed187d8595a283e3a4ddb213bc8323b Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Wed, 11 Mar 2009 12:30:04 -0700 Subject: drm/i915: Fix lock order reversal with cliprects and cmdbuf in non-DRI2 paths. This introduces allocation in the batch submission path that wasn't there previously, but these are compatibility paths so we care about simplicity more than performance. kernel.org bug #12419. Signed-off-by: Eric Anholt Reviewed-by: Keith Packard Acked-by: Jesse Barnes --- drivers/gpu/drm/i915/i915_dma.c | 107 +++++++++++++++++++++++++++------------- drivers/gpu/drm/i915/i915_drv.h | 2 +- drivers/gpu/drm/i915/i915_gem.c | 27 ++++++++-- 3 files changed, 97 insertions(+), 39 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 6d21b9e48b8..ae83fe0ab37 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -356,7 +356,7 @@ static int validate_cmd(int cmd) return ret; } -static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwords) +static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords) { drm_i915_private_t *dev_priv = dev->dev_private; int i; @@ -370,8 +370,7 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwor for (i = 0; i < dwords;) { int cmd, sz; - if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd))) - return -EINVAL; + cmd = buffer[i]; if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords) return -EINVAL; @@ -379,11 +378,7 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwor OUT_RING(cmd); while (++i, --sz) { - if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], - sizeof(cmd))) { - return -EINVAL; - } - OUT_RING(cmd); + OUT_RING(buffer[i]); } } @@ -397,17 +392,13 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwor int i915_emit_box(struct drm_device *dev, - struct drm_clip_rect __user *boxes, + struct drm_clip_rect *boxes, int i, int DR1, int DR4) { drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_clip_rect box; + struct drm_clip_rect box = boxes[i]; RING_LOCALS; - if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) { - return -EFAULT; - } - if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) { DRM_ERROR("Bad box 
%d,%d..%d,%d\n", box.x1, box.y1, box.x2, box.y2); @@ -460,7 +451,9 @@ static void i915_emit_breadcrumb(struct drm_device *dev) } static int i915_dispatch_cmdbuffer(struct drm_device * dev, - drm_i915_cmdbuffer_t * cmd) + drm_i915_cmdbuffer_t *cmd, + struct drm_clip_rect *cliprects, + void *cmdbuf) { int nbox = cmd->num_cliprects; int i = 0, count, ret; @@ -476,13 +469,13 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev, for (i = 0; i < count; i++) { if (i < nbox) { - ret = i915_emit_box(dev, cmd->cliprects, i, + ret = i915_emit_box(dev, cliprects, i, cmd->DR1, cmd->DR4); if (ret) return ret; } - ret = i915_emit_cmds(dev, (int __user *)cmd->buf, cmd->sz / 4); + ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4); if (ret) return ret; } @@ -492,10 +485,10 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev, } static int i915_dispatch_batchbuffer(struct drm_device * dev, - drm_i915_batchbuffer_t * batch) + drm_i915_batchbuffer_t * batch, + struct drm_clip_rect *cliprects) { drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_clip_rect __user *boxes = batch->cliprects; int nbox = batch->num_cliprects; int i = 0, count; RING_LOCALS; @@ -511,7 +504,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev, for (i = 0; i < count; i++) { if (i < nbox) { - int ret = i915_emit_box(dev, boxes, i, + int ret = i915_emit_box(dev, cliprects, i, batch->DR1, batch->DR4); if (ret) return ret; @@ -626,6 +619,7 @@ static int i915_batchbuffer(struct drm_device *dev, void *data, master_priv->sarea_priv; drm_i915_batchbuffer_t *batch = data; int ret; + struct drm_clip_rect *cliprects = NULL; if (!dev_priv->allow_batchbuffer) { DRM_ERROR("Batchbuffer ioctl disabled\n"); @@ -637,17 +631,35 @@ static int i915_batchbuffer(struct drm_device *dev, void *data, RING_LOCK_TEST_WITH_RETURN(dev, file_priv); - if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects, - batch->num_cliprects * - sizeof(struct drm_clip_rect))) - return -EFAULT; + if (batch->num_cliprects < 0) + return -EINVAL; + + if (batch->num_cliprects) { + cliprects = drm_calloc(batch->num_cliprects, + sizeof(struct drm_clip_rect), + DRM_MEM_DRIVER); + if (cliprects == NULL) + return -ENOMEM; + + ret = copy_from_user(cliprects, batch->cliprects, + batch->num_cliprects * + sizeof(struct drm_clip_rect)); + if (ret != 0) + goto fail_free; + } mutex_lock(&dev->struct_mutex); - ret = i915_dispatch_batchbuffer(dev, batch); + ret = i915_dispatch_batchbuffer(dev, batch, cliprects); mutex_unlock(&dev->struct_mutex); if (sarea_priv) sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); + +fail_free: + drm_free(cliprects, + batch->num_cliprects * sizeof(struct drm_clip_rect), + DRM_MEM_DRIVER); + return ret; } @@ -659,6 +671,8 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data, drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) master_priv->sarea_priv; drm_i915_cmdbuffer_t *cmdbuf = data; + struct drm_clip_rect *cliprects = NULL; + void *batch_data; int ret; DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n", @@ -666,25 +680,50 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data, RING_LOCK_TEST_WITH_RETURN(dev, file_priv); - if (cmdbuf->num_cliprects && - DRM_VERIFYAREA_READ(cmdbuf->cliprects, - cmdbuf->num_cliprects * - sizeof(struct drm_clip_rect))) { - DRM_ERROR("Fault accessing cliprects\n"); - return -EFAULT; + if (cmdbuf->num_cliprects < 0) + return -EINVAL; + + batch_data = drm_alloc(cmdbuf->sz, DRM_MEM_DRIVER); + if (batch_data == NULL) + return -ENOMEM; + + ret = 
copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz); + if (ret != 0) + goto fail_batch_free; + + if (cmdbuf->num_cliprects) { + cliprects = drm_calloc(cmdbuf->num_cliprects, + sizeof(struct drm_clip_rect), + DRM_MEM_DRIVER); + if (cliprects == NULL) + goto fail_batch_free; + + ret = copy_from_user(cliprects, cmdbuf->cliprects, + cmdbuf->num_cliprects * + sizeof(struct drm_clip_rect)); + if (ret != 0) + goto fail_clip_free; } mutex_lock(&dev->struct_mutex); - ret = i915_dispatch_cmdbuffer(dev, cmdbuf); + ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data); mutex_unlock(&dev->struct_mutex); if (ret) { DRM_ERROR("i915_dispatch_cmdbuffer failed\n"); - return ret; + goto fail_batch_free; } if (sarea_priv) sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); - return 0; + +fail_batch_free: + drm_free(batch_data, cmdbuf->sz, DRM_MEM_DRIVER); +fail_clip_free: + drm_free(cliprects, + cmdbuf->num_cliprects * sizeof(struct drm_clip_rect), + DRM_MEM_DRIVER); + + return ret; } static int i915_flip_bufs(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 75e33844146..2c02ce6b2b9 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -520,7 +520,7 @@ extern int i915_driver_device_is_agp(struct drm_device * dev); extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); extern int i915_emit_box(struct drm_device *dev, - struct drm_clip_rect __user *boxes, + struct drm_clip_rect *boxes, int i, int DR1, int DR4); /* i915_irq.c */ diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 010af908bdb..2bda15197cc 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2891,11 +2891,10 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, static int i915_dispatch_gem_execbuffer(struct drm_device *dev, struct drm_i915_gem_execbuffer *exec, + struct drm_clip_rect *cliprects, uint64_t exec_offset) { drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *) - (uintptr_t) exec->cliprects_ptr; int nbox = exec->num_cliprects; int i = 0, count; uint32_t exec_start, exec_len; @@ -2916,7 +2915,7 @@ i915_dispatch_gem_execbuffer(struct drm_device *dev, for (i = 0; i < count; i++) { if (i < nbox) { - int ret = i915_emit_box(dev, boxes, i, + int ret = i915_emit_box(dev, cliprects, i, exec->DR1, exec->DR4); if (ret) return ret; @@ -2983,6 +2982,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, struct drm_gem_object **object_list = NULL; struct drm_gem_object *batch_obj; struct drm_i915_gem_object *obj_priv; + struct drm_clip_rect *cliprects = NULL; int ret, i, pinned = 0; uint64_t exec_offset; uint32_t seqno, flush_domains; @@ -3019,6 +3019,23 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, goto pre_mutex_err; } + if (args->num_cliprects != 0) { + cliprects = drm_calloc(args->num_cliprects, sizeof(*cliprects), + DRM_MEM_DRIVER); + if (cliprects == NULL) + goto pre_mutex_err; + + ret = copy_from_user(cliprects, + (struct drm_clip_rect __user *) + (uintptr_t) args->cliprects_ptr, + sizeof(*cliprects) * args->num_cliprects); + if (ret != 0) { + DRM_ERROR("copy %d cliprects failed: %d\n", + args->num_cliprects, ret); + goto pre_mutex_err; + } + } + mutex_lock(&dev->struct_mutex); i915_verify_inactive(dev, __FILE__, __LINE__); @@ -3155,7 +3172,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, #endif /* Exec the batchbuffer */ - 
ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset); + ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset); if (ret) { DRM_ERROR("dispatch failed %d\n", ret); goto err; @@ -3224,6 +3241,8 @@ pre_mutex_err: DRM_MEM_DRIVER); drm_free(exec_list, sizeof(*exec_list) * args->buffer_count, DRM_MEM_DRIVER); + drm_free(cliprects, sizeof(*cliprects) * args->num_cliprects, + DRM_MEM_DRIVER); return ret; } -- cgit v1.2.3 From 40a5f0decdf050785ebd62b36ad48c869ee4b384 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Thu, 12 Mar 2009 11:23:52 -0700 Subject: drm/i915: Fix lock order reversal in GEM relocation entry copying. Signed-off-by: Eric Anholt Reviewed-by: Keith Packard --- drivers/gpu/drm/i915/i915_gem.c | 187 ++++++++++++++++++++++++++++------------ 1 file changed, 133 insertions(+), 54 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 2bda15197cc..f135c903305 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2713,12 +2713,11 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, static int i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, struct drm_file *file_priv, - struct drm_i915_gem_exec_object *entry) + struct drm_i915_gem_exec_object *entry, + struct drm_i915_gem_relocation_entry *relocs) { struct drm_device *dev = obj->dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_relocation_entry reloc; - struct drm_i915_gem_relocation_entry __user *relocs; struct drm_i915_gem_object *obj_priv = obj->driver_private; int i, ret; void __iomem *reloc_page; @@ -2730,25 +2729,18 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, entry->offset = obj_priv->gtt_offset; - relocs = (struct drm_i915_gem_relocation_entry __user *) - (uintptr_t) entry->relocs_ptr; /* Apply the relocations, using the GTT aperture to avoid cache * flushing requirements. 
*/ for (i = 0; i < entry->relocation_count; i++) { + struct drm_i915_gem_relocation_entry *reloc= &relocs[i]; struct drm_gem_object *target_obj; struct drm_i915_gem_object *target_obj_priv; uint32_t reloc_val, reloc_offset; uint32_t __iomem *reloc_entry; - ret = copy_from_user(&reloc, relocs + i, sizeof(reloc)); - if (ret != 0) { - i915_gem_object_unpin(obj); - return ret; - } - target_obj = drm_gem_object_lookup(obj->dev, file_priv, - reloc.target_handle); + reloc->target_handle); if (target_obj == NULL) { i915_gem_object_unpin(obj); return -EBADF; @@ -2760,53 +2752,53 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, */ if (target_obj_priv->gtt_space == NULL) { DRM_ERROR("No GTT space found for object %d\n", - reloc.target_handle); + reloc->target_handle); drm_gem_object_unreference(target_obj); i915_gem_object_unpin(obj); return -EINVAL; } - if (reloc.offset > obj->size - 4) { + if (reloc->offset > obj->size - 4) { DRM_ERROR("Relocation beyond object bounds: " "obj %p target %d offset %d size %d.\n", - obj, reloc.target_handle, - (int) reloc.offset, (int) obj->size); + obj, reloc->target_handle, + (int) reloc->offset, (int) obj->size); drm_gem_object_unreference(target_obj); i915_gem_object_unpin(obj); return -EINVAL; } - if (reloc.offset & 3) { + if (reloc->offset & 3) { DRM_ERROR("Relocation not 4-byte aligned: " "obj %p target %d offset %d.\n", - obj, reloc.target_handle, - (int) reloc.offset); + obj, reloc->target_handle, + (int) reloc->offset); drm_gem_object_unreference(target_obj); i915_gem_object_unpin(obj); return -EINVAL; } - if (reloc.write_domain & I915_GEM_DOMAIN_CPU || - reloc.read_domains & I915_GEM_DOMAIN_CPU) { + if (reloc->write_domain & I915_GEM_DOMAIN_CPU || + reloc->read_domains & I915_GEM_DOMAIN_CPU) { DRM_ERROR("reloc with read/write CPU domains: " "obj %p target %d offset %d " "read %08x write %08x", - obj, reloc.target_handle, - (int) reloc.offset, - reloc.read_domains, - reloc.write_domain); + obj, reloc->target_handle, + (int) reloc->offset, + reloc->read_domains, + reloc->write_domain); drm_gem_object_unreference(target_obj); i915_gem_object_unpin(obj); return -EINVAL; } - if (reloc.write_domain && target_obj->pending_write_domain && - reloc.write_domain != target_obj->pending_write_domain) { + if (reloc->write_domain && target_obj->pending_write_domain && + reloc->write_domain != target_obj->pending_write_domain) { DRM_ERROR("Write domain conflict: " "obj %p target %d offset %d " "new %08x old %08x\n", - obj, reloc.target_handle, - (int) reloc.offset, - reloc.write_domain, + obj, reloc->target_handle, + (int) reloc->offset, + reloc->write_domain, target_obj->pending_write_domain); drm_gem_object_unreference(target_obj); i915_gem_object_unpin(obj); @@ -2819,22 +2811,22 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, "presumed %08x delta %08x\n", __func__, obj, - (int) reloc.offset, - (int) reloc.target_handle, - (int) reloc.read_domains, - (int) reloc.write_domain, + (int) reloc->offset, + (int) reloc->target_handle, + (int) reloc->read_domains, + (int) reloc->write_domain, (int) target_obj_priv->gtt_offset, - (int) reloc.presumed_offset, - reloc.delta); + (int) reloc->presumed_offset, + reloc->delta); #endif - target_obj->pending_read_domains |= reloc.read_domains; - target_obj->pending_write_domain |= reloc.write_domain; + target_obj->pending_read_domains |= reloc->read_domains; + target_obj->pending_write_domain |= reloc->write_domain; /* If the relocation already has the right value in it, no * more work needs to be done. 
*/ - if (target_obj_priv->gtt_offset == reloc.presumed_offset) { + if (target_obj_priv->gtt_offset == reloc->presumed_offset) { drm_gem_object_unreference(target_obj); continue; } @@ -2849,32 +2841,26 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, /* Map the page containing the relocation we're going to * perform. */ - reloc_offset = obj_priv->gtt_offset + reloc.offset; + reloc_offset = obj_priv->gtt_offset + reloc->offset; reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, (reloc_offset & ~(PAGE_SIZE - 1))); reloc_entry = (uint32_t __iomem *)(reloc_page + (reloc_offset & (PAGE_SIZE - 1))); - reloc_val = target_obj_priv->gtt_offset + reloc.delta; + reloc_val = target_obj_priv->gtt_offset + reloc->delta; #if WATCH_BUF DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n", - obj, (unsigned int) reloc.offset, + obj, (unsigned int) reloc->offset, readl(reloc_entry), reloc_val); #endif writel(reloc_val, reloc_entry); io_mapping_unmap_atomic(reloc_page); - /* Write the updated presumed offset for this entry back out - * to the user. + /* The updated presumed offset for this entry will be + * copied back out to the user. */ - reloc.presumed_offset = target_obj_priv->gtt_offset; - ret = copy_to_user(relocs + i, &reloc, sizeof(reloc)); - if (ret != 0) { - drm_gem_object_unreference(target_obj); - i915_gem_object_unpin(obj); - return ret; - } + reloc->presumed_offset = target_obj_priv->gtt_offset; drm_gem_object_unreference(target_obj); } @@ -2971,6 +2957,75 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv) return ret; } +static int +i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list, + uint32_t buffer_count, + struct drm_i915_gem_relocation_entry **relocs) +{ + uint32_t reloc_count = 0, reloc_index = 0, i; + int ret; + + *relocs = NULL; + for (i = 0; i < buffer_count; i++) { + if (reloc_count + exec_list[i].relocation_count < reloc_count) + return -EINVAL; + reloc_count += exec_list[i].relocation_count; + } + + *relocs = drm_calloc(reloc_count, sizeof(**relocs), DRM_MEM_DRIVER); + if (*relocs == NULL) + return -ENOMEM; + + for (i = 0; i < buffer_count; i++) { + struct drm_i915_gem_relocation_entry __user *user_relocs; + + user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr; + + ret = copy_from_user(&(*relocs)[reloc_index], + user_relocs, + exec_list[i].relocation_count * + sizeof(**relocs)); + if (ret != 0) { + drm_free(*relocs, reloc_count * sizeof(**relocs), + DRM_MEM_DRIVER); + *relocs = NULL; + return ret; + } + + reloc_index += exec_list[i].relocation_count; + } + + return ret; +} + +static int +i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list, + uint32_t buffer_count, + struct drm_i915_gem_relocation_entry *relocs) +{ + uint32_t reloc_count = 0, i; + int ret; + + for (i = 0; i < buffer_count; i++) { + struct drm_i915_gem_relocation_entry __user *user_relocs; + + user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr; + + if (ret == 0) { + ret = copy_to_user(user_relocs, + &relocs[reloc_count], + exec_list[i].relocation_count * + sizeof(*relocs)); + } + + reloc_count += exec_list[i].relocation_count; + } + + drm_free(relocs, reloc_count * sizeof(*relocs), DRM_MEM_DRIVER); + + return ret; +} + int i915_gem_execbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv) @@ -2983,9 +3038,10 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, struct drm_gem_object *batch_obj; struct drm_i915_gem_object *obj_priv; struct drm_clip_rect *cliprects = NULL; - 
int ret, i, pinned = 0; + struct drm_i915_gem_relocation_entry *relocs; + int ret, ret2, i, pinned = 0; uint64_t exec_offset; - uint32_t seqno, flush_domains; + uint32_t seqno, flush_domains, reloc_index; int pin_tries; #if WATCH_EXEC @@ -3036,6 +3092,11 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, } } + ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count, + &relocs); + if (ret != 0) + goto pre_mutex_err; + mutex_lock(&dev->struct_mutex); i915_verify_inactive(dev, __FILE__, __LINE__); @@ -3078,15 +3139,19 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, /* Pin and relocate */ for (pin_tries = 0; ; pin_tries++) { ret = 0; + reloc_index = 0; + for (i = 0; i < args->buffer_count; i++) { object_list[i]->pending_read_domains = 0; object_list[i]->pending_write_domain = 0; ret = i915_gem_object_pin_and_relocate(object_list[i], file_priv, - &exec_list[i]); + &exec_list[i], + &relocs[reloc_index]); if (ret) break; pinned = i + 1; + reloc_index += exec_list[i].relocation_count; } /* success */ if (ret == 0) @@ -3236,6 +3301,20 @@ err: args->buffer_count, ret); } + /* Copy the updated relocations out regardless of current error + * state. Failure to update the relocs would mean that the next + * time userland calls execbuf, it would do so with presumed offset + * state that didn't match the actual object state. + */ + ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count, + relocs); + if (ret2 != 0) { + DRM_ERROR("Failed to copy relocations back out: %d\n", ret2); + + if (ret == 0) + ret = ret2; + } + pre_mutex_err: drm_free(object_list, sizeof(*object_list) * args->buffer_count, DRM_MEM_DRIVER); -- cgit v1.2.3 From 28a62277e06f93729d0340d9659153dcfbdbe16d Mon Sep 17 00:00:00 2001 From: Ben Gamari Date: Tue, 17 Feb 2009 20:08:49 -0500 Subject: drm: Convert proc files to seq_file and introduce debugfs The old mechanism to formatting proc files is extremely ugly. The seq_file API was designed specifically for cases like this and greatly simplifies the process. Also, most of the files in /proc really don't belong there. This patch introduces the infrastructure for putting these into debugfs and exposes all of the proc files in debugfs as well. 
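For readers not familiar with the seq_file idiom this series converts to: a converted info file boils down to a show() callback that prints into a struct seq_file, and all of the buf/start/offset/eof bookkeeping of the old read_proc interface disappears. A minimal sketch follows (illustrative only, not part of the patch; the "example" entry name and the counter it prints are placeholders):

    /* A seq_file-style DRM info callback: seq_printf() replaces the
     * DRM_PROC_PRINT() buffer juggling of the old proc code.
     */
    static int example_info(struct seq_file *m, void *data)
    {
            struct drm_info_node *node = (struct drm_info_node *) m->private;
            struct drm_device *dev = node->minor->dev;

            seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
            return 0;
    }

    /* Hypothetical entry; such arrays are registered through the
     * drm_debugfs_create_files()/drm_proc_create_files() helpers added below.
     */
    static struct drm_info_list example_list[] = {
            {"example", example_info, 0},
    };
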
Signed-off-by: Ben Gamari Signed-off-by: Eric Anholt --- drivers/gpu/drm/Makefile | 3 +- drivers/gpu/drm/drm_debugfs.c | 235 ++++++++++++++ drivers/gpu/drm/drm_drv.c | 12 +- drivers/gpu/drm/drm_info.c | 328 +++++++++++++++++++ drivers/gpu/drm/drm_proc.c | 721 +++++++----------------------------------- drivers/gpu/drm/drm_stub.c | 15 +- 6 files changed, 700 insertions(+), 614 deletions(-) create mode 100644 drivers/gpu/drm/drm_debugfs.c create mode 100644 drivers/gpu/drm/drm_info.c (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 30022c4a5c1..4ec5061fa58 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -10,7 +10,8 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \ drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \ drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \ drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \ - drm_crtc.o drm_crtc_helper.o drm_modes.o drm_edid.o + drm_crtc.o drm_crtc_helper.o drm_modes.o drm_edid.o \ + drm_info.o drm_debugfs.o drm-$(CONFIG_COMPAT) += drm_ioc32.o diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c new file mode 100644 index 00000000000..c77c6c6d9d2 --- /dev/null +++ b/drivers/gpu/drm/drm_debugfs.c @@ -0,0 +1,235 @@ +/** + * \file drm_debugfs.c + * debugfs support for DRM + * + * \author Ben Gamari + */ + +/* + * Created: Sun Dec 21 13:08:50 2008 by bgamari@gmail.com + * + * Copyright 2008 Ben Gamari + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include +#include +#include "drmP.h" + +#if defined(CONFIG_DEBUG_FS) + +/*************************************************** + * Initialization, etc. 
+ **************************************************/ + +static struct drm_info_list drm_debugfs_list[] = { + {"name", drm_name_info, 0}, + {"vm", drm_vm_info, 0}, + {"clients", drm_clients_info, 0}, + {"queues", drm_queues_info, 0}, + {"bufs", drm_bufs_info, 0}, + {"gem_names", drm_gem_name_info, DRIVER_GEM}, + {"gem_objects", drm_gem_object_info, DRIVER_GEM}, +#if DRM_DEBUG_CODE + {"vma", drm_vma_info, 0}, +#endif +}; +#define DRM_DEBUGFS_ENTRIES ARRAY_SIZE(drm_debugfs_list) + + +static int drm_debugfs_open(struct inode *inode, struct file *file) +{ + struct drm_info_node *node = inode->i_private; + + return single_open(file, node->info_ent->show, node); +} + + +static const struct file_operations drm_debugfs_fops = { + .owner = THIS_MODULE, + .open = drm_debugfs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + + +/** + * Initialize a given set of debugfs files for a device + * + * \param files The array of files to create + * \param count The number of files given + * \param root DRI debugfs dir entry. + * \param minor device minor number + * \return Zero on success, non-zero on failure + * + * Create a given set of debugfs files represented by an array of + * gdm_debugfs_lists in the given root directory. + */ +int drm_debugfs_create_files(struct drm_info_list *files, int count, + struct dentry *root, struct drm_minor *minor) +{ + struct drm_device *dev = minor->dev; + struct dentry *ent; + struct drm_info_node *tmp; + char name[64]; + int i, ret; + + for (i = 0; i < count; i++) { + u32 features = files[i].driver_features; + + if (features != 0 && + (dev->driver->driver_features & features) != features) + continue; + + tmp = drm_alloc(sizeof(struct drm_info_node), + _DRM_DRIVER); + ent = debugfs_create_file(files[i].name, S_IFREG | S_IRUGO, + root, tmp, &drm_debugfs_fops); + if (!ent) { + DRM_ERROR("Cannot create /debugfs/dri/%s/%s\n", + name, files[i].name); + drm_free(tmp, sizeof(struct drm_info_node), + _DRM_DRIVER); + ret = -1; + goto fail; + } + + tmp->minor = minor; + tmp->dent = ent; + tmp->info_ent = &files[i]; + list_add(&(tmp->list), &(minor->debugfs_nodes.list)); + } + return 0; + +fail: + drm_debugfs_remove_files(files, count, minor); + return ret; +} +EXPORT_SYMBOL(drm_debugfs_create_files); + +/** + * Initialize the DRI debugfs filesystem for a device + * + * \param dev DRM device + * \param minor device minor number + * \param root DRI debugfs dir entry. + * + * Create the DRI debugfs root entry "/debugfs/dri", the device debugfs root entry + * "/debugfs/dri/%minor%/", and each entry in debugfs_list as + * "/debugfs/dri/%minor%/%name%". 
+ */ +int drm_debugfs_init(struct drm_minor *minor, int minor_id, + struct dentry *root) +{ + struct drm_device *dev = minor->dev; + char name[64]; + int ret; + + INIT_LIST_HEAD(&minor->debugfs_nodes.list); + sprintf(name, "%d", minor_id); + minor->debugfs_root = debugfs_create_dir(name, root); + if (!minor->debugfs_root) { + DRM_ERROR("Cannot create /debugfs/dri/%s\n", name); + return -1; + } + + ret = drm_debugfs_create_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES, + minor->debugfs_root, minor); + if (ret) { + debugfs_remove(minor->debugfs_root); + minor->debugfs_root = NULL; + DRM_ERROR("Failed to create core drm debugfs files\n"); + return ret; + } + + if (dev->driver->debugfs_init) { + ret = dev->driver->debugfs_init(minor); + if (ret) { + DRM_ERROR("DRM: Driver failed to initialize " + "/debugfs/dri.\n"); + return ret; + } + } + return 0; +} + + +/** + * Remove a list of debugfs files + * + * \param files The list of files + * \param count The number of files + * \param minor The minor of which we should remove the files + * \return always zero. + * + * Remove all debugfs entries created by debugfs_init(). + */ +int drm_debugfs_remove_files(struct drm_info_list *files, int count, + struct drm_minor *minor) +{ + struct list_head *pos, *q; + struct drm_info_node *tmp; + int i; + + for (i = 0; i < count; i++) { + list_for_each_safe(pos, q, &minor->debugfs_nodes.list) { + tmp = list_entry(pos, struct drm_info_node, list); + if (tmp->info_ent == &files[i]) { + debugfs_remove(tmp->dent); + list_del(pos); + drm_free(tmp, sizeof(struct drm_info_node), + _DRM_DRIVER); + } + } + } + return 0; +} +EXPORT_SYMBOL(drm_debugfs_remove_files); + +/** + * Cleanup the debugfs filesystem resources. + * + * \param minor device minor number. + * \return always zero. + * + * Remove all debugfs entries created by debugfs_init(). + */ +int drm_debugfs_cleanup(struct drm_minor *minor) +{ + struct drm_device *dev = minor->dev; + + if (!minor->debugfs_root) + return 0; + + if (dev->driver->debugfs_cleanup) + dev->driver->debugfs_cleanup(minor); + + drm_debugfs_remove_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES, minor); + + debugfs_remove(minor->debugfs_root); + minor->debugfs_root = NULL; + + return 0; +} + +#endif /* CONFIG_DEBUG_FS */ + diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 14c7a23dc15..ed32edb1716 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -46,9 +46,11 @@ * OTHER DEALINGS IN THE SOFTWARE. 
*/ +#include #include "drmP.h" #include "drm_core.h" + static int drm_version(struct drm_device *dev, void *data, struct drm_file *file_priv); @@ -178,7 +180,7 @@ int drm_lastclose(struct drm_device * dev) /* Clear AGP information */ if (drm_core_has_AGP(dev) && dev->agp && - !drm_core_check_feature(dev, DRIVER_MODESET)) { + !drm_core_check_feature(dev, DRIVER_MODESET)) { struct drm_agp_mem *entry, *tempe; /* Remove AGP resources, but leave dev->agp @@ -382,6 +384,13 @@ static int __init drm_core_init(void) goto err_p3; } + drm_debugfs_root = debugfs_create_dir("dri", NULL); + if (!drm_debugfs_root) { + DRM_ERROR("Cannot create /debugfs/dri\n"); + ret = -1; + goto err_p3; + } + drm_mem_init(); DRM_INFO("Initialized %s %d.%d.%d %s\n", @@ -400,6 +409,7 @@ err_p1: static void __exit drm_core_exit(void) { remove_proc_entry("dri", NULL); + debugfs_remove(drm_debugfs_root); drm_sysfs_destroy(); unregister_chrdev(DRM_MAJOR, "drm"); diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c new file mode 100644 index 00000000000..fc98952b903 --- /dev/null +++ b/drivers/gpu/drm/drm_info.c @@ -0,0 +1,328 @@ +/** + * \file drm_info.c + * DRM info file implementations + * + * \author Ben Gamari + */ + +/* + * Created: Sun Dec 21 13:09:50 2008 by bgamari@gmail.com + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * Copyright 2008 Ben Gamari + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include +#include "drmP.h" + +/** + * Called when "/proc/dri/.../name" is read. + * + * Prints the device name together with the bus id if available. + */ +int drm_name_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_minor *minor = node->minor; + struct drm_device *dev = minor->dev; + struct drm_master *master = minor->master; + + if (!master) + return 0; + + if (master->unique) { + seq_printf(m, "%s %s %s\n", + dev->driver->pci_driver.name, + pci_name(dev->pdev), master->unique); + } else { + seq_printf(m, "%s %s\n", dev->driver->pci_driver.name, + pci_name(dev->pdev)); + } + + return 0; +} + +/** + * Called when "/proc/dri/.../vm" is read. + * + * Prints information about all mappings in drm_device::maplist. 
+ */ +int drm_vm_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct drm_map *map; + struct drm_map_list *r_list; + + /* Hardcoded from _DRM_FRAME_BUFFER, + _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and + _DRM_SCATTER_GATHER and _DRM_CONSISTENT */ + const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" }; + const char *type; + int i; + + mutex_lock(&dev->struct_mutex); + seq_printf(m, "slot offset size type flags address mtrr\n\n"); + i = 0; + list_for_each_entry(r_list, &dev->maplist, head) { + map = r_list->map; + if (!map) + continue; + if (map->type < 0 || map->type > 5) + type = "??"; + else + type = types[map->type]; + + seq_printf(m, "%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ", + i, + map->offset, + map->size, type, map->flags, + (unsigned long) r_list->user_token); + if (map->mtrr < 0) + seq_printf(m, "none\n"); + else + seq_printf(m, "%4d\n", map->mtrr); + i++; + } + mutex_unlock(&dev->struct_mutex); + return 0; +} + +/** + * Called when "/proc/dri/.../queues" is read. + */ +int drm_queues_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + int i; + struct drm_queue *q; + + mutex_lock(&dev->struct_mutex); + seq_printf(m, " ctx/flags use fin" + " blk/rw/rwf wait flushed queued" + " locks\n\n"); + for (i = 0; i < dev->queue_count; i++) { + q = dev->queuelist[i]; + atomic_inc(&q->use_count); + seq_printf(m, "%5d/0x%03x %5d %5d" + " %5d/%c%c/%c%c%c %5Zd\n", + i, + q->flags, + atomic_read(&q->use_count), + atomic_read(&q->finalization), + atomic_read(&q->block_count), + atomic_read(&q->block_read) ? 'r' : '-', + atomic_read(&q->block_write) ? 'w' : '-', + waitqueue_active(&q->read_queue) ? 'r' : '-', + waitqueue_active(&q->write_queue) ? 'w' : '-', + waitqueue_active(&q->flush_queue) ? 'f' : '-', + DRM_BUFCOUNT(&q->waitlist)); + atomic_dec(&q->use_count); + } + mutex_unlock(&dev->struct_mutex); + return 0; +} + +/** + * Called when "/proc/dri/.../bufs" is read. + */ +int drm_bufs_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct drm_device_dma *dma; + int i, seg_pages; + + mutex_lock(&dev->struct_mutex); + dma = dev->dma; + if (!dma) { + mutex_unlock(&dev->struct_mutex); + return 0; + } + + seq_printf(m, " o size count free segs pages kB\n\n"); + for (i = 0; i <= DRM_MAX_ORDER; i++) { + if (dma->bufs[i].buf_count) { + seg_pages = dma->bufs[i].seg_count * (1 << dma->bufs[i].page_order); + seq_printf(m, "%2d %8d %5d %5d %5d %5d %5ld\n", + i, + dma->bufs[i].buf_size, + dma->bufs[i].buf_count, + atomic_read(&dma->bufs[i].freelist.count), + dma->bufs[i].seg_count, + seg_pages, + seg_pages * PAGE_SIZE / 1024); + } + } + seq_printf(m, "\n"); + for (i = 0; i < dma->buf_count; i++) { + if (i && !(i % 32)) + seq_printf(m, "\n"); + seq_printf(m, " %d", dma->buflist[i]->list); + } + seq_printf(m, "\n"); + mutex_unlock(&dev->struct_mutex); + return 0; +} + +/** + * Called when "/proc/dri/.../vblank" is read. 
+ */ +int drm_vblank_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + int crtc; + + mutex_lock(&dev->struct_mutex); + for (crtc = 0; crtc < dev->num_crtcs; crtc++) { + seq_printf(m, "CRTC %d enable: %d\n", + crtc, atomic_read(&dev->vblank_refcount[crtc])); + seq_printf(m, "CRTC %d counter: %d\n", + crtc, drm_vblank_count(dev, crtc)); + seq_printf(m, "CRTC %d last wait: %d\n", + crtc, dev->last_vblank_wait[crtc]); + seq_printf(m, "CRTC %d in modeset: %d\n", + crtc, dev->vblank_inmodeset[crtc]); + } + mutex_unlock(&dev->struct_mutex); + return 0; +} + +/** + * Called when "/proc/dri/.../clients" is read. + * + */ +int drm_clients_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct drm_file *priv; + + mutex_lock(&dev->struct_mutex); + seq_printf(m, "a dev pid uid magic ioctls\n\n"); + list_for_each_entry(priv, &dev->filelist, lhead) { + seq_printf(m, "%c %3d %5d %5d %10u %10lu\n", + priv->authenticated ? 'y' : 'n', + priv->minor->index, + priv->pid, + priv->uid, priv->magic, priv->ioctl_count); + } + mutex_unlock(&dev->struct_mutex); + return 0; +} + + +int drm_gem_one_name_info(int id, void *ptr, void *data) +{ + struct drm_gem_object *obj = ptr; + struct seq_file *m = data; + + seq_printf(m, "name %d size %zd\n", obj->name, obj->size); + + seq_printf(m, "%6d %8zd %7d %8d\n", + obj->name, obj->size, + atomic_read(&obj->handlecount.refcount), + atomic_read(&obj->refcount.refcount)); + return 0; +} + +int drm_gem_name_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + + seq_printf(m, " name size handles refcount\n"); + idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, m); + return 0; +} + +int drm_gem_object_info(struct seq_file *m, void* data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + + seq_printf(m, "%d objects\n", atomic_read(&dev->object_count)); + seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory)); + seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count)); + seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory)); + seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory)); + seq_printf(m, "%d gtt total\n", dev->gtt_total); + return 0; +} + +#if DRM_DEBUG_CODE + +int drm_vma_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct drm_vma_entry *pt; + struct vm_area_struct *vma; +#if defined(__i386__) + unsigned int pgprot; +#endif + + mutex_lock(&dev->struct_mutex); + seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08lx\n", + atomic_read(&dev->vma_count), + high_memory, virt_to_phys(high_memory)); + + list_for_each_entry(pt, &dev->vmalist, head) { + vma = pt->vma; + if (!vma) + continue; + seq_printf(m, + "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000", + pt->pid, vma->vm_start, vma->vm_end, + vma->vm_flags & VM_READ ? 'r' : '-', + vma->vm_flags & VM_WRITE ? 'w' : '-', + vma->vm_flags & VM_EXEC ? 'x' : '-', + vma->vm_flags & VM_MAYSHARE ? 's' : 'p', + vma->vm_flags & VM_LOCKED ? 'l' : '-', + vma->vm_flags & VM_IO ? 
'i' : '-', + vma->vm_pgoff); + +#if defined(__i386__) + pgprot = pgprot_val(vma->vm_page_prot); + seq_printf(m, " %c%c%c%c%c%c%c%c%c", + pgprot & _PAGE_PRESENT ? 'p' : '-', + pgprot & _PAGE_RW ? 'w' : 'r', + pgprot & _PAGE_USER ? 'u' : 's', + pgprot & _PAGE_PWT ? 't' : 'b', + pgprot & _PAGE_PCD ? 'u' : 'c', + pgprot & _PAGE_ACCESSED ? 'a' : '-', + pgprot & _PAGE_DIRTY ? 'd' : '-', + pgprot & _PAGE_PSE ? 'm' : 'k', + pgprot & _PAGE_GLOBAL ? 'g' : 'l'); +#endif + seq_printf(m, "\n"); + } + mutex_unlock(&dev->struct_mutex); + return 0; +} + +#endif + diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c index 8df849f6683..9b3c5af61e9 100644 --- a/drivers/gpu/drm/drm_proc.c +++ b/drivers/gpu/drm/drm_proc.c @@ -37,697 +37,196 @@ * OTHER DEALINGS IN THE SOFTWARE. */ +#include #include "drmP.h" -static int drm_name_info(char *buf, char **start, off_t offset, - int request, int *eof, void *data); -static int drm_vm_info(char *buf, char **start, off_t offset, - int request, int *eof, void *data); -static int drm_clients_info(char *buf, char **start, off_t offset, - int request, int *eof, void *data); -static int drm_queues_info(char *buf, char **start, off_t offset, - int request, int *eof, void *data); -static int drm_bufs_info(char *buf, char **start, off_t offset, - int request, int *eof, void *data); -static int drm_vblank_info(char *buf, char **start, off_t offset, - int request, int *eof, void *data); -static int drm_gem_name_info(char *buf, char **start, off_t offset, - int request, int *eof, void *data); -static int drm_gem_object_info(char *buf, char **start, off_t offset, - int request, int *eof, void *data); -#if DRM_DEBUG_CODE -static int drm_vma_info(char *buf, char **start, off_t offset, - int request, int *eof, void *data); -#endif + +/*************************************************** + * Initialization, etc. + **************************************************/ /** * Proc file list. */ -static struct drm_proc_list { - const char *name; /**< file name */ - int (*f) (char *, char **, off_t, int, int *, void *); /**< proc callback*/ - u32 driver_features; /**< Required driver features for this entry */ -} drm_proc_list[] = { +static struct drm_info_list drm_proc_list[] = { {"name", drm_name_info, 0}, - {"mem", drm_mem_info, 0}, {"vm", drm_vm_info, 0}, {"clients", drm_clients_info, 0}, {"queues", drm_queues_info, 0}, {"bufs", drm_bufs_info, 0}, - {"vblank", drm_vblank_info, 0}, {"gem_names", drm_gem_name_info, DRIVER_GEM}, {"gem_objects", drm_gem_object_info, DRIVER_GEM}, #if DRM_DEBUG_CODE - {"vma", drm_vma_info}, + {"vma", drm_vma_info, 0}, #endif }; - #define DRM_PROC_ENTRIES ARRAY_SIZE(drm_proc_list) +static int drm_proc_open(struct inode *inode, struct file *file) +{ + struct drm_info_node* node = PDE(inode)->data; + + return single_open(file, node->info_ent->show, node); +} + +static const struct file_operations drm_proc_fops = { + .owner = THIS_MODULE, + .open = drm_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + + /** - * Initialize the DRI proc filesystem for a device. + * Initialize a given set of proc files for a device * - * \param dev DRM device. - * \param minor device minor number. + * \param files The array of files to create + * \param count The number of files given * \param root DRI proc dir entry. - * \param dev_root resulting DRI device proc dir entry. - * \return root entry pointer on success, or NULL on failure. 
+ * \param minor device minor number + * \return Zero on success, non-zero on failure * - * Create the DRI proc root entry "/proc/dri", the device proc root entry - * "/proc/dri/%minor%/", and each entry in proc_list as - * "/proc/dri/%minor%/%name%". + * Create a given set of proc files represented by an array of + * gdm_proc_lists in the given root directory. */ -int drm_proc_init(struct drm_minor *minor, int minor_id, - struct proc_dir_entry *root) +int drm_proc_create_files(struct drm_info_list *files, int count, + struct proc_dir_entry *root, struct drm_minor *minor) { struct drm_device *dev = minor->dev; struct proc_dir_entry *ent; - int i, j, ret; + struct drm_info_node *tmp; char name[64]; + int i, ret; - sprintf(name, "%d", minor_id); - minor->dev_root = proc_mkdir(name, root); - if (!minor->dev_root) { - DRM_ERROR("Cannot create /proc/dri/%s\n", name); - return -1; - } - - for (i = 0; i < DRM_PROC_ENTRIES; i++) { - u32 features = drm_proc_list[i].driver_features; + for (i = 0; i < count; i++) { + u32 features = files[i].driver_features; if (features != 0 && (dev->driver->driver_features & features) != features) continue; - ent = create_proc_entry(drm_proc_list[i].name, - S_IFREG | S_IRUGO, minor->dev_root); + tmp = drm_alloc(sizeof(struct drm_info_node), _DRM_DRIVER); + ent = create_proc_entry(files[i].name, S_IFREG | S_IRUGO, root); if (!ent) { DRM_ERROR("Cannot create /proc/dri/%s/%s\n", - name, drm_proc_list[i].name); + name, files[i].name); + drm_free(tmp, sizeof(struct drm_info_node), + _DRM_DRIVER); ret = -1; goto fail; } - ent->read_proc = drm_proc_list[i].f; - ent->data = minor; - } - if (dev->driver->proc_init) { - ret = dev->driver->proc_init(minor); - if (ret) { - DRM_ERROR("DRM: Driver failed to initialize " - "/proc/dri.\n"); - goto fail; - } + ent->proc_fops = &drm_proc_fops; + ent->data = tmp; + tmp->minor = minor; + tmp->info_ent = &files[i]; + list_add(&(tmp->list), &(minor->proc_nodes.list)); } - return 0; - fail: - for (j = 0; j < i; j++) - remove_proc_entry(drm_proc_list[i].name, - minor->dev_root); - remove_proc_entry(name, root); - minor->dev_root = NULL; +fail: + for (i = 0; i < count; i++) + remove_proc_entry(drm_proc_list[i].name, minor->proc_root); return ret; } /** - * Cleanup the proc filesystem resources. + * Initialize the DRI proc filesystem for a device * - * \param minor device minor number. + * \param dev DRM device + * \param minor device minor number * \param root DRI proc dir entry. - * \param dev_root DRI device proc dir entry. - * \return always zero. + * \param dev_root resulting DRI device proc dir entry. + * \return root entry pointer on success, or NULL on failure. * - * Remove all proc entries created by proc_init(). + * Create the DRI proc root entry "/proc/dri", the device proc root entry + * "/proc/dri/%minor%/", and each entry in proc_list as + * "/proc/dri/%minor%/%name%". */ -int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root) +int drm_proc_init(struct drm_minor *minor, int minor_id, + struct proc_dir_entry *root) { struct drm_device *dev = minor->dev; - int i; char name[64]; + int ret; - if (!root || !minor->dev_root) - return 0; - - if (dev->driver->proc_cleanup) - dev->driver->proc_cleanup(minor); - - for (i = 0; i < DRM_PROC_ENTRIES; i++) - remove_proc_entry(drm_proc_list[i].name, minor->dev_root); - sprintf(name, "%d", minor->index); - remove_proc_entry(name, root); - - return 0; -} - -/** - * Called when "/proc/dri/.../name" is read. - * - * \param buf output buffer. 
- * \param start start of output data. - * \param offset requested start offset. - * \param request requested number of bytes. - * \param eof whether there is no more data to return. - * \param data private data. - * \return number of written bytes. - * - * Prints the device name together with the bus id if available. - */ -static int drm_name_info(char *buf, char **start, off_t offset, int request, - int *eof, void *data) -{ - struct drm_minor *minor = (struct drm_minor *) data; - struct drm_master *master = minor->master; - struct drm_device *dev = minor->dev; - int len = 0; - - if (offset > DRM_PROC_LIMIT) { - *eof = 1; - return 0; + INIT_LIST_HEAD(&minor->proc_nodes.list); + sprintf(name, "%d", minor_id); + minor->proc_root = proc_mkdir(name, root); + if (!minor->proc_root) { + DRM_ERROR("Cannot create /proc/dri/%s\n", name); + return -1; } - if (!master) - return 0; - - *start = &buf[offset]; - *eof = 0; - - if (master->unique) { - DRM_PROC_PRINT("%s %s %s\n", - dev->driver->pci_driver.name, - pci_name(dev->pdev), master->unique); - } else { - DRM_PROC_PRINT("%s %s\n", dev->driver->pci_driver.name, - pci_name(dev->pdev)); + ret = drm_proc_create_files(drm_proc_list, DRM_PROC_ENTRIES, + minor->proc_root, minor); + if (ret) { + remove_proc_entry(name, root); + minor->proc_root = NULL; + DRM_ERROR("Failed to create core drm proc files\n"); + return ret; } - if (len > request + offset) - return request; - *eof = 1; - return len - offset; -} - -/** - * Called when "/proc/dri/.../vm" is read. - * - * \param buf output buffer. - * \param start start of output data. - * \param offset requested start offset. - * \param request requested number of bytes. - * \param eof whether there is no more data to return. - * \param data private data. - * \return number of written bytes. - * - * Prints information about all mappings in drm_device::maplist. - */ -static int drm__vm_info(char *buf, char **start, off_t offset, int request, - int *eof, void *data) -{ - struct drm_minor *minor = (struct drm_minor *) data; - struct drm_device *dev = minor->dev; - int len = 0; - struct drm_map *map; - struct drm_map_list *r_list; - - /* Hardcoded from _DRM_FRAME_BUFFER, - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */ - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" }; - const char *type; - int i; - - if (offset > DRM_PROC_LIMIT) { - *eof = 1; - return 0; - } - - *start = &buf[offset]; - *eof = 0; - - DRM_PROC_PRINT("slot offset size type flags " - "address mtrr\n\n"); - i = 0; - list_for_each_entry(r_list, &dev->maplist, head) { - map = r_list->map; - if (!map) - continue; - if (map->type < 0 || map->type > 5) - type = "??"; - else - type = types[map->type]; - DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ", - i, - map->offset, - map->size, type, map->flags, - (unsigned long) r_list->user_token); - if (map->mtrr < 0) { - DRM_PROC_PRINT("none\n"); - } else { - DRM_PROC_PRINT("%4d\n", map->mtrr); + if (dev->driver->proc_init) { + ret = dev->driver->proc_init(minor); + if (ret) { + DRM_ERROR("DRM: Driver failed to initialize " + "/proc/dri.\n"); + return ret; } - i++; - } - - if (len > request + offset) - return request; - *eof = 1; - return len - offset; -} - -/** - * Simply calls _vm_info() while holding the drm_device::struct_mutex lock. 
- */ -static int drm_vm_info(char *buf, char **start, off_t offset, int request, - int *eof, void *data) -{ - struct drm_minor *minor = (struct drm_minor *) data; - struct drm_device *dev = minor->dev; - int ret; - - mutex_lock(&dev->struct_mutex); - ret = drm__vm_info(buf, start, offset, request, eof, data); - mutex_unlock(&dev->struct_mutex); - return ret; -} - -/** - * Called when "/proc/dri/.../queues" is read. - * - * \param buf output buffer. - * \param start start of output data. - * \param offset requested start offset. - * \param request requested number of bytes. - * \param eof whether there is no more data to return. - * \param data private data. - * \return number of written bytes. - */ -static int drm__queues_info(char *buf, char **start, off_t offset, - int request, int *eof, void *data) -{ - struct drm_minor *minor = (struct drm_minor *) data; - struct drm_device *dev = minor->dev; - int len = 0; - int i; - struct drm_queue *q; - - if (offset > DRM_PROC_LIMIT) { - *eof = 1; - return 0; } - - *start = &buf[offset]; - *eof = 0; - - DRM_PROC_PRINT(" ctx/flags use fin" - " blk/rw/rwf wait flushed queued" - " locks\n\n"); - for (i = 0; i < dev->queue_count; i++) { - q = dev->queuelist[i]; - atomic_inc(&q->use_count); - DRM_PROC_PRINT_RET(atomic_dec(&q->use_count), - "%5d/0x%03x %5d %5d" - " %5d/%c%c/%c%c%c %5Zd\n", - i, - q->flags, - atomic_read(&q->use_count), - atomic_read(&q->finalization), - atomic_read(&q->block_count), - atomic_read(&q->block_read) ? 'r' : '-', - atomic_read(&q->block_write) ? 'w' : '-', - waitqueue_active(&q->read_queue) ? 'r' : '-', - waitqueue_active(&q-> - write_queue) ? 'w' : '-', - waitqueue_active(&q-> - flush_queue) ? 'f' : '-', - DRM_BUFCOUNT(&q->waitlist)); - atomic_dec(&q->use_count); - } - - if (len > request + offset) - return request; - *eof = 1; - return len - offset; -} - -/** - * Simply calls _queues_info() while holding the drm_device::struct_mutex lock. - */ -static int drm_queues_info(char *buf, char **start, off_t offset, int request, - int *eof, void *data) -{ - struct drm_minor *minor = (struct drm_minor *) data; - struct drm_device *dev = minor->dev; - int ret; - - mutex_lock(&dev->struct_mutex); - ret = drm__queues_info(buf, start, offset, request, eof, data); - mutex_unlock(&dev->struct_mutex); - return ret; + return 0; } -/** - * Called when "/proc/dri/.../bufs" is read. - * - * \param buf output buffer. - * \param start start of output data. - * \param offset requested start offset. - * \param request requested number of bytes. - * \param eof whether there is no more data to return. - * \param data private data. - * \return number of written bytes. 
- */ -static int drm__bufs_info(char *buf, char **start, off_t offset, int request, - int *eof, void *data) +int drm_proc_remove_files(struct drm_info_list *files, int count, + struct drm_minor *minor) { - struct drm_minor *minor = (struct drm_minor *) data; - struct drm_device *dev = minor->dev; - int len = 0; - struct drm_device_dma *dma = dev->dma; + struct list_head *pos, *q; + struct drm_info_node *tmp; int i; - if (!dma || offset > DRM_PROC_LIMIT) { - *eof = 1; - return 0; - } - - *start = &buf[offset]; - *eof = 0; - - DRM_PROC_PRINT(" o size count free segs pages kB\n\n"); - for (i = 0; i <= DRM_MAX_ORDER; i++) { - if (dma->bufs[i].buf_count) - DRM_PROC_PRINT("%2d %8d %5d %5d %5d %5d %5ld\n", - i, - dma->bufs[i].buf_size, - dma->bufs[i].buf_count, - atomic_read(&dma->bufs[i] - .freelist.count), - dma->bufs[i].seg_count, - dma->bufs[i].seg_count - * (1 << dma->bufs[i].page_order), - (dma->bufs[i].seg_count - * (1 << dma->bufs[i].page_order)) - * PAGE_SIZE / 1024); - } - DRM_PROC_PRINT("\n"); - for (i = 0; i < dma->buf_count; i++) { - if (i && !(i % 32)) - DRM_PROC_PRINT("\n"); - DRM_PROC_PRINT(" %d", dma->buflist[i]->list); + for (i = 0; i < count; i++) { + list_for_each_safe(pos, q, &minor->proc_nodes.list) { + tmp = list_entry(pos, struct drm_info_node, list); + if (tmp->info_ent == &files[i]) { + remove_proc_entry(files[i].name, + minor->proc_root); + list_del(pos); + drm_free(tmp, sizeof(struct drm_info_node), + _DRM_DRIVER); + } + } } - DRM_PROC_PRINT("\n"); - - if (len > request + offset) - return request; - *eof = 1; - return len - offset; -} - -/** - * Simply calls _bufs_info() while holding the drm_device::struct_mutex lock. - */ -static int drm_bufs_info(char *buf, char **start, off_t offset, int request, - int *eof, void *data) -{ - struct drm_minor *minor = (struct drm_minor *) data; - struct drm_device *dev = minor->dev; - int ret; - - mutex_lock(&dev->struct_mutex); - ret = drm__bufs_info(buf, start, offset, request, eof, data); - mutex_unlock(&dev->struct_mutex); - return ret; + return 0; } /** - * Called when "/proc/dri/.../vblank" is read. + * Cleanup the proc filesystem resources. * - * \param buf output buffer. - * \param start start of output data. - * \param offset requested start offset. - * \param request requested number of bytes. - * \param eof whether there is no more data to return. - * \param data private data. - * \return number of written bytes. - */ -static int drm__vblank_info(char *buf, char **start, off_t offset, int request, - int *eof, void *data) -{ - struct drm_minor *minor = (struct drm_minor *) data; - struct drm_device *dev = minor->dev; - int len = 0; - int crtc; - - if (offset > DRM_PROC_LIMIT) { - *eof = 1; - return 0; - } - - *start = &buf[offset]; - *eof = 0; - - for (crtc = 0; crtc < dev->num_crtcs; crtc++) { - DRM_PROC_PRINT("CRTC %d enable: %d\n", - crtc, atomic_read(&dev->vblank_refcount[crtc])); - DRM_PROC_PRINT("CRTC %d counter: %d\n", - crtc, drm_vblank_count(dev, crtc)); - DRM_PROC_PRINT("CRTC %d last wait: %d\n", - crtc, dev->last_vblank_wait[crtc]); - DRM_PROC_PRINT("CRTC %d in modeset: %d\n", - crtc, dev->vblank_inmodeset[crtc]); - } - - if (len > request + offset) - return request; - *eof = 1; - return len - offset; -} - -/** - * Simply calls _vblank_info() while holding the drm_device::struct_mutex lock. 
- */ -static int drm_vblank_info(char *buf, char **start, off_t offset, int request, - int *eof, void *data) -{ - struct drm_minor *minor = (struct drm_minor *) data; - struct drm_device *dev = minor->dev; - int ret; - - mutex_lock(&dev->struct_mutex); - ret = drm__vblank_info(buf, start, offset, request, eof, data); - mutex_unlock(&dev->struct_mutex); - return ret; -} - -/** - * Called when "/proc/dri/.../clients" is read. + * \param minor device minor number. + * \param root DRI proc dir entry. + * \param dev_root DRI device proc dir entry. + * \return always zero. * - * \param buf output buffer. - * \param start start of output data. - * \param offset requested start offset. - * \param request requested number of bytes. - * \param eof whether there is no more data to return. - * \param data private data. - * \return number of written bytes. + * Remove all proc entries created by proc_init(). */ -static int drm__clients_info(char *buf, char **start, off_t offset, - int request, int *eof, void *data) +int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root) { - struct drm_minor *minor = (struct drm_minor *) data; struct drm_device *dev = minor->dev; - int len = 0; - struct drm_file *priv; + char name[64]; - if (offset > DRM_PROC_LIMIT) { - *eof = 1; + if (!root || !minor->proc_root) return 0; - } - - *start = &buf[offset]; - *eof = 0; - - DRM_PROC_PRINT("a dev pid uid magic ioctls\n\n"); - list_for_each_entry(priv, &dev->filelist, lhead) { - DRM_PROC_PRINT("%c %3d %5d %5d %10u %10lu\n", - priv->authenticated ? 'y' : 'n', - priv->minor->index, - priv->pid, - priv->uid, priv->magic, priv->ioctl_count); - } - if (len > request + offset) - return request; - *eof = 1; - return len - offset; -} - -/** - * Simply calls _clients_info() while holding the drm_device::struct_mutex lock. 
- */ -static int drm_clients_info(char *buf, char **start, off_t offset, - int request, int *eof, void *data) -{ - struct drm_minor *minor = (struct drm_minor *) data; - struct drm_device *dev = minor->dev; - int ret; - - mutex_lock(&dev->struct_mutex); - ret = drm__clients_info(buf, start, offset, request, eof, data); - mutex_unlock(&dev->struct_mutex); - return ret; -} - -struct drm_gem_name_info_data { - int len; - char *buf; - int eof; -}; + if (dev->driver->proc_cleanup) + dev->driver->proc_cleanup(minor); -static int drm_gem_one_name_info(int id, void *ptr, void *data) -{ - struct drm_gem_object *obj = ptr; - struct drm_gem_name_info_data *nid = data; + drm_proc_remove_files(drm_proc_list, DRM_PROC_ENTRIES, minor); - DRM_INFO("name %d size %zd\n", obj->name, obj->size); - if (nid->eof) - return 0; + sprintf(name, "%d", minor->index); + remove_proc_entry(name, root); - nid->len += sprintf(&nid->buf[nid->len], - "%6d %8zd %7d %8d\n", - obj->name, obj->size, - atomic_read(&obj->handlecount.refcount), - atomic_read(&obj->refcount.refcount)); - if (nid->len > DRM_PROC_LIMIT) { - nid->eof = 1; - return 0; - } return 0; } -static int drm_gem_name_info(char *buf, char **start, off_t offset, - int request, int *eof, void *data) -{ - struct drm_minor *minor = (struct drm_minor *) data; - struct drm_device *dev = minor->dev; - struct drm_gem_name_info_data nid; - - if (offset > DRM_PROC_LIMIT) { - *eof = 1; - return 0; - } - - nid.len = sprintf(buf, " name size handles refcount\n"); - nid.buf = buf; - nid.eof = 0; - idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, &nid); - - *start = &buf[offset]; - *eof = 0; - if (nid.len > request + offset) - return request; - *eof = 1; - return nid.len - offset; -} - -static int drm_gem_object_info(char *buf, char **start, off_t offset, - int request, int *eof, void *data) -{ - struct drm_minor *minor = (struct drm_minor *) data; - struct drm_device *dev = minor->dev; - int len = 0; - - if (offset > DRM_PROC_LIMIT) { - *eof = 1; - return 0; - } - - *start = &buf[offset]; - *eof = 0; - DRM_PROC_PRINT("%d objects\n", atomic_read(&dev->object_count)); - DRM_PROC_PRINT("%d object bytes\n", atomic_read(&dev->object_memory)); - DRM_PROC_PRINT("%d pinned\n", atomic_read(&dev->pin_count)); - DRM_PROC_PRINT("%d pin bytes\n", atomic_read(&dev->pin_memory)); - DRM_PROC_PRINT("%d gtt bytes\n", atomic_read(&dev->gtt_memory)); - DRM_PROC_PRINT("%d gtt total\n", dev->gtt_total); - if (len > request + offset) - return request; - *eof = 1; - return len - offset; -} - -#if DRM_DEBUG_CODE - -static int drm__vma_info(char *buf, char **start, off_t offset, int request, - int *eof, void *data) -{ - struct drm_minor *minor = (struct drm_minor *) data; - struct drm_device *dev = minor->dev; - int len = 0; - struct drm_vma_entry *pt; - struct vm_area_struct *vma; -#if defined(__i386__) - unsigned int pgprot; -#endif - - if (offset > DRM_PROC_LIMIT) { - *eof = 1; - return 0; - } - - *start = &buf[offset]; - *eof = 0; - - DRM_PROC_PRINT("vma use count: %d, high_memory = %p, 0x%08lx\n", - atomic_read(&dev->vma_count), - high_memory, virt_to_phys(high_memory)); - list_for_each_entry(pt, &dev->vmalist, head) { - if (!(vma = pt->vma)) - continue; - DRM_PROC_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000", - pt->pid, - vma->vm_start, - vma->vm_end, - vma->vm_flags & VM_READ ? 'r' : '-', - vma->vm_flags & VM_WRITE ? 'w' : '-', - vma->vm_flags & VM_EXEC ? 'x' : '-', - vma->vm_flags & VM_MAYSHARE ? 's' : 'p', - vma->vm_flags & VM_LOCKED ? 'l' : '-', - vma->vm_flags & VM_IO ? 
'i' : '-', - vma->vm_pgoff); - -#if defined(__i386__) - pgprot = pgprot_val(vma->vm_page_prot); - DRM_PROC_PRINT(" %c%c%c%c%c%c%c%c%c", - pgprot & _PAGE_PRESENT ? 'p' : '-', - pgprot & _PAGE_RW ? 'w' : 'r', - pgprot & _PAGE_USER ? 'u' : 's', - pgprot & _PAGE_PWT ? 't' : 'b', - pgprot & _PAGE_PCD ? 'u' : 'c', - pgprot & _PAGE_ACCESSED ? 'a' : '-', - pgprot & _PAGE_DIRTY ? 'd' : '-', - pgprot & _PAGE_PSE ? 'm' : 'k', - pgprot & _PAGE_GLOBAL ? 'g' : 'l'); -#endif - DRM_PROC_PRINT("\n"); - } - - if (len > request + offset) - return request; - *eof = 1; - return len - offset; -} - -static int drm_vma_info(char *buf, char **start, off_t offset, int request, - int *eof, void *data) -{ - struct drm_minor *minor = (struct drm_minor *) data; - struct drm_device *dev = minor->dev; - int ret; - - mutex_lock(&dev->struct_mutex); - ret = drm__vma_info(buf, start, offset, request, eof, data); - mutex_unlock(&dev->struct_mutex); - return ret; -} -#endif diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c index 7c8b15b22bf..48f33be8fd0 100644 --- a/drivers/gpu/drm/drm_stub.c +++ b/drivers/gpu/drm/drm_stub.c @@ -50,6 +50,7 @@ struct idr drm_minors_idr; struct class *drm_class; struct proc_dir_entry *drm_proc_root; +struct dentry *drm_debugfs_root; static int drm_minor_get_id(struct drm_device *dev, int type) { @@ -313,7 +314,15 @@ static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int t goto err_mem; } } else - new_minor->dev_root = NULL; + new_minor->proc_root = NULL; + +#if defined(CONFIG_DEBUG_FS) + ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root); + if (ret) { + DRM_ERROR("DRM: Failed to initialize /debugfs/dri.\n"); + goto err_g2; + } +#endif ret = drm_sysfs_device_add(new_minor); if (ret) { @@ -451,6 +460,10 @@ int drm_put_minor(struct drm_minor **minor_p) if (minor->type == DRM_MINOR_LEGACY) drm_proc_cleanup(minor, drm_proc_root); +#if defined(CONFIG_DEBUG_FS) + drm_debugfs_cleanup(minor); +#endif + drm_sysfs_device_remove(minor); idr_remove(&drm_minors_idr, minor->index); -- cgit v1.2.3 From 2017263e9e72974610179beaa85c4498b9c4b7a4 Mon Sep 17 00:00:00 2001 From: Ben Gamari Date: Tue, 17 Feb 2009 20:08:50 -0500 Subject: drm/i915: Convert i915 proc files to seq_file and move to debugfs. 
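Under debugfs each of the old /proc read handlers becomes a plain seq_file show callback, registered through a drm_info_list table with drm_debugfs_create_files() and removed again with drm_debugfs_remove_files(); the offset/DRM_PROC_LIMIT bookkeeping of the proc interface goes away because seq_file does the buffering. A minimal entry, sketched from the pattern in the new file below ("example_info" and the list name are invented, not part of the patch):

        static int example_info(struct seq_file *m, void *data)
        {
                struct drm_info_node *node = (struct drm_info_node *) m->private;

                seq_printf(m, "minor index: %d\n", node->minor->index);
                return 0;
        }

        static struct drm_info_list example_debugfs_list[] = {
                {"example_info", example_info, 0},
        };

        /* registered with:
         * drm_debugfs_create_files(example_debugfs_list,
         *                          ARRAY_SIZE(example_debugfs_list),
         *                          minor->debugfs_root, minor);
         */
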
Signed-off-by: Ben Gamari Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/Makefile | 2 +- drivers/gpu/drm/i915/i915_drv.c | 6 +- drivers/gpu/drm/i915/i915_drv.h | 6 +- drivers/gpu/drm/i915/i915_gem_debugfs.c | 230 ++++++++++++++++++++++ drivers/gpu/drm/i915/i915_gem_proc.c | 334 -------------------------------- 5 files changed, 239 insertions(+), 339 deletions(-) create mode 100644 drivers/gpu/drm/i915/i915_gem_debugfs.c delete mode 100644 drivers/gpu/drm/i915/i915_gem_proc.c (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 793cba39d83..51c5a050aa7 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -7,7 +7,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \ i915_suspend.o \ i915_gem.o \ i915_gem_debug.o \ - i915_gem_proc.o \ + i915_gem_debugfs.o \ i915_gem_tiling.o \ intel_display.o \ intel_crt.o \ diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index b293ef0bae7..dcb91f5df6e 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -150,8 +150,10 @@ static struct drm_driver driver = { .get_reg_ofs = drm_core_get_reg_ofs, .master_create = i915_master_create, .master_destroy = i915_master_destroy, - .proc_init = i915_gem_proc_init, - .proc_cleanup = i915_gem_proc_cleanup, +#if defined(CONFIG_DEBUG_FS) + .debugfs_init = i915_gem_debugfs_init, + .debugfs_cleanup = i915_gem_debugfs_cleanup, +#endif .gem_init_object = i915_gem_init_object, .gem_free_object = i915_gem_free_object, .gem_vm_ops = &i915_gem_vm_ops, diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 2c02ce6b2b9..1c03b3e81ff 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -605,8 +605,6 @@ int i915_gem_get_tiling(struct drm_device *dev, void *data, int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); void i915_gem_load(struct drm_device *dev); -int i915_gem_proc_init(struct drm_minor *minor); -void i915_gem_proc_cleanup(struct drm_minor *minor); int i915_gem_init_object(struct drm_gem_object *obj); void i915_gem_free_object(struct drm_gem_object *obj); int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment); @@ -650,6 +648,10 @@ void i915_gem_dump_object(struct drm_gem_object *obj, int len, const char *where, uint32_t mark); void i915_dump_lru(struct drm_device *dev, const char *where); +/* i915_debugfs.c */ +int i915_gem_debugfs_init(struct drm_minor *minor); +void i915_gem_debugfs_cleanup(struct drm_minor *minor); + /* i915_suspend.c */ extern int i915_save_state(struct drm_device *dev); extern int i915_restore_state(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/i915_gem_debugfs.c b/drivers/gpu/drm/i915/i915_gem_debugfs.c new file mode 100644 index 00000000000..dd2b0edb996 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_gem_debugfs.c @@ -0,0 +1,230 @@ +/* + * Copyright © 2008 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * 
paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Eric Anholt + * Keith Packard + * + */ + +#include +#include "drmP.h" +#include "drm.h" +#include "i915_drm.h" +#include "i915_drv.h" + +#define DRM_I915_RING_DEBUG 1 + + +#if defined(CONFIG_DEBUG_FS) + +static int i915_gem_active_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj_priv; + + seq_printf(m, "Active:\n"); + list_for_each_entry(obj_priv, &dev_priv->mm.active_list, + list) + { + struct drm_gem_object *obj = obj_priv->obj; + if (obj->name) { + seq_printf(m, " %p(%d): %08x %08x %d\n", + obj, obj->name, + obj->read_domains, obj->write_domain, + obj_priv->last_rendering_seqno); + } else { + seq_printf(m, " %p: %08x %08x %d\n", + obj, + obj->read_domains, obj->write_domain, + obj_priv->last_rendering_seqno); + } + } + return 0; +} + +static int i915_gem_flushing_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj_priv; + + seq_printf(m, "Flushing:\n"); + list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, + list) + { + struct drm_gem_object *obj = obj_priv->obj; + if (obj->name) { + seq_printf(m, " %p(%d): %08x %08x %d\n", + obj, obj->name, + obj->read_domains, obj->write_domain, + obj_priv->last_rendering_seqno); + } else { + seq_printf(m, " %p: %08x %08x %d\n", obj, + obj->read_domains, obj->write_domain, + obj_priv->last_rendering_seqno); + } + } + return 0; +} + +static int i915_gem_inactive_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj_priv; + + seq_printf(m, "Inactive:\n"); + list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, + list) + { + struct drm_gem_object *obj = obj_priv->obj; + if (obj->name) { + seq_printf(m, " %p(%d): %08x %08x %d\n", + obj, obj->name, + obj->read_domains, obj->write_domain, + obj_priv->last_rendering_seqno); + } else { + seq_printf(m, " %p: %08x %08x %d\n", obj, + obj->read_domains, obj->write_domain, + obj_priv->last_rendering_seqno); + } + } + return 0; +} + +static int i915_gem_request_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_gem_request *gem_request; + + seq_printf(m, "Request:\n"); + list_for_each_entry(gem_request, &dev_priv->mm.request_list, list) { + seq_printf(m, " %d @ %d\n", + gem_request->seqno, + (int) (jiffies - gem_request->emitted_jiffies)); + } + return 0; +} + +static int 
i915_gem_seqno_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + + if (dev_priv->hw_status_page != NULL) { + seq_printf(m, "Current sequence: %d\n", + i915_get_gem_seqno(dev)); + } else { + seq_printf(m, "Current sequence: hws uninitialized\n"); + } + seq_printf(m, "Waiter sequence: %d\n", + dev_priv->mm.waiting_gem_seqno); + seq_printf(m, "IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno); + return 0; +} + + +static int i915_interrupt_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + + seq_printf(m, "Interrupt enable: %08x\n", + I915_READ(IER)); + seq_printf(m, "Interrupt identity: %08x\n", + I915_READ(IIR)); + seq_printf(m, "Interrupt mask: %08x\n", + I915_READ(IMR)); + seq_printf(m, "Pipe A stat: %08x\n", + I915_READ(PIPEASTAT)); + seq_printf(m, "Pipe B stat: %08x\n", + I915_READ(PIPEBSTAT)); + seq_printf(m, "Interrupts received: %d\n", + atomic_read(&dev_priv->irq_received)); + if (dev_priv->hw_status_page != NULL) { + seq_printf(m, "Current sequence: %d\n", + i915_get_gem_seqno(dev)); + } else { + seq_printf(m, "Current sequence: hws uninitialized\n"); + } + seq_printf(m, "Waiter sequence: %d\n", + dev_priv->mm.waiting_gem_seqno); + seq_printf(m, "IRQ sequence: %d\n", + dev_priv->mm.irq_gem_seqno); + return 0; +} + +static int i915_hws_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + int i; + volatile u32 *hws; + + hws = (volatile u32 *)dev_priv->hw_status_page; + if (hws == NULL) + return 0; + + for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) { + seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n", + i * 4, + hws[i], hws[i + 1], hws[i + 2], hws[i + 3]); + } + return 0; +} + +static struct drm_info_list i915_gem_debugfs_list[] = { + {"i915_gem_active", i915_gem_active_info, 0}, + {"i915_gem_flushing", i915_gem_flushing_info, 0}, + {"i915_gem_inactive", i915_gem_inactive_info, 0}, + {"i915_gem_request", i915_gem_request_info, 0}, + {"i915_gem_seqno", i915_gem_seqno_info, 0}, + {"i915_gem_interrupt", i915_interrupt_info, 0}, + {"i915_gem_hws", i915_hws_info, 0}, +}; +#define I915_GEM_DEBUGFS_ENTRIES ARRAY_SIZE(i915_gem_debugfs_list) + +int i915_gem_debugfs_init(struct drm_minor *minor) +{ + return drm_debugfs_create_files(i915_gem_debugfs_list, + I915_GEM_DEBUGFS_ENTRIES, + minor->debugfs_root, minor); +} + +void i915_gem_debugfs_cleanup(struct drm_minor *minor) +{ + drm_debugfs_remove_files(i915_gem_debugfs_list, + I915_GEM_DEBUGFS_ENTRIES, minor); +} + +#endif /* CONFIG_DEBUG_FS */ + diff --git a/drivers/gpu/drm/i915/i915_gem_proc.c b/drivers/gpu/drm/i915/i915_gem_proc.c deleted file mode 100644 index 4d1b9de0cd8..00000000000 --- a/drivers/gpu/drm/i915/i915_gem_proc.c +++ /dev/null @@ -1,334 +0,0 @@ -/* - * Copyright © 2008 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - 
* Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - * - * Authors: - * Eric Anholt - * Keith Packard - * - */ - -#include "drmP.h" -#include "drm.h" -#include "i915_drm.h" -#include "i915_drv.h" - -static int i915_gem_active_info(char *buf, char **start, off_t offset, - int request, int *eof, void *data) -{ - struct drm_minor *minor = (struct drm_minor *) data; - struct drm_device *dev = minor->dev; - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv; - int len = 0; - - if (offset > DRM_PROC_LIMIT) { - *eof = 1; - return 0; - } - - *start = &buf[offset]; - *eof = 0; - DRM_PROC_PRINT("Active:\n"); - list_for_each_entry(obj_priv, &dev_priv->mm.active_list, - list) - { - struct drm_gem_object *obj = obj_priv->obj; - if (obj->name) { - DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n", - obj, obj->name, - obj->read_domains, obj->write_domain, - obj_priv->last_rendering_seqno); - } else { - DRM_PROC_PRINT(" %p: %08x %08x %d\n", - obj, - obj->read_domains, obj->write_domain, - obj_priv->last_rendering_seqno); - } - } - if (len > request + offset) - return request; - *eof = 1; - return len - offset; -} - -static int i915_gem_flushing_info(char *buf, char **start, off_t offset, - int request, int *eof, void *data) -{ - struct drm_minor *minor = (struct drm_minor *) data; - struct drm_device *dev = minor->dev; - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv; - int len = 0; - - if (offset > DRM_PROC_LIMIT) { - *eof = 1; - return 0; - } - - *start = &buf[offset]; - *eof = 0; - DRM_PROC_PRINT("Flushing:\n"); - list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, - list) - { - struct drm_gem_object *obj = obj_priv->obj; - if (obj->name) { - DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n", - obj, obj->name, - obj->read_domains, obj->write_domain, - obj_priv->last_rendering_seqno); - } else { - DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj, - obj->read_domains, obj->write_domain, - obj_priv->last_rendering_seqno); - } - } - if (len > request + offset) - return request; - *eof = 1; - return len - offset; -} - -static int i915_gem_inactive_info(char *buf, char **start, off_t offset, - int request, int *eof, void *data) -{ - struct drm_minor *minor = (struct drm_minor *) data; - struct drm_device *dev = minor->dev; - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv; - int len = 0; - - if (offset > DRM_PROC_LIMIT) { - *eof = 1; - return 0; - } - - *start = &buf[offset]; - *eof = 0; - DRM_PROC_PRINT("Inactive:\n"); - list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, - list) - { - struct drm_gem_object *obj = obj_priv->obj; - if (obj->name) { - DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n", - obj, obj->name, - obj->read_domains, obj->write_domain, - obj_priv->last_rendering_seqno); - } else { - DRM_PROC_PRINT(" %p: %08x %08x 
%d\n", obj, - obj->read_domains, obj->write_domain, - obj_priv->last_rendering_seqno); - } - } - if (len > request + offset) - return request; - *eof = 1; - return len - offset; -} - -static int i915_gem_request_info(char *buf, char **start, off_t offset, - int request, int *eof, void *data) -{ - struct drm_minor *minor = (struct drm_minor *) data; - struct drm_device *dev = minor->dev; - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_request *gem_request; - int len = 0; - - if (offset > DRM_PROC_LIMIT) { - *eof = 1; - return 0; - } - - *start = &buf[offset]; - *eof = 0; - DRM_PROC_PRINT("Request:\n"); - list_for_each_entry(gem_request, &dev_priv->mm.request_list, - list) - { - DRM_PROC_PRINT(" %d @ %d\n", - gem_request->seqno, - (int) (jiffies - gem_request->emitted_jiffies)); - } - if (len > request + offset) - return request; - *eof = 1; - return len - offset; -} - -static int i915_gem_seqno_info(char *buf, char **start, off_t offset, - int request, int *eof, void *data) -{ - struct drm_minor *minor = (struct drm_minor *) data; - struct drm_device *dev = minor->dev; - drm_i915_private_t *dev_priv = dev->dev_private; - int len = 0; - - if (offset > DRM_PROC_LIMIT) { - *eof = 1; - return 0; - } - - *start = &buf[offset]; - *eof = 0; - if (dev_priv->hw_status_page != NULL) { - DRM_PROC_PRINT("Current sequence: %d\n", - i915_get_gem_seqno(dev)); - } else { - DRM_PROC_PRINT("Current sequence: hws uninitialized\n"); - } - DRM_PROC_PRINT("Waiter sequence: %d\n", - dev_priv->mm.waiting_gem_seqno); - DRM_PROC_PRINT("IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno); - if (len > request + offset) - return request; - *eof = 1; - return len - offset; -} - - -static int i915_interrupt_info(char *buf, char **start, off_t offset, - int request, int *eof, void *data) -{ - struct drm_minor *minor = (struct drm_minor *) data; - struct drm_device *dev = minor->dev; - drm_i915_private_t *dev_priv = dev->dev_private; - int len = 0; - - if (offset > DRM_PROC_LIMIT) { - *eof = 1; - return 0; - } - - *start = &buf[offset]; - *eof = 0; - DRM_PROC_PRINT("Interrupt enable: %08x\n", - I915_READ(IER)); - DRM_PROC_PRINT("Interrupt identity: %08x\n", - I915_READ(IIR)); - DRM_PROC_PRINT("Interrupt mask: %08x\n", - I915_READ(IMR)); - DRM_PROC_PRINT("Pipe A stat: %08x\n", - I915_READ(PIPEASTAT)); - DRM_PROC_PRINT("Pipe B stat: %08x\n", - I915_READ(PIPEBSTAT)); - DRM_PROC_PRINT("Interrupts received: %d\n", - atomic_read(&dev_priv->irq_received)); - if (dev_priv->hw_status_page != NULL) { - DRM_PROC_PRINT("Current sequence: %d\n", - i915_get_gem_seqno(dev)); - } else { - DRM_PROC_PRINT("Current sequence: hws uninitialized\n"); - } - DRM_PROC_PRINT("Waiter sequence: %d\n", - dev_priv->mm.waiting_gem_seqno); - DRM_PROC_PRINT("IRQ sequence: %d\n", - dev_priv->mm.irq_gem_seqno); - if (len > request + offset) - return request; - *eof = 1; - return len - offset; -} - -static int i915_hws_info(char *buf, char **start, off_t offset, - int request, int *eof, void *data) -{ - struct drm_minor *minor = (struct drm_minor *) data; - struct drm_device *dev = minor->dev; - drm_i915_private_t *dev_priv = dev->dev_private; - int len = 0, i; - volatile u32 *hws; - - if (offset > DRM_PROC_LIMIT) { - *eof = 1; - return 0; - } - - hws = (volatile u32 *)dev_priv->hw_status_page; - if (hws == NULL) { - *eof = 1; - return 0; - } - - *start = &buf[offset]; - *eof = 0; - for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) { - DRM_PROC_PRINT("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n", - i * 4, - hws[i], hws[i + 1], hws[i + 2], hws[i 
+ 3]); - } - if (len > request + offset) - return request; - *eof = 1; - return len - offset; -} - -static struct drm_proc_list { - /** file name */ - const char *name; - /** proc callback*/ - int (*f) (char *, char **, off_t, int, int *, void *); -} i915_gem_proc_list[] = { - {"i915_gem_active", i915_gem_active_info}, - {"i915_gem_flushing", i915_gem_flushing_info}, - {"i915_gem_inactive", i915_gem_inactive_info}, - {"i915_gem_request", i915_gem_request_info}, - {"i915_gem_seqno", i915_gem_seqno_info}, - {"i915_gem_interrupt", i915_interrupt_info}, - {"i915_gem_hws", i915_hws_info}, -}; - -#define I915_GEM_PROC_ENTRIES ARRAY_SIZE(i915_gem_proc_list) - -int i915_gem_proc_init(struct drm_minor *minor) -{ - struct proc_dir_entry *ent; - int i, j; - - for (i = 0; i < I915_GEM_PROC_ENTRIES; i++) { - ent = create_proc_entry(i915_gem_proc_list[i].name, - S_IFREG | S_IRUGO, minor->dev_root); - if (!ent) { - DRM_ERROR("Cannot create /proc/dri/.../%s\n", - i915_gem_proc_list[i].name); - for (j = 0; j < i; j++) - remove_proc_entry(i915_gem_proc_list[i].name, - minor->dev_root); - return -1; - } - ent->read_proc = i915_gem_proc_list[i].f; - ent->data = minor; - } - return 0; -} - -void i915_gem_proc_cleanup(struct drm_minor *minor) -{ - int i; - - if (!minor->dev_root) - return; - - for (i = 0; i < I915_GEM_PROC_ENTRIES; i++) - remove_proc_entry(i915_gem_proc_list[i].name, minor->dev_root); -} -- cgit v1.2.3 From 433e12f78b68a8069f54956edf766bb21394c197 Mon Sep 17 00:00:00 2001 From: Ben Gamari Date: Tue, 17 Feb 2009 20:08:51 -0500 Subject: drm/i915: Consolidate gem object list dumping Here we eliminate a few functions in favor of using a single function to dump from all of the object lists. Signed-Off-By: Ben Gamari Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/i915_gem_debugfs.c | 86 +++++++++++---------------------- 1 file changed, 28 insertions(+), 58 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/i915_gem_debugfs.c b/drivers/gpu/drm/i915/i915_gem_debugfs.c index dd2b0edb996..4fc845cee80 100644 --- a/drivers/gpu/drm/i915/i915_gem_debugfs.c +++ b/drivers/gpu/drm/i915/i915_gem_debugfs.c @@ -37,69 +37,38 @@ #if defined(CONFIG_DEBUG_FS) -static int i915_gem_active_info(struct seq_file *m, void *data) -{ - struct drm_info_node *node = (struct drm_info_node *) m->private; - struct drm_device *dev = node->minor->dev; - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv; - - seq_printf(m, "Active:\n"); - list_for_each_entry(obj_priv, &dev_priv->mm.active_list, - list) - { - struct drm_gem_object *obj = obj_priv->obj; - if (obj->name) { - seq_printf(m, " %p(%d): %08x %08x %d\n", - obj, obj->name, - obj->read_domains, obj->write_domain, - obj_priv->last_rendering_seqno); - } else { - seq_printf(m, " %p: %08x %08x %d\n", - obj, - obj->read_domains, obj->write_domain, - obj_priv->last_rendering_seqno); - } - } - return 0; -} +#define ACTIVE_LIST 1 +#define FLUSHING_LIST 2 +#define INACTIVE_LIST 3 -static int i915_gem_flushing_info(struct seq_file *m, void *data) +static int i915_gem_object_list_info(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *) m->private; + uintptr_t list = (uintptr_t) node->info_ent->data; + struct list_head *head; struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv; - seq_printf(m, "Flushing:\n"); - list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, - list) - { - struct drm_gem_object *obj = 
obj_priv->obj; - if (obj->name) { - seq_printf(m, " %p(%d): %08x %08x %d\n", - obj, obj->name, - obj->read_domains, obj->write_domain, - obj_priv->last_rendering_seqno); - } else { - seq_printf(m, " %p: %08x %08x %d\n", obj, - obj->read_domains, obj->write_domain, - obj_priv->last_rendering_seqno); - } + switch (list) { + case ACTIVE_LIST: + seq_printf(m, "Active:\n"); + head = &dev_priv->mm.active_list; + break; + case INACTIVE_LIST: + seq_printf(m, "Inctive:\n"); + head = &dev_priv->mm.inactive_list; + break; + case FLUSHING_LIST: + seq_printf(m, "Flushing:\n"); + head = &dev_priv->mm.flushing_list; + break; + default: + DRM_INFO("Ooops, unexpected list\n"); + return 0; } - return 0; -} -static int i915_gem_inactive_info(struct seq_file *m, void *data) -{ - struct drm_info_node *node = (struct drm_info_node *) m->private; - struct drm_device *dev = node->minor->dev; - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv; - - seq_printf(m, "Inactive:\n"); - list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, - list) + list_for_each_entry(obj_priv, head, list) { struct drm_gem_object *obj = obj_priv->obj; if (obj->name) { @@ -108,7 +77,8 @@ static int i915_gem_inactive_info(struct seq_file *m, void *data) obj->read_domains, obj->write_domain, obj_priv->last_rendering_seqno); } else { - seq_printf(m, " %p: %08x %08x %d\n", obj, + seq_printf(m, " %p: %08x %08x %d\n", + obj, obj->read_domains, obj->write_domain, obj_priv->last_rendering_seqno); } @@ -203,9 +173,9 @@ static int i915_hws_info(struct seq_file *m, void *data) } static struct drm_info_list i915_gem_debugfs_list[] = { - {"i915_gem_active", i915_gem_active_info, 0}, - {"i915_gem_flushing", i915_gem_flushing_info, 0}, - {"i915_gem_inactive", i915_gem_inactive_info, 0}, + {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, + {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST}, + {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, {"i915_gem_request", i915_gem_request_info, 0}, {"i915_gem_seqno", i915_gem_seqno_info, 0}, {"i915_gem_interrupt", i915_interrupt_info, 0}, -- cgit v1.2.3 From f4ceda89895b56e2c03dd327f13d0256838a20ab Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Tue, 17 Feb 2009 23:53:41 -0800 Subject: drm/i915: Add information on pinning and fencing to the i915 list debug. This was inspired by a patch by Chris Wilson, though none of it applied in any way due to the debugfs work and I decided to change the formatting of the new information anyway. 
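With this change each object line starts with a pin flag ('P' when user-pinned, 'p' when otherwise pinned, a space when unpinned) followed by the read domains, write domain and last rendering seqno, and the GEM name and fence register are appended in parentheses only when set. A sample line of the resulting debugfs output (pointer and values invented for illustration):

          ffff88003c2a1000: p 00000002 00000002 153 (name: 7)
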
Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/i915_gem_debugfs.c | 31 ++++++++++++++++++++----------- 1 file changed, 20 insertions(+), 11 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/i915_gem_debugfs.c b/drivers/gpu/drm/i915/i915_gem_debugfs.c index 4fc845cee80..f7e7d3750f8 100644 --- a/drivers/gpu/drm/i915/i915_gem_debugfs.c +++ b/drivers/gpu/drm/i915/i915_gem_debugfs.c @@ -70,18 +70,27 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) list_for_each_entry(obj_priv, head, list) { + char *pin_description; struct drm_gem_object *obj = obj_priv->obj; - if (obj->name) { - seq_printf(m, " %p(%d): %08x %08x %d\n", - obj, obj->name, - obj->read_domains, obj->write_domain, - obj_priv->last_rendering_seqno); - } else { - seq_printf(m, " %p: %08x %08x %d\n", - obj, - obj->read_domains, obj->write_domain, - obj_priv->last_rendering_seqno); - } + + if (obj_priv->user_pin_count > 0) + pin_description = "P"; + else if (obj_priv->pin_count > 0) + pin_description = "p"; + else + pin_description = " "; + + seq_printf(m, " %p: %s %08x %08x %d", + obj, + pin_description, + obj->read_domains, obj->write_domain, + obj_priv->last_rendering_seqno); + + if (obj->name) + seq_printf(m, " (name: %d)", obj->name); + if (obj_priv->fence_reg != I915_FENCE_REG_NONE) + seq_printf(m, " (fence: %d\n", obj_priv->fence_reg); + seq_printf(m, "\n"); } return 0; } -- cgit v1.2.3 From a6172a80ecb7ac64151960de1f709f78b509c57c Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 11 Feb 2009 14:26:38 +0000 Subject: drm/i915: Display fence register state in debugfs i915_gem_fence_regs node. Signed-off-by: Chris Wilson Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/i915_gem_debugfs.c | 66 ++++++++++++++++++++++++++++----- 1 file changed, 57 insertions(+), 9 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/i915_gem_debugfs.c b/drivers/gpu/drm/i915/i915_gem_debugfs.c index f7e7d3750f8..5a4cdb5d287 100644 --- a/drivers/gpu/drm/i915/i915_gem_debugfs.c +++ b/drivers/gpu/drm/i915/i915_gem_debugfs.c @@ -41,6 +41,26 @@ #define FLUSHING_LIST 2 #define INACTIVE_LIST 3 +static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv) +{ + if (obj_priv->user_pin_count > 0) + return "P"; + else if (obj_priv->pin_count > 0) + return "p"; + else + return " "; +} + +static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv) +{ + switch (obj_priv->tiling_mode) { + default: + case I915_TILING_NONE: return " "; + case I915_TILING_X: return "X"; + case I915_TILING_Y: return "Y"; + } +} + static int i915_gem_object_list_info(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *) m->private; @@ -70,19 +90,11 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) list_for_each_entry(obj_priv, head, list) { - char *pin_description; struct drm_gem_object *obj = obj_priv->obj; - if (obj_priv->user_pin_count > 0) - pin_description = "P"; - else if (obj_priv->pin_count > 0) - pin_description = "p"; - else - pin_description = " "; - seq_printf(m, " %p: %s %08x %08x %d", obj, - pin_description, + get_pin_flag(obj_priv), obj->read_domains, obj->write_domain, obj_priv->last_rendering_seqno); @@ -161,6 +173,41 @@ static int i915_interrupt_info(struct seq_file *m, void *data) return 0; } +static int i915_gem_fence_regs_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = 
dev->dev_private; + int i; + + seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start); + seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs); + for (i = 0; i < dev_priv->num_fence_regs; i++) { + struct drm_gem_object *obj = dev_priv->fence_regs[i].obj; + + if (obj == NULL) { + seq_printf(m, "Fenced object[%2d] = unused\n", i); + } else { + struct drm_i915_gem_object *obj_priv; + + obj_priv = obj->driver_private; + seq_printf(m, "Fenced object[%2d] = %p: %s " + "%08x %08x %08x %s %08x %08x %d", + i, obj, get_pin_flag(obj_priv), + obj_priv->gtt_offset, + obj->size, obj_priv->stride, + get_tiling_flag(obj_priv), + obj->read_domains, obj->write_domain, + obj_priv->last_rendering_seqno); + if (obj->name) + seq_printf(m, " (name: %d)", obj->name); + seq_printf(m, "\n"); + } + } + + return 0; +} + static int i915_hws_info(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *) m->private; @@ -187,6 +234,7 @@ static struct drm_info_list i915_gem_debugfs_list[] = { {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, {"i915_gem_request", i915_gem_request_info, 0}, {"i915_gem_seqno", i915_gem_seqno_info, 0}, + {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, {"i915_gem_interrupt", i915_interrupt_info, 0}, {"i915_gem_hws", i915_hws_info, 0}, }; -- cgit v1.2.3 From ad086c833d00ef3be56ec554b1061f19e87a6210 Mon Sep 17 00:00:00 2001 From: "Owain G. Ainsworth" Date: Fri, 20 Feb 2009 08:30:19 +0000 Subject: i915/drm: Remove two redundant agp_chipset_flushes agp_chipset_flush() is for flushing the intel GMCH write cache via the IFP, these two uses are for when we're getting the object into the cpu READ domain, and thus should not be needed. This confused me when I was getting my head around the code. With thanks to airlied for helping me check my mental picture of how the flushes and clflushes are supposed to be used. Signed-off-by: Owain G. Ainsworth Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/i915_gem.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index f135c903305..b52cba0f16d 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2381,7 +2381,6 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) { - struct drm_device *dev = obj->dev; int ret; i915_gem_object_flush_gpu_write_domain(obj); @@ -2400,7 +2399,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) /* Flush the CPU cache if it's still invalid. 
*/ if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) { i915_gem_clflush_object(obj); - drm_agp_chipset_flush(dev); obj->read_domains |= I915_GEM_DOMAIN_CPU; } @@ -2612,7 +2610,6 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj) { - struct drm_device *dev = obj->dev; struct drm_i915_gem_object *obj_priv = obj->driver_private; if (!obj_priv->page_cpu_valid) @@ -2628,7 +2625,6 @@ i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj) continue; drm_clflush_pages(obj_priv->pages + i, 1); } - drm_agp_chipset_flush(dev); } /* Free the page_cpu_valid mappings which are now stale, whether -- cgit v1.2.3 From 2177832f2e20fceb32142bb4fd33ae68c8af8c5a Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Mon, 23 Feb 2009 15:19:16 +0800 Subject: agp/intel: Add support for new intel chipset. This is a G33-like desktop and mobile chipset. Signed-off-by: Shaohua Li Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/i915_drv.h | 10 +++- drivers/gpu/drm/i915/i915_reg.h | 4 ++ drivers/gpu/drm/i915/intel_display.c | 113 +++++++++++++++++++++++++++++------ 3 files changed, 108 insertions(+), 19 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 1c03b3e81ff..c1685d0c704 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -787,15 +787,21 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); (dev)->pci_device == 0x2E22 || \ IS_GM45(dev)) +#define IS_IGDG(dev) ((dev)->pci_device == 0xa001) +#define IS_IGDGM(dev) ((dev)->pci_device == 0xa011) +#define IS_IGD(dev) (IS_IGDG(dev) || IS_IGDGM(dev)) + #define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \ (dev)->pci_device == 0x29B2 || \ - (dev)->pci_device == 0x29D2) + (dev)->pci_device == 0x29D2 || \ + (IS_IGD(dev))) #define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \ IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev)) #define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \ - IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev)) + IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \ + IS_IGD(dev)) #define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev)) /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 90600d89941..6d567772679 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -359,6 +359,7 @@ #define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */ #define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */ #define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */ +#define DPLL_FPA01_P1_POST_DIV_MASK_IGD 0x00ff8000 /* IGD */ #define I915_FIFO_UNDERRUN_STATUS (1UL<<31) #define I915_CRC_ERROR_ENABLE (1UL<<29) @@ -435,6 +436,7 @@ */ #define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000 #define DPLL_FPA01_P1_POST_DIV_SHIFT 16 +#define DPLL_FPA01_P1_POST_DIV_SHIFT_IGD 15 /* i830, required in DVO non-gang */ #define PLL_P2_DIVIDE_BY_4 (1 << 23) #define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */ @@ -501,10 +503,12 @@ #define FPB0 0x06048 #define FPB1 0x0604c #define FP_N_DIV_MASK 0x003f0000 +#define FP_N_IGD_DIV_MASK 0x00ff0000 #define FP_N_DIV_SHIFT 16 #define FP_M1_DIV_MASK 0x00003f00 #define FP_M1_DIV_SHIFT 8 #define FP_M2_DIV_MASK 0x0000003f +#define FP_M2_IGD_DIV_MASK 0x000000ff #define FP_M2_DIV_SHIFT 0 #define DPLL_TEST 0x606c 
#define DPLLB_TEST_SDVO_DIV_1 (0 << 22) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 0d40b4b6979..d9c50ff94d7 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -92,18 +92,32 @@ struct intel_limit { #define I9XX_DOT_MAX 400000 #define I9XX_VCO_MIN 1400000 #define I9XX_VCO_MAX 2800000 +#define IGD_VCO_MIN 1700000 +#define IGD_VCO_MAX 3500000 #define I9XX_N_MIN 1 #define I9XX_N_MAX 6 +/* IGD's Ncounter is a ring counter */ +#define IGD_N_MIN 3 +#define IGD_N_MAX 6 #define I9XX_M_MIN 70 #define I9XX_M_MAX 120 +#define IGD_M_MIN 2 +#define IGD_M_MAX 256 #define I9XX_M1_MIN 10 #define I9XX_M1_MAX 22 #define I9XX_M2_MIN 5 #define I9XX_M2_MAX 9 +/* IGD M1 is reserved, and must be 0 */ +#define IGD_M1_MIN 0 +#define IGD_M1_MAX 0 +#define IGD_M2_MIN 0 +#define IGD_M2_MAX 254 #define I9XX_P_SDVO_DAC_MIN 5 #define I9XX_P_SDVO_DAC_MAX 80 #define I9XX_P_LVDS_MIN 7 #define I9XX_P_LVDS_MAX 98 +#define IGD_P_LVDS_MIN 7 +#define IGD_P_LVDS_MAX 112 #define I9XX_P1_MIN 1 #define I9XX_P1_MAX 8 #define I9XX_P2_SDVO_DAC_SLOW 10 @@ -121,6 +135,8 @@ struct intel_limit { #define INTEL_LIMIT_G4X_HDMI_DAC 5 #define INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS 6 #define INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS 7 +#define INTEL_LIMIT_IGD_SDVO_DAC 8 +#define INTEL_LIMIT_IGD_LVDS 9 /*The parameter is for SDVO on G4x platform*/ #define G4X_DOT_SDVO_MIN 25000 @@ -340,6 +356,32 @@ static const intel_limit_t intel_limits[] = { }, .find_pll = intel_g4x_find_best_PLL, }, + { /* INTEL_LIMIT_IGD_SDVO */ + .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX}, + .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX }, + .n = { .min = IGD_N_MIN, .max = IGD_N_MAX }, + .m = { .min = IGD_M_MIN, .max = IGD_M_MAX }, + .m1 = { .min = IGD_M1_MIN, .max = IGD_M1_MAX }, + .m2 = { .min = IGD_M2_MIN, .max = IGD_M2_MAX }, + .p = { .min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX }, + .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, + .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, + .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, + }, + { /* INTEL_LIMIT_IGD_LVDS */ + .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, + .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX }, + .n = { .min = IGD_N_MIN, .max = IGD_N_MAX }, + .m = { .min = IGD_M_MIN, .max = IGD_M_MAX }, + .m1 = { .min = IGD_M1_MIN, .max = IGD_M1_MAX }, + .m2 = { .min = IGD_M2_MIN, .max = IGD_M2_MAX }, + .p = { .min = IGD_P_LVDS_MIN, .max = IGD_P_LVDS_MAX }, + .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, + /* IGD only supports single-channel mode. 
*/ + .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, + .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW }, + }, + }; static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc) @@ -376,11 +418,16 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc) if (IS_G4X(dev)) { limit = intel_g4x_limit(crtc); - } else if (IS_I9XX(dev)) { + } else if (IS_I9XX(dev) && !IS_IGD(dev)) { if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) limit = &intel_limits[INTEL_LIMIT_I9XX_LVDS]; else limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC]; + } else if (IS_IGD(dev)) { + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) + limit = &intel_limits[INTEL_LIMIT_IGD_LVDS]; + else + limit = &intel_limits[INTEL_LIMIT_IGD_SDVO_DAC]; } else { if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) limit = &intel_limits[INTEL_LIMIT_I8XX_LVDS]; @@ -390,8 +437,21 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc) return limit; } -static void intel_clock(int refclk, intel_clock_t *clock) +/* m1 is reserved as 0 in IGD, n is a ring counter */ +static void igd_clock(int refclk, intel_clock_t *clock) { + clock->m = clock->m2 + 2; + clock->p = clock->p1 * clock->p2; + clock->vco = refclk * clock->m / clock->n; + clock->dot = clock->vco / clock->p; +} + +static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock) +{ + if (IS_IGD(dev)) { + igd_clock(refclk, clock); + return; + } clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); clock->p = clock->p1 * clock->p2; clock->vco = refclk * clock->m / (clock->n + 2); @@ -427,6 +487,7 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type) static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock) { const intel_limit_t *limit = intel_limit (crtc); + struct drm_device *dev = crtc->dev; if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) INTELPllInvalid ("p1 out of range\n"); @@ -436,7 +497,7 @@ static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock) INTELPllInvalid ("m2 out of range\n"); if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) INTELPllInvalid ("m1 out of range\n"); - if (clock->m1 <= clock->m2) + if (clock->m1 <= clock->m2 && !IS_IGD(dev)) INTELPllInvalid ("m1 <= m2\n"); if (clock->m < limit->m.min || limit->m.max < clock->m) INTELPllInvalid ("m out of range\n"); @@ -486,15 +547,17 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, memset (best_clock, 0, sizeof (*best_clock)); for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) { - for (clock.m2 = limit->m2.min; clock.m2 < clock.m1 && - clock.m2 <= limit->m2.max; clock.m2++) { + for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max; clock.m2++) { + /* m1 is always 0 in IGD */ + if (clock.m2 >= clock.m1 && !IS_IGD(dev)) + break; for (clock.n = limit->n.min; clock.n <= limit->n.max; clock.n++) { for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max; clock.p1++) { int this_err; - intel_clock(refclk, &clock); + intel_clock(dev, refclk, &clock); if (!intel_PLL_is_valid(crtc, &clock)) continue; @@ -551,7 +614,7 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, clock.p1 >= limit->p1.min; clock.p1--) { int this_err; - intel_clock(refclk, &clock); + intel_clock(dev, refclk, &clock); if (!intel_PLL_is_valid(crtc, &clock)) continue; this_err = abs(clock.dot - target) ; @@ -888,7 +951,7 @@ static int intel_get_core_clock_speed(struct drm_device *dev) return 400000; else if (IS_I915G(dev)) return 333000; - else if (IS_I945GM(dev) || IS_845G(dev)) + else 
if (IS_I945GM(dev) || IS_845G(dev) || IS_IGDGM(dev)) return 200000; else if (IS_I915GM(dev)) { u16 gcfgc = 0; @@ -1043,7 +1106,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, return -EINVAL; } - fp = clock.n << 16 | clock.m1 << 8 | clock.m2; + if (IS_IGD(dev)) + fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2; + else + fp = clock.n << 16 | clock.m1 << 8 | clock.m2; dpll = DPLL_VGA_MODE_DIS; if (IS_I9XX(dev)) { @@ -1060,7 +1126,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, } /* compute bitmask from p1 value */ - dpll |= (1 << (clock.p1 - 1)) << 16; + if (IS_IGD(dev)) + dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_IGD; + else + dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; switch (clock.p2) { case 5: dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; @@ -1540,10 +1609,20 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) fp = I915_READ((pipe == 0) ? FPA1 : FPB1); clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; - clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; - clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; + if (IS_IGD(dev)) { + clock.n = ffs((fp & FP_N_IGD_DIV_MASK) >> FP_N_DIV_SHIFT) - 1; + clock.m2 = (fp & FP_M2_IGD_DIV_MASK) >> FP_M2_DIV_SHIFT; + } else { + clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; + clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; + } + if (IS_I9XX(dev)) { - clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >> + if (IS_IGD(dev)) + clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_IGD) >> + DPLL_FPA01_P1_POST_DIV_SHIFT_IGD); + else + clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >> DPLL_FPA01_P1_POST_DIV_SHIFT); switch (dpll & DPLL_MODE_MASK) { @@ -1562,7 +1641,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) } /* XXX: Handle the 100Mhz refclk */ - intel_clock(96000, &clock); + intel_clock(dev, 96000, &clock); } else { bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN); @@ -1574,9 +1653,9 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN) { /* XXX: might not be 66MHz */ - intel_clock(66000, &clock); + intel_clock(dev, 66000, &clock); } else - intel_clock(48000, &clock); + intel_clock(dev, 48000, &clock); } else { if (dpll & PLL_P1_DIVIDE_BY_TWO) clock.p1 = 2; @@ -1589,7 +1668,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) else clock.p2 = 2; - intel_clock(48000, &clock); + intel_clock(dev, 48000, &clock); } } -- cgit v1.2.3 From ba01079c71559304771f9d741c9bbe8b2eac22a2 Mon Sep 17 00:00:00 2001 From: Zhenyu Wang Date: Wed, 4 Mar 2009 20:23:02 +0800 Subject: drm/i915: TV modes' parameters sync up with 2D driver This covers at least: TV: subcarrier fix for NTSC and PAL TV: fix timing parameters for PAL, 480p, 1080i Signed-off-by: Zhenyu Wang Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/intel_tv.c | 112 ++++++++++++++++++++-------------------- 1 file changed, 56 insertions(+), 56 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 56485d67369..0e606855c85 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -217,8 +217,8 @@ static const u32 filter_table[] = { */ static const struct color_conversion ntsc_m_csc_composite = { .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104, - .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00, - .rv = 0x0340, .gv = 0x030c, .bv 
= 0x06d0, .av = 0x0f00, + .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200, + .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200, }; static const struct video_levels ntsc_m_levels_composite = { @@ -226,9 +226,9 @@ static const struct video_levels ntsc_m_levels_composite = { }; static const struct color_conversion ntsc_m_csc_svideo = { - .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134, - .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00, - .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00, + .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133, + .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200, + .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200, }; static const struct video_levels ntsc_m_levels_svideo = { @@ -237,8 +237,8 @@ static const struct video_levels ntsc_m_levels_svideo = { static const struct color_conversion ntsc_j_csc_composite = { .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0119, - .ru = 0x074c, .gu = 0x0546, .bu = 0x05ec, .au = 0x0f00, - .rv = 0x035a, .gv = 0x0322, .bv = 0x06e1, .av = 0x0f00, + .ru = 0x074c, .gu = 0x0546, .bu = 0x05ec, .au = 0x0200, + .rv = 0x035a, .gv = 0x0322, .bv = 0x06e1, .av = 0x0200, }; static const struct video_levels ntsc_j_levels_composite = { @@ -247,8 +247,8 @@ static const struct video_levels ntsc_j_levels_composite = { static const struct color_conversion ntsc_j_csc_svideo = { .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x014c, - .ru = 0x0788, .gu = 0x0581, .bu = 0x0322, .au = 0x0f00, - .rv = 0x0399, .gv = 0x0356, .bv = 0x070a, .av = 0x0f00, + .ru = 0x0788, .gu = 0x0581, .bu = 0x0322, .au = 0x0200, + .rv = 0x0399, .gv = 0x0356, .bv = 0x070a, .av = 0x0200, }; static const struct video_levels ntsc_j_levels_svideo = { @@ -257,8 +257,8 @@ static const struct video_levels ntsc_j_levels_svideo = { static const struct color_conversion pal_csc_composite = { .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0113, - .ru = 0x0745, .gu = 0x053f, .bu = 0x05e1, .au = 0x0f00, - .rv = 0x0353, .gv = 0x031c, .bv = 0x06dc, .av = 0x0f00, + .ru = 0x0745, .gu = 0x053f, .bu = 0x05e1, .au = 0x0200, + .rv = 0x0353, .gv = 0x031c, .bv = 0x06dc, .av = 0x0200, }; static const struct video_levels pal_levels_composite = { @@ -267,8 +267,8 @@ static const struct video_levels pal_levels_composite = { static const struct color_conversion pal_csc_svideo = { .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145, - .ru = 0x0780, .gu = 0x0579, .bu = 0x031c, .au = 0x0f00, - .rv = 0x0390, .gv = 0x034f, .bv = 0x0705, .av = 0x0f00, + .ru = 0x0780, .gu = 0x0579, .bu = 0x031c, .au = 0x0200, + .rv = 0x0390, .gv = 0x034f, .bv = 0x0705, .av = 0x0200, }; static const struct video_levels pal_levels_svideo = { @@ -277,8 +277,8 @@ static const struct video_levels pal_levels_svideo = { static const struct color_conversion pal_m_csc_composite = { .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104, - .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00, - .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0f00, + .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200, + .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200, }; static const struct video_levels pal_m_levels_composite = { @@ -286,9 +286,9 @@ static const struct video_levels pal_m_levels_composite = { }; static const struct color_conversion pal_m_csc_svideo = { - .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134, - .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00, - .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00, + .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 
0x0133, + .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200, + .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200, }; static const struct video_levels pal_m_levels_svideo = { @@ -297,8 +297,8 @@ static const struct video_levels pal_m_levels_svideo = { static const struct color_conversion pal_n_csc_composite = { .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104, - .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00, - .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0f00, + .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200, + .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200, }; static const struct video_levels pal_n_levels_composite = { @@ -306,9 +306,9 @@ static const struct video_levels pal_n_levels_composite = { }; static const struct color_conversion pal_n_csc_svideo = { - .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134, - .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00, - .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00, + .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133, + .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200, + .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200, }; static const struct video_levels pal_n_levels_svideo = { @@ -319,9 +319,9 @@ static const struct video_levels pal_n_levels_svideo = { * Component connections */ static const struct color_conversion sdtv_csc_yprpb = { - .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0146, - .ru = 0x0559, .gu = 0x0353, .bu = 0x0100, .au = 0x0f00, - .rv = 0x0100, .gv = 0x03ad, .bv = 0x074d, .av = 0x0f00, + .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145, + .ru = 0x0559, .gu = 0x0353, .bu = 0x0100, .au = 0x0200, + .rv = 0x0100, .gv = 0x03ad, .bv = 0x074d, .av = 0x0200, }; static const struct color_conversion sdtv_csc_rgb = { @@ -331,9 +331,9 @@ static const struct color_conversion sdtv_csc_rgb = { }; static const struct color_conversion hdtv_csc_yprpb = { - .ry = 0x05b3, .gy = 0x016e, .by = 0x0728, .ay = 0x0146, - .ru = 0x07d5, .gu = 0x038b, .bu = 0x0100, .au = 0x0f00, - .rv = 0x0100, .gv = 0x03d1, .bv = 0x06bc, .av = 0x0f00, + .ry = 0x05b3, .gy = 0x016e, .by = 0x0728, .ay = 0x0145, + .ru = 0x07d5, .gu = 0x038b, .bu = 0x0100, .au = 0x0200, + .rv = 0x0100, .gv = 0x03d1, .bv = 0x06bc, .av = 0x0200, }; static const struct color_conversion hdtv_csc_rgb = { @@ -414,7 +414,7 @@ struct tv_mode { static const struct tv_mode tv_modes[] = { { .name = "NTSC-M", - .clock = 107520, + .clock = 108000, .refresh = 29970, .oversample = TV_OVERSAMPLE_8X, .component_only = 0, @@ -442,8 +442,8 @@ static const struct tv_mode tv_modes[] = { .vburst_start_f4 = 10, .vburst_end_f4 = 240, /* desired 3.5800000 actual 3.5800000 clock 107.52 */ - .dda1_inc = 136, - .dda2_inc = 7624, .dda2_size = 20013, + .dda1_inc = 135, + .dda2_inc = 20800, .dda2_size = 27456, .dda3_inc = 0, .dda3_size = 0, .sc_reset = TV_SC_RESET_EVERY_4, .pal_burst = false, @@ -457,7 +457,7 @@ static const struct tv_mode tv_modes[] = { }, { .name = "NTSC-443", - .clock = 107520, + .clock = 108000, .refresh = 29970, .oversample = TV_OVERSAMPLE_8X, .component_only = 0, @@ -485,10 +485,10 @@ static const struct tv_mode tv_modes[] = { /* desired 4.4336180 actual 4.4336180 clock 107.52 */ .dda1_inc = 168, - .dda2_inc = 18557, .dda2_size = 20625, - .dda3_inc = 0, .dda3_size = 0, - .sc_reset = TV_SC_RESET_EVERY_8, - .pal_burst = true, + .dda2_inc = 4093, .dda2_size = 27456, + .dda3_inc = 310, .dda3_size = 525, + .sc_reset = TV_SC_RESET_NEVER, + .pal_burst = false, .composite_levels = &ntsc_m_levels_composite, 
.composite_color = &ntsc_m_csc_composite, @@ -499,7 +499,7 @@ static const struct tv_mode tv_modes[] = { }, { .name = "NTSC-J", - .clock = 107520, + .clock = 108000, .refresh = 29970, .oversample = TV_OVERSAMPLE_8X, .component_only = 0, @@ -527,8 +527,8 @@ static const struct tv_mode tv_modes[] = { .vburst_start_f4 = 10, .vburst_end_f4 = 240, /* desired 3.5800000 actual 3.5800000 clock 107.52 */ - .dda1_inc = 136, - .dda2_inc = 7624, .dda2_size = 20013, + .dda1_inc = 135, + .dda2_inc = 20800, .dda2_size = 27456, .dda3_inc = 0, .dda3_size = 0, .sc_reset = TV_SC_RESET_EVERY_4, .pal_burst = false, @@ -542,7 +542,7 @@ static const struct tv_mode tv_modes[] = { }, { .name = "PAL-M", - .clock = 107520, + .clock = 108000, .refresh = 29970, .oversample = TV_OVERSAMPLE_8X, .component_only = 0, @@ -570,11 +570,11 @@ static const struct tv_mode tv_modes[] = { .vburst_start_f4 = 10, .vburst_end_f4 = 240, /* desired 3.5800000 actual 3.5800000 clock 107.52 */ - .dda1_inc = 136, - .dda2_inc = 7624, .dda2_size = 20013, + .dda1_inc = 135, + .dda2_inc = 16704, .dda2_size = 27456, .dda3_inc = 0, .dda3_size = 0, - .sc_reset = TV_SC_RESET_EVERY_4, - .pal_burst = false, + .sc_reset = TV_SC_RESET_EVERY_8, + .pal_burst = true, .composite_levels = &pal_m_levels_composite, .composite_color = &pal_m_csc_composite, @@ -586,7 +586,7 @@ static const struct tv_mode tv_modes[] = { { /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */ .name = "PAL-N", - .clock = 107520, + .clock = 108000, .refresh = 25000, .oversample = TV_OVERSAMPLE_8X, .component_only = 0, @@ -615,9 +615,9 @@ static const struct tv_mode tv_modes[] = { /* desired 4.4336180 actual 4.4336180 clock 107.52 */ - .dda1_inc = 168, - .dda2_inc = 18557, .dda2_size = 20625, - .dda3_inc = 0, .dda3_size = 0, + .dda1_inc = 135, + .dda2_inc = 23578, .dda2_size = 27648, + .dda3_inc = 134, .dda3_size = 625, .sc_reset = TV_SC_RESET_EVERY_8, .pal_burst = true, @@ -631,12 +631,12 @@ static const struct tv_mode tv_modes[] = { { /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */ .name = "PAL", - .clock = 107520, + .clock = 108000, .refresh = 25000, .oversample = TV_OVERSAMPLE_8X, .component_only = 0, - .hsync_end = 64, .hblank_end = 128, + .hsync_end = 64, .hblank_end = 142, .hblank_start = 844, .htotal = 863, .progressive = false, .trilevel_sync = false, @@ -659,8 +659,8 @@ static const struct tv_mode tv_modes[] = { /* desired 4.4336180 actual 4.4336180 clock 107.52 */ .dda1_inc = 168, - .dda2_inc = 18557, .dda2_size = 20625, - .dda3_inc = 0, .dda3_size = 0, + .dda2_inc = 4122, .dda2_size = 27648, + .dda3_inc = 67, .dda3_size = 625, .sc_reset = TV_SC_RESET_EVERY_8, .pal_burst = true, @@ -689,7 +689,7 @@ static const struct tv_mode tv_modes[] = { .veq_ena = false, .vi_end_f1 = 44, .vi_end_f2 = 44, - .nbr_end = 496, + .nbr_end = 479, .burst_ena = false, @@ -713,7 +713,7 @@ static const struct tv_mode tv_modes[] = { .veq_ena = false, .vi_end_f1 = 44, .vi_end_f2 = 44, - .nbr_end = 496, + .nbr_end = 479, .burst_ena = false, @@ -876,7 +876,7 @@ static const struct tv_mode tv_modes[] = { .component_only = 1, .hsync_end = 88, .hblank_end = 235, - .hblank_start = 2155, .htotal = 2200, + .hblank_start = 2155, .htotal = 2201, .progressive = false, .trilevel_sync = true, -- cgit v1.2.3 From 6bcdcd9e3c09d133e3278edabebc314a2451b74a Mon Sep 17 00:00:00 2001 From: Zhao Yakui Date: Tue, 3 Mar 2009 18:06:42 +0800 Subject: drm/i915: Sync mode_valid/mode_set with intel video driver This covers: Limit CRT DAC speed better. 
and also clears the border color in case it's set to some garbage, which would fix ugly outlines in the blank regions of the CRT. Signed-off-by: Zhao Yakui [anholt: replaced *drm_dev with *dev] Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/intel_crt.c | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index dcaed3466e8..e58defa247d 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -64,11 +64,21 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode) static int intel_crt_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { + struct drm_device *dev = connector->dev; + + int max_clock = 0; if (mode->flags & DRM_MODE_FLAG_DBLSCAN) return MODE_NO_DBLESCAN; - if (mode->clock > 400000 || mode->clock < 25000) - return MODE_CLOCK_RANGE; + if (mode->clock < 25000) + return MODE_CLOCK_LOW; + + if (!IS_I9XX(dev)) + max_clock = 350000; + else + max_clock = 400000; + if (mode->clock > max_clock) + return MODE_CLOCK_HIGH; return MODE_OK; } @@ -113,10 +123,13 @@ static void intel_crt_mode_set(struct drm_encoder *encoder, if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) adpa |= ADPA_VSYNC_ACTIVE_HIGH; - if (intel_crtc->pipe == 0) + if (intel_crtc->pipe == 0) { adpa |= ADPA_PIPE_A_SELECT; - else + I915_WRITE(BCLRPAT_A, 0); + } else { adpa |= ADPA_PIPE_B_SELECT; + I915_WRITE(BCLRPAT_B, 0); + } I915_WRITE(ADPA, adpa); } -- cgit v1.2.3 From 771cb081354161eea21534ba58e5cc1a2db94a25 Mon Sep 17 00:00:00 2001 From: Zhao Yakui Date: Tue, 3 Mar 2009 18:07:52 +0800 Subject: drm/i915: Sync crt hotplug detection with intel video driver This covers: Use long crt hotplug activation time on GM45. 
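The sequence used below is: set CRT_HOTPLUG_FORCE_DETECT in PORT_HOTPLUG_EN (selecting the 64-sample activation period on GM45), wait for the hardware to clear the bit, and on 4-series desktop parts run that sequence twice before reading PORT_HOTPLUG_STAT. Stripped of the timeout handling, one pass looks roughly like this (simplified sketch of the patch below, not a drop-in excerpt):

        I915_WRITE(PORT_HOTPLUG_EN, hotplug_en | CRT_HOTPLUG_FORCE_DETECT);
        while (I915_READ(PORT_HOTPLUG_EN) & CRT_HOTPLUG_FORCE_DETECT)
                msleep(1);      /* hardware clears the bit when detection is done */
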

From 771cb081354161eea21534ba58e5cc1a2db94a25 Mon Sep 17 00:00:00 2001
From: Zhao Yakui
Date: Tue, 3 Mar 2009 18:07:52 +0800
Subject: drm/i915: Sync crt hotplug detection with intel video driver

This covers: Use long crt hotplug activation time on GM45.

Signed-off-by: Zhao Yakui
Signed-off-by: Eric Anholt
---
 drivers/gpu/drm/i915/i915_reg.h  | 16 ++++++++++++++
 drivers/gpu/drm/i915/intel_crt.c | 45 ++++++++++++++++++++++++++++------------
 2 files changed, 48 insertions(+), 13 deletions(-)
(limited to 'drivers/gpu')

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 6d567772679..05b1894fa13 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -633,6 +633,22 @@
 #define TV_HOTPLUG_INT_EN		(1 << 18)
 #define CRT_HOTPLUG_INT_EN		(1 << 9)
 #define CRT_HOTPLUG_FORCE_DETECT	(1 << 3)
+#define CRT_HOTPLUG_ACTIVATION_PERIOD_32	(0 << 8)
+/* must use period 64 on GM45 according to docs */
+#define CRT_HOTPLUG_ACTIVATION_PERIOD_64	(1 << 8)
+#define CRT_HOTPLUG_DAC_ON_TIME_2M		(0 << 7)
+#define CRT_HOTPLUG_DAC_ON_TIME_4M		(1 << 7)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_40		(0 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_50		(1 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_60		(2 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_70		(3 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_MASK	(3 << 5)
+#define CRT_HOTPLUG_DETECT_DELAY_1G		(0 << 4)
+#define CRT_HOTPLUG_DETECT_DELAY_2G		(1 << 4)
+#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV	(0 << 2)
+#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV	(1 << 2)
+#define CRT_HOTPLUG_MASK			(0x3fc) /* Bits 9-2 */
+
 #define PORT_HOTPLUG_STAT	0x61114
 #define HDMIB_HOTPLUG_INT_STATUS	(1 << 29)

diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index e58defa247d..2b6d44381c3 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -146,20 +146,39 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 temp;
-
-	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
-
-	temp = I915_READ(PORT_HOTPLUG_EN);
-
-	I915_WRITE(PORT_HOTPLUG_EN,
-		   temp | CRT_HOTPLUG_FORCE_DETECT | (1 << 5));
+	u32 hotplug_en;
+	int i, tries = 0;
+	/*
+	 * On 4 series desktop, CRT detect sequence need to be done twice
+	 * to get a reliable result.
+	 */
 
-	do {
-		if (!(I915_READ(PORT_HOTPLUG_EN) & CRT_HOTPLUG_FORCE_DETECT))
-			break;
-		msleep(1);
-	} while (time_after(timeout, jiffies));
+	if (IS_G4X(dev) && !IS_GM45(dev))
+		tries = 2;
+	else
+		tries = 1;
+	hotplug_en = I915_READ(PORT_HOTPLUG_EN);
+	hotplug_en &= ~(CRT_HOTPLUG_MASK);
+	hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;
+
+	if (IS_GM45(dev))
+		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
+
+	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
+
+	for (i = 0; i < tries ; i++) {
+		unsigned long timeout;
+		/* turn on the FORCE_DETECT */
+		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
+		timeout = jiffies + msecs_to_jiffies(1000);
+		/* wait for FORCE_DETECT to go off */
+		do {
+			if (!(I915_READ(PORT_HOTPLUG_EN) &
+			      CRT_HOTPLUG_FORCE_DETECT))
+				break;
+			msleep(1);
+		} while (time_after(timeout, jiffies));
+	}
 
 	if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) ==
 	    CRT_HOTPLUG_MONITOR_COLOR)
-- cgit v1.2.3
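
Side note (not part of the patch above): the detect loop added there leans on the
kernel's wraparound-safe jiffies comparison. A minimal sketch of the same
poll-with-timeout idiom, using a generic readl() on a hypothetical status register
rather than the driver's I915_READ():

#include <linux/delay.h>
#include <linux/io.h>
#include <linux/jiffies.h>

/* Hypothetical helper: poll @reg until @busy_bit clears, giving up after 1s.
 * time_after() compares jiffies with wrap-safe modular arithmetic, so the
 * loop behaves correctly even if the jiffies counter wraps while we wait. */
static bool poll_bit_clear(void __iomem *reg, u32 busy_bit)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);

	do {
		if (!(readl(reg) & busy_bit))
			return true;	/* hardware cleared the bit */
		msleep(1);
	} while (time_after(timeout, jiffies));

	return false;			/* timed out */
}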

From 02c5dd985ddc5407aa9cc7e0f4456ca63b294f16 Mon Sep 17 00:00:00 2001
From: Zhenyu Wang
Date: Wed, 4 Mar 2009 19:36:01 +0800
Subject: drm/i915: Fix TV get_modes to return modes count

The get_modes hook must return the number of modes added. This also fixes
the TV mode clock calculation's int overflow issue, and uses 0.01 precision
for mode refresh validation.

Signed-off-by: Zhenyu Wang
Signed-off-by: Eric Anholt
---
 drivers/gpu/drm/i915/intel_tv.c | 20 ++++++++++++--------
 1 file changed, 12 insertions(+), 8 deletions(-)
(limited to 'drivers/gpu')

diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 0e606855c85..08c4034c44c 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1082,7 +1082,7 @@ intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mo
 	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output);
 
 	/* Ensure TV refresh is close to desired refresh */
-	if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode)) < 1)
+	if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode)) < 10)
 		return MODE_OK;
 	return MODE_CLOCK_RANGE;
 }
@@ -1495,7 +1495,8 @@ intel_tv_get_modes(struct drm_connector *connector)
 	struct drm_display_mode *mode_ptr;
 	struct intel_output *intel_output = to_intel_output(connector);
 	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output);
-	int j;
+	int j, count = 0;
+	u64 tmp;
 
 	for (j = 0; j < sizeof(input_res_table) / sizeof(input_res_table[0]);
 	     j++) {
@@ -1510,8 +1511,9 @@ intel_tv_get_modes(struct drm_connector *connector)
 		    && !tv_mode->component_only))
 			continue;
 
-		mode_ptr = drm_calloc(1, sizeof(struct drm_display_mode),
-				      DRM_MEM_DRIVER);
+		mode_ptr = drm_mode_create(connector->dev);
+		if (!mode_ptr)
+			continue;
 		strncpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN);
 
 		mode_ptr->hdisplay = hactive_s;
@@ -1528,15 +1530,17 @@ intel_tv_get_modes(struct drm_connector *connector)
 		mode_ptr->vsync_end = mode_ptr->vsync_start + 1;
 		mode_ptr->vtotal = vactive_s + 33;
 
-		mode_ptr->clock = (int) (tv_mode->refresh *
-					 mode_ptr->vtotal *
-					 mode_ptr->htotal / 1000) / 1000;
+		tmp = (u64) tv_mode->refresh * mode_ptr->vtotal;
+		tmp *= mode_ptr->htotal;
+		tmp = div_u64(tmp, 1000000);
+		mode_ptr->clock = (int) tmp;
 
 		mode_ptr->type = DRM_MODE_TYPE_DRIVER;
 		drm_mode_probed_add(connector, mode_ptr);
+		count++;
 	}
 
-	return 0;
+	return count;
 }
 
 static void
-- cgit v1.2.3
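
Side note (not part of the patch above): the reason for the u64/div_u64() dance is
that tv_mode->refresh is stored in millihertz, so refresh * vtotal * htotal overflows
a 32-bit int for the larger timings in the table. A sketch of the arithmetic with
illustrative, roughly 1080i-sized numbers:

#include <linux/math64.h>

/* Illustrative only: 29970 mHz (29.97 Hz) refresh, 1125 total lines,
 * 2200 total pixels per line.  29970 * 1125 * 2200 is about 7.4e10, well past
 * INT_MAX (~2.1e9), so the intermediate product must live in a u64. */
static int mode_clock_khz(u32 refresh_millihz, u32 htotal, u32 vtotal)
{
	u64 tmp = (u64)refresh_millihz * vtotal;

	tmp *= htotal;
	return (int)div_u64(tmp, 1000000);	/* millihertz * pixels -> kHz */
}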

From d2d9f23240a7ec29a496ee072ffdf69c4f6cdc76 Mon Sep 17 00:00:00 2001
From: Zhenyu Wang
Date: Wed, 4 Mar 2009 19:36:02 +0800
Subject: drm/i915: TV mode_set sync up with 2D driver

Fix the TV control save register mask so untouched bits are preserved, and
handle the different color knobs definition for 945 and 965 chips.

Signed-off-by: Zhenyu Wang
Signed-off-by: Eric Anholt
---
 drivers/gpu/drm/i915/i915_reg.h |  2 +-
 drivers/gpu/drm/i915/intel_tv.c | 11 ++++++++---
 2 files changed, 9 insertions(+), 4 deletions(-)
(limited to 'drivers/gpu')

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 05b1894fa13..377cc588f5e 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -876,7 +876,7 @@
  */
 # define TV_ENC_C0_FIX			(1 << 10)
 /** Bits that must be preserved by software */
-# define TV_CTL_SAVE			((3 << 8) | (3 << 6))
+# define TV_CTL_SAVE			((1 << 11) | (3 << 9) | (7 << 6) | 0xf)
 # define TV_FUSE_STATE_MASK		(3 << 4)
 /** Read-only state that reports all features enabled */
 # define TV_FUSE_STATE_ENABLED		(0 << 4)

diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 08c4034c44c..7021798f98e 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1135,7 +1135,8 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 	if (!tv_mode)
 		return;	/* can't happen (mode_prepare prevents this) */
 
-	tv_ctl = 0;
+	tv_ctl = I915_READ(TV_CTL);
+	tv_ctl &= TV_CTL_SAVE;
 
 	switch (tv_priv->type) {
 	default:
@@ -1215,7 +1216,6 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 	/* dda1 implies valid video levels */
 	if (tv_mode->dda1_inc) {
 		scctl1 |= TV_SC_DDA1_EN;
-		scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT;
 	}
 
 	if (tv_mode->dda2_inc)
@@ -1225,6 +1225,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 		scctl1 |= TV_SC_DDA3_EN;
 
 	scctl1 |= tv_mode->sc_reset;
+	scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT;
 	scctl1 |= tv_mode->dda1_inc << TV_SCDDA1_INC_SHIFT;
 
 	scctl2 = tv_mode->dda2_size << TV_SCDDA2_SIZE_SHIFT |
@@ -1266,7 +1267,11 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 			   color_conversion->av);
 	}
 
-	I915_WRITE(TV_CLR_KNOBS, 0x00606000);
+	if (IS_I965G(dev))
+		I915_WRITE(TV_CLR_KNOBS, 0x00404000);
+	else
+		I915_WRITE(TV_CLR_KNOBS, 0x00606000);
+
 	if (video_levels)
 		I915_WRITE(TV_CLR_LEVEL,
 			   ((video_levels->black << TV_BLACK_LEVEL_SHIFT) |
-- cgit v1.2.3

From bf5a269a4cc966f783b9faaf3fffd8fa31b53383 Mon Sep 17 00:00:00 2001
From: Zhenyu Wang
Date: Wed, 4 Mar 2009 19:36:03 +0800
Subject: drm/i915: TV detection fix

Check that the encoder has a real, enabled crtc for TV detection, and fix
the missing TV type setting after detect.

Signed-off-by: Zhenyu Wang
Signed-off-by: Eric Anholt
---
 drivers/gpu/drm/i915/intel_tv.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)
(limited to 'drivers/gpu')

diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 7021798f98e..ceca9471a75 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1406,6 +1406,7 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output)
 		tv_dac = I915_READ(TV_DAC);
 		I915_WRITE(TV_DAC, save_tv_dac);
 		I915_WRITE(TV_CTL, save_tv_ctl);
+		intel_wait_for_vblank(dev);
 	}
 	/*
 	 *  A B C
@@ -1456,7 +1457,7 @@ intel_tv_detect(struct drm_connector *connector)
 	mode = reported_modes[0];
 	drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
 
-	if (encoder->crtc) {
+	if (encoder->crtc && encoder->crtc->enabled) {
 		type = intel_tv_detect_type(encoder->crtc, intel_output);
 	} else {
 		crtc = intel_get_load_detect_pipe(intel_output, &mode, &dpms_mode);
@@ -1467,6 +1468,8 @@ intel_tv_detect(struct drm_connector *connector)
 			type = -1;
 	}
 
+	tv_priv->type = type;
+
 	if (type < 0)
 		return connector_status_disconnected;
-- cgit v1.2.3


From 98787c057fdefdce6230ff46f2c1105835005a4c Mon Sep 17 00:00:00 2001
From: Chris Wilson
Date: Fri, 6 Mar 2009 23:27:52 +0000
Subject: drm/i915: Check for dev->primary->master before dereference.

I've hit the occasional oops inside i915_wait_ring() with an indication of
a NULL dereference of dev->primary->master. Adding a NULL check is
consistent with the other potential users of dev->primary->master.

Signed-off-by: Chris Wilson
Signed-off-by: Eric Anholt
---
 drivers/gpu/drm/i915/i915_dma.c | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)
(limited to 'drivers/gpu')

diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index ae83fe0ab37..a818b377e1f 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -41,7 +41,6 @@
 int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
 	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
 	u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
 	u32 last_acthd = I915_READ(acthd_reg);
@@ -58,8 +57,12 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
 		if (ring->space >= n)
 			return 0;
 
-		if (master_priv->sarea_priv)
-			master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+		if (dev->primary->master) {
+			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+			if (master_priv->sarea_priv)
+				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+		}
+
 		if (ring->head != last_head)
 			i = 0;
-- cgit v1.2.3

From 2b5cde2b272f56ec67b56a2af8c067d42eff7328 Mon Sep 17 00:00:00 2001
From: Li Peng
Date: Fri, 13 Mar 2009 10:25:07 +0800
Subject: drm/i915: Fix LVDS dither setting

Update the bdb_lvds_options structure according to its definition in the 2D
driver. Then we can parse and set the 'lvds_dither' bit correctly on non-965
chips.

Signed-off-by: Li Peng
Signed-off-by: Eric Anholt
---
 drivers/gpu/drm/i915/intel_bios.h | 12 ++++++------
 drivers/gpu/drm/i915/intel_lvds.c |  2 +-
 2 files changed, 7 insertions(+), 7 deletions(-)
(limited to 'drivers/gpu')

diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 5ea715ace3a..de621aad85b 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -162,13 +162,13 @@ struct bdb_lvds_options {
 	u8 panel_type;
 	u8 rsvd1;
 	/* LVDS capabilities, stored in a dword */
-	u8 rsvd2:1;
-	u8 lvds_edid:1;
-	u8 pixel_dither:1;
-	u8 pfit_ratio_auto:1;
-	u8 pfit_gfx_mode_enhanced:1;
-	u8 pfit_text_mode_enhanced:1;
 	u8 pfit_mode:2;
+	u8 pfit_text_mode_enhanced:1;
+	u8 pfit_gfx_mode_enhanced:1;
+	u8 pfit_ratio_auto:1;
+	u8 pixel_dither:1;
+	u8 lvds_edid:1;
+	u8 rsvd2:1;
 	u8 rsvd4;
 } __attribute__((packed));

diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 0d211af9885..6619f26e46a 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -265,7 +265,7 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
 	pfit_control = 0;
 
 	if (!IS_I965G(dev)) {
-		if (dev_priv->panel_wants_dither)
+		if (dev_priv->panel_wants_dither || dev_priv->lvds_dither)
 			pfit_control |= PANEL_8TO6_DITHER_ENABLE;
 	} else
-- cgit v1.2.3


From 0b4d569de222452bcb55a4a536ade6cf4d8d1e30 Mon Sep 17 00:00:00 2001
From: Linus Torvalds
Date: Fri, 27 Mar 2009 17:02:09 -0700
Subject: i915: fix wrong 'size_t' format string

For the fifteen bazillionth time. See also commits
f06da264cfb0f9444d41ca247213e419f90aa72a and
aeb565dfc3ac4c8b47c5049085b4c7bfb2c7d5d7 ("i915: Fix more size_t format
string warnings" and "Fix annoying DRM_ERROR() string warning").

Grr-target: Eric Anholt
Grr-target: Chris Wilson
Signed-off-by: Linus Torvalds
---
 drivers/gpu/drm/i915/i915_gem_debugfs.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'drivers/gpu')

diff --git a/drivers/gpu/drm/i915/i915_gem_debugfs.c b/drivers/gpu/drm/i915/i915_gem_debugfs.c
index 5a4cdb5d287..455ec970b38 100644
--- a/drivers/gpu/drm/i915/i915_gem_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_gem_debugfs.c
@@ -192,7 +192,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
 			obj_priv = obj->driver_private;
 			seq_printf(m, "Fenced object[%2d] = %p: %s "
-				   "%08x %08x %08x %s %08x %08x %d",
+				   "%08x %08zx %08x %s %08x %08x %d",
 				   i, obj, get_pin_flag(obj_priv),
 				   obj_priv->gtt_offset, obj->size, obj_priv->stride,
-- cgit v1.2.3
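
Side note (not part of the patch above): %z is the C99 length modifier that matches
size_t on every architecture, which is why the %08zx conversion silences the warning;
size_t happens to be unsigned long on 64-bit x86, but not everywhere, so %08lx is only
accidentally correct there. A small user-space illustration:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t obj_size = 0x2000;

	/* printf("%08lx\n", obj_size);   warns wherever size_t != unsigned long */
	printf("%08zx\n", obj_size);	/* the %z modifier always matches size_t */
	return 0;
}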