author     Daniel Vetter <daniel.vetter@ffwll.ch>   2010-12-24 14:59:34 +0100
committer  Daniel Vetter <daniel.vetter@ffwll.ch>   2010-12-24 14:59:34 +0100
commit     dfa524f8f2d4fa94bb27447d9a0b9d71ce66ec30
tree       45eaaa8da06c28004bddc28d56fb409913b2b8f5
parent     6276e355746d1c1150f1494829011f85bd34fd6f

cache coherency checker  (i855-cache-coherency-checker)

-rw-r--r--  drivers/char/agp/intel-gtt.c  | 179
1 file changed, 161 insertions(+), 18 deletions(-)
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 799ad17b70a1..12a858003fbc 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -22,6 +22,7 @@
#include <linux/pagemap.h>
#include <linux/agp_backend.h>
#include <linux/delay.h>
+#include <linux/ratelimit.h>
#include <asm/smp.h>
#include "agp.h"
#include "intel-agp.h"
@@ -39,6 +40,8 @@
#define USE_PCI_DMA_API 0
#endif
+#define I830_CACHE_COHERENCY_CHECKER 1
+
struct intel_gtt_driver {
unsigned int gen : 8;
unsigned int is_g33 : 1;
@@ -77,6 +80,12 @@ static struct _intel_private {
};
char *i81x_gtt_table;
struct page *i8xx_page;
+#if I830_CACHE_COHERENCY_CHECKER
+ void *i8xx_cpu_check_page;
+ void __iomem *i8xx_gtt_check_page;
+ unsigned int i8xx_cache_flush_num;
+ struct page *i8xx_cc_page;
+#endif
struct resource ifp_resource;
int resource_valid;
struct page *scratch_page;
@@ -722,6 +731,141 @@ static int intel_fake_agp_fetch_size(void)
return 0;
}
+static void i830_write_entry(dma_addr_t addr, unsigned int entry,
+ unsigned int flags)
+{
+ u32 pte_flags = I810_PTE_VALID;
+
+ if (flags == AGP_USER_CACHED_MEMORY)
+ pte_flags |= I830_PTE_SYSTEM_CACHED;
+
+ writel(addr | pte_flags, intel_private.gtt + entry);
+}
+
+#if I830_CACHE_COHERENCY_CHECKER
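+/*
+ * Debug aid: verify that the chipset flush actually works.  Setup steals
+ * the last GTT entry and maps its backing page twice: cacheable through
+ * the CPU (kmap) and write-combining through the GTT aperture (ioremap_wc).
+ * Around every chipset flush a running counter is written through both
+ * mappings and read back through the opposite one; any mismatch triggers
+ * a (ratelimited) WARN.
+ */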
+static void intel_i830_cc_setup(void)
+{
+ /* steal one page of the gtt to check coherency */
+ intel_private.base.gtt_total_entries--;
+ intel_private.base.gtt_mappable_entries--;
+
+ intel_private.i8xx_cc_page =
+ alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
+ if (!intel_private.i8xx_cc_page)
+ return;
+
+ intel_private.i8xx_cpu_check_page =
+ kmap(intel_private.i8xx_cc_page);
+ if (!intel_private.i8xx_cpu_check_page)
+ return;
+
+ intel_private.i8xx_gtt_check_page =
+ ioremap_wc(intel_private.gma_bus_addr +
+ intel_private.base.gtt_total_entries*4096,
+ 4096);
+ if (!intel_private.i8xx_gtt_check_page)
+ return;
+
+ i830_write_entry(page_to_phys(intel_private.i8xx_cc_page),
+ intel_private.base.gtt_total_entries, 0);
+ readl(intel_private.gtt+intel_private.base.gtt_total_entries);
+
+ intel_private.i8xx_cache_flush_num = 0;
+
+ dev_info(&intel_private.bridge_dev->dev,
+ "i830 cache coherency checker enabled\n");
+}
+
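+/*
+ * Undo whatever intel_i830_cc_setup() managed to set up; setup may bail
+ * out half-way, so each resource is checked before it is released.
+ */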
+static void intel_i830_cc_cleanup(void)
+{
+ if (intel_private.i8xx_gtt_check_page)
+ iounmap(intel_private.i8xx_gtt_check_page);
+ intel_private.i8xx_gtt_check_page = NULL;
+
+ if (intel_private.i8xx_cpu_check_page)
+ kunmap(intel_private.i8xx_cc_page);
+ intel_private.i8xx_cpu_check_page = NULL;
+
+ if (intel_private.i8xx_cc_page)
+ __free_page(intel_private.i8xx_cc_page);
+ intel_private.i8xx_cc_page = NULL;
+}
+
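+/* Warn at most once a minute per read-back path to keep dmesg usable. */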
+static DEFINE_RATELIMIT_STATE(i8xx_chipset_flush_ratelimit_cpu, 60*HZ, 1);
+static DEFINE_RATELIMIT_STATE(i8xx_chipset_flush_ratelimit_gtt, 60*HZ, 1);
+
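+/*
+ * Called just before the chipset flush: store the current flush number in
+ * the check page through the cached CPU mapping (then pushed out with
+ * clflush/wbinvd) and, at a different offset, through the WC mapping of
+ * the GTT aperture.
+ */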
+static void intel_i830_cc_before_flush(void)
+{
+ unsigned int offset1
+ = (intel_private.i8xx_cache_flush_num * sizeof(int)) % 4096;
+ unsigned int offset2
+ = (intel_private.i8xx_cache_flush_num * sizeof(int)
+ + 2048) % 4096;
+ unsigned int *p_cpu_write = intel_private.i8xx_cpu_check_page + offset2;
+
+ /* write check values */
+ *p_cpu_write = intel_private.i8xx_cache_flush_num;
+ mb();
+ if (cpu_has_clflush)
+ clflush(p_cpu_write);
+ else
+ wbinvd_on_all_cpus();
+ writel(intel_private.i8xx_cache_flush_num,
+ intel_private.i8xx_gtt_check_page + offset1);
+ mb();
+}
+
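+/*
+ * Called right after the chipset flush: read both values back through the
+ * opposite mapping.  gtt_read verifies that the CPU write reached memory,
+ * cpu_read verifies that the aperture write is visible to a cached read;
+ * a mismatch means the flush did not work.
+ */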
+static void intel_i830_cc_after_flush(void)
+{
+ unsigned int offset1
+ = (intel_private.i8xx_cache_flush_num * sizeof(int)) % 4096;
+ unsigned int offset2
+ = (intel_private.i8xx_cache_flush_num * sizeof(int)
+ + 2048) % 4096;
+ unsigned int *p_cpu_read = intel_private.i8xx_cpu_check_page + offset1;
+ unsigned int gtt_read, cpu_read;
+
+ if (!intel_private.i8xx_gtt_check_page)
+ return;
+
+ mb();
+ gtt_read = readl(intel_private.i8xx_gtt_check_page + offset2);
+ cpu_read = *p_cpu_read;
+
+ WARN(cpu_read != intel_private.i8xx_cache_flush_num
+ && __ratelimit(&i8xx_chipset_flush_ratelimit_cpu),
+ "i8xx chipset flush failed, expected: %u, cpu_read: %u\n",
+ intel_private.i8xx_cache_flush_num, cpu_read);
+ WARN(gtt_read != intel_private.i8xx_cache_flush_num
+ && __ratelimit(&i8xx_chipset_flush_ratelimit_gtt),
+ "i8xx chipset flush failed, expected: %u, gtt_read: %u\n",
+ intel_private.i8xx_cache_flush_num, gtt_read);
+
+ clflush(p_cpu_read);
+ mb();
+
+ intel_private.i8xx_cache_flush_num++;
+ if (intel_private.i8xx_cache_flush_num % 256 == 0)
+ printk("cache flush num %u\n", intel_private.i8xx_cache_flush_num);
+}
+#else /* I830_CACHE_COHERENCY_CHECKER */
+static void intel_i830_cc_setup(void)
+{
+}
+
+static void intel_i830_cc_cleanup(void)
+{
+}
+
+static void intel_i830_cc_before_flush(void)
+{
+}
+
+static void intel_i830_cc_after_flush(void)
+{
+}
+#endif
+
static void i830_cleanup(void)
{
if (intel_private.i8xx_flush_page) {
@@ -731,6 +875,8 @@ static void i830_cleanup(void)
__free_page(intel_private.i8xx_page);
intel_private.i8xx_page = NULL;
+
+ intel_i830_cc_cleanup();
}
static void intel_i830_setup_flush(void)
@@ -780,26 +926,20 @@ static void i830_chipset_flush(void)
{
unsigned int *pg;
- if (IS_855GM)
- return i855_chipset_flush();
-
- pg = intel_private.i8xx_flush_page;
- memset(pg, 0, 1024);
- if (cpu_has_clflush)
- clflush_cache_range(pg, 1024);
- else if (wbinvd_on_all_cpus() != 0)
- printk(KERN_ERR "Timed out waiting for cache flush.\n");
-}
-
-static void i830_write_entry(dma_addr_t addr, unsigned int entry,
- unsigned int flags)
-{
- u32 pte_flags = I810_PTE_VALID;
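+ /* Bracket the actual flush with the coherency checker hooks. */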
+ intel_i830_cc_before_flush();
- if (flags == AGP_USER_CACHED_MEMORY)
- pte_flags |= I830_PTE_SYSTEM_CACHED;
+ if (IS_855GM)
+ i855_chipset_flush();
+ else {
+ pg = intel_private.i8xx_flush_page;
+ memset(pg, 0, 1024);
+ if (cpu_has_clflush)
+ clflush_cache_range(pg, 1024);
+ else if (wbinvd_on_all_cpus() != 0)
+ printk(KERN_ERR "Timed out waiting for cache flush.\n");
+ }
- writel(addr | pte_flags, intel_private.gtt + entry);
+ intel_i830_cc_after_flush();
}
static bool intel_enable_gtt(void)
@@ -889,6 +1029,9 @@ static int intel_fake_agp_configure(void)
if (!intel_enable_gtt())
return -EIO;
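+ /* The coherency checker only covers the gen2 (i8xx) flush path. */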
+ if (INTEL_GTT_GEN == 2)
+ intel_i830_cc_setup();
+
agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;
for (i = 0; i < intel_private.base.gtt_total_entries; i++) {