/**************************************************************************
 * Copyright (c) 2007, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 * develop this driver.
 *
 **************************************************************************/
/*
 * Code for the SGX MMU:
 *
 * clflush on one processor only:
 * clflush should apparently flush the cache line on all processors in an
 * SMP system.
 *
 * kmap atomic:
 * The usage of the slots must be completely encapsulated within a spinlock, and
 * no other functions that may be using the locks for other purposes may be
 * called from within the locked region.
 * Since the slots are per processor, this will guarantee that we are the only
 * user.
 *
 * TODO: Inserting ptes from an interrupt handler:
 * This may be desirable for some SGX functionality where the GPU can fault in
 * needed pages. For that, we need to make an atomic insert_pages function, that
 * may fail. If it fails, the caller needs to insert the page using a workqueue
 * function, but on average it should be fast.
 */
struct psb_mmu_driver {
	/* protects driver- and pd structures. Always take in read mode
	 * before taking the page table spinlock.
	 */
	struct rw_semaphore sem;

	/* protects page tables, directory tables and pt tables. */
	spinlock_t lock;

	atomic_t needs_tlbflush;
	atomic_t *msvdx_mmu_invaldc;
	uint8_t __iomem *register_map;
	struct psb_mmu_pd *default_pd;
	uint32_t bif_ctrl;
	int has_clflush;
	uint32_t clflush_add;
	unsigned long clflush_mask;
};
struct psb_mmu_pt {
	struct psb_mmu_pd *pd;
	uint32_t index;
	uint32_t count;
	struct page *p;
	uint32_t *v;
};

struct psb_mmu_pd {
	struct psb_mmu_driver *driver;
	int hw_context;
	struct psb_mmu_pt **tables;
	struct page *p;
	struct page *dummy_pt;
	struct page *dummy_page;
	uint32_t invalid_pde;
	uint32_t invalid_pte;
	uint32_t pd_mask;
};
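
/*
 * A GPU virtual address is resolved through a two-level structure:
 * psb_mmu_pd_index() picks the page-directory entry (the bits above
 * PSB_PDE_SHIFT) and psb_mmu_pt_index() picks one of the 1024 page-table
 * entries within that table (the bits above PSB_PTE_SHIFT, masked to 10 bits).
 */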
static inline uint32_t psb_mmu_pt_index(uint32_t offset)
{
	return (offset >> PSB_PTE_SHIFT) & 0x3FF;
}

static inline uint32_t psb_mmu_pd_index(uint32_t offset)
{
	return (offset >> PSB_PDE_SHIFT);
}
#if defined(CONFIG_X86)
static inline void psb_clflush(void *addr)
{
	__asm__ __volatile__("clflush (%0)\n"::"r"(addr):"memory");
}

static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
{
	if (!driver->has_clflush)
		return;

	mb();
	psb_clflush(addr);
	mb();
}
#else

static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
{
	;
}
#endif
static inline void psb_iowrite32(const struct psb_mmu_driver *d,
				 uint32_t val, uint32_t offset)
{
	iowrite32(val, d->register_map + offset);
}

static inline uint32_t psb_ioread32(const struct psb_mmu_driver *d,
				    uint32_t offset)
{
	return ioread32(d->register_map + offset);
}
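
/*
 * Invalidate the MMU's cached page-directory entries by toggling the
 * _PSB_CB_CTRL_INVALDC bit in PSB_CR_BIF_CTRL. Callers hold driver->sem;
 * the invalidate is skipped unless needs_tlbflush is set or force is nonzero.
 */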
static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver, int force)
{
	if (atomic_read(&driver->needs_tlbflush) || force) {
		uint32_t val = psb_ioread32(driver, PSB_CR_BIF_CTRL);

		psb_iowrite32(driver, val | _PSB_CB_CTRL_INVALDC,
			      PSB_CR_BIF_CTRL);
		wmb();
		psb_iowrite32(driver, val & ~_PSB_CB_CTRL_INVALDC,
			      PSB_CR_BIF_CTRL);
		(void)psb_ioread32(driver, PSB_CR_BIF_CTRL);
		if (driver->msvdx_mmu_invaldc)
			atomic_set(driver->msvdx_mmu_invaldc, 1);
	}
	atomic_set(&driver->needs_tlbflush, 0);
}
static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force)
{
	down_write(&driver->sem);
	psb_mmu_flush_pd_locked(driver, force);
	up_write(&driver->sem);
}
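
/*
 * Flush the MMU after PTE updates: issue a directory-cache invalidate when
 * page-directory entries changed (needs_tlbflush), otherwise a plain cache
 * flush, and signal the MSVDX side through msvdx_mmu_invaldc.
 */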
void psb_mmu_flush(struct psb_mmu_driver *driver)
{
	uint32_t val;

	down_write(&driver->sem);
	val = psb_ioread32(driver, PSB_CR_BIF_CTRL);
	if (atomic_read(&driver->needs_tlbflush))
		psb_iowrite32(driver, val | _PSB_CB_CTRL_INVALDC,
			      PSB_CR_BIF_CTRL);
	else
		psb_iowrite32(driver, val | _PSB_CB_CTRL_FLUSH,
			      PSB_CR_BIF_CTRL);

	psb_iowrite32(driver,
		      val & ~(_PSB_CB_CTRL_FLUSH | _PSB_CB_CTRL_INVALDC),
		      PSB_CR_BIF_CTRL);
	(void)psb_ioread32(driver, PSB_CR_BIF_CTRL);
	atomic_set(&driver->needs_tlbflush, 0);
	if (driver->msvdx_mmu_invaldc)
		atomic_set(driver->msvdx_mmu_invaldc, 1);
	up_write(&driver->sem);
}
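
/*
 * Bind a page directory to a hardware context: write the PD's physical
 * address into the per-context BIF directory-list base register and force a
 * directory-cache invalidate, all under the driver semaphore.
 */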
void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
{
	uint32_t offset = (hw_context == 0) ? PSB_CR_BIF_DIR_LIST_BASE0 :
			  PSB_CR_BIF_DIR_LIST_BASE1 + hw_context * 4;

	drm_ttm_cache_flush();
	down_write(&pd->driver->sem);
	psb_iowrite32(pd->driver, (page_to_pfn(pd->p) << PAGE_SHIFT), offset);
	wmb();
	psb_mmu_flush_pd_locked(pd->driver, 1);
	pd->hw_context = hw_context;
	up_write(&pd->driver->sem);
}
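
/*
 * Round addr up to the next page-directory (PDE) boundary, clamped to end,
 * so callers can walk a range one page table at a time.
 */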
static inline unsigned long psb_pd_addr_end(unsigned long addr,
					    unsigned long end)
{
	addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK;
	return (addr < end) ? addr : end;
}
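
/*
 * Build a PTE from a page frame number and the PSB_MMU_*_MEMORY type flags;
 * e.g. a cached mapping yields (pfn << PAGE_SHIFT) | PSB_PTE_VALID |
 * PSB_PTE_CACHED.
 */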
static inline uint32_t psb_mmu_mask_pte(uint32_t pfn, int type)
{
	uint32_t mask = PSB_PTE_VALID;

	if (type & PSB_MMU_CACHED_MEMORY)
		mask |= PSB_PTE_CACHED;
	if (type & PSB_MMU_RO_MEMORY)
		mask |= PSB_PTE_RO;
	if (type & PSB_MMU_WO_MEMORY)
		mask |= PSB_PTE_WO;

	return (pfn << PAGE_SHIFT) | mask;
}
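
/*
 * Allocate a page directory along with its dummy page table and dummy page.
 * Unless trap_pagefaults is set, unmapped PDEs/PTEs are pointed at the dummy
 * pages, which keeps stray GPU accesses from faulting.
 */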
struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
				    int trap_pagefaults, int invalid_type)
{
	struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL);
	uint32_t *v;
	int i;

	if (!pd)
		return NULL;

	pd->p = alloc_page(GFP_DMA32);
	if (!pd->p)
		goto out_err1;
	pd->dummy_pt = alloc_page(GFP_DMA32);
	if (!pd->dummy_pt)
		goto out_err2;
	pd->dummy_page = alloc_page(GFP_DMA32);
	if (!pd->dummy_page)
		goto out_err3;

	if (!trap_pagefaults) {
		pd->invalid_pde = psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
						   invalid_type);
		pd->invalid_pte = psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
						   invalid_type);
	} else {
		pd->invalid_pde = 0;
		pd->invalid_pte = 0;
	}

	v = kmap(pd->dummy_pt);
	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i) {
		v[i] = pd->invalid_pte;
	}
	kunmap(pd->dummy_pt);

	v = kmap(pd->p);
	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i) {
		v[i] = pd->invalid_pde;
	}
	kunmap(pd->p);

	clear_page(kmap(pd->dummy_page));
	kunmap(pd->dummy_page);

	pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024);
	if (!pd->tables)
		goto out_err4;

	pd->hw_context = -1;
	pd->pd_mask = PSB_PTE_VALID;
	pd->driver = driver;

	return pd;

out_err4:
	__free_page(pd->dummy_page);
out_err3:
	__free_page(pd->dummy_pt);
out_err2:
	__free_page(pd->p);
out_err1:
	kfree(pd);
	return NULL;
}
void psb_mmu_free_pt(struct psb_mmu_pt *pt)
{
	__free_page(pt->p);
	kfree(pt);
}
void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
{
	struct psb_mmu_driver *driver = pd->driver;
	struct psb_mmu_pt *pt;
	int i;

	down_write(&driver->sem);
	if (pd->hw_context != -1) {
		psb_iowrite32(driver, 0,
			      PSB_CR_BIF_DIR_LIST_BASE0 + pd->hw_context * 4);
		psb_mmu_flush_pd_locked(driver, 1);
	}

	/* Should take the spinlock here, but we don't need to do that
	   since we have the semaphore in write mode. */

	for (i = 0; i < 1024; ++i) {
		pt = pd->tables[i];
		if (pt)
			psb_mmu_free_pt(pt);
	}

	vfree(pd->tables);
	__free_page(pd->dummy_page);
	__free_page(pd->dummy_pt);
	__free_page(pd->p);
	kfree(pd);
	up_write(&driver->sem);
}
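
/*
 * Allocate one page table and fill it with the directory's invalid PTE.
 * When the directory is bound to a hardware context and clflush is
 * available, the freshly written lines are flushed so the GPU sees them.
 */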
static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
{
	struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL);
	void *v;
	uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT;
	uint32_t clflush_count = PAGE_SIZE / clflush_add;
	spinlock_t *lock = &pd->driver->lock;
	uint8_t *clf;
	uint32_t *ptes;
	int i;

	if (!pt)
		return NULL;

	pt->p = alloc_page(GFP_DMA32);
	if (!pt->p) {
		kfree(pt);
		return NULL;
	}

	spin_lock(lock);

	v = kmap_atomic(pt->p, KM_USER0);
	clf = (uint8_t *) v;
	ptes = (uint32_t *) v;
	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i) {
		*ptes++ = pd->invalid_pte;
	}

#if defined(CONFIG_X86)
	if (pd->driver->has_clflush && pd->hw_context != -1) {
		mb();
		for (i = 0; i < clflush_count; ++i) {
			psb_clflush(clf);
			clf += clflush_add;
		}
		mb();
	}
#endif
	kunmap_atomic(v, KM_USER0);
	spin_unlock(lock);

	pt->count = 0;
	pt->pd = pd;
	pt->index = 0;

	return pt;
}
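
/*
 * Look up (or allocate and hook up) the page table covering addr, returning
 * it kmapped with the page-table spinlock held. Pair with
 * psb_mmu_pt_unmap_unlock().
 */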
struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
					     unsigned long addr)
{
	uint32_t index = psb_mmu_pd_index(addr);
	struct psb_mmu_pt *pt;
	volatile uint32_t *v;
	spinlock_t *lock = &pd->driver->lock;

	spin_lock(lock);
	pt = pd->tables[index];
	while (!pt) {
		spin_unlock(lock);
		pt = psb_mmu_alloc_pt(pd);
		if (!pt)
			return NULL;
		spin_lock(lock);

		if (pd->tables[index]) {
			spin_unlock(lock);
			psb_mmu_free_pt(pt);
			spin_lock(lock);
			pt = pd->tables[index];
			continue;
		}

		v = kmap_atomic(pd->p, KM_USER0);
		pd->tables[index] = pt;
		v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
		pt->index = index;
		kunmap_atomic((void *)v, KM_USER0);

		if (pd->hw_context != -1) {
			psb_mmu_clflush(pd->driver, (void *)&v[index]);
			atomic_set(&pd->driver->needs_tlbflush, 1);
		}
	}
	pt->v = kmap_atomic(pt->p, KM_USER0);
	return pt;
}
static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
					      unsigned long addr)
{
	uint32_t index = psb_mmu_pd_index(addr);
	struct psb_mmu_pt *pt;
	spinlock_t *lock = &pd->driver->lock;

	spin_lock(lock);
	pt = pd->tables[index];
	if (!pt) {
		spin_unlock(lock);
		return NULL;
	}
	pt->v = kmap_atomic(pt->p, KM_USER0);
	return pt;
}
static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
{
	struct psb_mmu_pd *pd = pt->pd;
	volatile uint32_t *v;

	kunmap_atomic(pt->v, KM_USER0);
	if (pt->count == 0) {
		v = kmap_atomic(pd->p, KM_USER0);
		v[pt->index] = pd->invalid_pde;
		pd->tables[pt->index] = NULL;

		if (pd->hw_context != -1) {
			psb_mmu_clflush(pd->driver, (void *)&v[pt->index]);
			atomic_set(&pd->driver->needs_tlbflush, 1);
		}
		kunmap_atomic(pt->v, KM_USER0);
		spin_unlock(&pd->driver->lock);
		psb_mmu_free_pt(pt);
		return;
	}
	spin_unlock(&pd->driver->lock);
}
static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt, unsigned long addr,
				   uint32_t pte)
{
	pt->v[psb_mmu_pt_index(addr)] = pte;
}

static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt,
					  unsigned long addr)
{
	pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
}
static uint32_t psb_mmu_check_pte_locked(struct psb_mmu_pd *pd,
					 uint32_t mmu_offset)
{
	uint32_t *v;
	uint32_t pfn;

	v = kmap_atomic(pd->p, KM_USER0);
	if (!v) {
		printk(KERN_INFO "Could not kmap pde page.\n");
		return 0;
	}
	pfn = v[psb_mmu_pd_index(mmu_offset)];
	/* printk(KERN_INFO "pde is 0x%08x\n", pfn); */
	kunmap_atomic(v, KM_USER0);
	if ((pfn & 0x0F) != PSB_PTE_VALID) {
		printk(KERN_INFO "Strange pde at 0x%08x: 0x%08x.\n",
		       mmu_offset, pfn);
	}
	v = ioremap(pfn & 0xFFFFF000, 4096);
	if (!v) {
		printk(KERN_INFO "Could not kmap pte page.\n");
		return 0;
	}
	pfn = v[psb_mmu_pt_index(mmu_offset)];
	/* printk(KERN_INFO "pte is 0x%08x\n", pfn); */
	iounmap(v);
	if ((pfn & 0x0F) != PSB_PTE_VALID) {
		printk(KERN_INFO "Strange pte at 0x%08x: 0x%08x.\n",
		       mmu_offset, pfn);
	}
	return pfn >> PAGE_SHIFT;
}
static void psb_mmu_check_mirrored_gtt(struct psb_mmu_pd *pd,
				       uint32_t mmu_offset, uint32_t gtt_pages)
{
	uint32_t start;
	uint32_t next;

	printk(KERN_INFO "Checking mirrored gtt 0x%08x %d\n",
	       mmu_offset, gtt_pages);
	down_read(&pd->driver->sem);
	start = psb_mmu_check_pte_locked(pd, mmu_offset);
	mmu_offset += PAGE_SIZE;
	gtt_pages -= 1;
	while (gtt_pages--) {
		next = psb_mmu_check_pte_locked(pd, mmu_offset);
		if (next != start + 1) {
			printk(KERN_INFO "Ptes out of order: 0x%08x, 0x%08x.\n",
			       start, next);
		}
		start = next;
		mmu_offset += PAGE_SIZE;
	}
	up_read(&pd->driver->sem);
}
void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd,
			uint32_t mmu_offset, uint32_t gtt_start,
			uint32_t gtt_pages)
{
	uint32_t *v;
	uint32_t start = psb_mmu_pd_index(mmu_offset);
	struct psb_mmu_driver *driver = pd->driver;

	down_read(&driver->sem);
	spin_lock(&driver->lock);

	v = kmap_atomic(pd->p, KM_USER0);
	v += start;

	while (gtt_pages--) {
		*v++ = gtt_start | pd->pd_mask;
		gtt_start += PAGE_SIZE;
	}

	drm_ttm_cache_flush();
	kunmap_atomic(v, KM_USER0);
	spin_unlock(&driver->lock);

	if (pd->hw_context != -1)
		atomic_set(&pd->driver->needs_tlbflush, 1);

	up_read(&pd->driver->sem);
	psb_mmu_flush_pd(pd->driver, 0);
}
struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
{
	struct psb_mmu_pd *pd;

	down_read(&driver->sem);
	pd = driver->default_pd;
	up_read(&driver->sem);

	return pd;
}
/* Returns the physical address of the PD shared by sgx/msvdx */
uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver)
{
	struct psb_mmu_pd *pd;

	pd = psb_mmu_get_default_pd(driver);
	return page_to_pfn(pd->p) << PAGE_SHIFT;
}
void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
{
	psb_iowrite32(driver, driver->bif_ctrl, PSB_CR_BIF_CTRL);
	psb_mmu_free_pagedir(driver->default_pd);
	kfree(driver);
}
struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem *registers,
					   int trap_pagefaults,
					   int invalid_type,
					   atomic_t *msvdx_mmu_invaldc)
{
	struct psb_mmu_driver *driver;

	driver = kmalloc(sizeof(*driver), GFP_KERNEL);
	if (!driver)
		return NULL;

	driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults,
					      invalid_type);
	if (!driver->default_pd)
		goto out_err1;

	spin_lock_init(&driver->lock);
	init_rwsem(&driver->sem);
	down_write(&driver->sem);
	driver->register_map = registers;
	atomic_set(&driver->needs_tlbflush, 1);
	driver->msvdx_mmu_invaldc = msvdx_mmu_invaldc;

	driver->bif_ctrl = psb_ioread32(driver, PSB_CR_BIF_CTRL);
	psb_iowrite32(driver, driver->bif_ctrl | _PSB_CB_CTRL_CLEAR_FAULT,
		      PSB_CR_BIF_CTRL);
	psb_iowrite32(driver, driver->bif_ctrl & ~_PSB_CB_CTRL_CLEAR_FAULT,
		      PSB_CR_BIF_CTRL);

	driver->has_clflush = 0;

#if defined(CONFIG_X86)
	if (boot_cpu_has(X86_FEATURE_CLFLSH)) {
		uint32_t tfms, misc, cap0, cap4, clflush_size;

		/*
		 * clflush size is determined at kernel setup for x86_64 but
		 * not for i386. We have to do it here.
		 */
		cpuid(0x00000001, &tfms, &misc, &cap0, &cap4);
		clflush_size = ((misc >> 8) & 0xff) * 8;
		driver->has_clflush = 1;
		driver->clflush_add =
		    PAGE_SIZE * clflush_size / sizeof(uint32_t);
		driver->clflush_mask = driver->clflush_add - 1;
		driver->clflush_mask = ~driver->clflush_mask;
	}
#endif

	up_write(&driver->sem);
	return driver;

out_err1:
	kfree(driver);
	return NULL;
}
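
/*
 * Flush the CPU cache lines backing the PTEs of a (possibly tiled) range so
 * the GPU observes the updates. On x86 with clflush this walks the affected
 * page tables; otherwise it falls back to a global drm_ttm_cache_flush().
 */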
#if defined(CONFIG_X86)
static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
			       uint32_t num_pages, uint32_t desired_tile_stride,
			       uint32_t hw_tile_stride)
{
	struct psb_mmu_pt *pt;
	uint32_t rows = 1;
	uint32_t i;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long add;
	unsigned long row_add;
	unsigned long clflush_add = pd->driver->clflush_add;
	unsigned long clflush_mask = pd->driver->clflush_mask;

	if (!pd->driver->has_clflush) {
		drm_ttm_cache_flush();
		return;
	}

	if (hw_tile_stride)
		rows = num_pages / desired_tile_stride;
	else
		desired_tile_stride = num_pages;

	add = desired_tile_stride << PAGE_SHIFT;
	row_add = hw_tile_stride << PAGE_SHIFT;
	mb();
	for (i = 0; i < rows; ++i) {
		addr = address;
		end = addr + add;

		do {
			next = psb_pd_addr_end(addr, end);
			pt = psb_mmu_pt_map_lock(pd, addr);
			if (!pt)
				continue;
			do {
				psb_clflush(&pt->v[psb_mmu_pt_index(addr)]);
			} while (addr += clflush_add,
				 (addr & clflush_mask) < next);

			psb_mmu_pt_unmap_unlock(pt);
		} while (addr = next, next != end);

		address += row_add;
	}
	mb();
}
#else

static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
			       uint32_t num_pages, uint32_t desired_tile_stride,
			       uint32_t hw_tile_stride)
{
	drm_ttm_cache_flush();
}
#endif
void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
				 unsigned long address, uint32_t num_pages)
{
	struct psb_mmu_pt *pt;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long f_address = address;

	down_read(&pd->driver->sem);

	addr = address;
	end = addr + (num_pages << PAGE_SHIFT);

	do {
		next = psb_pd_addr_end(addr, end);
		pt = psb_mmu_pt_alloc_map_lock(pd, addr);
		if (!pt)
			goto out;
		do {
			psb_mmu_invalidate_pte(pt, addr);
			--pt->count;
		} while (addr += PAGE_SIZE, addr < next);
		psb_mmu_pt_unmap_unlock(pt);

	} while (addr = next, next != end);

out:
	if (pd->hw_context != -1)
		psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);

	up_read(&pd->driver->sem);

	if (pd->hw_context != -1)
		psb_mmu_flush(pd->driver);
}
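
/*
 * Tear down the mappings for num_pages pages starting at address, honouring
 * the tile-stride layout used by psb_mmu_insert_pages(), then flush PTEs and
 * the TLB if the directory is bound to a hardware context.
 */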
void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
			  uint32_t num_pages, uint32_t desired_tile_stride,
			  uint32_t hw_tile_stride)
{
	struct psb_mmu_pt *pt;
	uint32_t rows = 1;
	uint32_t i;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long add;
	unsigned long row_add;
	unsigned long f_address = address;

	if (hw_tile_stride)
		rows = num_pages / desired_tile_stride;
	else
		desired_tile_stride = num_pages;

	add = desired_tile_stride << PAGE_SHIFT;
	row_add = hw_tile_stride << PAGE_SHIFT;

	down_read(&pd->driver->sem);

	/* Make sure we only need to flush this processor's cache */

	for (i = 0; i < rows; ++i) {
		addr = address;
		end = addr + add;

		do {
			next = psb_pd_addr_end(addr, end);
			pt = psb_mmu_pt_map_lock(pd, addr);
			if (!pt)
				continue;
			do {
				psb_mmu_invalidate_pte(pt, addr);
				--pt->count;

			} while (addr += PAGE_SIZE, addr < next);
			psb_mmu_pt_unmap_unlock(pt);

		} while (addr = next, next != end);

		address += row_add;
	}
	if (pd->hw_context != -1)
		psb_mmu_flush_ptes(pd, f_address, num_pages,
				   desired_tile_stride, hw_tile_stride);

	up_read(&pd->driver->sem);

	if (pd->hw_context != -1)
		psb_mmu_flush(pd->driver);
}
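
/*
 * Map num_pages consecutive pages starting at start_pfn into the directory
 * at address. Returns 0 on success or a negative errno if a page table
 * could not be allocated.
 */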
int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
				unsigned long address, uint32_t num_pages,
				int type)
{
	struct psb_mmu_pt *pt;
	uint32_t pte;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long f_address = address;
	int ret = 0;

	down_read(&pd->driver->sem);

	addr = address;
	end = addr + (num_pages << PAGE_SHIFT);

	do {
		next = psb_pd_addr_end(addr, end);
		pt = psb_mmu_pt_alloc_map_lock(pd, addr);
		if (!pt) {
			ret = -ENOMEM;
			goto out;
		}
		do {
			pte = psb_mmu_mask_pte(start_pfn++, type);
			psb_mmu_set_pte(pt, addr, pte);
			pt->count++;
		} while (addr += PAGE_SIZE, addr < next);
		psb_mmu_pt_unmap_unlock(pt);

	} while (addr = next, next != end);

out:
	if (pd->hw_context != -1)
		psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);

	up_read(&pd->driver->sem);

	if (pd->hw_context != -1)
		psb_mmu_flush(pd->driver);

	return ret;
}
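
/*
 * Map an array of pages, optionally laid out as rows of desired_tile_stride
 * pages separated by hw_tile_stride pages in the GPU address space
 * (hw_tile_stride == 0 means a single linear run).
 */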
int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
			 unsigned long address, uint32_t num_pages,
			 uint32_t desired_tile_stride, uint32_t hw_tile_stride,
			 int type)
{
	struct psb_mmu_pt *pt;
	uint32_t rows = 1;
	uint32_t i;
	uint32_t pte;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long add;
	unsigned long row_add;
	unsigned long f_address = address;
	int ret = 0;

	if (hw_tile_stride) {
		if (num_pages % desired_tile_stride != 0)
			return -EINVAL;
		rows = num_pages / desired_tile_stride;
	} else {
		desired_tile_stride = num_pages;
	}

	add = desired_tile_stride << PAGE_SHIFT;
	row_add = hw_tile_stride << PAGE_SHIFT;

	down_read(&pd->driver->sem);

	for (i = 0; i < rows; ++i) {
		addr = address;
		end = addr + add;

		do {
			next = psb_pd_addr_end(addr, end);
			pt = psb_mmu_pt_alloc_map_lock(pd, addr);
			if (!pt)
				goto out;
			do {
				pte = psb_mmu_mask_pte(page_to_pfn(*pages++),
						       type);
				psb_mmu_set_pte(pt, addr, pte);
				pt->count++;
			} while (addr += PAGE_SIZE, addr < next);
			psb_mmu_pt_unmap_unlock(pt);

		} while (addr = next, next != end);

		address += row_add;
	}
out:
	if (pd->hw_context != -1)
		psb_mmu_flush_ptes(pd, f_address, num_pages,
				   desired_tile_stride, hw_tile_stride);

	up_read(&pd->driver->sem);

	if (pd->hw_context != -1)
		psb_mmu_flush(pd->driver);

	return ret;
}
void psb_mmu_enable_requestor(struct psb_mmu_driver *driver, uint32_t mask)
{
	mask &= _PSB_MMU_ER_MASK;
	psb_iowrite32(driver, psb_ioread32(driver, PSB_CR_BIF_CTRL) & ~mask,
		      PSB_CR_BIF_CTRL);
	(void)psb_ioread32(driver, PSB_CR_BIF_CTRL);
}

void psb_mmu_disable_requestor(struct psb_mmu_driver *driver, uint32_t mask)
{
	mask &= _PSB_MMU_ER_MASK;
	psb_iowrite32(driver, psb_ioread32(driver, PSB_CR_BIF_CTRL) | mask,
		      PSB_CR_BIF_CTRL);
	(void)psb_ioread32(driver, PSB_CR_BIF_CTRL);
}
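
/*
 * Translate a GPU virtual address to a CPU page frame number by walking the
 * page directory and page table, returning an error if no valid mapping
 * exists.
 */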
int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
			   unsigned long *pfn)
{
	int ret;
	struct psb_mmu_pt *pt;
	uint32_t tmp;
	spinlock_t *lock = &pd->driver->lock;

	down_read(&pd->driver->sem);
	pt = psb_mmu_pt_map_lock(pd, virtual);
	if (!pt) {
		uint32_t *v;

		spin_lock(lock);
		v = kmap_atomic(pd->p, KM_USER0);
		tmp = v[psb_mmu_pd_index(virtual)];
		kunmap_atomic(v, KM_USER0);
		spin_unlock(lock);

		if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
		    !(pd->invalid_pte & PSB_PTE_VALID)) {
			ret = -EINVAL;
			goto out;
		}
		ret = 0;
		*pfn = pd->invalid_pte >> PAGE_SHIFT;
		goto out;
	}
	tmp = pt->v[psb_mmu_pt_index(virtual)];
	if (!(tmp & PSB_PTE_VALID)) {
		ret = -EINVAL;
	} else {
		ret = 0;
		*pfn = tmp >> PAGE_SHIFT;
	}
	psb_mmu_pt_unmap_unlock(pt);
out:
	up_read(&pd->driver->sem);
	return ret;
}
void psb_mmu_test(struct psb_mmu_driver *driver, uint32_t offset)
{
	struct page *p;
	unsigned long pfn;
	int ret = 0;
	struct psb_mmu_pd *pd;
	uint32_t *v;
	uint32_t *vmmu;

	pd = driver->default_pd;
	if (!pd) {
		printk(KERN_WARNING "Could not get default pd\n");
	}

	p = alloc_page(GFP_DMA32);

	if (!p) {
		printk(KERN_WARNING "Failed allocating page\n");
		return;
	}

	v = kmap(p);
	memset(v, 0x67, PAGE_SIZE);

	pfn = (offset >> PAGE_SHIFT);

	ret = psb_mmu_insert_pages(pd, &p, pfn << PAGE_SHIFT, 1, 0, 0, 0);
	if (ret) {
		printk(KERN_WARNING "Failed inserting mmu page\n");
		goto out_err1;
	}

	/* Ioremap the page through the GART aperture */

	vmmu = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!vmmu) {
		printk(KERN_WARNING "Failed ioremapping page\n");
		goto out_err2;
	}

	/* Read from the page with mmu disabled. */
	printk(KERN_INFO "Page first dword is 0x%08x\n", ioread32(vmmu));

	/* Enable the mmu for host accesses and read again. */
	psb_mmu_enable_requestor(driver, _PSB_MMU_ER_HOST);

	printk(KERN_INFO "MMU Page first dword is (0x67676767) 0x%08x\n",
	       ioread32(vmmu));
	*v = 0x15243705;
	printk(KERN_INFO "MMU Page new dword is (0x15243705) 0x%08x\n",
	       ioread32(vmmu));
	iowrite32(0x16243355, vmmu);
	(void)ioread32(vmmu);
	printk(KERN_INFO "Page new dword is (0x16243355) 0x%08x\n", *v);

	printk(KERN_INFO "Int stat is 0x%08x\n",
	       psb_ioread32(driver, PSB_CR_BIF_INT_STAT));
	printk(KERN_INFO "Fault is 0x%08x\n",
	       psb_ioread32(driver, PSB_CR_BIF_FAULT));

	/* Disable MMU for host accesses and clear page fault register */
	psb_mmu_disable_requestor(driver, _PSB_MMU_ER_HOST);
	iounmap(vmmu);
out_err2:
	psb_mmu_remove_pages(pd, pfn << PAGE_SHIFT, 1, 0, 0);
out_err1:
	kunmap(p);
	__free_page(p);
}