/* highmem.h: virtual kernel memory mappings for high memory
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 * - Derived from include/asm-i386/highmem.h
 *
 * See Documentation/fujitsu/frv/mmu-layout.txt for more information.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef _ASM_HIGHMEM_H
#define _ASM_HIGHMEM_H

#ifdef __KERNEL__

#include <linux/init.h>
#include <asm/mem-layout.h>
#include <asm/spr-regs.h>
#include <asm/mb-regs.h>

#define NR_TLB_LINES		64	/* number of lines in the TLB */

#ifndef __ASSEMBLY__

#include <linux/interrupt.h>
#include <asm/kmap_types.h>
#include <asm/pgtable.h>

#ifdef CONFIG_DEBUG_HIGHMEM
#define HIGHMEM_DEBUG 1
#else
#define HIGHMEM_DEBUG 0
#endif

/* declarations for highmem.c */
extern unsigned long highstart_pfn, highend_pfn;

#define kmap_prot PAGE_KERNEL
#define kmap_pte ______kmap_pte_in_TLB
extern pte_t *pkmap_page_table;

#define flush_cache_kmaps()	do { } while (0)

/*
 * Right now we initialize only a single pte table. It can be extended
 * easily; subsequent pte tables have to be allocated in one physical
 * chunk of RAM.
 */
#define LAST_PKMAP		PTRS_PER_PTE
#define LAST_PKMAP_MASK		(LAST_PKMAP - 1)
#define PKMAP_NR(virt)		((virt - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr)		(PKMAP_BASE + ((nr) << PAGE_SHIFT))
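
/*
 * Illustrative note (not part of the original header): PKMAP_ADDR() and
 * PKMAP_NR() are inverses over the persistent-kmap window, assuming the
 * externally defined PKMAP_BASE is page-aligned.  For any index nr in
 * [0, LAST_PKMAP):
 *
 *	PKMAP_ADDR(nr)			== PKMAP_BASE + nr * PAGE_SIZE
 *	PKMAP_NR(PKMAP_ADDR(nr))	== nr
 */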

extern void *kmap_high(struct page *page);
extern void kunmap_high(struct page *page);

extern void *kmap(struct page *page);
extern void kunmap(struct page *page);

extern struct page *kmap_atomic_to_page(void *ptr);
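
/*
 * Usage sketch (illustrative, not from the original header): kmap() may
 * sleep while waiting for a free pkmap slot, so it is only usable from
 * process context, and every kmap() must be paired with a kunmap() of the
 * same page.  Here "page" stands for any struct page, highmem or not:
 *
 *	void *vaddr = kmap(page);
 *	memset(vaddr, 0, PAGE_SIZE);
 *	kunmap(page);
 */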

#endif /* !__ASSEMBLY__ */

/*
 * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
 * gives a more generic (and caching) interface. But kmap_atomic can
 * be used in IRQ contexts, so in some (very limited) cases we need
 * it.
 */
#define KMAP_ATOMIC_CACHE_DAMR		8
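
/*
 * Usage sketch (illustrative, not from the original header): kmap_atomic()
 * disables preemption, so the mapping must be dropped again with
 * kunmap_atomic() using the same km_type slot before doing anything that
 * might sleep.  The KM_* constants come from asm/kmap_types.h; KM_IRQ0 is
 * just an example slot, and buf/offset/len stand for caller-supplied values:
 *
 *	void *vaddr = kmap_atomic(page, KM_IRQ0);
 *	memcpy(buf, vaddr + offset, len);
 *	kunmap_atomic(vaddr, KM_IRQ0);
 */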

#ifndef __ASSEMBLY__

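/* Map the page at physical address paddr through the fixed primary window
 * backed by DAMR register <ampr> (and by the matching IAMPR as well for the
 * __KM_CACHE slot); the kernel virtual address of the window is read back
 * from the corresponding DAMLR register.
 */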
#define __kmap_atomic_primary(type, paddr, ampr)					\
({											\
	unsigned long damlr, dampr;							\
											\
	dampr = paddr | xAMPRx_L | xAMPRx_M | xAMPRx_S | xAMPRx_SS_16Kb | xAMPRx_V;	\
											\
	if (type != __KM_CACHE)								\
		asm volatile("movgs %0,dampr"#ampr :: "r"(dampr));			\
	else										\
		asm volatile("movgs %0,iampr"#ampr"\n"					\
			     "movgs %0,dampr"#ampr"\n"					\
			     :: "r"(dampr)						\
			     );								\
											\
	asm("movsg damlr"#ampr",%0" : "=r"(damlr));					\
											\
	/*printk("DAMR"#ampr": PRIM sl=%d L=%08lx P=%08lx\n", type, damlr, dampr);*/	\
											\
	(void *) damlr;									\
})

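/* Map the page at physical address paddr into secondary atomic kmap slot
 * <slot>: the virtual address is a fixed page within the secondary frame,
 * and the translation is loaded straight into the TLB via the tlbpr
 * instruction.
 */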
#define __kmap_atomic_secondary(slot, paddr)						  \
({											  \
	unsigned long damlr = KMAP_ATOMIC_SECONDARY_FRAME + (slot) * PAGE_SIZE;	  \
	unsigned long dampr = paddr | xAMPRx_L | xAMPRx_M | xAMPRx_S | xAMPRx_SS_16Kb | xAMPRx_V; \
											  \
	asm volatile("movgs %0,tplr \n"							  \
		     "movgs %1,tppr \n"							  \
		     "tlbpr %0,gr0,#2,#1"						  \
		     : : "r"(damlr), "r"(dampr));					  \
											  \
	/*printk("TLB: SECN sl=%d L=%08lx P=%08lx\n", slot, damlr, dampr);*/		  \
											  \
	(void *) damlr;									  \
})

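/* kmap types 0-8 are served by the fixed primary windows (DAMR2-DAMR10);
 * the next NR_TLB_LINES types fall through to the secondary TLB-based slots.
 */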
static inline void *kmap_atomic(struct page *page, enum km_type type)
{
	unsigned long paddr;

	preempt_disable();
	paddr = page_to_phys(page);

	switch (type) {
	case 0:		return __kmap_atomic_primary(0, paddr, 2);
	case 1:		return __kmap_atomic_primary(1, paddr, 3);
	case 2:		return __kmap_atomic_primary(2, paddr, 4);
	case 3:		return __kmap_atomic_primary(3, paddr, 5);
	case 4:		return __kmap_atomic_primary(4, paddr, 6);
	case 5:		return __kmap_atomic_primary(5, paddr, 7);
	case 6:		return __kmap_atomic_primary(6, paddr, 8);
	case 7:		return __kmap_atomic_primary(7, paddr, 9);
	case 8:		return __kmap_atomic_primary(8, paddr, 10);

	case 9 ... 9 + NR_TLB_LINES - 1:
		return __kmap_atomic_secondary(type - 9, paddr);

	default:
		BUG();
		return 0;
	}
}

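/* The __kunmap_atomic_*() helpers undo the mappings set up above: the
 * primary form clears the DAMR (and IAMPR for the __KM_CACHE slot), while
 * the secondary form drops the TLB entry covering the slot's virtual
 * address.
 */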
#define __kunmap_atomic_primary(type, ampr)			\
do {								\
	asm volatile("movgs gr0,dampr"#ampr"\n");		\
	if (type == __KM_CACHE)					\
		asm volatile("movgs gr0,iampr"#ampr"\n");	\
} while (0)

#define __kunmap_atomic_secondary(slot, vaddr)			\
do {								\
	asm volatile("tlbpr %0,gr0,#4,#1" : : "r"(vaddr));	\
} while (0)

static inline void kunmap_atomic(void *kvaddr, enum km_type type)
{
	switch (type) {
	case 0:		__kunmap_atomic_primary(0, 2);	break;
	case 1:		__kunmap_atomic_primary(1, 3);	break;
	case 2:		__kunmap_atomic_primary(2, 4);	break;
	case 3:		__kunmap_atomic_primary(3, 5);	break;
	case 4:		__kunmap_atomic_primary(4, 6);	break;
	case 5:		__kunmap_atomic_primary(5, 7);	break;
	case 6:		__kunmap_atomic_primary(6, 8);	break;
	case 7:		__kunmap_atomic_primary(7, 9);	break;
	case 8:		__kunmap_atomic_primary(8, 10);	break;

	case 9 ... 9 + NR_TLB_LINES - 1:
		__kunmap_atomic_secondary(type - 9, kvaddr);
		break;

	default:
		BUG();
	}
	preempt_enable();
}

#endif /* !__ASSEMBLY__ */

#endif /* __KERNEL__ */

#endif /* _ASM_HIGHMEM_H */