- #ifndef __ASM_SYSTEM_H
- #define __ASM_SYSTEM_H
-
- #include <linux/kernel.h>
- #include <asm/segment.h>
- #include <asm/cpufeature.h>
- #include <linux/bitops.h>
-
- #ifdef __KERNEL__
-
- struct task_struct; /* one of the stranger aspects of C forward declarations.. */
- extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
-
- #define switch_to(prev,next,last) do { \
- unsigned long esi,edi; \
- asm volatile("pushl %%ebp\n\t" \
- "movl %%esp,%0\n\t" /* save ESP */ \
- "movl %5,%%esp\n\t" /* restore ESP */ \
- "movl $1f,%1\n\t" /* save EIP */ \
- "pushl %6\n\t" /* restore EIP */ \
- "jmp __switch_to\n" \
- "1:\t" \
- "popl %%ebp\n\t" \
- :"=m" (prev->thread.esp),"=m" (prev->thread.eip), \
- "=a" (last),"=S" (esi),"=D" (edi) \
- :"m" (next->thread.esp),"m" (next->thread.eip), \
- "2" (prev), "d" (next)); \
- } while (0)
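-
- /*
-  * Illustrative sketch (not actual scheduler code): the scheduler core
-  * invokes this roughly as
-  *
-  *	struct task_struct *last;
-  *	switch_to(prev, next, last);
-  *
-  * When 'prev' is eventually scheduled back in, 'last' names the task
-  * that ran immediately before it on this CPU.
-  */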
-
- #define _set_base(addr,base) do { unsigned long __pr; \
- __asm__ __volatile__ ("movw %%dx,%1\n\t" \
- "rorl $16,%%edx\n\t" \
- "movb %%dl,%2\n\t" \
- "movb %%dh,%3" \
- :"=&d" (__pr) \
- :"m" (*((addr)+2)), \
- "m" (*((addr)+4)), \
- "m" (*((addr)+7)), \
- "0" (base) \
- ); } while(0)
-
- #define _set_limit(addr,limit) do { unsigned long __lr; \
- __asm__ __volatile__ ("movw %%dx,%1\n\t" \
- "rorl $16,%%edx\n\t" \
- "movb %2,%%dh\n\t" \
- "andb $0xf0,%%dh\n\t" \
- "orb %%dh,%%dl\n\t" \
- "movb %%dl,%2" \
- :"=&d" (__lr) \
- :"m" (*(addr)), \
- "m" (*((addr)+6)), \
- "0" (limit) \
- ); } while(0)
-
- #define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
- #define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1) )
-
- /*
- * Load a segment. Fall back on loading the zero
- * segment if something goes wrong..
- */
- #define loadsegment(seg,value) \
- asm volatile("\n" \
- "1:\t" \
- "mov %0,%%" #seg "\n" \
- "2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3:\t" \
- "pushl $0\n\t" \
- "popl %%" #seg "\n\t" \
- "jmp 2b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n\t" \
- ".align 4\n\t" \
- ".long 1b,3b\n" \
- ".previous" \
- : :"rm" (value))
-
- /*
- * Save a segment register away
- */
- #define savesegment(seg, value) \
- asm volatile("mov %%" #seg ",%0":"=rm" (value))
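-
- /*
-  * Usage sketch (the %fs selector and __USER_DS are illustrative here):
-  *
-  *	unsigned int old_fs;
-  *	savesegment(fs, old_fs);
-  *	loadsegment(fs, __USER_DS);
-  *	...
-  *	loadsegment(fs, old_fs);
-  *
-  * If the load faults, the fixup section above reloads the register
-  * with the zero segment instead.
-  */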
-
- /*
-  * Clear and set the TS bit in CR0, respectively
-  */
- #define clts() __asm__ __volatile__ ("clts")
- #define read_cr0() ({ \
- unsigned int __dummy; \
- __asm__ __volatile__( \
- "movl %%cr0,%0\n\t" \
- :"=r" (__dummy)); \
- __dummy; \
- })
- #define write_cr0(x) \
- __asm__ __volatile__("movl %0,%%cr0": :"r" (x));
-
- #define read_cr2() ({ \
- unsigned int __dummy; \
- __asm__ __volatile__( \
- "movl %%cr2,%0\n\t" \
- :"=r" (__dummy)); \
- __dummy; \
- })
- #define write_cr2(x) \
- __asm__ __volatile__("movl %0,%%cr2": :"r" (x));
-
- #define read_cr3() ({ \
- unsigned int __dummy; \
- __asm__ ( \
- "movl %%cr3,%0\n\t" \
- :"=r" (__dummy)); \
- __dummy; \
- })
- #define write_cr3(x) \
- __asm__ __volatile__("movl %0,%%cr3": :"r" (x));
-
- #define read_cr4() ({ \
- unsigned int __dummy; \
- __asm__( \
- "movl %%cr4,%0\n\t" \
- :"=r" (__dummy)); \
- __dummy; \
- })
-
- #define read_cr4_safe() ({ \
- unsigned int __dummy; \
- /* This could fault if %cr4 does not exist */ \
- __asm__("1: movl %%cr4, %0 \n" \
- "2: \n" \
- ".section __ex_table,\"a\" \n" \
- ".long 1b,2b \n" \
- ".previous \n" \
- : "=r" (__dummy): "0" (0)); \
- __dummy; \
- })
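-
- /*
-  * Usage sketch: on CPUs without %cr4 the faulting mov is skipped via
-  * the exception table and 0 is returned, so feature tests can be
-  * written as (X86_CR4_PSE is illustrative of a CR4 flag defined
-  * elsewhere):
-  *
-  *	if (read_cr4_safe() & X86_CR4_PSE)
-  *		...
-  */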
-
- #define write_cr4(x) \
- __asm__ __volatile__("movl %0,%%cr4": :"r" (x));
- #define stts() write_cr0(8 | read_cr0())
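-
- /*
-  * Sketch of how clts()/stts() bracket kernel FPU use (illustrative,
-  * not the real lazy-FPU code; the 8 above is the TS bit, CR0 bit 3):
-  *
-  *	clts();			TS cleared, FPU ops no longer trap
-  *	...use the FPU...
-  *	stts();			TS set, next FPU op raises device-not-available
-  */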
-
- #endif /* __KERNEL__ */
-
- #define wbinvd() \
- __asm__ __volatile__ ("wbinvd": : :"memory");
-
- /* lsl yields the segment limit for the given selector; limit + 1 is the size */
- static inline unsigned long get_limit(unsigned long segment)
- {
- unsigned long __limit;
- __asm__("lsll %1,%0"
- :"=r" (__limit):"r" (segment));
- return __limit+1;
- }
-
- #define nop() __asm__ __volatile__ ("nop")
-
- #define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
-
- #define tas(ptr) (xchg((ptr),1))
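-
- /*
-  * Example (a sketch only; real code should use the spinlock API): a
-  * crude test-and-set lock built from tas(), which is xchg(ptr, 1):
-  *
-  *	static volatile int lock_var;
-  *
-  *	while (tas(&lock_var))
-  *		;			spin until we swap a 0 for our 1
-  *	...critical section...
-  *	lock_var = 0;
-  */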
-
- /*
-  * The __xg() cast makes an "m" operand cover a huge dummy structure, so
-  * gcc assumes the whole object behind the pointer may be read or written
-  * rather than just its first word.
-  */
- struct __xchg_dummy { unsigned long a[100]; };
- #define __xg(x) ((struct __xchg_dummy *)(x))
-
-
- #ifdef CONFIG_X86_CMPXCHG64
-
- /*
-  * The semantics of CMPXCHG8B are a bit strange, this is why
-  * there is a loop and the loading of %%eax and %%edx has to
-  * be inside. This inlines well in most cases, the cached
-  * cost is around ~38 cycles. (in the future we might want
-  * to do a SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
-  * might have an implicit FPU-save as a cost, so it's not
-  * clear which path to go.)
-  *
-  * cmpxchg8b must be used with the lock prefix here to allow
-  * the instruction to be executed atomically, see page 3-102
-  * of the instruction set reference 24319102.pdf. We need
-  * the reader side to see a coherent 64-bit value.
-  */
- static inline void __set_64bit (unsigned long long * ptr,
- unsigned int low, unsigned int high)
- {
- __asm__ __volatile__ (
- "\n1:\t"
- "movl (%0), %%eax\n\t"
- "movl 4(%0), %%edx\n\t"
- "lock cmpxchg8b (%0)\n\t"
- "jnz 1b"
- : /* no outputs */
- : "D"(ptr),
- "b"(low),
- "c"(high)
- : "ax","dx","memory");
- }
-
- static inline void __set_64bit_constant (unsigned long long *ptr,
- unsigned long long value)
- {
- __set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
- }
- #define ll_low(x) *(((unsigned int*)&(x))+0)
- #define ll_high(x) *(((unsigned int*)&(x))+1)
-
- static inline void __set_64bit_var (unsigned long long *ptr,
- unsigned long long value)
- {
- __set_64bit(ptr,ll_low(value), ll_high(value));
- }
-
- #define set_64bit(ptr,value) \
- (__builtin_constant_p(value) ? \
- __set_64bit_constant(ptr, value) : \
- __set_64bit_var(ptr, value) )
-
- #define _set_64bit(ptr,value) \
- (__builtin_constant_p(value) ? \
- __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
- __set_64bit(ptr, ll_low(value), ll_high(value)) )
-
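- /*
-  * Usage sketch: publish a 64-bit value so that a concurrent 64-bit
-  * reader never sees a torn half ('shared' is illustrative):
-  *
-  *	static unsigned long long shared;
-  *	set_64bit(&shared, 0x0123456789abcdefULL);
-  */
-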
- #endif
-
- /*
-  * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
-  * Note 2: xchg has a side effect, so the volatile attribute is necessary;
-  * strictly speaking the constraints are incomplete, since *ptr is
-  * really an output argument as well. --ANK
-  */
- static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
- {
- switch (size) {
- case 1:
- __asm__ __volatile__("xchgb %b0,%1"
- :"=q" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- case 2:
- __asm__ __volatile__("xchgw %w0,%1"
- :"=r" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- case 4:
- __asm__ __volatile__("xchgl %0,%1"
- :"=r" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- }
- return x;
- }
-
- /*
- * Atomic compare and exchange. Compare OLD with MEM, if identical,
- * store NEW in MEM. Return the initial value in MEM. Success is
- * indicated by comparing RETURN with OLD.
- */
-
- #ifdef CONFIG_X86_CMPXCHG
- #define __HAVE_ARCH_CMPXCHG 1
- #define cmpxchg(ptr,o,n)\
- ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
- (unsigned long)(n),sizeof(*(ptr))))
- #endif
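-
- /*
-  * Typical cmpxchg() usage (sketch): a compare-and-swap retry loop that
-  * atomically increments an illustrative 'counter':
-  *
-  *	unsigned long old, new;
-  *	do {
-  *		old = counter;
-  *		new = old + 1;
-  *	} while (cmpxchg(&counter, old, new) != old);
-  */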
-
- static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
- unsigned long new, int size)
- {
- unsigned long prev;
- switch (size) {
- case 1:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
- : "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- case 2:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- case 4:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- }
- return old;
- }
-
- #ifndef CONFIG_X86_CMPXCHG
- /*
-  * Building a kernel capable of running on an 80386. It may be necessary
-  * to simulate cmpxchg on the 80386, which lacks the instruction. For
-  * that purpose we define a function for each of the sizes we support.
-  */
-
- extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
- extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
- extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);
-
- static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
- unsigned long new, int size)
- {
- switch (size) {
- case 1:
- return cmpxchg_386_u8(ptr, old, new);
- case 2:
- return cmpxchg_386_u16(ptr, old, new);
- case 4:
- return cmpxchg_386_u32(ptr, old, new);
- }
- return old;
- }
-
- #define cmpxchg(ptr,o,n) \
- ({ \
- __typeof__(*(ptr)) __ret; \
- if (likely(boot_cpu_data.x86 > 3)) \
- __ret = __cmpxchg((ptr), (unsigned long)(o), \
- (unsigned long)(n), sizeof(*(ptr))); \
- else \
- __ret = cmpxchg_386((ptr), (unsigned long)(o), \
- (unsigned long)(n), sizeof(*(ptr))); \
- __ret; \
- })
- #endif
-
- #ifdef CONFIG_X86_CMPXCHG64
-
- static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old,
- unsigned long long new)
- {
- unsigned long long prev;
- __asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3"
- : "=A"(prev)
- : "b"((unsigned long)new),
- "c"((unsigned long)(new >> 32)),
- "m"(*__xg(ptr)),
- "0"(old)
- : "memory");
- return prev;
- }
-
- #define cmpxchg64(ptr,o,n)\
- ((__typeof__(*(ptr)))__cmpxchg64((ptr),(unsigned long long)(o),\
- (unsigned long long)(n)))
-
- #endif
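-
- /*
-  * cmpxchg64() follows the same retry pattern for 64-bit values
-  * (sketch; 'val64' is illustrative):
-  *
-  *	unsigned long long old64;
-  *	do {
-  *		old64 = val64;
-  *	} while (cmpxchg64(&val64, old64, old64 + 1) != old64);
-  */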
-
- /*
-  * Force strict CPU ordering.
-  * And yes, this is required on UP too when we're talking
-  * to devices.
-  *
-  * For now, "wmb()" doesn't actually do anything, as all
-  * Intel CPUs follow what Intel calls *Processor Order*,
-  * in which all writes are seen in program order even
-  * outside the CPU.
-  *
-  * I expect future Intel CPUs to have a weaker ordering,
-  * but I'd also expect them to finally get their act together
-  * and add some real memory barriers if so.
-  *
-  * Some non-Intel clones support out-of-order stores; wmb() ceases
-  * to be a nop for these.
-  */
-
-
- /*
- * Actually only lfence would be needed for mb() because all stores done
- * by the kernel should already be ordered. But keep a full barrier for now.
- */
-
- #define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
- #define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
-
- /**
- * read_barrier_depends - Flush all pending reads that subsequent reads
- * depend on.
- *
- * No data-dependent reads from memory-like regions are ever reordered
- * over this barrier. All reads preceding this primitive are guaranteed
- * to access memory (but not necessarily other CPUs' caches) before any
- * reads following this primitive that depend on the data returned by
- * any of the preceding reads. This primitive is much lighter weight than
- * rmb() on most CPUs, and is never heavier weight than is
- * rmb().
- *
- * These ordering constraints are respected by both the local CPU
- * and the compiler.
- *
- * Ordering is not guaranteed by anything other than these primitives,
- * not even by data dependencies. See the documentation for
- * memory_barrier() for examples and URLs to more information.
- *
- * For example, the following code would force ordering (the initial
- * value of "a" is zero, "b" is one, and "p" is "&a"):
- *
- * <programlisting>
- * CPU 0 CPU 1
- *
- * b = 2;
- * memory_barrier();
- * p = &b; q = p;
- * read_barrier_depends();
- * d = *q;
- * </programlisting>
- *
- * because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends(). However,
- * the following code, with the same initial values for "a" and "b":
- *
- * <programlisting>
- * CPU 0 CPU 1
- *
- * a = 2;
- * memory_barrier();
- * b = 3; y = b;
- * read_barrier_depends();
- * x = a;
- * </programlisting>
- *
- * does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b". Therefore, on some CPUs, such
- * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
- * in cases like this where there are no data dependencies.
- **/
-
- #define read_barrier_depends() do { } while(0)
-
- #ifdef CONFIG_X86_OOSTORE
- /* There are actually no OOO-store-capable CPUs yet that do SSE,
-    but allow for the possibility already. */
- #define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
- #else
- #define wmb() __asm__ __volatile__ ("": : :"memory")
- #endif
-
- #ifdef CONFIG_SMP
- #define smp_mb() mb()
- #define smp_rmb() rmb()
- #define smp_wmb() wmb()
- #define smp_read_barrier_depends() read_barrier_depends()
- #define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
- #else
- #define smp_mb() barrier()
- #define smp_rmb() barrier()
- #define smp_wmb() barrier()
- #define smp_read_barrier_depends() do { } while(0)
- #define set_mb(var, value) do { var = value; barrier(); } while (0)
- #endif
-
- #define set_wmb(var, value) do { var = value; wmb(); } while (0)
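-
- /*
-  * Pairing sketch: the classic producer/consumer use of smp_wmb() on the
-  * writer with smp_rmb() on the reader ('data' and 'flag' illustrative):
-  *
-  *	CPU 0				CPU 1
-  *
-  *	data = 42;			while (!flag)
-  *	smp_wmb();				;
-  *	flag = 1;			smp_rmb();
-  *					assert(data == 42);
-  */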
-
- /* interrupt control.. */
- #define local_save_flags(x) do { typecheck(unsigned long,x); __asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */); } while (0)
- #define local_irq_restore(x) do { typecheck(unsigned long,x); __asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory", "cc"); } while (0)
- #define local_irq_disable() __asm__ __volatile__("cli": : :"memory")
- #define local_irq_enable() __asm__ __volatile__("sti": : :"memory")
- /* used in the idle loop; sti takes effect only after the next instruction,
-    so the "sti; hlt" pair cannot be split by an interrupt */
- #define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory")
- /* used when interrupts are already enabled or to shutdown the processor */
- #define halt() __asm__ __volatile__("hlt": : :"memory")
-
- #define irqs_disabled() \
- ({ \
- unsigned long flags; \
- local_save_flags(flags); \
- !(flags & (1<<9)); /* EFLAGS bit 9 is IF (interrupt enable) */ \
- })
-
- /* For spinlocks etc */
- #define local_irq_save(x) __asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */ :"memory")
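-
- /*
-  * Usage sketch: protect a short critical section against local
-  * interrupts without assuming they were enabled on entry:
-  *
-  *	unsigned long flags;
-  *
-  *	local_irq_save(flags);
-  *	...touch interrupt-shared state...
-  *	local_irq_restore(flags);
-  */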
-
- /*
- * disable hlt during certain critical i/o operations
- */
- #define HAVE_DISABLE_HLT
- void disable_hlt(void);
- void enable_hlt(void);
-
- extern int es7000_plat;
- void cpu_idle_wait(void);
-
- /*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible:
- */
- static inline void sched_cacheflush(void)
- {
- wbinvd();
- }
-
- extern unsigned long arch_align_stack(unsigned long sp);
- extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
-
- void default_idle(void);
-
- #endif
-