#ifndef I386_BITS_CPU_H
#define I386_BITS_CPU_H

#include <stdint.h>

/* Sample usage: CPU_FEATURE_P(cpu.x86_capability, FPU) */
#define CPU_FEATURE_P(CAP, FEATURE) \
    (!!((CAP)[(X86_FEATURE_##FEATURE)/32] & (1 << ((X86_FEATURE_##FEATURE) & 0x1f))))

#define NCAPINTS 4 /* Currently we have 4 32-bit words worth of info */
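
/*
 * For illustration: each X86_FEATURE_* value encodes word*32 + bit, and
 * CPU_FEATURE_P() splits it back apart.  X86_FEATURE_PAE is (0*32 + 6), so
 * CPU_FEATURE_P(cpu.x86_capability, PAE) expands to
 *
 *   (!!(cpu.x86_capability[0] & (1 << 6)))
 *
 * i.e. it tests bit 6 of capability word 0.
 */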

/* Intel-defined CPU features, CPUID level 0x00000001, word 0 */
#define X86_FEATURE_FPU (0*32+ 0) /* Onboard FPU */
#define X86_FEATURE_VME (0*32+ 1) /* Virtual Mode Extensions */
#define X86_FEATURE_DE (0*32+ 2) /* Debugging Extensions */
#define X86_FEATURE_PSE (0*32+ 3) /* Page Size Extensions */
#define X86_FEATURE_TSC (0*32+ 4) /* Time Stamp Counter */
#define X86_FEATURE_MSR (0*32+ 5) /* Model-Specific Registers, RDMSR, WRMSR */
#define X86_FEATURE_PAE (0*32+ 6) /* Physical Address Extensions */
#define X86_FEATURE_MCE (0*32+ 7) /* Machine Check Exception */
#define X86_FEATURE_CX8 (0*32+ 8) /* CMPXCHG8 instruction */
#define X86_FEATURE_APIC (0*32+ 9) /* Onboard APIC */
#define X86_FEATURE_SEP (0*32+11) /* SYSENTER/SYSEXIT */
#define X86_FEATURE_MTRR (0*32+12) /* Memory Type Range Registers */
#define X86_FEATURE_PGE (0*32+13) /* Page Global Enable */
#define X86_FEATURE_MCA (0*32+14) /* Machine Check Architecture */
#define X86_FEATURE_CMOV (0*32+15) /* CMOV instruction (FCMOVCC and FCOMI too if FPU present) */
#define X86_FEATURE_PAT (0*32+16) /* Page Attribute Table */
#define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */
#define X86_FEATURE_PN (0*32+18) /* Processor serial number */
#define X86_FEATURE_CLFLSH (0*32+19) /* Supports the CLFLUSH instruction */
#define X86_FEATURE_DTES (0*32+21) /* Debug Trace Store */
#define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */
#define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */
#define X86_FEATURE_FXSR (0*32+24) /* FXSAVE and FXRSTOR instructions (fast save and restore */
                                   /* of FPU context), and CR4.OSFXSR available */
#define X86_FEATURE_XMM (0*32+25) /* Streaming SIMD Extensions */
#define X86_FEATURE_XMM2 (0*32+26) /* Streaming SIMD Extensions-2 */
#define X86_FEATURE_SELFSNOOP (0*32+27) /* CPU self snoop */
#define X86_FEATURE_HT (0*32+28) /* Hyper-Threading */
#define X86_FEATURE_ACC (0*32+29) /* Automatic clock control */
#define X86_FEATURE_IA64 (0*32+30) /* IA-64 processor */

/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
/* Don't duplicate feature flags which are redundant with Intel! */
#define X86_FEATURE_SYSCALL (1*32+11) /* SYSCALL/SYSRET */
#define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */
#define X86_FEATURE_LM (1*32+29) /* Long Mode (x86-64) */
#define X86_FEATURE_3DNOWEXT (1*32+30) /* AMD 3DNow! extensions */
#define X86_FEATURE_3DNOW (1*32+31) /* 3DNow! */

/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
#define X86_FEATURE_RECOVERY (2*32+ 0) /* CPU in recovery mode */
#define X86_FEATURE_LONGRUN (2*32+ 1) /* Longrun power control */
#define X86_FEATURE_LRTI (2*32+ 3) /* LongRun table interface */

/* Other features, Linux-defined mapping, word 3 */
/* This range is used for feature bits which conflict or are synthesized */
#define X86_FEATURE_CXMMX (3*32+ 0) /* Cyrix MMX extensions */
#define X86_FEATURE_K6_MTRR (3*32+ 1) /* AMD K6 nonstandard MTRRs */
#define X86_FEATURE_CYRIX_ARR (3*32+ 2) /* Cyrix ARRs (= MTRRs) */
#define X86_FEATURE_CENTAUR_MCR (3*32+ 3) /* Centaur MCRs (= MTRRs) */

#define MAX_X86_VENDOR_ID 16
struct cpuinfo_x86 {
    uint8_t x86;            /* CPU family */
    uint8_t x86_model;
    uint8_t x86_mask;

    int cpuid_level;        /* Maximum supported CPUID level, -1=no CPUID */
    unsigned x86_capability[NCAPINTS];
    char x86_vendor_id[MAX_X86_VENDOR_ID];
};
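
/*
 * The x86_capability[] words follow the word numbers encoded in the
 * X86_FEATURE_* values above: word 0 is CPUID 0x00000001 EDX, word 1 is
 * CPUID 0x80000001 EDX, word 2 is CPUID 0x80860001 EDX, and word 3 is the
 * Linux-defined/synthesized word.  A minimal sketch of filling word 0
 * (illustrative only, using any struct cpuinfo_x86 instance such as the
 * cpu_info declared at the end of this header):
 *
 *   unsigned int eax, ebx, ecx, edx;
 *   cpuid(1, &eax, &ebx, &ecx, &edx);
 *   cpu_info.x86_capability[0] = edx;
 */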


#define X86_VENDOR_INTEL 0
#define X86_VENDOR_CYRIX 1
#define X86_VENDOR_AMD 2
#define X86_VENDOR_UMC 3
#define X86_VENDOR_NEXGEN 4
#define X86_VENDOR_CENTAUR 5
#define X86_VENDOR_RISE 6
#define X86_VENDOR_TRANSMETA 7
#define X86_VENDOR_NSC 8
#define X86_VENDOR_UNKNOWN 0xff

/*
 * EFLAGS bits
 */
#define X86_EFLAGS_CF 0x00000001 /* Carry Flag */
#define X86_EFLAGS_PF 0x00000004 /* Parity Flag */
#define X86_EFLAGS_AF 0x00000010 /* Auxiliary carry Flag */
#define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */
#define X86_EFLAGS_SF 0x00000080 /* Sign Flag */
#define X86_EFLAGS_TF 0x00000100 /* Trap Flag */
#define X86_EFLAGS_IF 0x00000200 /* Interrupt Flag */
#define X86_EFLAGS_DF 0x00000400 /* Direction Flag */
#define X86_EFLAGS_OF 0x00000800 /* Overflow Flag */
#define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */
#define X86_EFLAGS_NT 0x00004000 /* Nested Task */
#define X86_EFLAGS_RF 0x00010000 /* Resume Flag */
#define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */
#define X86_EFLAGS_AC 0x00040000 /* Alignment Check */
#define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */
#define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
#define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */
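
/*
 * A minimal sketch of the conventional i386 CPUID-detection test, assuming a
 * 32-bit GCC-style compiler: try to toggle the ID bit in EFLAGS; if the
 * change sticks, the CPUID instruction is available.
 *
 *   unsigned int f1, f2;
 *   __asm__("pushfl ; pushfl ; popl %0 ; movl %0,%1 ; xorl %2,%0 ;"
 *           "pushl %0 ; popfl ; pushfl ; popl %0 ; popfl"
 *           : "=&r" (f1), "=&r" (f2)
 *           : "ir" (X86_EFLAGS_ID));
 *   int have_cpuid = !!((f1 ^ f2) & X86_EFLAGS_ID);
 */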

/*
 * Generic CPUID function
 */
static inline void cpuid(int op,
                         unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)
{
    __asm__("cpuid"
            : "=a" (*eax),
              "=b" (*ebx),
              "=c" (*ecx),
              "=d" (*edx)
            : "0" (op));
}
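
/*
 * Sample usage (a sketch only; assumes memcpy() is available in the build
 * environment): read the 12-byte vendor string from CPUID leaf 0.  The
 * string comes back in EBX, EDX, ECX order, e.g. "GenuineIntel".
 *
 *   unsigned int eax, ebx, ecx, edx;
 *   char vendor[13];
 *   cpuid(0, &eax, &ebx, &ecx, &edx);
 *   memcpy(vendor + 0, &ebx, 4);
 *   memcpy(vendor + 4, &edx, 4);
 *   memcpy(vendor + 8, &ecx, 4);
 *   vendor[12] = '\0';
 */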

/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
    unsigned int eax;

    __asm__("cpuid"
            : "=a" (eax)
            : "0" (op)
            : "bx", "cx", "dx");
    return eax;
}
static inline unsigned int cpuid_ebx(unsigned int op)
{
    unsigned int eax, ebx;

    __asm__("cpuid"
            : "=a" (eax), "=b" (ebx)
            : "0" (op)
            : "cx", "dx");
    return ebx;
}
static inline unsigned int cpuid_ecx(unsigned int op)
{
    unsigned int eax, ecx;

    __asm__("cpuid"
            : "=a" (eax), "=c" (ecx)
            : "0" (op)
            : "bx", "dx");
    return ecx;
}
static inline unsigned int cpuid_edx(unsigned int op)
{
    unsigned int eax, edx;

    __asm__("cpuid"
            : "=a" (eax), "=d" (edx)
            : "0" (op)
            : "bx", "cx");
    return edx;
}
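
/*
 * Sample usage (a sketch only): the single-datum helpers are handy for quick
 * feature probes, e.g. testing for a local APIC and a TSC from CPUID leaf 1,
 * which supplies capability word 0 in EDX:
 *
 *   if (cpuid_eax(0) >= 1) {
 *       unsigned int features = cpuid_edx(1);
 *       int has_apic = !!(features & (1 << (X86_FEATURE_APIC & 0x1f)));
 *       int has_tsc  = !!(features & (1 << (X86_FEATURE_TSC & 0x1f)));
 *   }
 */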

/*
 * Intel CPU features in CR4
 */
#define X86_CR4_VME 0x0001 /* enable vm86 extensions */
#define X86_CR4_PVI 0x0002 /* virtual interrupts flag enable */
#define X86_CR4_TSD 0x0004 /* disable time stamp at ipl 3 */
#define X86_CR4_DE 0x0008 /* enable debugging extensions */
#define X86_CR4_PSE 0x0010 /* enable page size extensions */
#define X86_CR4_PAE 0x0020 /* enable physical address extensions */
#define X86_CR4_MCE 0x0040 /* Machine check enable */
#define X86_CR4_PGE 0x0080 /* enable global pages */
#define X86_CR4_PCE 0x0100 /* enable performance counters at ipl 3 */
#define X86_CR4_OSFXSR 0x0200 /* enable fast FPU save and restore */
#define X86_CR4_OSXMMEXCPT 0x0400 /* enable unmasked SSE exceptions */


#define MSR_K6_EFER 0xC0000080
/* EFER bits: */
#define _EFER_SCE 0 /* SYSCALL/SYSRET */
#define _EFER_LME 8 /* Long mode enable */
#define _EFER_LMA 10 /* Long mode active (read-only) */
#define _EFER_NX 11 /* No execute enable */

#define EFER_SCE (1<<_EFER_SCE)
#define EFER_LME (1<<_EFER_LME)
#define EFER_LMA (1<<_EFER_LMA)
#define EFER_NX (1<<_EFER_NX)

#define rdmsr(msr,val1,val2) \
    __asm__ __volatile__("rdmsr" \
                         : "=a" (val1), "=d" (val2) \
                         : "c" (msr))

#define wrmsr(msr,val1,val2) \
    __asm__ __volatile__("wrmsr" \
                         : /* no outputs */ \
                         : "c" (msr), "a" (val1), "d" (val2))


#define read_cr0() ({ \
    unsigned int __dummy; \
    __asm__( \
        "movl %%cr0, %0\n\t" \
        : "=r" (__dummy)); \
    __dummy; \
})
#define write_cr0(x) \
    __asm__("movl %0,%%cr0" : : "r" (x));

#define read_cr3() ({ \
    unsigned int __dummy; \
    __asm__( \
        "movl %%cr3, %0\n\t" \
        : "=r" (__dummy)); \
    __dummy; \
})
#define write_cr3(x) \
    __asm__("movl %0,%%cr3" : : "r" (x));


#define read_cr4() ({ \
    unsigned int __dummy; \
    __asm__( \
        "movl %%cr4, %0\n\t" \
        : "=r" (__dummy)); \
    __dummy; \
})
#define write_cr4(x) \
    __asm__("movl %0,%%cr4" : : "r" (x));


extern struct cpuinfo_x86 cpu_info;
#ifdef CONFIG_X86_64
extern void cpu_setup(void);
#else
#define cpu_setup() do {} while(0)
#endif

#endif /* I386_BITS_CPU_H */