| #include <linux/init.h> |
| #include <asm/processor.h> |
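
/*
 * Determine Intel CPU cache sizes from the one-byte descriptors
 * returned by CPUID leaf 2 and report them at boot.
 */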
| |
| #define LVL_1_INST 1 |
| #define LVL_1_DATA 2 |
| #define LVL_2 3 |
| #define LVL_3 4 |
| #define LVL_TRACE 5 |
| |
struct _cache_table
{
	unsigned char descriptor;	/* descriptor byte from CPUID leaf 2 */
	char cache_type;		/* one of the LVL_* values above */
	short size;			/* in KB (K-uops for the trace cache) */
};
| |
/* All the cache descriptor types we care about (no TLB entries).
   Sizes are in KB, except for the trace cache, where they are in K-uops. */
| static struct _cache_table cache_table[] __initdata = |
| { |
| { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */ |
| { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */ |
| { 0x0a, LVL_1_DATA, 8 }, /* 2 way set assoc, 32 byte line size */ |
| { 0x0c, LVL_1_DATA, 16 }, /* 4-way set assoc, 32 byte line size */ |
| { 0x22, LVL_3, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */ |
| { 0x23, LVL_3, 1024 }, /* 8-way set assoc, sectored cache, 64 byte line size */ |
| { 0x25, LVL_3, 2048 }, /* 8-way set assoc, sectored cache, 64 byte line size */ |
| { 0x29, LVL_3, 4096 }, /* 8-way set assoc, sectored cache, 64 byte line size */ |
| { 0x2c, LVL_1_DATA, 32 }, /* 8-way set assoc, 64 byte line size */ |
| { 0x30, LVL_1_INST, 32 }, /* 8-way set assoc, 64 byte line size */ |
| { 0x39, LVL_2, 128 }, /* 4-way set assoc, sectored cache, 64 byte line size */ |
| { 0x3b, LVL_2, 128 }, /* 2-way set assoc, sectored cache, 64 byte line size */ |
| { 0x3c, LVL_2, 256 }, /* 4-way set assoc, sectored cache, 64 byte line size */ |
| { 0x41, LVL_2, 128 }, /* 4-way set assoc, 32 byte line size */ |
| { 0x42, LVL_2, 256 }, /* 4-way set assoc, 32 byte line size */ |
| { 0x43, LVL_2, 512 }, /* 4-way set assoc, 32 byte line size */ |
| { 0x44, LVL_2, 1024 }, /* 4-way set assoc, 32 byte line size */ |
| { 0x45, LVL_2, 2048 }, /* 4-way set assoc, 32 byte line size */ |
| { 0x60, LVL_1_DATA, 16 }, /* 8-way set assoc, sectored cache, 64 byte line size */ |
| { 0x66, LVL_1_DATA, 8 }, /* 4-way set assoc, sectored cache, 64 byte line size */ |
| { 0x67, LVL_1_DATA, 16 }, /* 4-way set assoc, sectored cache, 64 byte line size */ |
| { 0x68, LVL_1_DATA, 32 }, /* 4-way set assoc, sectored cache, 64 byte line size */ |
| { 0x70, LVL_TRACE, 12 }, /* 8-way set assoc */ |
| { 0x71, LVL_TRACE, 16 }, /* 8-way set assoc */ |
| { 0x72, LVL_TRACE, 32 }, /* 8-way set assoc */ |
| { 0x78, LVL_2, 1024 }, /* 4-way set assoc, 64 byte line size */ |
| { 0x79, LVL_2, 128 }, /* 8-way set assoc, sectored cache, 64 byte line size */ |
| { 0x7a, LVL_2, 256 }, /* 8-way set assoc, sectored cache, 64 byte line size */ |
| { 0x7b, LVL_2, 512 }, /* 8-way set assoc, sectored cache, 64 byte line size */ |
| { 0x7c, LVL_2, 1024 }, /* 8-way set assoc, sectored cache, 64 byte line size */ |
| { 0x7d, LVL_2, 2048 }, /* 8-way set assoc, 64 byte line size */ |
| { 0x7f, LVL_2, 512 }, /* 2-way set assoc, 64 byte line size */ |
| { 0x82, LVL_2, 256 }, /* 8-way set assoc, 32 byte line size */ |
| { 0x83, LVL_2, 512 }, /* 8-way set assoc, 32 byte line size */ |
| { 0x84, LVL_2, 1024 }, /* 8-way set assoc, 32 byte line size */ |
| { 0x85, LVL_2, 2048 }, /* 8-way set assoc, 32 byte line size */ |
| { 0x86, LVL_2, 512 }, /* 4-way set assoc, 64 byte line size */ |
| { 0x87, LVL_2, 1024 }, /* 8-way set assoc, 64 byte line size */ |
	{ 0x00, 0, 0 }			/* end-of-table marker */
| }; |
| |
| unsigned int __init init_intel_cacheinfo(struct cpuinfo_x86 *c) |
| { |
| unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */ |
| |
	if (c->cpuid_level > 1) {
		/* CPUID leaf 2 (cache and TLB descriptors) is supported */
| int i, j, n; |
| int regs[4]; |
| unsigned char *dp = (unsigned char *)regs; |
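
		/*
		 * CPUID leaf 2 packs one descriptor per byte into EAX, EBX,
		 * ECX and EDX; treating the four registers as a 16-byte
		 * array (via dp) lets a single loop walk every descriptor.
		 */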
| |
		/* The low byte of EAX tells us how many times CPUID(2)
		   must be called to fetch all descriptors */
		n = cpuid_eax(2) & 0xFF;
| |
| for ( i = 0 ; i < n ; i++ ) { |
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
| |
			/* If bit 31 of a register is set, that register holds
			   no valid descriptors; check all four so such bytes
			   are skipped in the scan below */
			for ( j = 0 ; j < 4 ; j++ ) {
				if ( regs[j] < 0 ) regs[j] = 0;
			}
| |
			/* Byte 0 of EAX is the repeat count read above, not
			   a descriptor */
| for ( j = 1 ; j < 16 ; j++ ) { |
| unsigned char des = dp[j]; |
| unsigned char k = 0; |
| |
| /* look up this descriptor in the table */ |
| while (cache_table[k].descriptor != 0) |
| { |
| if (cache_table[k].descriptor == des) { |
| switch (cache_table[k].cache_type) { |
| case LVL_1_INST: |
| l1i += cache_table[k].size; |
| break; |
| case LVL_1_DATA: |
| l1d += cache_table[k].size; |
| break; |
| case LVL_2: |
| l2 += cache_table[k].size; |
| break; |
| case LVL_3: |
| l3 += cache_table[k].size; |
| break; |
| case LVL_TRACE: |
| trace += cache_table[k].size; |
| break; |
| } |
| |
| break; |
| } |
| |
| k++; |
| } |
| } |
| } |
| |
	if ( trace )
		printk(KERN_INFO "CPU: Trace cache: %dK uops", trace);
	else if ( l1i )
		printk(KERN_INFO "CPU: L1 I cache: %dK", l1i);

	if ( l1d ) {
		/* continue the line started above, if any */
		if ( trace || l1i )
			printk(", L1 D cache: %dK\n", l1d);
		else
			printk(KERN_INFO "CPU: L1 D cache: %dK\n", l1d);
	} else if ( trace || l1i )
		printk("\n");
| if ( l2 ) |
| printk(KERN_INFO "CPU: L2 cache: %dK\n", l2); |
| if ( l3 ) |
| printk(KERN_INFO "CPU: L3 cache: %dK\n", l3); |
| |
| /* |
| * This assumes the L3 cache is shared; it typically lives in |
| * the northbridge. The L1 caches are included by the L2 |
| * cache, and so should not be included for the purpose of |
| * SMP switching weights. |
| */ |
| c->x86_cache_size = l2 ? l2 : (l1i+l1d); |
| } |
| |
| return l2; |
| } |