* array of pre-defined configurations for various parts of the memory
* hierarchy.
* According to Intel Processor Identification, App Note 485.
+ *
+ * If an L3 cache is found, then data for it rather than for the L2
+ * is returned via *LLc.
*/
static
-Int Intel_cache_info(Int level, cache_t* I1c, cache_t* D1c, cache_t* L2c)
+Int Intel_cache_info(Int level, cache_t* I1c, cache_t* D1c, cache_t* LLc)
{
Int cpuid1_eax;
Int cpuid1_ignore;
UChar info[16];
Int i, trials;
Bool L2_found = False;
+ /* If we see L3 cache info, copy it into L3c. Then, at the end,
+ copy it into *LLc. Hence if an L3 cache is specified, *LLc will
+ eventually contain a description of it rather than the L2 cache.
+ The use of the L3c intermediary makes this process independent
+ of the order in which the cache specifications appear in
+ info[]. */
+ Bool L3_found = False;
+ cache_t L3c = { 0, 0, 0 };
if (level < 2) {
VG_(dmsg)("warning: CPUID level < 2 for Intel processor (%d)\n", level);
case 0x90: case 0x96: case 0x9b:
VG_(tool_panic)("IA-64 cache detected?!");
- case 0x22: case 0x23: case 0x25: case 0x29:
- case 0x46: case 0x47: case 0x4a: case 0x4b: case 0x4c: case 0x4d:
- case 0xe2: case 0xe3: case 0xe4: case 0xea: case 0xeb: case 0xec:
- VG_(dmsg)("warning: L3 cache detected but ignored\n");
- break;
+ /* L3 cache info. */
+ case 0x22: L3c = (cache_t) { 512, 4, 64 }; L3_found = True; break;
+ case 0x23: L3c = (cache_t) { 1024, 8, 64 }; L3_found = True; break;
+ case 0x25: L3c = (cache_t) { 2048, 8, 64 }; L3_found = True; break;
+ case 0x29: L3c = (cache_t) { 4096, 8, 64 }; L3_found = True; break;
+ case 0x46: L3c = (cache_t) { 4096, 4, 64 }; L3_found = True; break;
+ case 0x47: L3c = (cache_t) { 8192, 8, 64 }; L3_found = True; break;
+ case 0x4a: L3c = (cache_t) { 6144, 12, 64 }; L3_found = True; break;
+ case 0x4b: L3c = (cache_t) { 8192, 16, 64 }; L3_found = True; break;
+ case 0x4c: L3c = (cache_t) { 12288, 12, 64 }; L3_found = True; break;
+ case 0x4d: L3c = (cache_t) { 16384, 16, 64 }; L3_found = True; break;
+ case 0xd0: L3c = (cache_t) { 512, 4, 64 }; L3_found = True; break;
+ case 0xd1: L3c = (cache_t) { 1024, 4, 64 }; L3_found = True; break;
+ case 0xd2: L3c = (cache_t) { 2048, 4, 64 }; L3_found = True; break;
+ case 0xd6: L3c = (cache_t) { 1024, 8, 64 }; L3_found = True; break;
+ case 0xd7: L3c = (cache_t) { 2048, 8, 64 }; L3_found = True; break;
+ case 0xd8: L3c = (cache_t) { 4096, 8, 64 }; L3_found = True; break;
+ case 0xdc: L3c = (cache_t) { 1536, 12, 64 }; L3_found = True; break;
+ case 0xdd: L3c = (cache_t) { 3072, 12, 64 }; L3_found = True; break;
+ case 0xde: L3c = (cache_t) { 6144, 12, 64 }; L3_found = True; break;
+ case 0xe2: L3c = (cache_t) { 2048, 16, 64 }; L3_found = True; break;
+ case 0xe3: L3c = (cache_t) { 4096, 16, 64 }; L3_found = True; break;
+ case 0xe4: L3c = (cache_t) { 8192, 16, 64 }; L3_found = True; break;
+ case 0xea: L3c = (cache_t) { 12288, 24, 64 }; L3_found = True; break;
+ case 0xeb: L3c = (cache_t) { 18432, 24, 64 }; L3_found = True; break;
+ case 0xec: L3c = (cache_t) { 24576, 24, 64 }; L3_found = True; break;
/* Described as "MLC" in Intel documentation */
- case 0x21: *L2c = (cache_t) { 256, 8, 64 }; L2_found = True; break;
+ case 0x21: *LLc = (cache_t) { 256, 8, 64 }; L2_found = True; break;
/* These are sectored, whatever that means */
- case 0x39: *L2c = (cache_t) { 128, 4, 64 }; L2_found = True; break;
- case 0x3c: *L2c = (cache_t) { 256, 4, 64 }; L2_found = True; break;
+ case 0x39: *LLc = (cache_t) { 128, 4, 64 }; L2_found = True; break;
+ case 0x3c: *LLc = (cache_t) { 256, 4, 64 }; L2_found = True; break;
/* If a P6 core, this means "no L2 cache".
If a P4 core, this means "no L3 cache".
case 0x40:
break;
- case 0x41: *L2c = (cache_t) { 128, 4, 32 }; L2_found = True; break;
- case 0x42: *L2c = (cache_t) { 256, 4, 32 }; L2_found = True; break;
- case 0x43: *L2c = (cache_t) { 512, 4, 32 }; L2_found = True; break;
- case 0x44: *L2c = (cache_t) { 1024, 4, 32 }; L2_found = True; break;
- case 0x45: *L2c = (cache_t) { 2048, 4, 32 }; L2_found = True; break;
- case 0x48: *L2c = (cache_t) { 3072,12, 64 }; L2_found = True; break;
+ case 0x41: *LLc = (cache_t) { 128, 4, 32 }; L2_found = True; break;
+ case 0x42: *LLc = (cache_t) { 256, 4, 32 }; L2_found = True; break;
+ case 0x43: *LLc = (cache_t) { 512, 4, 32 }; L2_found = True; break;
+ case 0x44: *LLc = (cache_t) { 1024, 4, 32 }; L2_found = True; break;
+ case 0x45: *LLc = (cache_t) { 2048, 4, 32 }; L2_found = True; break;
+ case 0x48: *LLc = (cache_t) { 3072, 12, 64 }; L2_found = True; break;
+ case 0x4e: *LLc = (cache_t) { 6144, 24, 64 }; L2_found = True; break;
case 0x49:
- if ((family == 15) && (model == 6))
- /* On Xeon MP (family F, model 6), this is for L3 */
- VG_(dmsg)("warning: L3 cache detected but ignored\n");
- else
- *L2c = (cache_t) { 4096, 16, 64 }; L2_found = True;
- break;
- case 0x4e: *L2c = (cache_t) { 6144, 24, 64 }; L2_found = True; break;
+ if (family == 15 && model == 6) {
+ /* On Xeon MP (family F, model 6), this is for L3 */
+ L3c = (cache_t) { 4096, 16, 64 }; L3_found = True;
+ } else {
+ *LLc = (cache_t) { 4096, 16, 64 }; L2_found = True;
+ }
+ break;
/* These are sectored, whatever that means */
case 0x60: *D1c = (cache_t) { 16, 8, 64 }; break; /* sectored */
break;
/* not sectored, whatever that might mean */
- case 0x78: *L2c = (cache_t) { 1024, 4, 64 }; L2_found = True; break;
+ case 0x78: *LLc = (cache_t) { 1024, 4, 64 }; L2_found = True; break;
/* These are sectored, whatever that means */
- case 0x79: *L2c = (cache_t) { 128, 8, 64 }; L2_found = True; break;
- case 0x7a: *L2c = (cache_t) { 256, 8, 64 }; L2_found = True; break;
- case 0x7b: *L2c = (cache_t) { 512, 8, 64 }; L2_found = True; break;
- case 0x7c: *L2c = (cache_t) { 1024, 8, 64 }; L2_found = True; break;
- case 0x7d: *L2c = (cache_t) { 2048, 8, 64 }; L2_found = True; break;
- case 0x7e: *L2c = (cache_t) { 256, 8, 128 }; L2_found = True; break;
-
- case 0x7f: *L2c = (cache_t) { 512, 2, 64 }; L2_found = True; break;
- case 0x80: *L2c = (cache_t) { 512, 8, 64 }; L2_found = True; break;
-
- case 0x81: *L2c = (cache_t) { 128, 8, 32 }; L2_found = True; break;
- case 0x82: *L2c = (cache_t) { 256, 8, 32 }; L2_found = True; break;
- case 0x83: *L2c = (cache_t) { 512, 8, 32 }; L2_found = True; break;
- case 0x84: *L2c = (cache_t) { 1024, 8, 32 }; L2_found = True; break;
- case 0x85: *L2c = (cache_t) { 2048, 8, 32 }; L2_found = True; break;
- case 0x86: *L2c = (cache_t) { 512, 4, 64 }; L2_found = True; break;
- case 0x87: *L2c = (cache_t) { 1024, 8, 64 }; L2_found = True; break;
+ case 0x79: *LLc = (cache_t) { 128, 8, 64 }; L2_found = True; break;
+ case 0x7a: *LLc = (cache_t) { 256, 8, 64 }; L2_found = True; break;
+ case 0x7b: *LLc = (cache_t) { 512, 8, 64 }; L2_found = True; break;
+ case 0x7c: *LLc = (cache_t) { 1024, 8, 64 }; L2_found = True; break;
+ case 0x7d: *LLc = (cache_t) { 2048, 8, 64 }; L2_found = True; break;
+ case 0x7e: *LLc = (cache_t) { 256, 8, 128 }; L2_found = True; break;
+ case 0x7f: *LLc = (cache_t) { 512, 2, 64 }; L2_found = True; break;
+ case 0x80: *LLc = (cache_t) { 512, 8, 64 }; L2_found = True; break;
+ case 0x81: *LLc = (cache_t) { 128, 8, 32 }; L2_found = True; break;
+ case 0x82: *LLc = (cache_t) { 256, 8, 32 }; L2_found = True; break;
+ case 0x83: *LLc = (cache_t) { 512, 8, 32 }; L2_found = True; break;
+ case 0x84: *LLc = (cache_t) { 1024, 8, 32 }; L2_found = True; break;
+ case 0x85: *LLc = (cache_t) { 2048, 8, 32 }; L2_found = True; break;
+ case 0x86: *LLc = (cache_t) { 512, 4, 64 }; L2_found = True; break;
+ case 0x87: *LLc = (cache_t) { 1024, 8, 64 }; L2_found = True; break;
/* Ignore prefetch information */
case 0xf0: case 0xf1:
}
}
+ /* If we found an L3 cache, throw away the L2 data and use the L3's instead. */
+ if (L3_found) {
+ VG_(dmsg)("warning: L3 cache found, using its data for the LL simulation.\n");
+ *LLc = L3c;
+ L2_found = True;
+ }
+
if (!L2_found)
- VG_(dmsg)("warning: L2 cache not installed, ignore L2 results.\n");
+ VG_(dmsg)("warning: L2 cache not installed, ignore LL results.\n");
return 0;
}
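
/* For illustration: a minimal standalone sketch of the CPUID leaf-2 walk
   that fills info[16] in Intel_cache_info above (the loop itself is elided
   from this excerpt).  It assumes GCC/Clang's <cpuid.h>; the real code uses
   VG_(cpuid), which wraps the same instruction.  Per App Note 485, byte 0
   of EAX holds the number of times the leaf must be queried, and a
   register's descriptor bytes are valid only when its bit 31 is clear.
   The function name is hypothetical. */
#include <stdio.h>
#include <cpuid.h>

static void dump_leaf2_descriptors(void)
{
   unsigned int r[4];
   __cpuid(2, r[0], r[1], r[2], r[3]);
   int trials = (r[0] & 0xFF) - 1;      /* remaining repetitions */
   for (;;) {
      r[0] &= 0xFFFFFF00u;              /* byte 0 of EAX is not a descriptor */
      for (int i = 0; i < 4; i++) {
         if (r[i] & 0x80000000u)        /* bit 31 set => register invalid */
            continue;
         for (int b = 0; b < 4; b++) {
            unsigned char desc = (r[i] >> (8*b)) & 0xFF;
            if (desc != 0)              /* 0x00 means "no information" */
               printf("cache/TLB descriptor 0x%02x\n", desc);
         }
      }
      if (trials-- <= 0)
         break;
      __cpuid(2, r[0], r[1], r[2], r[3]);
   }
}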
* 0x630) have a bug and misreport their L2 size as 1KB (it's really 64KB),
* so we detect that.
*
- * Returns 0 on success, non-zero on failure.
+ * Returns 0 on success, non-zero on failure. As with the Intel code
+ * above, if an L3 cache is found, then data for it rather than for the L2
+ * is returned via *LLc.
*/
+
+/* A small helper */
+static Int decode_AMD_cache_L2_L3_assoc ( Int bits_15_12 )
+{
+ /* Decode an L2/L3 associativity indication. It is encoded
+ differently from the I1/D1 associativity. Returns 1
+ (direct-map) as a safe but suboptimal result for unknown
+ encodings. */
+ switch (bits_15_12 & 0xF) {
+ case 1: return 1; case 2: return 2;
+ case 4: return 4; case 6: return 8;
+ case 8: return 16; case 0xA: return 32;
+ case 0xB: return 48; case 0xC: return 64;
+ case 0xD: return 96; case 0xE: return 128;
+ case 0xF: /* fully associative */
+ case 0: /* L2/L3 cache or TLB is disabled */
+ default:
+ return 1;
+ }
+}
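
/* For illustration, a decode of an assumed raw ECX value from CPUID
   0x80000006 (0x02006140 is a made-up example, not from any real CPU),
   using the helper above.  The function name is hypothetical. */
#include <assert.h>
static void example_decode_L2_ecx(void)
{
   unsigned int L2i = 0x02006140;
   assert(((L2i >> 16) & 0xffff) == 512);                        /* 512 KB        */
   assert(decode_AMD_cache_L2_L3_assoc((L2i >> 12) & 0xf) == 8); /* 8-way         */
   assert((L2i & 0xff) == 64);                                   /* 64-byte lines */
}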
+
static
-Int AMD_cache_info(cache_t* I1c, cache_t* D1c, cache_t* L2c)
+Int AMD_cache_info(cache_t* I1c, cache_t* D1c, cache_t* LLc)
{
UInt ext_level;
UInt dummy, model;
- UInt I1i, D1i, L2i;
+ UInt I1i, D1i, L2i, L3i;
VG_(cpuid)(0x80000000, &ext_level, &dummy, &dummy, &dummy);
}
VG_(cpuid)(0x80000005, &dummy, &dummy, &D1i, &I1i);
- VG_(cpuid)(0x80000006, &dummy, &dummy, &L2i, &dummy);
+ VG_(cpuid)(0x80000006, &dummy, &dummy, &L2i, &L3i);
VG_(cpuid)(0x1, &model, &dummy, &dummy, &dummy);
I1c->assoc = (I1i >> 16) & 0xff;
I1c->line_size = (I1i >> 0) & 0xff;
- L2c->size = (L2i >> 16) & 0xffff; /* Nb: different bits used for L2 */
- L2c->assoc = (L2i >> 12) & 0xf;
- L2c->line_size = (L2i >> 0) & 0xff;
+ LLc->size = (L2i >> 16) & 0xffff; /* Nb: different bits used for L2 */
+ LLc->assoc = decode_AMD_cache_L2_L3_assoc((L2i >> 12) & 0xf);
+ LLc->line_size = (L2i >> 0) & 0xff;
+
+ if (((L3i >> 18) & 0x3fff) > 0) {
+ /* There's an L3 cache. Replace *LLc contents with this info. */
+ /* NB: the test above is "L3 size > 0". I don't know if this is
+ the right way to test presence-vs-absence of an L3; I can't see
+ any guidance on this in the AMD documentation. */
+ LLc->size = ((L3i >> 18) & 0x3fff) * 512;
+ LLc->assoc = decode_AMD_cache_L2_L3_assoc((L3i >> 12) & 0xf);
+ LLc->line_size = (L3i >> 0) & 0xff;
+ VG_(dmsg)("warning: L3 cache found, using its data for the L2 simulation.\n");
+ }
return 0;
}
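
/* Similarly, a worked decode of an assumed CPUID 0x80000006 EDX value,
   using the L3 field layout read above (bits 31-18: size in 512 KB units,
   bits 15-12: assoc encoding, bits 7-0: line size).  The value is
   illustrative, not from any real CPU:
     EDX = 0x00108040
     size      = ((0x00108040 >> 18) & 0x3fff) * 512 = 4 * 512 = 2048 KB
     assoc     = decode_AMD_cache_L2_L3_assoc(0x8)   = 16-way
     line_size = 0x00108040 & 0xff                   = 64 bytes */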
static
-Int get_caches_from_CPUID(cache_t* I1c, cache_t* D1c, cache_t* L2c)
+Int get_caches_from_CPUID(cache_t* I1c, cache_t* D1c, cache_t* LLc)
{
Int level, ret;
Char vendor_id[13];
/* Only handling Intel and AMD chips... no Cyrix, Transmeta, etc */
if (0 == VG_(strcmp)(vendor_id, "GenuineIntel")) {
- ret = Intel_cache_info(level, I1c, D1c, L2c);
+ ret = Intel_cache_info(level, I1c, D1c, LLc);
} else if (0 == VG_(strcmp)(vendor_id, "AuthenticAMD")) {
- ret = AMD_cache_info(I1c, D1c, L2c);
+ ret = AMD_cache_info(I1c, D1c, LLc);
} else if (0 == VG_(strcmp)(vendor_id, "CentaurHauls")) {
/* Total kludge. Pretend to be a VIA Nehemiah. */
I1c->size = 64;
I1c->assoc = 4;
I1c->line_size = 16;
- L2c->size = 64;
- L2c->assoc = 16;
- L2c->line_size = 16;
+ LLc->size = 64;
+ LLc->assoc = 16;
+ LLc->line_size = 16;
ret = 0;
} else {
/* Successful! Convert sizes from KB to bytes */
I1c->size *= 1024;
D1c->size *= 1024;
- L2c->size *= 1024;
+ LLc->size *= 1024;
return ret;
}
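
/* For illustration: a standalone sketch of the elided vendor_id/level
   retrieval at the top of get_caches_from_CPUID.  CPUID leaf 0 returns the
   maximum supported leaf in EAX and the 12-byte vendor string in EBX, EDX,
   ECX (in that order).  Assumes GCC/Clang's <cpuid.h>; the real code uses
   VG_(cpuid).  The function name is hypothetical. */
#include <string.h>
#include <cpuid.h>

static int get_vendor_and_level(char vendor_id[13])
{
   unsigned int eax, ebx, ecx, edx;
   __cpuid(0, eax, ebx, ecx, edx);
   memcpy(vendor_id + 0, &ebx, 4);   /* "Genu" / "Auth" / "Cent" ... */
   memcpy(vendor_id + 4, &edx, 4);   /* "ineI" / "enti" / "aurH" ... */
   memcpy(vendor_id + 8, &ecx, 4);   /* "ntel" / "cAMD" / "auls" ... */
   vendor_id[12] = '\0';
   return (int)eax;                  /* maximum supported CPUID leaf */
}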
-void VG_(configure_caches)(cache_t* I1c, cache_t* D1c, cache_t* L2c,
+void VG_(configure_caches)(cache_t* I1c, cache_t* D1c, cache_t* LLc,
Bool all_caches_clo_defined)
{
Int res;
// Set caches to default.
*I1c = (cache_t) { 65536, 2, 64 };
*D1c = (cache_t) { 65536, 2, 64 };
- *L2c = (cache_t) { 262144, 8, 64 };
+ *LLc = (cache_t) { 262144, 8, 64 };
// Then replace with any info we can get from CPUID.
- res = get_caches_from_CPUID(I1c, D1c, L2c);
+ res = get_caches_from_CPUID(I1c, D1c, LLc);
// Warn if CPUID failed and config not completely specified from cmd line.
if (res != 0 && !all_caches_clo_defined) {