/*  Generic MTRR (Memory Type Range Register) driver.

    Copyright (C) 1997-2000  Richard Gooch
    Copyright (c) 2002	     Patrick Mochel

    This library is free software; you can redistribute it and/or
    modify it under the terms of the GNU Library General Public
    License as published by the Free Software Foundation; either
    version 2 of the License, or (at your option) any later version.

    This library is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
    Library General Public License for more details.

    You should have received a copy of the GNU Library General Public
    License along with this library; if not, write to the Free
    Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

    Richard Gooch may be reached by email at rgooch@atnf.csiro.au
    The postal address is:
      Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.

    Source: "Pentium Pro Family Developer's Manual, Volume 3:
    Operating System Writer's Guide" (Intel document number 242692)

    This was cleaned and made readable by Patrick Mochel <mochel@osdl.org>

    Source: Intel Architecture Software Developers Manual, Volume 3:
    System Programming Guide; Section 9.11. (1997 edition - PPro).
*/
#include <linux/types.h> /* FIXME: kvm_para.h needs this */

#include <linux/stop_machine.h>
#include <linux/kvm_para.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/sort.h>
#include <linux/cpu.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/syscore_ops.h>

#include <asm/processor.h>
#include <asm/e820.h>
#include <asm/mtrr.h>
#include <asm/msr.h>

#include "mtrr.h"
u32 num_var_ranges;

unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
static DEFINE_MUTEX(mtrr_mutex);

u64 size_or_mask, size_and_mask;
static bool mtrr_aps_delayed_init;

static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];

const struct mtrr_ops *mtrr_if;
static void set_mtrr(unsigned int reg, unsigned long base,
		     unsigned long size, mtrr_type type);
void set_mtrr_ops(const struct mtrr_ops *ops)
{
	if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
		mtrr_ops[ops->vendor] = ops;
}
/* Returns non-zero if we have the write-combining memory type */
static int have_wrcomb(void)
{
	struct pci_dev *dev;
	u8 rev;

	dev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, NULL);
	if (dev != NULL) {
		/*
		 * ServerWorks LE chipsets < rev 6 have problems with
		 * write-combining. Don't allow it and leave room for other
		 * chipsets to be tagged.
		 */
		if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS &&
		    dev->device == PCI_DEVICE_ID_SERVERWORKS_LE) {
			pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev);
			if (rev <= 5) {
				pr_info("mtrr: Serverworks LE rev < 6 detected. Write-combining disabled.\n");
				pci_dev_put(dev);
				return 0;
			}
		}
		/*
		 * Intel 450NX erratum #23. Non-ascending cacheline evictions
		 * to write-combining memory may result in data corruption.
		 */
		if (dev->vendor == PCI_VENDOR_ID_INTEL &&
		    dev->device == PCI_DEVICE_ID_INTEL_82451NX) {
			pr_info("mtrr: Intel 450NX MMC detected. Write-combining disabled.\n");
			pci_dev_put(dev);
			return 0;
		}
		pci_dev_put(dev);
	}
	return mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0;
}
/* This function sets the number of variable MTRR ranges. */
static void __init set_num_var_ranges(void)
{
	unsigned long config = 0, dummy;

	if (use_intel())
		rdmsr(MSR_MTRRcap, config, dummy);
	else if (is_cpu(AMD))
		config = 2;
	else if (is_cpu(CYRIX) || is_cpu(CENTAUR))
		config = 8;

	num_var_ranges = config & 0xff;
}
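/*
 * Illustration (hypothetical value): an MTRRcap reading of 0x508 decodes as
 * VCNT = 0x08 (eight variable ranges, the low byte kept by the mask above),
 * bit 8 set (fixed-range MTRRs supported) and bit 10 set (write-combining
 * supported). Only the low byte matters to set_num_var_ranges().
 */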
static void __init init_table(void)
{
	int i, max;

	max = num_var_ranges;
	for (i = 0; i < max; i++)
		mtrr_usage_table[i] = 1;
}
struct set_mtrr_data {
	atomic_t	count;
	atomic_t	gate;
	unsigned long	smp_base;
	unsigned long	smp_size;
	unsigned int	smp_reg;
	mtrr_type	smp_type;
};

static DEFINE_PER_CPU(struct cpu_stop_work, mtrr_work);
/**
 * mtrr_work_handler - Synchronisation handler. Executed by "other" CPUs.
 * @info: pointer to mtrr configuration data
 *
 * Returns zero.
 */
static int mtrr_work_handler(void *info)
{
#ifdef CONFIG_SMP
	struct set_mtrr_data *data = info;
	unsigned long flags;

	atomic_dec(&data->count);
	while (!atomic_read(&data->gate))
		cpu_relax();

	local_irq_save(flags);

	atomic_dec(&data->count);
	while (atomic_read(&data->gate))
		cpu_relax();

	/* The master has cleared me to execute */
	if (data->smp_reg != ~0U) {
		mtrr_if->set(data->smp_reg, data->smp_base,
			     data->smp_size, data->smp_type);
	} else if (mtrr_aps_delayed_init) {
		/* Initialize the MTRRs in addition to the synchronisation. */
		mtrr_if->set_all();
	}

	atomic_dec(&data->count);
	while (!atomic_read(&data->gate))
		cpu_relax();

	atomic_dec(&data->count);
	local_irq_restore(flags);
#endif
	return 0;
}
static inline int types_compatible(mtrr_type type1, mtrr_type type2)
{
	return type1 == MTRR_TYPE_UNCACHABLE ||
	       type2 == MTRR_TYPE_UNCACHABLE ||
	       (type1 == MTRR_TYPE_WRTHROUGH && type2 == MTRR_TYPE_WRBACK) ||
	       (type1 == MTRR_TYPE_WRBACK && type2 == MTRR_TYPE_WRTHROUGH);
}
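/*
 * For example: an uncachable region may overlap any other type, and
 * write-through may overlap write-back, so
 *	types_compatible(MTRR_TYPE_WRTHROUGH, MTRR_TYPE_WRBACK) -> true
 *	types_compatible(MTRR_TYPE_WRCOMB, MTRR_TYPE_WRBACK)    -> false
 */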
/**
 * set_mtrr - update mtrrs on all processors
 * @reg:	mtrr in question
 * @base:	mtrr base
 * @size:	mtrr size
 * @type:	mtrr type
 *
 * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly:
 *
 * 1. Queue work to do the following on all processors:
 * 2. Disable Interrupts
 * 3. Wait for all procs to do so
 * 4. Enter no-fill cache mode
 * 5. Flush caches
 * 6. Clear PGE bit
 * 7. Flush all TLBs
 * 8. Disable all range registers
 * 9. Update the MTRRs
 * 10. Enable all range registers
 * 11. Flush all TLBs and caches again
 * 12. Enter normal cache mode and reenable caching
 * 13. Set PGE
 * 14. Wait for buddies to catch up
 * 15. Enable interrupts.
 *
 * What does that mean for us? Well, first we set data.count to the number
 * of CPUs. As each CPU announces that it started the rendezvous handler by
 * decrementing the count, we reset data.count and set the data.gate flag,
 * allowing all the CPUs to proceed with the work. As each CPU disables
 * interrupts, it decrements data.count once. We wait until it hits 0 and
 * proceed. We clear the data.gate flag and reset data.count. Meanwhile, the
 * other CPUs are waiting for that flag to be cleared. Once it is, each
 * CPU goes through the transition of updating MTRRs.
 * The CPU vendors may each do it differently,
 * so we call the mtrr_if->set() callback and let them take care of it.
 * When they're done, they again decrement data->count and wait for data.gate
 * to be set.
 * When we finish, we wait for data.count to hit 0 and toggle the data.gate
 * flag. Everyone then enables interrupts and we all continue on.
 *
 * Note that the mechanism is the same for UP systems, too; all the SMP stuff
 * becomes nops.
 */
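/*
 * Rough timeline of the count/gate handshake described above (master on the
 * left, every other CPU on the right; an illustration, not extra protocol):
 *
 *	master					each other CPU
 *	count = N-1, gate = 0			dec count, spin until gate == 1
 *	wait count == 0
 *	count = N-1, gate = 1			irqs off, dec count,
 *	irqs off, wait count == 0		spin until gate == 0
 *	count = N-1, gate = 0			update MTRRs, dec count,
 *	update MTRRs, wait count == 0		spin until gate == 1
 *	count = N-1, gate = 1			dec count, irqs on
 *	wait count == 0, irqs on
 */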
static void
set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type)
{
	struct set_mtrr_data data;
	unsigned long flags;
	int cpu;
	/*
	 * If this cpu is not yet active, we are in the cpu online path. There
	 * can be no stop_machine() in parallel, as stop machine ensures this
	 * by using get_online_cpus(). We can skip taking the stop_cpus_mutex,
	 * as we don't need it and also we can't afford to block while waiting
	 * for the mutex.
	 *
	 * If this cpu is active, we need to prevent stop_machine() happening
	 * in parallel by taking the stop cpus mutex.
	 *
	 * Also, this is called in the context of cpu online path or in the
	 * context where cpu hotplug is prevented. So checking the active status
	 * of the raw_smp_processor_id() is safe.
	 */
	if (cpu_active(raw_smp_processor_id()))
		mutex_lock(&stop_cpus_mutex);
	preempt_disable();

	data.smp_reg = reg;
	data.smp_base = base;
	data.smp_size = size;
	data.smp_type = type;
	atomic_set(&data.count, num_booting_cpus() - 1);

	/* Make sure data.count is visible before unleashing other CPUs */
	smp_wmb();
	atomic_set(&data.gate, 0);

	/* Start the ball rolling on other CPUs */
	for_each_online_cpu(cpu) {
		struct cpu_stop_work *work = &per_cpu(mtrr_work, cpu);

		if (cpu == smp_processor_id())
			continue;

		stop_one_cpu_nowait(cpu, mtrr_work_handler, &data, work);
	}

	/* Wait for all other CPUs to arrive in the rendezvous handler */
	while (atomic_read(&data.count))
		cpu_relax();

	/* Ok, reset count and toggle gate */
	atomic_set(&data.count, num_booting_cpus() - 1);
	smp_wmb();
	atomic_set(&data.gate, 1);

	local_irq_save(flags);

	/* Wait for all other CPUs to disable interrupts */
	while (atomic_read(&data.count))
		cpu_relax();

	/* Ok, reset count and toggle gate */
	atomic_set(&data.count, num_booting_cpus() - 1);
	smp_wmb();
	atomic_set(&data.gate, 0);
	/* Do our MTRR business */

	/*
	 * HACK!
	 *
	 * We use this same function to initialize the mtrrs during boot,
	 * resume, runtime cpu online and on an explicit request to set a
	 * specific MTRR.
	 *
	 * During boot or suspend, the state of the boot cpu's mtrrs has been
	 * saved, and we want to replicate that across all the cpus that come
	 * online (either at the end of boot or resume or during a runtime cpu
	 * online). If we're doing that, @reg is set to something special and on
	 * this cpu we still do mtrr_if->set_all(). During boot/resume, this
	 * is unnecessary if at this point we are still on the cpu that started
	 * the boot/resume sequence. But there is no guarantee that we are still
	 * on the same cpu. So we do mtrr_if->set_all() on this cpu as well to
	 * be sure that we are in sync with everyone else.
	 */
	if (reg != ~0U)
		mtrr_if->set(reg, base, size, type);
	else
		mtrr_if->set_all();
	/* Wait for the others */
	while (atomic_read(&data.count))
		cpu_relax();

	atomic_set(&data.count, num_booting_cpus() - 1);
	smp_wmb();
	atomic_set(&data.gate, 1);

	/*
	 * Wait here for everyone to have seen the gate change,
	 * so we're the last ones to touch 'data'.
	 */
	while (atomic_read(&data.count))
		cpu_relax();

	local_irq_restore(flags);
	preempt_enable();

	if (cpu_active(raw_smp_processor_id()))
		mutex_unlock(&stop_cpus_mutex);
}
/**
 * mtrr_add_page - Add a memory type region
 * @base: Physical base address of region in pages (in units of 4 kB!)
 * @size: Physical size of region in pages (4 kB)
 * @type: Type of MTRR desired
 * @increment: If this is true do usage counting on the region
 *
 * Memory type region registers control the caching on newer Intel and
 * non-Intel processors. This function allows drivers to request that an
 * MTRR be added. The details and hardware specifics of each processor's
 * implementation are hidden from the caller, but nevertheless the
 * caller should expect to need to provide a power of two size on an
 * equivalent power of two boundary.
 *
 * If the region cannot be added either because all regions are in use
 * or the CPU cannot support it a negative value is returned. On success
 * the register number for this entry is returned, but should be treated
 * as opaque.
 *
 * On a multiprocessor machine the changes are made to all processors.
 * This is required on x86 by the Intel processors.
 *
 * The available types are
 *
 * %MTRR_TYPE_UNCACHABLE - No caching
 *
 * %MTRR_TYPE_WRBACK - Write data back in bursts whenever
 *
 * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
 *
 * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
 *
 * BUGS: Needs a quiet flag for the cases where drivers do not mind
 * failures and do not wish system log messages to be sent.
 */
int mtrr_add_page(unsigned long base, unsigned long size,
		  unsigned int type, bool increment)
{
	unsigned long lbase, lsize;
	int i, replace, error;
	mtrr_type ltype;

	if (!mtrr_if)
		return -ENXIO;

	error = mtrr_if->validate_add_page(base, size, type);
	if (error)
		return error;

	if (type >= MTRR_NUM_TYPES) {
		pr_warning("mtrr: type: %u invalid\n", type);
		return -EINVAL;
	}
	/* If the type is WC, check that this processor supports it */
	if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) {
		pr_warning("mtrr: your processor doesn't support write-combining\n");
		return -ENOSYS;
	}
	if (!size) {
		pr_warning("mtrr: zero sized request\n");
		return -EINVAL;
	}
	if (base & size_or_mask || size & size_or_mask) {
		pr_warning("mtrr: base or size exceeds the MTRR width\n");
		return -EINVAL;
	}

	error = -EINVAL;
	replace = -1;

	/* No CPU hotplug when we change MTRR entries */
	get_online_cpus();

	/* Search for existing MTRR */
	mutex_lock(&mtrr_mutex);
	for (i = 0; i < num_var_ranges; ++i) {
		mtrr_if->get(i, &lbase, &lsize, &ltype);
		if (!lsize || base > lbase + lsize - 1 ||
		    base + size - 1 < lbase)
			continue;
		/* At this point we know there is some kind of overlap/enclosure */
		if (base < lbase || base + size - 1 > lbase + lsize - 1) {
			if (base <= lbase &&
			    base + size - 1 >= lbase + lsize - 1) {
				/* New region encloses an existing region */
				if (type == ltype) {
					replace = replace == -1 ? i : -2;
					continue;
				} else if (types_compatible(type, ltype))
					continue;
			}
			pr_warning("mtrr: 0x%lx000,0x%lx000 overlaps existing"
				   " 0x%lx000,0x%lx000\n", base, size, lbase,
				   lsize);
			goto out;
		}
		/* New region is enclosed by an existing region */
		if (ltype != type) {
			if (types_compatible(type, ltype))
				continue;
			pr_warning("mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
				   base, size, mtrr_attrib_to_str(ltype),
				   mtrr_attrib_to_str(type));
			goto out;
		}
		if (increment)
			++mtrr_usage_table[i];
		error = i;
		goto out;
	}
	/* Search for an empty MTRR */
	i = mtrr_if->get_free_region(base, size, replace);
	if (i >= 0) {
		set_mtrr(i, base, size, type);
		if (likely(replace < 0)) {
			mtrr_usage_table[i] = 1;
		} else {
			mtrr_usage_table[i] = mtrr_usage_table[replace];
			if (increment)
				mtrr_usage_table[i]++;
			if (unlikely(replace != i)) {
				set_mtrr(replace, 0, 0, 0);
				mtrr_usage_table[replace] = 0;
			}
		}
	} else {
		pr_info("mtrr: no more MTRRs available\n");
	}
	error = i;
 out:
	mutex_unlock(&mtrr_mutex);
	put_online_cpus();
	return error;
}
static int mtrr_check(unsigned long base, unsigned long size)
{
	if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
		pr_warning("mtrr: size and base must be multiples of 4 kiB\n");
		pr_debug("mtrr: size: 0x%lx base: 0x%lx\n", size, base);
		dump_stack();
		return -1;
	}
	return 0;
}
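/*
 * For illustration: base = 0xf8000000 with size = 0x400000 passes this
 * check, while base = 0xf8000800 fails (not a multiple of 4 kiB). Stricter,
 * vendor-specific requirements (e.g. power-of-two size and alignment) are
 * enforced later by mtrr_if->validate_add_page().
 */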
/**
 * mtrr_add - Add a memory type region
 * @base: Physical base address of region
 * @size: Physical size of region
 * @type: Type of MTRR desired
 * @increment: If this is true do usage counting on the region
 *
 * Memory type region registers control the caching on newer Intel and
 * non-Intel processors. This function allows drivers to request that an
 * MTRR be added. The details and hardware specifics of each processor's
 * implementation are hidden from the caller, but nevertheless the
 * caller should expect to need to provide a power of two size on an
 * equivalent power of two boundary.
 *
 * If the region cannot be added either because all regions are in use
 * or the CPU cannot support it a negative value is returned. On success
 * the register number for this entry is returned, but should be treated
 * as opaque.
 *
 * On a multiprocessor machine the changes are made to all processors.
 * This is required on x86 by the Intel processors.
 *
 * The available types are
 *
 * %MTRR_TYPE_UNCACHABLE - No caching
 *
 * %MTRR_TYPE_WRBACK - Write data back in bursts whenever
 *
 * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
 *
 * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
 *
 * BUGS: Needs a quiet flag for the cases where drivers do not mind
 * failures and do not wish system log messages to be sent.
 */
int mtrr_add(unsigned long base, unsigned long size, unsigned int type,
	     bool increment)
{
	if (mtrr_check(base, size))
		return -EINVAL;
	return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
			     increment);
}
EXPORT_SYMBOL(mtrr_add);
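/*
 * Example usage (an illustrative sketch, not part of this driver): a device
 * driver might map a 4 MB framebuffer write-combining. The address, size and
 * function name below are hypothetical.
 */
#if 0
static int example_setup_wc(void)
{
	int reg;

	/* Power-of-two size on an equivalent power-of-two boundary */
	reg = mtrr_add(0xf8000000UL, 0x400000UL, MTRR_TYPE_WRCOMB, true);
	if (reg < 0)
		return reg;	/* e.g. -ENOSYS if WC is unsupported */

	/* ... use the region, then release it by register handle ... */
	mtrr_del(reg, 0xf8000000UL, 0x400000UL);
	return 0;
}
#endif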
/**
 * mtrr_del_page - delete a memory type region
 * @reg: Register returned by mtrr_add
 * @base: Physical base address
 * @size: Size of region
 *
 * If register is supplied then base and size are ignored. This is
 * how drivers should call it.
 *
 * Releases an MTRR region. If the usage count drops to zero the
 * register is freed and the region returns to default state.
 * On success the register is returned, on failure a negative
 * error code is returned.
 */
int mtrr_del_page(int reg, unsigned long base, unsigned long size)
{
	unsigned long lbase, lsize;
	mtrr_type ltype;
	int error = -EINVAL;
	int i, max;

	if (!mtrr_if)
		return -ENXIO;

	max = num_var_ranges;
	/* No CPU hotplug when we change MTRR entries */
	get_online_cpus();
	mutex_lock(&mtrr_mutex);
	if (reg < 0) {
		/* Search for existing MTRR */
		for (i = 0; i < max; ++i) {
			mtrr_if->get(i, &lbase, &lsize, &ltype);
			if (lbase == base && lsize == size) {
				reg = i;
				break;
			}
		}
		if (reg < 0) {
			pr_debug("mtrr: no MTRR for %lx000,%lx000 found\n",
				 base, size);
			goto out;
		}
	}
	if (reg >= max) {
		pr_warning("mtrr: register: %d too big\n", reg);
		goto out;
	}
	mtrr_if->get(reg, &lbase, &lsize, &ltype);
	if (lsize < 1) {
		pr_warning("mtrr: MTRR %d not used\n", reg);
		goto out;
	}
	if (mtrr_usage_table[reg] < 1) {
		pr_warning("mtrr: reg: %d has count=0\n", reg);
		goto out;
	}
	if (--mtrr_usage_table[reg] < 1)
		set_mtrr(reg, 0, 0, 0);
	error = reg;
 out:
	mutex_unlock(&mtrr_mutex);
	put_online_cpus();
	return error;
}
/**
 * mtrr_del - delete a memory type region
 * @reg: Register returned by mtrr_add
 * @base: Physical base address
 * @size: Size of region
 *
 * If register is supplied then base and size are ignored. This is
 * how drivers should call it.
 *
 * Releases an MTRR region. If the usage count drops to zero the
 * register is freed and the region returns to default state.
 * On success the register is returned, on failure a negative
 * error code is returned.
 */
int mtrr_del(int reg, unsigned long base, unsigned long size)
{
	if (mtrr_check(base, size))
		return -EINVAL;
	return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
}
EXPORT_SYMBOL(mtrr_del);
/*
 * HACK ALERT!
 * These should be called implicitly, but we can't yet until all the initcall
 * stuff is done...
 */
static void __init init_ifs(void)
{
#ifndef CONFIG_X86_64
	amd_init_mtrr();
	cyrix_init_mtrr();
	centaur_init_mtrr();
#endif
}

/*
 * The suspend/resume methods are only for CPUs without MTRR. CPUs using
 * the generic MTRR driver don't require this.
 */
struct mtrr_value {
	mtrr_type	ltype;
	unsigned long	lbase;
	unsigned long	lsize;
};

static struct mtrr_value mtrr_value[MTRR_MAX_VAR_RANGES];
static int mtrr_save(void)
{
	int i;

	for (i = 0; i < num_var_ranges; i++) {
		mtrr_if->get(i, &mtrr_value[i].lbase,
			     &mtrr_value[i].lsize,
			     &mtrr_value[i].ltype);
	}
	return 0;
}
static void mtrr_restore(void)
{
	int i;

	for (i = 0; i < num_var_ranges; i++) {
		if (mtrr_value[i].lsize) {
			set_mtrr(i, mtrr_value[i].lbase,
				 mtrr_value[i].lsize,
				 mtrr_value[i].ltype);
		}
	}
}
static struct syscore_ops mtrr_syscore_ops = {
	.suspend	= mtrr_save,
	.resume		= mtrr_restore,
};

int __initdata changed_by_mtrr_cleanup;
/**
 * mtrr_bp_init - initialize mtrrs on the boot CPU
 *
 * This needs to be called early; before any of the other CPUs are
 * initialized (i.e. before smp_init()).
 */
void __init mtrr_bp_init(void)
{
	u32 phys_addr;

	init_ifs();

	phys_addr = 32;

	if (cpu_has_mtrr) {
		mtrr_if = &generic_mtrr_ops;
		size_or_mask = 0xff000000;	/* 36 bits */
		size_and_mask = 0x00f00000;
		phys_addr = 36;

		/*
		 * This is an AMD specific MSR, but we assume(hope?) that
		 * Intel will implement it too when they extend the address
		 * bus of the Xeon.
		 */
		if (cpuid_eax(0x80000000) >= 0x80000008) {
			phys_addr = cpuid_eax(0x80000008) & 0xff;
			/* CPUID workaround for Intel 0F33/0F34 CPU */
			if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
			    boot_cpu_data.x86 == 0xF &&
			    boot_cpu_data.x86_model == 0x3 &&
			    (boot_cpu_data.x86_mask == 0x3 ||
			     boot_cpu_data.x86_mask == 0x4))
				phys_addr = 36;

			size_or_mask = ~((1ULL << (phys_addr - PAGE_SHIFT)) - 1);
			size_and_mask = ~size_or_mask & 0xfffff00000ULL;
		} else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR &&
			   boot_cpu_data.x86 == 6) {
			/*
			 * VIA C* family have Intel style MTRRs,
			 * but don't support PAE
			 */
			size_or_mask = 0xfff00000;	/* 32 bits */
			size_and_mask = 0;
			phys_addr = 32;
		}
	} else {
		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_AMD:
			if (cpu_has_k6_mtrr) {
				/* Pre-Athlon (K6) AMD CPU MTRRs */
				mtrr_if = mtrr_ops[X86_VENDOR_AMD];
				size_or_mask = 0xfff00000;	/* 32 bits */
				size_and_mask = 0;
			}
			break;
		case X86_VENDOR_CENTAUR:
			if (cpu_has_centaur_mcr) {
				mtrr_if = mtrr_ops[X86_VENDOR_CENTAUR];
				size_or_mask = 0xfff00000;	/* 32 bits */
				size_and_mask = 0;
			}
			break;
		case X86_VENDOR_CYRIX:
			if (cpu_has_cyrix_arr) {
				mtrr_if = mtrr_ops[X86_VENDOR_CYRIX];
				size_or_mask = 0xfff00000;	/* 32 bits */
				size_and_mask = 0;
			}
			break;
		default:
			break;
		}
	}

	if (mtrr_if) {
		set_num_var_ranges();
		init_table();
		if (use_intel()) {
			get_mtrr_state();

			if (mtrr_cleanup(phys_addr)) {
				changed_by_mtrr_cleanup = 1;
				mtrr_if->set_all();
			}
		}
	}
}
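/*
 * Worked example for the mask arithmetic above (illustration): with
 * phys_addr == 36 and PAGE_SHIFT == 12,
 *	size_or_mask = ~((1ULL << 24) - 1) = 0xffffffffff000000
 * so any page-granular base or size with bits set at or above bit 24
 * cannot be described by a 36-bit physical address and is rejected by
 * the width check in mtrr_add_page().
 */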
void mtrr_ap_init(void)
{
	if (!use_intel() || mtrr_aps_delayed_init)
		return;
	/*
	 * Ideally we should hold mtrr_mutex here to avoid MTRR entries
	 * being changed, but this routine is called at cpu boot time, and
	 * holding the lock there breaks it.
	 *
	 * This routine is called in two cases:
	 *
	 * 1. very early in software resume, when there absolutely are no
	 *    mtrr entry changes;
	 *
	 * 2. cpu hotadd time. We let mtrr_add/del_page hold the cpuhotplug
	 *    lock to prevent mtrr entry changes.
	 */
	set_mtrr(~0U, 0, 0, 0);
}
/**
 * Save current fixed-range MTRR state of the BSP
 */
void mtrr_save_state(void)
{
	smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1);
}
void set_mtrr_aps_delayed_init(void)
{
	if (!use_intel())
		return;

	mtrr_aps_delayed_init = true;
}
/*
 * Delayed MTRR initialization for all APs
 */
void mtrr_aps_init(void)
{
	if (!use_intel())
		return;

	/*
	 * Check if someone has requested the delay of AP MTRR initialization,
	 * by doing set_mtrr_aps_delayed_init(), prior to this point. If not,
	 * then we are done.
	 */
	if (!mtrr_aps_delayed_init)
		return;

	set_mtrr(~0U, 0, 0, 0);
	mtrr_aps_delayed_init = false;
}
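/*
 * Expected call order (illustration; the callers live in smpboot.c):
 * native_smp_prepare_cpus() calls set_mtrr_aps_delayed_init(), the booting
 * APs then skip mtrr_ap_init(), and native_smp_cpus_done() finally calls
 * mtrr_aps_init() to program all APs in a single rendezvous rather than
 * one rendezvous per CPU.
 */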
void mtrr_bp_restore(void)
{
	if (!use_intel())
		return;

	mtrr_if->set_all();
}
static int __init mtrr_init_finalize(void)
{
	if (!mtrr_if)
		return 0;

	if (use_intel()) {
		if (!changed_by_mtrr_cleanup)
			mtrr_state_warn();
		return 0;
	}

	/*
	 * The CPU has no MTRR and seems to not support SMP. They have
	 * specific drivers, so we use a tricky method to support
	 * suspend/resume for them.
	 *
	 * TBD: is there any system with such a CPU that supports
	 * suspend/resume? If not, we should remove the code.
	 */
	register_syscore_ops(&mtrr_syscore_ops);

	return 0;
}
subsys_initcall(mtrr_init_finalize);