/*
 * vMTRR implementation
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright(C) 2015 Intel Corporation.
 *
 * Authors:
 *   Yaniv Kamay <yaniv@qumranet.com>
 *   Avi Kivity <avi@qumranet.com>
 *   Marcelo Tosatti <mtosatti@redhat.com>
 *   Paolo Bonzini <pbonzini@redhat.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include <linux/kvm_host.h>
#include <asm/mtrr.h>

#include "cpuid.h"
#include "mmu.h"
#define IA32_MTRR_DEF_TYPE_E		(1ULL << 11)
#define IA32_MTRR_DEF_TYPE_FE		(1ULL << 10)
#define IA32_MTRR_DEF_TYPE_TYPE_MASK	(0xff)
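
/*
 * For example, a guest write of 0xc06 to MSR_MTRRdefType sets E (bit 11)
 * and FE (bit 10) and selects write-back (type 6) as the default type.
 */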
static bool msr_mtrr_valid(unsigned msr)
{
	switch (msr) {
	case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
	case MSR_MTRRfix64K_00000:
	case MSR_MTRRfix16K_80000:
	case MSR_MTRRfix16K_A0000:
	case MSR_MTRRfix4K_C0000:
	case MSR_MTRRfix4K_C8000:
	case MSR_MTRRfix4K_D0000:
	case MSR_MTRRfix4K_D8000:
	case MSR_MTRRfix4K_E0000:
	case MSR_MTRRfix4K_E8000:
	case MSR_MTRRfix4K_F0000:
	case MSR_MTRRfix4K_F8000:
	case MSR_MTRRdefType:
	case MSR_IA32_CR_PAT:
		return true;
	case 0x2f8:
		return true;
	}
	return false;
}
static bool valid_pat_type(unsigned t)
{
	return t < 8 && (1 << t) & 0xf3;	/* 0, 1, 4, 5, 6, 7 */
}

static bool valid_mtrr_type(unsigned t)
{
	return t < 8 && (1 << t) & 0x73;	/* 0, 1, 4, 5, 6 */
}
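
/*
 * Both helpers treat the constant as a bitmap of permitted type encodings:
 * 0xf3 (0b11110011) admits PAT types 0, 1, 4, 5, 6 and 7 (UC, WC, WT, WP,
 * WB, UC-), while 0x73 (0b01110011) drops type 7, which is reserved for
 * MTRRs.
 */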
bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	int i;
	u64 mask;

	if (!msr_mtrr_valid(msr))
		return false;

	if (msr == MSR_IA32_CR_PAT) {
		for (i = 0; i < 8; i++)
			if (!valid_pat_type((data >> (i * 8)) & 0xff))
				return false;
		return true;
	} else if (msr == MSR_MTRRdefType) {
		if (data & ~0xcff)
			return false;
		return valid_mtrr_type(data & 0xff);
	} else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
		for (i = 0; i < 8; i++)
			if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
				return false;
		return true;
	}

	/* variable MTRRs */
	WARN_ON(!(msr >= 0x200 && msr < 0x200 + 2 * KVM_NR_VAR_MTRR));

	mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
	if ((msr & 1) == 0) {
		/* MTRR base */
		if (!valid_mtrr_type(data & 0xff))
			return false;
		mask |= 0xf00;
	} else
		/* MTRR mask */
		mask |= 0x7ff;
	if (data & mask) {
		kvm_inject_gp(vcpu, 0);
		return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(kvm_mtrr_valid);
static bool mtrr_is_enabled(struct kvm_mtrr *mtrr_state)
{
	return !!(mtrr_state->deftype & IA32_MTRR_DEF_TYPE_E);
}

static bool fixed_mtrr_is_enabled(struct kvm_mtrr *mtrr_state)
{
	return !!(mtrr_state->deftype & IA32_MTRR_DEF_TYPE_FE);
}

static u8 mtrr_default_type(struct kvm_mtrr *mtrr_state)
{
	return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK;
}
/*
 * Three terms are used in the following code:
 * - segment, the address range covered by one group of fixed MTRRs.
 * - unit, one MSR entry within a segment.
 * - range, the span covered by a single memory cache type.
 */
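
/*
 * For example, the 16K segment spans [0x80000, 0xc0000) and contains two
 * units, MSR_MTRRfix16K_80000 and MSR_MTRRfix16K_A0000; each unit holds
 * eight 16K ranges, so MSR_MTRRfix16K_A0000 alone describes [0xa0000,
 * 0xc0000).
 */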
struct fixed_mtrr_segment {
	u64 start;
	u64 end;

	int range_shift;

	/* the start position in kvm_mtrr.fixed_ranges[]. */
	int range_start;
};
static struct fixed_mtrr_segment fixed_seg_table[] = {
	/* MSR_MTRRfix64K_00000, 1 unit. 64K fixed mtrr. */
	{
		.start = 0x0,
		.end = 0x80000,
		.range_shift = 16, /* 64K */
		.range_start = 0,
	},

	/*
	 * MSR_MTRRfix16K_80000 ... MSR_MTRRfix16K_A0000, 2 units,
	 * 16K fixed mtrr.
	 */
	{
		.start = 0x80000,
		.end = 0xc0000,
		.range_shift = 14, /* 16K */
		.range_start = 8,
	},

	/*
	 * MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000, 8 units,
	 * 4K fixed mtrr.
	 */
	{
		.start = 0xc0000,
		.end = 0x100000,
		.range_shift = 12, /* 4K */
		.range_start = 24,
	}
};
/*
 * One MSR covers exactly one unit, and each MSR entry contains 8 ranges,
 * so the unit size is always 8 * 2^range_shift bytes.
 */
static u64 fixed_mtrr_seg_unit_size(int seg)
{
	return 8 << fixed_seg_table[seg].range_shift;
}
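
/*
 * For example, units in the 64K segment are 8 << 16 = 512K, in the 16K
 * segment 8 << 14 = 128K, and in the 4K segment 8 << 12 = 32K.
 */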
static bool fixed_msr_to_seg_unit(u32 msr, int *seg, int *unit)
{
	switch (msr) {
	case MSR_MTRRfix64K_00000:
		*seg = 0;
		*unit = 0;
		break;
	case MSR_MTRRfix16K_80000 ... MSR_MTRRfix16K_A0000:
		*seg = 1;
		*unit = msr - MSR_MTRRfix16K_80000;
		break;
	case MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000:
		*seg = 2;
		*unit = msr - MSR_MTRRfix4K_C0000;
		break;
	default:
		return false;
	}

	return true;
}
static void fixed_mtrr_seg_unit_range(int seg, int unit, u64 *start, u64 *end)
{
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
	u64 unit_size = fixed_mtrr_seg_unit_size(seg);

	*start = mtrr_seg->start + unit * unit_size;
	*end = *start + unit_size;
	WARN_ON(*end > mtrr_seg->end);
}
static int fixed_mtrr_seg_unit_range_index(int seg, int unit)
{
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];

	WARN_ON(mtrr_seg->start + unit * fixed_mtrr_seg_unit_size(seg)
		> mtrr_seg->end);

	/* each unit has 8 ranges. */
	return mtrr_seg->range_start + 8 * unit;
}
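
/*
 * For example, MSR_MTRRfix16K_A0000 is seg 1, unit 1, so its first range
 * index is 8 + 8 * 1 = 16 and it owns fixed_ranges[16..23].
 */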
static bool fixed_msr_to_range(u32 msr, u64 *start, u64 *end)
{
	int seg, unit;

	if (!fixed_msr_to_seg_unit(msr, &seg, &unit))
		return false;

	fixed_mtrr_seg_unit_range(seg, unit, start, end);
	return true;
}
static int fixed_msr_to_range_index(u32 msr)
{
	int seg, unit;

	if (!fixed_msr_to_seg_unit(msr, &seg, &unit))
		return -1;

	return fixed_mtrr_seg_unit_range_index(seg, unit);
}
static void update_mtrr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
	gfn_t start, end, mask;
	int index;

	if (msr == MSR_IA32_CR_PAT || !tdp_enabled ||
	      !kvm_arch_has_noncoherent_dma(vcpu->kvm))
		return;

	if (!mtrr_is_enabled(mtrr_state) && msr != MSR_MTRRdefType)
		return;

	/* fixed MTRRs. */
	if (fixed_msr_to_range(msr, &start, &end)) {
		if (!fixed_mtrr_is_enabled(mtrr_state))
			return;
	} else if (msr == MSR_MTRRdefType) {
		start = 0x0;
		end = ~0ULL;
	} else {
		/* variable range MTRRs. */
		index = (msr - 0x200) / 2;
		start = mtrr_state->var_ranges[index].base & PAGE_MASK;
		mask = mtrr_state->var_ranges[index].mask & PAGE_MASK;
		mask |= ~0ULL << cpuid_maxphyaddr(vcpu);

		/* the highest address that still matches the mask, plus one. */
		end = ((start & mask) | ~mask) + 1;
	}

	kvm_zap_gfn_range(vcpu->kvm, gpa_to_gfn(start), gpa_to_gfn(end));
}
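
/*
 * Worked example for the variable-range math above, assuming a 36-bit
 * guest physical address width: with base = 0x80000000 and a mask MSR of
 * 0xfc0000000, the extended mask becomes 0xffffffffc0000000, so
 * end = ((start & mask) | ~mask) + 1 = 0xc0000000 and the zap covers the
 * 1GB range [2GB, 3GB).
 */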
int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	int index;

	if (!kvm_mtrr_valid(vcpu, msr, data))
		return 1;

	index = fixed_msr_to_range_index(msr);
	if (index >= 0)
		*(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index] = data;
	else if (msr == MSR_MTRRdefType)
		vcpu->arch.mtrr_state.deftype = data;
	else if (msr == MSR_IA32_CR_PAT)
		vcpu->arch.pat = data;
	else {	/* Variable MTRRs */
		int is_mtrr_mask;

		index = (msr - 0x200) / 2;
		is_mtrr_mask = msr - 0x200 - 2 * index;
		if (!is_mtrr_mask)
			vcpu->arch.mtrr_state.var_ranges[index].base = data;
		else
			vcpu->arch.mtrr_state.var_ranges[index].mask = data;
	}

	update_mtrr(vcpu, msr);
	return 0;
}
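
/*
 * Variable MTRRs live in MSR pairs starting at 0x200: even MSRs hold
 * PHYSBASEn, odd MSRs hold PHYSMASKn. For example, a write to MSR 0x203
 * yields index = 1 and is_mtrr_mask = 1, so it updates var_ranges[1].mask.
 */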
int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	int index;

	/* MSR_MTRRcap is a readonly MSR. */
	if (msr == MSR_MTRRcap) {
		/*
		 * SMRR = 0
		 * WC = 1
		 * FIX = 1
		 * VCNT = KVM_NR_VAR_MTRR
		 */
		*pdata = 0x500 | KVM_NR_VAR_MTRR;
		return 0;
	}
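
	/*
	 * For example, with KVM_NR_VAR_MTRR == 8 the guest reads 0x508: the
	 * FIX (bit 8) and WC (bit 10) capabilities plus a VCNT of 8 variable
	 * ranges.
	 */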
	if (!msr_mtrr_valid(msr))
		return 1;

	index = fixed_msr_to_range_index(msr);
	if (index >= 0)
		*pdata = *(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index];
	else if (msr == MSR_MTRRdefType)
		*pdata = vcpu->arch.mtrr_state.deftype;
	else if (msr == MSR_IA32_CR_PAT)
		*pdata = vcpu->arch.pat;
	else {	/* Variable MTRRs */
		int is_mtrr_mask;

		index = (msr - 0x200) / 2;
		is_mtrr_mask = msr - 0x200 - 2 * index;
		if (!is_mtrr_mask)
			*pdata = vcpu->arch.mtrr_state.var_ranges[index].base;
		else
			*pdata = vcpu->arch.mtrr_state.var_ranges[index].mask;
	}

	return 0;
}
u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
	u64 base, mask, start;
	int i, num_var_ranges, type;
	const int wt_wb_mask = (1 << MTRR_TYPE_WRBACK)
			       | (1 << MTRR_TYPE_WRTHROUGH);

	start = gfn_to_gpa(gfn);
	num_var_ranges = KVM_NR_VAR_MTRR;
	type = -1;

	/* MTRR is completely disabled, use UC for all of physical memory. */
	if (!mtrr_is_enabled(mtrr_state))
		return MTRR_TYPE_UNCACHABLE;

	/* Look in fixed ranges. Just return the type as per start. */
	if (fixed_mtrr_is_enabled(mtrr_state) && (start < 0x100000)) {
		int idx;

		if (start < 0x80000) {
			idx = 0;
			idx += (start >> 16);
			return mtrr_state->fixed_ranges[idx];
		} else if (start < 0xC0000) {
			idx = 1 * 8;
			idx += ((start - 0x80000) >> 14);
			return mtrr_state->fixed_ranges[idx];
		} else if (start < 0x1000000) {
			idx = 3 * 8;
			idx += ((start - 0xC0000) >> 12);
			return mtrr_state->fixed_ranges[idx];
		}
	}
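
	/*
	 * For example, start = 0x90000 lands in the 16K area above:
	 * idx = 8 + ((0x90000 - 0x80000) >> 14) = 12, the range covering
	 * [0x90000, 0x94000).
	 */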
	/*
	 * Look in variable ranges.
	 * Look for multiple ranges matching this address and pick the type
	 * as per MTRR precedence.
	 */
	for (i = 0; i < num_var_ranges; ++i) {
		unsigned char curr_type;

		if (!(mtrr_state->var_ranges[i].mask & (1 << 11)))
			continue;

		base = mtrr_state->var_ranges[i].base & PAGE_MASK;
		mask = mtrr_state->var_ranges[i].mask & PAGE_MASK;

		if ((start & mask) != (base & mask))
			continue;

		/*
		 * Please refer to Intel SDM Volume 3: 11.11.4.1 MTRR
		 * Precedences.
		 */

		curr_type = mtrr_state->var_ranges[i].base & 0xff;
		if (type == -1) {
			type = curr_type;
			continue;
		}

		/*
		 * If two or more variable memory ranges match and the
		 * memory types are identical, then that memory type is
		 * used.
		 */
		if (type == curr_type)
			continue;

		/*
		 * If two or more variable memory ranges match and one of
		 * the memory types is UC, the UC memory type is used.
		 */
		if (curr_type == MTRR_TYPE_UNCACHABLE)
			return MTRR_TYPE_UNCACHABLE;

		/*
		 * If two or more variable memory ranges match and the
		 * memory types are WT and WB, the WT memory type is used.
		 */
		if (((1 << type) & wt_wb_mask) &&
		      ((1 << curr_type) & wt_wb_mask)) {
			type = MTRR_TYPE_WRTHROUGH;
			continue;
		}

		/*
		 * For overlaps not defined by the above rules, processor
		 * behavior is undefined.
		 */

		/* We use WB for this undefined behavior. :( */
		return MTRR_TYPE_WRBACK;
	}

	if (type != -1)
		return type;

	return mtrr_default_type(mtrr_state);
}
EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type);
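
/*
 * A minimal usage sketch, not code from this file: an EPT-enabled caller
 * (e.g. VMX's memory-type computation) could translate a gfn's guest MTRR
 * type into EPT memtype bits roughly as follows, assuming the
 * VMX_EPT_MT_EPTE_SHIFT definition from vmx.h:
 *
 *	u8 type = kvm_mtrr_get_guest_memory_type(vcpu, gfn);
 *	u64 ept_memtype = (u64)type << VMX_EPT_MT_EPTE_SHIFT;
 */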