Merge remote-tracking branch 'kvmarm/kvm-arm64/stolen-time' into kvmarm-master/next
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 019bc56..b36dae9 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -44,6 +44,7 @@
        KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_IRQ_PENDING    KVM_ARCH_REQ(1)
 #define KVM_REQ_VCPU_RESET     KVM_ARCH_REQ(2)
+#define KVM_REQ_RECORD_STEAL   KVM_ARCH_REQ(3)
 
 DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
 
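The new KVM_REQ_RECORD_STEAL request plugs into KVM's generic vCPU-request
machinery (kvm_make_request()/kvm_check_request()). Its call sites are not
part of this header hunk; the following is only a hedged sketch, assuming the
request is raised when a vCPU is scheduled onto a physical CPU and serviced in
the run loop before the next guest entry:

        /*
         * Sketch only (call sites assumed, not shown in this diff): raise the
         * request when the vCPU is loaded, service it before re-entering the
         * guest so the accumulated steal time gets published.
         */
        static void sketch_vcpu_load(struct kvm_vcpu *vcpu)
        {
                /* The vCPU may have been preempted or migrated while descheduled. */
                kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu);
        }

        static void sketch_check_vcpu_requests(struct kvm_vcpu *vcpu)
        {
                if (kvm_request_pending(vcpu)) {
                        if (kvm_check_request(KVM_REQ_RECORD_STEAL, vcpu))
                                kvm_update_stolen_time(vcpu);
                }
        }

kvm_make_request(), kvm_request_pending() and kvm_check_request() are the
existing generic helpers from include/linux/kvm_host.h; only their use for
this new request is illustrative.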
@@ -346,6 +347,13 @@ struct kvm_vcpu_arch {
        /* True when deferrable sysregs are loaded on the physical CPU,
         * see kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs. */
        bool sysregs_loaded_on_cpu;
+
+       /* Guest PV state */
+       struct {
+               u64 steal;
+               u64 last_steal;
+               gpa_t base;
+       } steal;
 };
 
 /* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
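The three new fields hold the per-vCPU stolen-time state: base is the guest
physical address of the shared stolen-time record (GPA_INVALID until the
feature is enabled), steal is the running total reported to the guest, and
last_steal is the host-side snapshot used to compute increments. Below is a
minimal sketch of the accounting these fields support, assuming the host's
run_delay statistic (time spent runnable but not running) is the source of
steal time; the real update helper is only declared later in this header and
implemented elsewhere in the series:

        /*
         * Sketch only: what steal/last_steal/base are for. Endianness and
         * the exact in-guest record layout are left to the real helper.
         */
        static void sketch_update_stolen_time(struct kvm_vcpu *vcpu)
        {
                u64 last = vcpu->arch.steal.last_steal;

                if (vcpu->arch.steal.base == GPA_INVALID)
                        return;         /* pvtime not enabled for this vCPU */

                /* run_delay: total time this task sat runnable but not running. */
                vcpu->arch.steal.last_steal = current->sched_info.run_delay;
                vcpu->arch.steal.steal += vcpu->arch.steal.last_steal - last;

                /*
                 * The real helper would now write the updated steal value
                 * into the guest page at steal.base via the guest-memory
                 * access helpers.
                 */
        }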
@@ -486,6 +494,27 @@ void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
 int kvm_perf_init(void);
 int kvm_perf_teardown(void);
 
+long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
+gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
+void kvm_update_stolen_time(struct kvm_vcpu *vcpu);
+
+int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
+                           struct kvm_device_attr *attr);
+int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
+                           struct kvm_device_attr *attr);
+int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
+                           struct kvm_device_attr *attr);
+
+static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
+{
+       vcpu_arch->steal.base = GPA_INVALID;
+}
+
+static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
+{
+       return (vcpu_arch->steal.base != GPA_INVALID);
+}
+
 void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);
 
 struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
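
The declarations added above split the feature into two halves: the
kvm_arm_pvtime_*_attr() functions back a vCPU device-attribute interface
through which userspace places the stolen-time record in guest memory, while
kvm_hypercall_pv_features() and kvm_init_stolen_time() serve the guest-facing
SMCCC calls. A hedged sketch of how the hypercall half might be dispatched
follows; the handler shape and the SMCCC function-ID constants are
assumptions, not taken from this header:

        /*
         * Illustrative dispatch only; names such as
         * ARM_SMCCC_HV_PV_TIME_FEATURES and the smccc_get_function()/
         * smccc_set_retval() helpers are assumed to come from the
         * hypercall-handling side of the series, not from this header.
         */
        static int sketch_hvc_call_handler(struct kvm_vcpu *vcpu)
        {
                u32 func_id = smccc_get_function(vcpu);
                long val = SMCCC_RET_NOT_SUPPORTED;

                switch (func_id) {
                case ARM_SMCCC_HV_PV_TIME_FEATURES:
                        /* Which paravirtualized-time calls are implemented? */
                        val = kvm_hypercall_pv_features(vcpu);
                        break;
                case ARM_SMCCC_HV_PV_TIME_ST: {
                        gpa_t gpa = kvm_init_stolen_time(vcpu);

                        /* Hand the guest the IPA of its stolen-time record. */
                        if (gpa != GPA_INVALID)
                                val = gpa;
                        break;
                }
                }

                smccc_set_retval(vcpu, val, 0, 0, 0);
                return 1;
        }

The inline helpers kvm_arm_pvtime_vcpu_init() and kvm_arm_is_pvtime_enabled()
give the rest of the code a single test for whether userspace ever supplied a
base address, which is why steal.base doubles as the enable flag.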