perf/x86/intel: Restrict period on Nehalem
[android-x86/kernel.git] arch/x86/events/intel/core.c
index 369006b..55e362f 100644
@@ -2198,16 +2198,7 @@ done:
 static struct event_constraint *
 intel_bts_constraints(struct perf_event *event)
 {
-       struct hw_perf_event *hwc = &event->hw;
-       unsigned int hw_event, bts_event;
-
-       if (event->attr.freq)
-               return NULL;
-
-       hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
-       bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
-
-       if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
+       if (unlikely(intel_pmu_has_bts(event)))
                return &bts_constraint;
 
        return NULL;
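
The open-coded BTS test removed above is replaced by the intel_pmu_has_bts() helper. Its definition is not part of this file; assuming it simply wraps the check dropped here (the helpers would live in arch/x86/events/perf_event.h), it looks roughly like:

        /* Sketch only: mirrors the open-coded check removed in the hunk above. */
        static inline bool intel_pmu_has_bts_period(struct perf_event *event, u64 period)
        {
                struct hw_perf_event *hwc = &event->hw;
                unsigned int hw_event, bts_event;

                if (event->attr.freq)
                        return false;

                hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
                bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);

                return hw_event == bts_event && period == 1;
        }

        static inline bool intel_pmu_has_bts(struct perf_event *event)
        {
                return intel_pmu_has_bts_period(event, event->hw.sample_period);
        }

The same helpers are reused by intel_pmu_bts_config() and by the new intel_pmu_check_period() callback further down.
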
@@ -2502,6 +2493,35 @@ intel_stop_scheduling(struct cpu_hw_events *cpuc)
 }
 
 static struct event_constraint *
+dyn_constraint(struct cpu_hw_events *cpuc, struct event_constraint *c, int idx)
+{
+       WARN_ON_ONCE(!cpuc->constraint_list);
+
+       if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
+               struct event_constraint *cx;
+
+               /*
+                * grab pre-allocated constraint entry
+                */
+               cx = &cpuc->constraint_list[idx];
+
+               /*
+                * initialize dynamic constraint
+                * with static constraint
+                */
+               *cx = *c;
+
+               /*
+                * mark constraint as dynamic
+                */
+               cx->flags |= PERF_X86_EVENT_DYNAMIC;
+               c = cx;
+       }
+
+       return c;
+}
+
+static struct event_constraint *
 intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
                           int idx, struct event_constraint *c)
 {
@@ -2531,27 +2551,7 @@ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
         * only needed when constraint has not yet
         * been cloned (marked dynamic)
         */
-       if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
-               struct event_constraint *cx;
-
-               /*
-                * grab pre-allocated constraint entry
-                */
-               cx = &cpuc->constraint_list[idx];
-
-               /*
-                * initialize dynamic constraint
-                * with static constraint
-                */
-               *cx = *c;
-
-               /*
-                * mark constraint as dynamic, so we
-                * can free it later on
-                */
-               cx->flags |= PERF_X86_EVENT_DYNAMIC;
-               c = cx;
-       }
+       c = dyn_constraint(cpuc, c, idx);
 
        /*
         * From here on, the constraint is dynamic.
@@ -2825,10 +2825,8 @@ static unsigned long intel_pmu_free_running_flags(struct perf_event *event)
 static int intel_pmu_bts_config(struct perf_event *event)
 {
        struct perf_event_attr *attr = &event->attr;
-       struct hw_perf_event *hwc = &event->hw;
 
-       if (attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
-           !attr->freq && hwc->sample_period == 1) {
+       if (unlikely(intel_pmu_has_bts(event))) {
                /* BTS is not supported by this architecture. */
                if (!x86_pmu.bts_active)
                        return -EOPNOTSUPP;
@@ -2869,7 +2867,7 @@ static int intel_pmu_hw_config(struct perf_event *event)
                return ret;
 
        if (event->attr.precise_ip) {
-               if (!event->attr.freq) {
+               if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) {
                        event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
                        if (!(event->attr.sample_type &
                              ~intel_pmu_free_running_flags(event)))
@@ -2887,7 +2885,7 @@ static int intel_pmu_hw_config(struct perf_event *event)
                /*
                 * BTS is set up earlier in this path, so don't account twice
                 */
-               if (!intel_pmu_has_bts(event)) {
+               if (!unlikely(intel_pmu_has_bts(event))) {
                        /* disallow lbr if conflicting events are present */
                        if (x86_add_exclusive(x86_lbr_exclusive_lbr))
                                return -EBUSY;
@@ -3077,6 +3075,11 @@ static u64 bdw_limit_period(struct perf_event *event, u64 left)
        return left;
 }
 
+static u64 nhm_limit_period(struct perf_event *event, u64 left)
+{
+       return max(left, 32ULL);
+}
+
 PMU_FORMAT_ATTR(event, "config:0-7"    );
 PMU_FORMAT_ATTR(umask, "config:8-15"   );
 PMU_FORMAT_ATTR(edge,  "config:18"     );
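
nhm_limit_period() is the change named in the commit title: on Nehalem the minimum sample period is clamped to 32, reportedly because very small periods can get the PMU interrupt handler stuck on those CPUs. The hook is installed for the Nehalem case in intel_pmu_init() below and consumed by the generic x86 code when a counter is (re)programmed, roughly:

        /* Simplified excerpt of how x86_perf_event_set_period() in
         * arch/x86/events/core.c applies the hook; shown for context only. */
        if (left > x86_pmu.max_period)
                left = x86_pmu.max_period;

        if (x86_pmu.limit_period)
                left = x86_pmu.limit_period(event, left);
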
@@ -3104,7 +3107,7 @@ ssize_t intel_event_sysfs_show(char *page, u64 config)
        return x86_event_sysfs_show(page, config, event);
 }
 
-struct intel_shared_regs *allocate_shared_regs(int cpu)
+static struct intel_shared_regs *allocate_shared_regs(int cpu)
 {
        struct intel_shared_regs *regs;
        int i;
@@ -3136,10 +3139,9 @@ static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
        return c;
 }
 
-static int intel_pmu_cpu_prepare(int cpu)
-{
-       struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
 
+int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
+{
        if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
                cpuc->shared_regs = allocate_shared_regs(cpu);
                if (!cpuc->shared_regs)
@@ -3149,7 +3151,7 @@ static int intel_pmu_cpu_prepare(int cpu)
        if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
                size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
 
-               cpuc->constraint_list = kzalloc(sz, GFP_KERNEL);
+               cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
                if (!cpuc->constraint_list)
                        goto err_shared_regs;
 
@@ -3174,6 +3176,11 @@ err:
        return -ENOMEM;
 }
 
+static int intel_pmu_cpu_prepare(int cpu)
+{
+       return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu);
+}
+
 static void intel_pmu_cpu_starting(int cpu)
 {
        struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
@@ -3229,9 +3236,8 @@ static void intel_pmu_cpu_starting(int cpu)
        }
 }
 
-static void free_excl_cntrs(int cpu)
+static void free_excl_cntrs(struct cpu_hw_events *cpuc)
 {
-       struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
        struct intel_excl_cntrs *c;
 
        c = cpuc->excl_cntrs;
@@ -3246,7 +3252,11 @@ static void free_excl_cntrs(int cpu)
 
 static void intel_pmu_cpu_dying(int cpu)
 {
-       struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+       fini_debug_store_on_cpu(cpu);
+}
+
+void intel_cpuc_finish(struct cpu_hw_events *cpuc)
+{
        struct intel_shared_regs *pc;
 
        pc = cpuc->shared_regs;
@@ -3256,9 +3266,12 @@ static void intel_pmu_cpu_dying(int cpu)
                cpuc->shared_regs = NULL;
        }
 
-       free_excl_cntrs(cpu);
+       free_excl_cntrs(cpuc);
+}
 
-       fini_debug_store_on_cpu(cpu);
+static void intel_pmu_cpu_dead(int cpu)
+{
+       intel_cpuc_finish(&per_cpu(cpu_hw_events, cpu));
 }
 
 static void intel_pmu_sched_task(struct perf_event_context *ctx,
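
Note the split above: intel_pmu_cpu_dying() now only tears down the debug store, while freeing of shared_regs and the constraint list is deferred to the new intel_pmu_cpu_dead() callback, i.e. until after the CPU is fully offline. The generic dispatcher on the x86 side is presumably something like:

        /* Hypothetical sketch of the hotplug teardown hook in
         * arch/x86/events/core.c that ends up calling .cpu_dead. */
        static int x86_pmu_dead_cpu(unsigned int cpu)
        {
                if (x86_pmu.cpu_dead)
                        x86_pmu.cpu_dead(cpu);

                return 0;
        }
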
@@ -3270,6 +3283,11 @@ static void intel_pmu_sched_task(struct perf_event_context *ctx,
                intel_pmu_lbr_sched_task(ctx, sched_in);
 }
 
+static int intel_pmu_check_period(struct perf_event *event, u64 value)
+{
+       return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0;
+}
+
 PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
 
 PMU_FORMAT_ATTR(ldlat, "config1:0-15");
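
intel_pmu_check_period() refuses any attempt to move an existing event onto the magic BTS configuration (the branch-instructions event with a period of 1) after it has been created, which is why it only needs the BTS-period test. It is wired up as the new .check_period callback on both PMU descriptions below; a plausible generic-side wrapper (added elsewhere in this series, sketched here under that assumption) would be:

        static int x86_pmu_check_period(struct perf_event *event, u64 value)
        {
                /* Let the vendor-specific callback veto the new period. */
                if (x86_pmu.check_period && x86_pmu.check_period(event, value))
                        return -EINVAL;

                /* A period the limit_period hook would bump up is rejected too. */
                if (value && x86_pmu.limit_period) {
                        if (x86_pmu.limit_period(event, value) > value)
                                return -EINVAL;
                }

                return 0;
        }
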
@@ -3335,6 +3353,9 @@ static __initconst const struct x86_pmu core_pmu = {
        .cpu_prepare            = intel_pmu_cpu_prepare,
        .cpu_starting           = intel_pmu_cpu_starting,
        .cpu_dying              = intel_pmu_cpu_dying,
+       .cpu_dead               = intel_pmu_cpu_dead,
+
+       .check_period           = intel_pmu_check_period,
 };
 
 static __initconst const struct x86_pmu intel_pmu = {
@@ -3370,8 +3391,12 @@ static __initconst const struct x86_pmu intel_pmu = {
        .cpu_prepare            = intel_pmu_cpu_prepare,
        .cpu_starting           = intel_pmu_cpu_starting,
        .cpu_dying              = intel_pmu_cpu_dying,
+       .cpu_dead               = intel_pmu_cpu_dead,
+
        .guest_get_msrs         = intel_guest_get_msrs,
        .sched_task             = intel_pmu_sched_task,
+
+       .check_period           = intel_pmu_check_period,
 };
 
 static __init void intel_clovertown_quirk(void)
@@ -3714,6 +3739,7 @@ __init int intel_pmu_init(void)
                x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
                x86_pmu.enable_all = intel_pmu_nhm_enable_all;
                x86_pmu.extra_regs = intel_nehalem_extra_regs;
+               x86_pmu.limit_period = nhm_limit_period;
 
                x86_pmu.cpu_events = nhm_events_attrs;
 
@@ -3730,11 +3756,11 @@ __init int intel_pmu_init(void)
                pr_cont("Nehalem events, ");
                break;
 
-       case INTEL_FAM6_ATOM_PINEVIEW:
-       case INTEL_FAM6_ATOM_LINCROFT:
-       case INTEL_FAM6_ATOM_PENWELL:
-       case INTEL_FAM6_ATOM_CLOVERVIEW:
-       case INTEL_FAM6_ATOM_CEDARVIEW:
+       case INTEL_FAM6_ATOM_BONNELL:
+       case INTEL_FAM6_ATOM_BONNELL_MID:
+       case INTEL_FAM6_ATOM_SALTWELL:
+       case INTEL_FAM6_ATOM_SALTWELL_MID:
+       case INTEL_FAM6_ATOM_SALTWELL_TABLET:
                memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
 
@@ -3746,9 +3772,11 @@ __init int intel_pmu_init(void)
                pr_cont("Atom events, ");
                break;
 
-       case INTEL_FAM6_ATOM_SILVERMONT1:
-       case INTEL_FAM6_ATOM_SILVERMONT2:
+       case INTEL_FAM6_ATOM_SILVERMONT:
+       case INTEL_FAM6_ATOM_SILVERMONT_X:
+       case INTEL_FAM6_ATOM_SILVERMONT_MID:
        case INTEL_FAM6_ATOM_AIRMONT:
+       case INTEL_FAM6_ATOM_AIRMONT_MID:
                memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
                        sizeof(hw_cache_event_ids));
                memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
@@ -3765,7 +3793,7 @@ __init int intel_pmu_init(void)
                break;
 
        case INTEL_FAM6_ATOM_GOLDMONT:
-       case INTEL_FAM6_ATOM_DENVERTON:
+       case INTEL_FAM6_ATOM_GOLDMONT_X:
                memcpy(hw_cache_event_ids, glm_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
                memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs,
@@ -4128,7 +4156,7 @@ static __init int fixup_ht_bug(void)
        get_online_cpus();
 
        for_each_online_cpu(c) {
-               free_excl_cntrs(c);
+               free_excl_cntrs(&per_cpu(cpu_hw_events, c));
        }
 
        put_online_cpus();