
perf/events/amd/uncore: Fix amd_uncore_llc ID to use pre-defined cpu_llc_id
arch/x86/events/amd/uncore.c (android-x86/kernel.git)
/*
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Jacob Shin <jacob.shin@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>

#include <asm/cpufeature.h>
#include <asm/perf_event.h>
#include <asm/msr.h>
#include <asm/smp.h>

#define NUM_COUNTERS_NB		4
#define NUM_COUNTERS_L2		4
#define MAX_COUNTERS		NUM_COUNTERS_NB

#define RDPMC_BASE_NB		6
#define RDPMC_BASE_LLC		10

#define COUNTER_SHIFT		16

static HLIST_HEAD(uncore_unused_list);

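/*
 * One amd_uncore instance exists per northbridge / last-level cache
 * domain and is shared by every CPU in that domain: @cpu is the CPU
 * designated to run the counters, @refcnt counts the online CPUs that
 * share the instance, and @id identifies the domain so siblings can
 * find each other during hotplug.
 */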
struct amd_uncore {
	int id;
	int refcnt;
	int cpu;
	int num_counters;
	int rdpmc_base;
	u32 msr_base;
	cpumask_t *active_mask;
	struct pmu *pmu;
	struct perf_event *events[MAX_COUNTERS];
	struct hlist_node node;
};

static struct amd_uncore * __percpu *amd_uncore_nb;
static struct amd_uncore * __percpu *amd_uncore_llc;

static struct pmu amd_nb_pmu;
static struct pmu amd_llc_pmu;

static cpumask_t amd_nb_active_mask;
static cpumask_t amd_llc_active_mask;

static bool is_nb_event(struct perf_event *event)
{
	return event->pmu->type == amd_nb_pmu.type;
}

static bool is_llc_event(struct perf_event *event)
{
	return event->pmu->type == amd_llc_pmu.type;
}

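/*
 * Map an event to the amd_uncore instance of the CPU it was opened on;
 * returns NULL if the corresponding PMU was never set up on this system.
 */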
static struct amd_uncore *event_to_amd_uncore(struct perf_event *event)
{
	if (is_nb_event(event) && amd_uncore_nb)
		return *per_cpu_ptr(amd_uncore_nb, event->cpu);
	else if (is_llc_event(event) && amd_uncore_llc)
		return *per_cpu_ptr(amd_uncore_llc, event->cpu);

	return NULL;
}

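/*
 * The hardware counters are 48 bits wide. Shifting prev and new up by
 * COUNTER_SHIFT (16) aligns counter bit 47 with the s64 sign bit, so
 * the subtraction below yields a correct delta even when the counter
 * wraps; shifting the delta back down restores its magnitude.
 */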
static void amd_uncore_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev, new;
	s64 delta;

	/*
	 * since we do not enable counter overflow interrupts,
	 * we do not have to worry about prev_count changing on us
	 */

	prev = local64_read(&hwc->prev_count);
	rdpmcl(hwc->event_base_rdpmc, new);
	local64_set(&hwc->prev_count, new);
	delta = (new << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
	delta >>= COUNTER_SHIFT;
	local64_add(delta, &event->count);
}

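/*
 * Starting writes the last snapshot back to the counter (on PERF_EF_RELOAD)
 * and sets the enable bit in the control MSR; stopping clears the enable
 * bit and, on PERF_EF_UPDATE, folds the final count into the event.
 */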
static void amd_uncore_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (flags & PERF_EF_RELOAD)
		wrmsrl(hwc->event_base, (u64)local64_read(&hwc->prev_count));

	hwc->state = 0;
	wrmsrl(hwc->config_base, (hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE));
	perf_event_update_userpage(event);
}

static void amd_uncore_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
	hwc->state |= PERF_HES_STOPPED;

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		amd_uncore_read(event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

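/*
 * Claim a free counter slot with cmpxchg() so CPUs that share the same
 * uncore instance cannot race for one slot. The control and counter MSRs
 * are interleaved: slot i uses msr_base + 2*i (CTL) and
 * msr_base + 2*i + 1 (CTR).
 */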
static int amd_uncore_add(struct perf_event *event, int flags)
{
	int i;
	struct amd_uncore *uncore = event_to_amd_uncore(event);
	struct hw_perf_event *hwc = &event->hw;

	/* are we already assigned? */
	if (hwc->idx != -1 && uncore->events[hwc->idx] == event)
		goto out;

	for (i = 0; i < uncore->num_counters; i++) {
		if (uncore->events[i] == event) {
			hwc->idx = i;
			goto out;
		}
	}

	/* if not, take the first available counter */
	hwc->idx = -1;
	for (i = 0; i < uncore->num_counters; i++) {
		if (cmpxchg(&uncore->events[i], NULL, event) == NULL) {
			hwc->idx = i;
			break;
		}
	}

out:
	if (hwc->idx == -1)
		return -EBUSY;

	hwc->config_base = uncore->msr_base + (2 * hwc->idx);
	hwc->event_base = uncore->msr_base + 1 + (2 * hwc->idx);
	hwc->event_base_rdpmc = uncore->rdpmc_base + hwc->idx;
	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	if (flags & PERF_EF_START)
		amd_uncore_start(event, PERF_EF_RELOAD);

	return 0;
}

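/* Stop the event, fold in its final count and release the counter slot. */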
static void amd_uncore_del(struct perf_event *event, int flags)
{
	int i;
	struct amd_uncore *uncore = event_to_amd_uncore(event);
	struct hw_perf_event *hwc = &event->hw;

	amd_uncore_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < uncore->num_counters; i++) {
		if (cmpxchg(&uncore->events[i], event, NULL) == event)
			break;
	}

	hwc->idx = -1;
}

static int amd_uncore_event_init(struct perf_event *event)
{
	struct amd_uncore *uncore;
	struct hw_perf_event *hwc = &event->hw;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/*
	 * NB and Last level cache counters (MSRs) are shared across all cores
	 * that share the same NB / Last level cache. Interrupts can be directed
	 * to a single target core, however, event counts generated by processes
	 * running on other cores cannot be masked out. So we do not support
	 * sampling and per-thread events.
	 */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EINVAL;

	/* NB and Last level cache counters do not have usr/os/guest/host bits */
	if (event->attr.exclude_user || event->attr.exclude_kernel ||
	    event->attr.exclude_host || event->attr.exclude_guest)
		return -EINVAL;

	/* and we do not enable counter overflow interrupts */
	hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
	hwc->idx = -1;

	if (event->cpu < 0)
		return -EINVAL;

	uncore = event_to_amd_uncore(event);
	if (!uncore)
		return -ENODEV;

	/*
	 * since request can come in to any of the shared cores, we will remap
	 * to a single common cpu.
	 */
	event->cpu = uncore->cpu;

	return 0;
}

static ssize_t amd_uncore_attr_show_cpumask(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	cpumask_t *active_mask;
	struct pmu *pmu = dev_get_drvdata(dev);

	if (pmu->type == amd_nb_pmu.type)
		active_mask = &amd_nb_active_mask;
	else if (pmu->type == amd_llc_pmu.type)
		active_mask = &amd_llc_active_mask;
	else
		return 0;

	return cpumap_print_to_pagebuf(true, buf, active_mask);
}
static DEVICE_ATTR(cpumask, S_IRUGO, amd_uncore_attr_show_cpumask, NULL);

static struct attribute *amd_uncore_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group amd_uncore_attr_group = {
	.attrs = amd_uncore_attrs,
};

PMU_FORMAT_ATTR(event, "config:0-7,32-35");
PMU_FORMAT_ATTR(umask, "config:8-15");

static struct attribute *amd_uncore_format_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	NULL,
};

static struct attribute_group amd_uncore_format_group = {
	.name = "format",
	.attrs = amd_uncore_format_attr,
};

static const struct attribute_group *amd_uncore_attr_groups[] = {
	&amd_uncore_attr_group,
	&amd_uncore_format_group,
	NULL,
};

static struct pmu amd_nb_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.attr_groups	= amd_uncore_attr_groups,
	.name		= "amd_nb",
	.event_init	= amd_uncore_event_init,
	.add		= amd_uncore_add,
	.del		= amd_uncore_del,
	.start		= amd_uncore_start,
	.stop		= amd_uncore_stop,
	.read		= amd_uncore_read,
};

static struct pmu amd_llc_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.attr_groups	= amd_uncore_attr_groups,
	.name		= "amd_l2",
	.event_init	= amd_uncore_event_init,
	.add		= amd_uncore_add,
	.del		= amd_uncore_del,
	.start		= amd_uncore_start,
	.stop		= amd_uncore_stop,
	.read		= amd_uncore_read,
};

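/* Allocate an uncore instance on the memory node local to @cpu. */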
static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
{
	return kzalloc_node(sizeof(struct amd_uncore), GFP_KERNEL,
			cpu_to_node(cpu));
}

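/*
 * CPUHP prepare stage: allocate tentative per-cpu instances before the
 * CPU comes up. If a sibling that shares the NB / LLC is already online,
 * the starting callback below replaces these with the sibling's instance.
 */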
static int amd_uncore_cpu_up_prepare(unsigned int cpu)
{
	struct amd_uncore *uncore_nb = NULL, *uncore_llc;

	if (amd_uncore_nb) {
		uncore_nb = amd_uncore_alloc(cpu);
		if (!uncore_nb)
			goto fail;
		uncore_nb->cpu = cpu;
		uncore_nb->num_counters = NUM_COUNTERS_NB;
		uncore_nb->rdpmc_base = RDPMC_BASE_NB;
		uncore_nb->msr_base = MSR_F15H_NB_PERF_CTL;
		uncore_nb->active_mask = &amd_nb_active_mask;
		uncore_nb->pmu = &amd_nb_pmu;
		uncore_nb->id = -1;
		*per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb;
	}

	if (amd_uncore_llc) {
		uncore_llc = amd_uncore_alloc(cpu);
		if (!uncore_llc)
			goto fail;
		uncore_llc->cpu = cpu;
		uncore_llc->num_counters = NUM_COUNTERS_L2;
		uncore_llc->rdpmc_base = RDPMC_BASE_LLC;
		uncore_llc->msr_base = MSR_F16H_L2I_PERF_CTL;
		uncore_llc->active_mask = &amd_llc_active_mask;
		uncore_llc->pmu = &amd_llc_pmu;
		uncore_llc->id = -1;
		*per_cpu_ptr(amd_uncore_llc, cpu) = uncore_llc;
	}

	return 0;

fail:
	if (amd_uncore_nb)
		*per_cpu_ptr(amd_uncore_nb, cpu) = NULL;
	kfree(uncore_nb);
	return -ENOMEM;
}

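/*
 * If another online CPU already owns an instance with the same id, adopt
 * it and park our redundant copy on uncore_unused_list; freeing is
 * deferred to uncore_clean_online() in the online stage. Either way the
 * surviving instance gains a reference.
 */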
static struct amd_uncore *
amd_uncore_find_online_sibling(struct amd_uncore *this,
			       struct amd_uncore * __percpu *uncores)
{
	unsigned int cpu;
	struct amd_uncore *that;

	for_each_online_cpu(cpu) {
		that = *per_cpu_ptr(uncores, cpu);

		if (!that)
			continue;

		if (this == that)
			continue;

		if (this->id == that->id) {
			hlist_add_head(&this->node, &uncore_unused_list);
			this = that;
			break;
		}
	}

	this->refcnt++;
	return this;
}

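/*
 * CPUHP starting stage: derive the sharing domain for each PMU. The NB id
 * is the node id from CPUID leaf 0x8000001e (ECX[7:0]); the LLC id reuses
 * the kernel's pre-computed cpu_llc_id instead of deriving its own, which
 * is the point of this commit.
 */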
static int amd_uncore_cpu_starting(unsigned int cpu)
{
	unsigned int eax, ebx, ecx, edx;
	struct amd_uncore *uncore;

	if (amd_uncore_nb) {
		uncore = *per_cpu_ptr(amd_uncore_nb, cpu);
		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
		uncore->id = ecx & 0xff;

		uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_nb);
		*per_cpu_ptr(amd_uncore_nb, cpu) = uncore;
	}

	if (amd_uncore_llc) {
		uncore = *per_cpu_ptr(amd_uncore_llc, cpu);
		uncore->id = per_cpu(cpu_llc_id, cpu);

		uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_llc);
		*per_cpu_ptr(amd_uncore_llc, cpu) = uncore;
	}

	return 0;
}

static void uncore_clean_online(void)
{
	struct amd_uncore *uncore;
	struct hlist_node *n;

	hlist_for_each_entry_safe(uncore, n, &uncore_unused_list, node) {
		hlist_del(&uncore->node);
		kfree(uncore);
	}
}

static void uncore_online(unsigned int cpu,
			  struct amd_uncore * __percpu *uncores)
{
	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

	uncore_clean_online();

	if (cpu == uncore->cpu)
		cpumask_set_cpu(cpu, uncore->active_mask);
}

static int amd_uncore_cpu_online(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_online(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_online(cpu, amd_uncore_llc);

	return 0;
}

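/*
 * Before @cpu goes down: if it is the designated counter CPU of a shared
 * instance, migrate the perf context and the active_mask bit to another
 * online CPU in the same domain.
 */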
static void uncore_down_prepare(unsigned int cpu,
				struct amd_uncore * __percpu *uncores)
{
	unsigned int i;
	struct amd_uncore *this = *per_cpu_ptr(uncores, cpu);

	if (this->cpu != cpu)
		return;

	/* this cpu is going down, migrate to a shared sibling if possible */
	for_each_online_cpu(i) {
		struct amd_uncore *that = *per_cpu_ptr(uncores, i);

		if (cpu == i)
			continue;

		if (this == that) {
			perf_pmu_migrate_context(this->pmu, cpu, i);
			cpumask_clear_cpu(cpu, that->active_mask);
			cpumask_set_cpu(i, that->active_mask);
			that->cpu = i;
			break;
		}
	}
}

static int amd_uncore_cpu_down_prepare(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_down_prepare(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_down_prepare(cpu, amd_uncore_llc);

	return 0;
}

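/* Drop the dead CPU's reference; free the instance once the last sharer is gone. */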
static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores)
{
	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

	if (cpu == uncore->cpu)
		cpumask_clear_cpu(cpu, uncore->active_mask);

	if (!--uncore->refcnt)
		kfree(uncore);
	*per_cpu_ptr(uncores, cpu) = NULL;
}

static int amd_uncore_cpu_dead(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_dead(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_dead(cpu, amd_uncore_llc);

	return 0;
}

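/*
 * Register the "amd_nb" and "amd_l2" PMUs when the corresponding CPUID
 * features (PERFCTR_NB / PERFCTR_L2) are present, then install the
 * hotplug callbacks that manage the shared per-domain instances.
 */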
static int __init amd_uncore_init(void)
{
	int ret = -ENODEV;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		goto fail_nodev;

	if (!boot_cpu_has(X86_FEATURE_TOPOEXT))
		goto fail_nodev;

	if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) {
		amd_uncore_nb = alloc_percpu(struct amd_uncore *);
		if (!amd_uncore_nb) {
			ret = -ENOMEM;
			goto fail_nb;
		}
		ret = perf_pmu_register(&amd_nb_pmu, amd_nb_pmu.name, -1);
		if (ret)
			goto fail_nb;

		pr_info("perf: AMD NB counters detected\n");
		ret = 0;
	}

	if (boot_cpu_has(X86_FEATURE_PERFCTR_L2)) {
		amd_uncore_llc = alloc_percpu(struct amd_uncore *);
		if (!amd_uncore_llc) {
			ret = -ENOMEM;
			goto fail_llc;
		}
		ret = perf_pmu_register(&amd_llc_pmu, amd_llc_pmu.name, -1);
		if (ret)
			goto fail_llc;

		pr_info("perf: AMD LLC counters detected\n");
		ret = 0;
	}

	/*
	 * Install callbacks. Core will call them for each online cpu.
	 */
	if (cpuhp_setup_state(CPUHP_PERF_X86_AMD_UNCORE_PREP,
			      "PERF_X86_AMD_UNCORE_PREP",
			      amd_uncore_cpu_up_prepare, amd_uncore_cpu_dead))
		goto fail_llc;

	if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
			      "AP_PERF_X86_AMD_UNCORE_STARTING",
			      amd_uncore_cpu_starting, NULL))
		goto fail_prep;
	if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
			      "AP_PERF_X86_AMD_UNCORE_ONLINE",
			      amd_uncore_cpu_online,
			      amd_uncore_cpu_down_prepare))
		goto fail_start;
	return 0;

fail_start:
	cpuhp_remove_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING);
fail_prep:
	cpuhp_remove_state(CPUHP_PERF_X86_AMD_UNCORE_PREP);
fail_llc:
	if (boot_cpu_has(X86_FEATURE_PERFCTR_NB))
		perf_pmu_unregister(&amd_nb_pmu);
	if (amd_uncore_llc)
		free_percpu(amd_uncore_llc);
fail_nb:
	if (amd_uncore_nb)
		free_percpu(amd_uncore_nb);

fail_nodev:
	return ret;
}
device_initcall(amd_uncore_init);
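
/*
 * Usage sketch (hypothetical event/umask values; the real encodings are
 * family-specific and documented in the BKDG): the PMUs appear under
 * /sys/bus/event_source/devices/ as "amd_nb" and "amd_l2", so a raw
 * uncore event can be counted system-wide with e.g.
 *
 *	perf stat -a -e amd_nb/event=0x??,umask=0x??/ sleep 1
 *
 * Counts are reported on the single CPU per domain listed in each PMU's
 * "cpumask" sysfs file, since the counters are shared across all cores
 * behind the same NB / LLC.
 */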