
ARM: perf: add basic support for Krait CPU PMUs
author     Stephen Boyd <sboyd@codeaurora.org>
           Fri, 7 Feb 2014 21:01:21 +0000 (21:01 +0000)
committer  Will Deacon <will.deacon@arm.com>
           Fri, 21 Feb 2014 11:11:02 +0000 (11:11 +0000)
Add basic support for the Krait CPU PMU. This allows us to use
the architected functionality of the PMU.

This is based on code originally written by Ashwin Chaugule and
Neil Leeder [1].

[1] https://www.codeaurora.org/cgit/quic/la/kernel/msm/tree/arch/arm/kernel/perf_event_msm_krait.c?h=msm-3.4

Cc: Neil Leeder <nleeder@codeaurora.org>
Cc: Ashwin Chaugule <ashwinc@codeaurora.org>
Signed-off-by: Stephen Boyd <sboyd@codeaurora.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
arch/arm/kernel/perf_event_cpu.c
arch/arm/kernel/perf_event_v7.c

diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index 68d02ca..ed571d3 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -229,6 +229,7 @@ static struct of_device_id cpu_pmu_of_device_ids[] = {
        {.compatible = "arm,arm11mpcore-pmu",   .data = armv6mpcore_pmu_init},
        {.compatible = "arm,arm1176-pmu",       .data = armv6pmu_init},
        {.compatible = "arm,arm1136-pmu",       .data = armv6pmu_init},
+       {.compatible = "qcom,krait-pmu",        .data = krait_pmu_init},
        {},
 };
 
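The new table entry ties the "qcom,krait-pmu" compatible string to krait_pmu_init() through the .data pointer. For context only, a simplified, hypothetical sketch of how such a table is typically consumed at probe time follows; the example_* names are illustrative and this is not the actual probe code in perf_event_cpu.c:

#include <linux/of.h>
#include <linux/platform_device.h>
#include <asm/pmu.h>

/*
 * Illustrative sketch: of_match_node() selects the of_device_id entry whose
 * compatible string matches the device-tree node, and the init function
 * stored in .data is then invoked, e.g. krait_pmu_init() for a node with
 * compatible = "qcom,krait-pmu".  example_alloc_pmu() is a hypothetical
 * helper standing in for the driver's real allocation path.
 */
static int example_cpu_pmu_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id;
	int (*init_fn)(struct arm_pmu *);
	struct arm_pmu *pmu = example_alloc_pmu();

	of_id = of_match_node(cpu_pmu_of_device_ids, pdev->dev.of_node);
	if (!of_id)
		return -ENODEV;

	init_fn = of_id->data;
	return init_fn(pmu);
}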
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 039cffb..16386b1 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -732,6 +732,138 @@ static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 };
 
 /*
+ * Krait HW events mapping
+ */
+static const unsigned krait_perf_map[PERF_COUNT_HW_MAX] = {
+       [PERF_COUNT_HW_CPU_CYCLES]          = ARMV7_PERFCTR_CPU_CYCLES,
+       [PERF_COUNT_HW_INSTRUCTIONS]        = ARMV7_PERFCTR_INSTR_EXECUTED,
+       [PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
+       [PERF_COUNT_HW_CACHE_MISSES]        = HW_OP_UNSUPPORTED,
+       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
+       [PERF_COUNT_HW_BRANCH_MISSES]       = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+       [PERF_COUNT_HW_BUS_CYCLES]          = ARMV7_PERFCTR_CLOCK_CYCLES,
+};
+
+static const unsigned krait_perf_map_no_branch[PERF_COUNT_HW_MAX] = {
+       [PERF_COUNT_HW_CPU_CYCLES]          = ARMV7_PERFCTR_CPU_CYCLES,
+       [PERF_COUNT_HW_INSTRUCTIONS]        = ARMV7_PERFCTR_INSTR_EXECUTED,
+       [PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
+       [PERF_COUNT_HW_CACHE_MISSES]        = HW_OP_UNSUPPORTED,
+       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = HW_OP_UNSUPPORTED,
+       [PERF_COUNT_HW_BRANCH_MISSES]       = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+       [PERF_COUNT_HW_BUS_CYCLES]          = ARMV7_PERFCTR_CLOCK_CYCLES,
+};
+
+static const unsigned krait_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+                                         [PERF_COUNT_HW_CACHE_OP_MAX]
+                                         [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+       [C(L1D)] = {
+               /*
+                * The performance counters don't differentiate between read
+                * and write accesses/misses so this isn't strictly correct,
+                * but it's the best we can do. Writes and reads get
+                * combined.
+                */
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+       },
+       [C(L1I)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+       },
+       [C(LL)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+       },
+       [C(DTLB)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+       },
+       [C(ITLB)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+       },
+       [C(BPU)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_PC_BRANCH_PRED,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_PC_BRANCH_PRED,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+       },
+       [C(NODE)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+       },
+};
+
+/*
  * Perf Events' indices
  */
 #define        ARMV7_IDX_CYCLE_COUNTER 0
@@ -1212,6 +1344,18 @@ static int armv7_a7_map_event(struct perf_event *event)
                                &armv7_a7_perf_cache_map, 0xFF);
 }
 
+static int krait_map_event(struct perf_event *event)
+{
+       return armpmu_map_event(event, &krait_perf_map,
+                               &krait_perf_cache_map, 0xFFFFF);
+}
+
+static int krait_map_event_no_branch(struct perf_event *event)
+{
+       return armpmu_map_event(event, &krait_perf_map_no_branch,
+                               &krait_perf_cache_map, 0xFFFFF);
+}
+
 static void armv7pmu_init(struct arm_pmu *cpu_pmu)
 {
        cpu_pmu->handle_irq     = armv7pmu_handle_irq;
@@ -1283,6 +1427,21 @@ static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
        cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
        return 0;
 }
+
+static int krait_pmu_init(struct arm_pmu *cpu_pmu)
+{
+       armv7pmu_init(cpu_pmu);
+       cpu_pmu->name           = "ARMv7 Krait";
+       /* Some early versions of Krait don't support PC write events */
+       if (of_property_read_bool(cpu_pmu->plat_device->dev.of_node,
+                                 "qcom,no-pc-write"))
+               cpu_pmu->map_event = krait_map_event_no_branch;
+       else
+               cpu_pmu->map_event = krait_map_event;
+       cpu_pmu->num_events     = armv7_read_num_pmnc_events();
+       cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
+       return 0;
+}
 #else
 static inline int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
 {
@@ -1308,4 +1467,9 @@ static inline int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
 {
        return -ENODEV;
 }
+
+static inline int krait_pmu_init(struct arm_pmu *cpu_pmu)
+{
+       return -ENODEV;
+}
 #endif /* CONFIG_CPU_V7 */
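
With the mappings above in place, the generic hardware event types are usable on Krait through the standard perf interfaces, for example "perf stat -e branches" from the command line. As an illustration only (not part of this patch), a minimal userspace sketch using the perf_event_open(2) syscall to count PERF_COUNT_HW_BRANCH_INSTRUCTIONS, which krait_map_event() routes to ARMV7_PERFCTR_PC_WRITE and which krait_map_event_no_branch() reports as unsupported on parts described with "qcom,no-pc-write":

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	volatile int i;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	/* Count for the current task on any CPU. */
	fd = perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);

	for (i = 0; i < 1000000; i++)
		;	/* some branches to count */

	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("branch instructions: %llu\n", (unsigned long long)count);

	close(fd);
	return 0;
}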