/*
 * Performance counter support for e500 family processors.
 *
 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 * Copyright 2010 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/string.h>
#include <linux/perf_event.h>
#include <asm/reg.h>
#include <asm/cputable.h>

/*
 * Map of generic hardware event types to hardware events
 * Zero if unsupported
 *
 * Values are raw e500 performance-monitor event numbers; see the
 * core's reference manual for the full event list.
 */
static int e500_generic_events[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = 1,
	[PERF_COUNT_HW_INSTRUCTIONS] = 2,
	[PERF_COUNT_HW_CACHE_MISSES] = 41, /* Data L1 cache reloads */
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 12,
	[PERF_COUNT_HW_BRANCH_MISSES] = 15,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 18,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 19,
};
|  | 30 |  | 
/* Shorthand for indexing the generalized cache-event table below. */
#define C(x)	PERF_COUNT_HW_CACHE_##x

/*
 * Table of generalized cache-related events.
 * 0 means not supported, -1 means nonsensical, other values
 * are event codes.
 */
static int e500_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
	/*
	 * D-cache misses are not split into read/write/prefetch;
	 * use raw event 41.
	 */
	[C(L1D)] = {		/* 	RESULT_ACCESS	RESULT_MISS */
		[C(OP_READ)] = {	27,		0	},
		[C(OP_WRITE)] = {	28,		0	},
		[C(OP_PREFETCH)] = {	29,		0	},
	},
	[C(L1I)] = {		/* 	RESULT_ACCESS	RESULT_MISS */
		[C(OP_READ)] = {	2,		60	},
		[C(OP_WRITE)] = {	-1,		-1	},
		[C(OP_PREFETCH)] = {	0,		0	},
	},
	/*
	 * Assuming LL means L2, it's not a good match for this model.
	 * It allocates only on L1 castout or explicit prefetch, and
	 * does not have separate read/write events (but it does have
	 * separate instruction/data events).
	 */
	[C(LL)] = {		/* 	RESULT_ACCESS	RESULT_MISS */
		[C(OP_READ)] = {	0,		0	},
		[C(OP_WRITE)] = {	0,		0	},
		[C(OP_PREFETCH)] = {	0,		0	},
	},
	/*
	 * There are data/instruction MMU misses, but that's a miss on
	 * the chip's internal level-one TLB which is probably not
	 * what the user wants.  Instead, unified level-two TLB misses
	 * are reported here.
	 */
	[C(DTLB)] = {		/* 	RESULT_ACCESS	RESULT_MISS */
		[C(OP_READ)] = {	26,		66	},
		[C(OP_WRITE)] = {	-1,		-1	},
		[C(OP_PREFETCH)] = {	-1,		-1	},
	},
	[C(BPU)] = {		/* 	RESULT_ACCESS	RESULT_MISS */
		[C(OP_READ)] = {	12,		15 	},
		[C(OP_WRITE)] = {	-1,		-1	},
		[C(OP_PREFETCH)] = {	-1,		-1	},
	},
	[C(NODE)] = {		/* 	RESULT_ACCESS	RESULT_MISS */
		[C(OP_READ)] = {	-1,		-1 	},
		[C(OP_WRITE)] = {	-1,		-1	},
		[C(OP_PREFETCH)] = {	-1,		-1	},
	},
};
|  | 86 |  | 
/*
 * Number of valid raw event codes for the detected core; raised to
 * 256 in init_e500_pmu() when running on an e500mc.
 */
static int num_events = 128;
|  | 88 |  | 
|  | 89 | /* Upper half of event id is PMLCb, for threshold events */ | 
|  | 90 | static u64 e500_xlate_event(u64 event_id) | 
|  | 91 | { | 
|  | 92 | u32 event_low = (u32)event_id; | 
|  | 93 | u64 ret; | 
|  | 94 |  | 
|  | 95 | if (event_low >= num_events) | 
|  | 96 | return 0; | 
|  | 97 |  | 
|  | 98 | ret = FSL_EMB_EVENT_VALID; | 
|  | 99 |  | 
|  | 100 | if (event_low >= 76 && event_low <= 81) { | 
|  | 101 | ret |= FSL_EMB_EVENT_RESTRICTED; | 
|  | 102 | ret |= event_id & | 
|  | 103 | (FSL_EMB_EVENT_THRESHMUL | FSL_EMB_EVENT_THRESH); | 
|  | 104 | } else if (event_id & | 
|  | 105 | (FSL_EMB_EVENT_THRESHMUL | FSL_EMB_EVENT_THRESH)) { | 
|  | 106 | /* Threshold requested on non-threshold event */ | 
|  | 107 | return 0; | 
|  | 108 | } | 
|  | 109 |  | 
|  | 110 | return ret; | 
|  | 111 | } | 
|  | 112 |  | 
/*
 * PMU description handed to the shared Freescale embedded perf core:
 * 4 counters total, of which 2 can count the restricted events
 * recognized by e500_xlate_event().
 */
static struct fsl_emb_pmu e500_pmu = {
	.name			= "e500 family",
	.n_counter		= 4,
	.n_restricted		= 2,
	.xlate_event		= e500_xlate_event,
	.n_generic		= ARRAY_SIZE(e500_generic_events),
	.generic_events		= e500_generic_events,
	.cache_events		= &e500_cache_events,
};
|  | 122 |  | 
|  | 123 | static int init_e500_pmu(void) | 
|  | 124 | { | 
|  | 125 | if (!cur_cpu_spec->oprofile_cpu_type) | 
|  | 126 | return -ENODEV; | 
|  | 127 |  | 
|  | 128 | if (!strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc/e500mc")) | 
|  | 129 | num_events = 256; | 
|  | 130 | else if (strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc/e500")) | 
|  | 131 | return -ENODEV; | 
|  | 132 |  | 
|  | 133 | return register_fsl_emb_pmu(&e500_pmu); | 
|  | 134 | } | 
|  | 135 |  | 
|  | 136 | early_initcall(init_e500_pmu); |