radix: Implement radix_tree_destroy()
[akaros.git] / user / perfmon / pfmlib_amd64.c
1 /*
2  * pfmlib_amd64.c : support for the AMD64 architected PMU
3  *                  (for both 64 and 32 bit modes)
4  *
5  * Copyright (c) 2009 Google, Inc
6  * Contributed by Stephane Eranian <eranian@gmail.com>
7  *
8  * Based on:
9  * Copyright (c) 2005-2006 Hewlett-Packard Development Company, L.P.
10  * Contributed by Stephane Eranian <eranian@hpl.hp.com>
11  *
12  * Permission is hereby granted, free of charge, to any person obtaining a copy
13  * of this software and associated documentation files (the "Software"), to deal
14  * in the Software without restriction, including without limitation the rights
15  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
16  * of the Software, and to permit persons to whom the Software is furnished to do so,
17  * subject to the following conditions:
18  *
19  * The above copyright notice and this permission notice shall be included in all
20  * copies or substantial portions of the Software.
21  *
22  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
23  * INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
24  * PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
25  * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
26  * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
27  * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28  */
29 #include <sys/types.h>
30 #include <string.h>
31 #include <stdlib.h>
32
33 /* private headers */
34 #include "pfmlib_priv.h"                /* library private */
35 #include "pfmlib_amd64_priv.h"          /* architecture private */
36
/*
 * Descriptions of the event modifiers supported by the AMD64 core PMU.
 * Array order must match the AMD64_ATTR_* index values: the encoding
 * code below indexes amd64_mods[] directly with those constants.
 */
const pfmlib_attr_desc_t amd64_mods[]={
	PFM_ATTR_B("k", "monitor at priv level 0"),		/* monitor priv level 0 */
	PFM_ATTR_B("u", "monitor at priv level 1, 2, 3"),	/* monitor priv level 1, 2, 3 */
	PFM_ATTR_B("e", "edge level"),				/* edge */
	PFM_ATTR_B("i", "invert"),				/* invert */
	PFM_ATTR_I("c", "counter-mask in range [0-255]"),	/* counter-mask */
	PFM_ATTR_B("h", "monitor in hypervisor"),		/* monitor in hypervisor*/
	PFM_ATTR_B("g", "measure in guest"),			/* monitor in guest */
	PFM_ATTR_NULL /* end-marker to avoid exporting number of entries */
};
47
/* PMU description exported to other compilation units */
pfmlib_pmu_t amd64_support;
/* host CPU configuration, filled in by pfm_amd64_detect() */
pfm_amd64_config_t pfm_amd64_cfg;
50
51 static int
52 amd64_num_mods(void *this, int idx)
53 {
54         const amd64_entry_t *pe = this_pe(this);
55         unsigned int mask;
56
57         mask = pe[idx].modmsk;
58         return pfmlib_popcnt(mask);
59 }
60
61 static inline int
62 amd64_eflag(void *this, int idx, int flag)
63 {
64         const amd64_entry_t *pe = this_pe(this);
65         return !!(pe[idx].flags & flag);
66 }
67
68 static inline int
69 amd64_uflag(void *this, int idx, int attr, int flag)
70 {
71         const amd64_entry_t *pe = this_pe(this);
72         return !!(pe[idx].umasks[attr].uflags & flag);
73 }
74
/* true if event @idx is an IBS fetch sampling event */
static inline int
amd64_event_ibsfetch(void *this, int idx)
{
	return amd64_eflag(this, idx, AMD64_FL_IBSFE);
}
80
/* true if event @idx is an IBS op sampling event */
static inline int
amd64_event_ibsop(void *this, int idx)
{
	return amd64_eflag(this, idx, AMD64_FL_IBSOP);
}
86
/*
 * amd64_from_rev - extract the first PMU revision supporting an
 * event/umask: bits 15:8 of the flags word.
 */
static inline int
amd64_from_rev(unsigned int flags)
{
	return (int)((flags >> 8) & 0xff);
}
92
/*
 * amd64_till_rev - extract the last PMU revision supporting an
 * event/umask: bits 23:16 of the flags word. A zero field means
 * "no upper bound" and is reported as 0xff.
 */
static inline int
amd64_till_rev(unsigned int flags)
{
	int last = (int)((flags >> 16) & 0xff);

	return last ? last : 0xff;
}
101
/*
 * amd64_get_revision - map the CPUID family/model/stepping triplet in
 * @cfg onto a libpfm PMU revision identifier, stored in cfg->revision.
 *
 * Unrecognized families leave cfg->revision at PFM_PMU_NONE, which the
 * caller treats as "not supported".
 */
static void
amd64_get_revision(pfm_amd64_config_t *cfg)
{
	pfm_pmu_t rev = PFM_PMU_NONE;

	/* family 6 = K7 (32-bit Athlon) */
	if (cfg->family == 6) {
		cfg->revision = PFM_PMU_AMD64_K7;
		return;
	}

	if (cfg->family == 15) { /* K8, family 0fh: revision depends on model/stepping */
		switch (cfg->model >> 4) {
		case 0:
			/* low models: revision B except late steppings */
			if (cfg->model == 5 && cfg->stepping < 2) {
				rev = PFM_PMU_AMD64_K8_REVB;
				break;
			}
			if (cfg->model == 4 && cfg->stepping == 0) {
				rev = PFM_PMU_AMD64_K8_REVB;
				break;
			}
			rev = PFM_PMU_AMD64_K8_REVC;
			break;
		case 1:
			rev = PFM_PMU_AMD64_K8_REVD;
			break;
		case 2:
		case 3:
			rev = PFM_PMU_AMD64_K8_REVE;
			break;
		case 4:
		case 5:
		case 0xc:
			rev = PFM_PMU_AMD64_K8_REVF;
			break;
		case 6:
		case 7:
		case 8:
			rev = PFM_PMU_AMD64_K8_REVG;
			break;
		default:
			rev = PFM_PMU_AMD64_K8_REVB;
		}
	} else if (cfg->family == 16) { /* family 10h */
		switch (cfg->model) {
		case 4:
		case 5:
		case 6:
			rev = PFM_PMU_AMD64_FAM10H_SHANGHAI;
			break;
		case 8:
		case 9:
			rev = PFM_PMU_AMD64_FAM10H_ISTANBUL;
			break;
		default:
			rev = PFM_PMU_AMD64_FAM10H_BARCELONA;
		}
	} else if (cfg->family == 17) { /* family 11h */
		switch (cfg->model) {
		default:
			rev = PFM_PMU_AMD64_FAM11H_TURION;
		}
	} else if (cfg->family == 18) { /* family 12h */
		switch (cfg->model) {
		default:
			rev = PFM_PMU_AMD64_FAM12H_LLANO;
		}
	} else if (cfg->family == 20) { /* family 14h */
		switch (cfg->model) {
		default:
			rev = PFM_PMU_AMD64_FAM14H_BOBCAT;
		}
	} else if (cfg->family == 21) { /* family 15h */
		rev = PFM_PMU_AMD64_FAM15H_INTERLAGOS;
	}
	cfg->revision = rev;
}
179
/*
 * cpuid - execute the CPUID instruction for leaf @op; results are
 * returned through @a/@b/@c/@d (eax/ebx/ecx/edx).
 *
 * .byte 0x53 == push ebx. it's universal for 32 and 64 bit
 * .byte 0x5b == pop ebx.
 * Some gcc's (4.1.2 on Core2) object to pairing push/pop and ebx in 64 bit mode.
 * Using the opcode directly avoids this problem.
 * ebx is saved/restored by hand because it may be the PIC register;
 * the value is moved into esi ("=S") before being popped back.
 */
static inline void
cpuid(unsigned int op, unsigned int *a, unsigned int *b, unsigned int *c, unsigned int *d)
{
  __asm__ __volatile__ (".byte 0x53\n\tcpuid\n\tmovl %%ebx, %%esi\n\t.byte 0x5b"
       : "=a" (*a),
             "=S" (*b),
                 "=c" (*c),
                 "=d" (*d)
       : "a" (op));
}
196
197 static int
198 amd64_event_valid(void *this, int i)
199 {
200         const amd64_entry_t *pe = this_pe(this);
201         pfmlib_pmu_t *pmu = this;
202         int flags;
203
204         flags = pe[i].flags;
205
206         if (pmu->pmu_rev  < amd64_from_rev(flags))
207                 return 0;
208
209         if (pmu->pmu_rev > amd64_till_rev(flags))
210                 return 0;
211
212         /* no restrictions or matches restrictions */
213         return 1;
214 }
215
216 static int
217 amd64_umask_valid(void *this, int i, int attr)
218 {
219         pfmlib_pmu_t *pmu = this;
220         const amd64_entry_t *pe = this_pe(this);
221         int flags;
222
223         flags = pe[i].umasks[attr].uflags;
224
225         if (pmu->pmu_rev < amd64_from_rev(flags))
226                 return 0;
227
228         if (pmu->pmu_rev > amd64_till_rev(flags))
229                 return 0;
230
231         /* no restrictions or matches restrictions */
232         return 1;
233 }
234
235 static unsigned int
236 amd64_num_umasks(void *this, int pidx)
237 {
238         const amd64_entry_t *pe = this_pe(this);
239         unsigned int i, n = 0;
240
241         /* unit masks + modifiers */
242         for (i = 0; i < pe[pidx].numasks; i++)
243                 if (amd64_umask_valid(this, pidx, i))
244                         n++;
245         return n;
246 }
247
248 static int
249 amd64_get_umask(void *this, int pidx, int attr_idx)
250 {
251         const amd64_entry_t *pe = this_pe(this);
252         unsigned int i;
253         int n;
254
255         for (i=0, n = 0; i < pe[pidx].numasks; i++) {
256                 if (!amd64_umask_valid(this, pidx, i))
257                         continue;
258                 if (n++ == attr_idx)
259                         return i;
260         }
261         return -1;
262 }
263
/*
 * amd64_attr2mod - translate a global attribute index into a modifier
 * index. Attribute indices list all valid unit masks first, then
 * modifiers; this returns the bit position of the
 * (attr_idx - num_umasks)-th set bit in the event's modifier mask.
 *
 * NOTE(review): assumes the caller passes attr_idx >= num_umasks and
 * within range; out-of-range values yield the last set bit.
 */
static inline int
amd64_attr2mod(void *this, int pidx, int attr_idx)
{
	const amd64_entry_t *pe = this_pe(this);
	size_t x;
	int n;

	/* rank of the requested modifier among the event's modifiers */
	n = attr_idx - amd64_num_umasks(this, pidx);

	pfmlib_for_each_bit(x, pe[pidx].modmsk) {
		if (n == 0)
			break;
		n--;
	}
	return x;
}
280
/*
 * amd64_display_reg - verbose-print the encoded selector register for
 * event descriptor @e.
 *
 * Families 10h and 15h carry extended event-select bits
 * (sel_event_mask2) plus guest/host filtering, so they use the longer
 * format; older families use the base format.
 */
void amd64_display_reg(void *this, pfmlib_event_desc_t *e, pfm_amd64_reg_t reg)
{
	pfmlib_pmu_t *pmu = this;

	if (IS_FAMILY_10H(pmu) || IS_FAMILY_15H(pmu))
		__pfm_vbprintf("[0x%"PRIx64" event_sel=0x%x umask=0x%x os=%d usr=%d en=%d int=%d inv=%d edge=%d cnt_mask=%d guest=%d host=%d] %s\n",
			reg.val,
			reg.sel_event_mask | (reg.sel_event_mask2 << 8),
			reg.sel_unit_mask,
			reg.sel_os,
			reg.sel_usr,
			reg.sel_en,
			reg.sel_int,
			reg.sel_inv,
			reg.sel_edge,
			reg.sel_cnt_mask,
			reg.sel_guest,
			reg.sel_host,
			e->fstr);
	else
		__pfm_vbprintf("[0x%"PRIx64" event_sel=0x%x umask=0x%x os=%d usr=%d en=%d int=%d inv=%d edge=%d cnt_mask=%d] %s\n",
			reg.val,
			reg.sel_event_mask,
			reg.sel_unit_mask,
			reg.sel_os,
			reg.sel_usr,
			reg.sel_en,
			reg.sel_int,
			reg.sel_inv,
			reg.sel_edge,
			reg.sel_cnt_mask,
			e->fstr);
}
314
315 int
316 pfm_amd64_detect(void *this)
317 {
318         unsigned int a, b, c, d;
319         char buffer[128];
320
321         if (pfm_amd64_cfg.family)
322                 return PFM_SUCCESS;
323
324         cpuid(0, &a, &b, &c, &d);
325         strncpy(&buffer[0], (char *)(&b), 4);
326         strncpy(&buffer[4], (char *)(&d), 4);
327         strncpy(&buffer[8], (char *)(&c), 4);
328         buffer[12] = '\0';
329
330         if (strcmp(buffer, "AuthenticAMD"))
331                 return PFM_ERR_NOTSUPP;
332
333         cpuid(1, &a, &b, &c, &d);
334         pfm_amd64_cfg.family = (a >> 8) & 0x0000000f;  // bits 11 - 8
335         pfm_amd64_cfg.model  = (a >> 4) & 0x0000000f;  // Bits  7 - 4
336         if (pfm_amd64_cfg.family == 0xf) {
337                 pfm_amd64_cfg.family += (a >> 20) & 0x000000ff; // Extended family
338                 pfm_amd64_cfg.model  |= (a >> 12) & 0x000000f0; // Extended model
339         }
340         pfm_amd64_cfg.stepping= a & 0x0000000f;  // bits  3 - 0
341
342         amd64_get_revision(&pfm_amd64_cfg);
343
344         if (pfm_amd64_cfg.revision == PFM_PMU_NONE)
345                 return PFM_ERR_NOTSUPP;
346
347         return PFM_SUCCESS;
348 }
349
350 int
351 pfm_amd64_family_detect(void *this)
352 {
353         struct pfmlib_pmu *pmu = this;
354         int ret;
355
356         ret = pfm_amd64_detect(this);
357         if (ret != PFM_SUCCESS)
358                 return ret;
359
360         ret = pfm_amd64_cfg.revision;
361         return ret == pmu->cpu_family ? PFM_SUCCESS : PFM_ERR_NOTSUPP;
362 }
363
/*
 * amd64_add_defaults - add default unit masks for the umask groups the
 * user did not supply any umask for.
 *
 * @this : PMU description
 * @e    : event descriptor being encoded (attrs appended, nattrs updated)
 * @msk  : bitmask of group ids still missing a unit mask
 * @umask: in/out accumulated unit mask code
 *
 * Returns PFM_SUCCESS, or PFM_ERR_UMASK when a group has no default
 * umask and at least one of its umasks cannot be omitted.
 */
static int
amd64_add_defaults(void *this, pfmlib_event_desc_t *e, unsigned int msk, uint64_t *umask)
{
	const amd64_entry_t *ent, *pe = this_pe(this);
	unsigned int i;
	int j, k, added, omit, numasks_grp;
	int idx;

	k = e->nattrs;
	ent = pe+e->event;

	/* i tracks the group id for the current bit of msk */
	for(i=0; msk; msk >>=1, i++) {

		if (!(msk & 0x1))
			continue;

		added = omit = numasks_grp = 0;

		/* scan all PMU umask attributes belonging to group i */
		for (j = 0; j < e->npattrs; j++) {
			if (e->pattrs[j].ctrl != PFM_ATTR_CTRL_PMU)
				continue;

			if (e->pattrs[j].type != PFM_ATTR_UMASK)
				continue;

			idx = e->pattrs[j].idx;

			if (ent->umasks[idx].grpid != i)
				continue;

			/* number of umasks in this group */
			numasks_grp++;

			if (amd64_uflag(this, e->event, idx, AMD64_FL_DFL)) {
				DPRINT("added default for %s j=%d idx=%d\n", ent->umasks[idx].uname, j, idx);

				*umask |= ent->umasks[idx].ucode;

				e->attrs[k].id = j; /* pattrs index */
				e->attrs[k].ival = 0;
				k++;

				added++;
			}
			if (amd64_uflag(this, e->event, idx, AMD64_FL_OMIT))
				omit++;
		}
		/*
		 * fail if no default was found AND at least one umask cannot be
		 * omitted in the group
		 */
		if (!added && omit != numasks_grp) {
			DPRINT("no default found for event %s unit mask group %d\n", ent->name, i);
			return PFM_ERR_UMASK;
		}
	}
	e->nattrs = k;
	return PFM_SUCCESS;
}
423
/*
 * pfm_amd64_get_encoding - build the selector register value for event
 * descriptor @e.
 *
 * Combines the event code, user-supplied unit masks/modifiers, default
 * unit masks for groups left unset, and default privilege levels, then
 * stores the result in e->codes[0] (e->count = 1). Also rebuilds the
 * canonical, fully-qualified event string in e->fstr.
 *
 * Returns PFM_SUCCESS or a PFM_ERR_* code (FEATCOMB, ATTR, ATTR_VAL,
 * UMASK).
 */
int
pfm_amd64_get_encoding(void *this, pfmlib_event_desc_t *e)
{
	const amd64_entry_t *pe = this_pe(this);
	pfm_amd64_reg_t reg;
	pfm_event_attr_info_t *a;
	uint64_t umask = 0;
	unsigned int plmmsk = 0;
	int k, ret, grpid;
	unsigned int grpmsk, ugrpmsk = 0;
	int grpcounts[AMD64_MAX_GRP];
	int ncombo[AMD64_MAX_GRP];

	memset(grpcounts, 0, sizeof(grpcounts));
	memset(ncombo, 0, sizeof(ncombo));

	e->fstr[0] = '\0';

	reg.val = 0; /* assume reserved bits are zeroed */

	/* one bit per unit mask group of this event */
	grpmsk = (1 << pe[e->event].ngrp)-1;

	/* IBS events only need their enable bit; others get code + en + int */
	if (amd64_event_ibsfetch(this, e->event))
		reg.ibsfetch.en = 1;
	else if (amd64_event_ibsop(this, e->event))
		reg.ibsop.en = 1;
	else {
		reg.sel_event_mask  = pe[e->event].code;
		reg.sel_event_mask2 = pe[e->event].code >> 8;
		reg.sel_en = 1; /* force enable */
		reg.sel_int = 1; /* force APIC  */
	}

	/* fold each user-supplied attribute into the register */
	for(k=0; k < e->nattrs; k++) {
		a = attr(e, k);

		if (a->ctrl != PFM_ATTR_CTRL_PMU)
			continue;

		if (a->type == PFM_ATTR_UMASK) {
			grpid = pe[e->event].umasks[a->idx].grpid;
			++grpcounts[grpid];

			/*
			 * upper layer has removed duplicates
			 * so if we come here more than once, it is for two
			 * distinct umasks
			 */
			if (amd64_uflag(this, e->event, a->idx, AMD64_FL_NCOMBO))
				ncombo[grpid] = 1;
			/*
			 * if more than one umask in this group but one is marked
			 * with ncombo, then fail. It is okay to combine umasks within
			 * a group as long as none is tagged with NCOMBO
			 */
			if (grpcounts[grpid] > 1 && ncombo[grpid])  {
				DPRINT("event does not support unit mask combination within a group\n");
				return PFM_ERR_FEATCOMB;
			}

			umask |= pe[e->event].umasks[a->idx].ucode;
			ugrpmsk  |= 1 << pe[e->event].umasks[a->idx].grpid;

		} else if (a->type == PFM_ATTR_RAW_UMASK) {

			/* there can only be one RAW_UMASK per event */

			/* sanity checks: raw umask is 8 bits wide */
			if (a->idx & ~0xff) {
				DPRINT("raw umask is invalid\n");
				return PFM_ERR_ATTR;
			}
			/* override umask and mark every group satisfied */
			umask = a->idx & 0xff;
			ugrpmsk = grpmsk;

		} else { /* modifiers */
			uint64_t ival = e->attrs[k].ival;

			switch(a->idx) {
				case AMD64_ATTR_I: /* invert */
					reg.sel_inv = !!ival;
					break;
				case AMD64_ATTR_E: /* edge */
					reg.sel_edge = !!ival;
					break;
				case AMD64_ATTR_C: /* counter-mask */
					if (ival > 255)
						return PFM_ERR_ATTR_VAL;
					reg.sel_cnt_mask = ival;
					break;
				case AMD64_ATTR_U: /* USR */
					reg.sel_usr = !!ival;
					plmmsk |= _AMD64_ATTR_U;
					break;
				case AMD64_ATTR_K: /* OS */
					reg.sel_os = !!ival;
					plmmsk |= _AMD64_ATTR_K;
					break;
				case AMD64_ATTR_G: /* GUEST */
					reg.sel_guest = !!ival;
					plmmsk |= _AMD64_ATTR_G;
					break;
				case AMD64_ATTR_H: /* HOST */
					reg.sel_host = !!ival;
					plmmsk |= _AMD64_ATTR_H;
					break;
			}
		}
	}

	/*
	 * handle case where no priv level mask was passed.
	 * then we use the dfl_plm
	 */
	if (!(plmmsk & (_AMD64_ATTR_K|_AMD64_ATTR_U|_AMD64_ATTR_H))) {
		if (e->dfl_plm & PFM_PLM0)
			reg.sel_os = 1;
		if (e->dfl_plm & PFM_PLM3)
			reg.sel_usr = 1;
		/* host bit exists only on families 10h/15h */
		if ((IS_FAMILY_10H(this) || IS_FAMILY_15H(this))
		     && e->dfl_plm & PFM_PLMH)
			reg.sel_host = 1;
	}

	/*
	 * check that there is at least one unit mask in each unit
	 * mask group; add defaults for the groups still missing one
	 */
	if (ugrpmsk != grpmsk) {
		ugrpmsk ^= grpmsk;
		ret = amd64_add_defaults(this, e, ugrpmsk, &umask);
		if (ret != PFM_SUCCESS)
			return ret;
	}

	reg.sel_unit_mask = umask;

	e->codes[0] = reg.val;
	e->count = 1;

	/*
	 * reorder all the attributes such that the fstr appears always
	 * the same regardless of how the attributes were submitted.
	 */
	evt_strcat(e->fstr, "%s", pe[e->event].name);
	pfmlib_sort_attr(e);
	for (k = 0; k < e->nattrs; k++) {
		a = attr(e, k);
		if (a->ctrl != PFM_ATTR_CTRL_PMU)
			continue;
		if (a->type == PFM_ATTR_UMASK)
			evt_strcat(e->fstr, ":%s", pe[e->event].umasks[a->idx].uname);
		else if (a->type == PFM_ATTR_RAW_UMASK)
			evt_strcat(e->fstr, ":0x%x", a->idx);
	}

	/* append every modifier with its final value */
	for (k = 0; k < e->npattrs; k++) {
		int idx;

		if (e->pattrs[k].ctrl != PFM_ATTR_CTRL_PMU)
			continue;

		if (e->pattrs[k].type == PFM_ATTR_UMASK)
			continue;

		idx = e->pattrs[k].idx;
		switch(idx) {
		case AMD64_ATTR_K:
			evt_strcat(e->fstr, ":%s=%lu", amd64_mods[idx].name, reg.sel_os);
			break;
		case AMD64_ATTR_U:
			evt_strcat(e->fstr, ":%s=%lu", amd64_mods[idx].name, reg.sel_usr);
			break;
		case AMD64_ATTR_E:
			evt_strcat(e->fstr, ":%s=%lu", amd64_mods[idx].name, reg.sel_edge);
			break;
		case AMD64_ATTR_I:
			evt_strcat(e->fstr, ":%s=%lu", amd64_mods[idx].name, reg.sel_inv);
			break;
		case AMD64_ATTR_C:
			evt_strcat(e->fstr, ":%s=%lu", amd64_mods[idx].name, reg.sel_cnt_mask);
			break;
		case AMD64_ATTR_H:
			evt_strcat(e->fstr, ":%s=%lu", amd64_mods[idx].name, reg.sel_host);
			break;
		case AMD64_ATTR_G:
			evt_strcat(e->fstr, ":%s=%lu", amd64_mods[idx].name, reg.sel_guest);
			break;
		}
	}
	amd64_display_reg(this, e, reg);
	return PFM_SUCCESS;
}
618
619 int
620 pfm_amd64_get_event_first(void *this)
621 {
622         pfmlib_pmu_t *pmu = this;
623         int idx;
624
625         for(idx=0; idx < pmu->pme_count; idx++)
626                 if (amd64_event_valid(this, idx))
627                         return idx;
628         return -1;
629 }
630
631 int
632 pfm_amd64_get_event_next(void *this, int idx)
633 {
634         pfmlib_pmu_t *pmu = this;
635
636         /* basic validity checks on idx down by caller */
637         if (idx >= (pmu->pme_count-1))
638                 return -1;
639
640         /* validate event fo this host PMU */
641         if (!amd64_event_valid(this, idx))
642                 return -1;
643
644         for(++idx; idx < pmu->pme_count; idx++) {
645                 if (amd64_event_valid(this, idx))
646                         return idx;
647         }
648         return -1;
649 }
650
651 int
652 pfm_amd64_event_is_valid(void *this, int pidx)
653 {
654         pfmlib_pmu_t *pmu = this;
655
656         if (pidx < 0 || pidx >= pmu->pme_count)
657                 return 0;
658
659         /* valid revision */
660         return amd64_event_valid(this, pidx);
661 }
662
663 int
664 pfm_amd64_get_event_attr_info(void *this, int pidx, int attr_idx, pfm_event_attr_info_t *info)
665 {
666         const amd64_entry_t *pe = this_pe(this);
667         int numasks, idx;
668
669         numasks = amd64_num_umasks(this, pidx);
670
671         if (attr_idx < numasks) {
672                 idx = amd64_get_umask(this, pidx, attr_idx);
673                 if (idx == -1)
674                         return PFM_ERR_ATTR;
675
676                 info->name = pe[pidx].umasks[idx].uname;
677                 info->desc = pe[pidx].umasks[idx].udesc;
678                 info->code = pe[pidx].umasks[idx].ucode;
679                 info->type = PFM_ATTR_UMASK;
680                 info->is_dfl = amd64_uflag(this, pidx, idx, AMD64_FL_DFL);
681         } else {
682                 idx = amd64_attr2mod(this, pidx, attr_idx);
683                 info->name = amd64_mods[idx].name;
684                 info->desc = amd64_mods[idx].desc;
685                 info->type = amd64_mods[idx].type;
686                 info->code = idx;
687                 info->is_dfl = 0;
688         }
689         info->is_precise = 0;
690         info->equiv  = NULL;
691         info->ctrl   = PFM_ATTR_CTRL_PMU;
692         info->idx    = idx; /* namespace specific index */
693         info->dfl_val64 = 0;
694
695         return PFM_SUCCESS;
696 }
697
698 int
699 pfm_amd64_get_event_info(void *this, int idx, pfm_event_info_t *info)
700 {
701         pfmlib_pmu_t *pmu = this;
702         const amd64_entry_t *pe = this_pe(this);
703
704         info->name  = pe[idx].name;
705         info->desc  = pe[idx].desc;
706         info->equiv = NULL;
707         info->code  = pe[idx].code;
708         info->idx   = idx;
709         info->pmu   = pmu->pmu;
710
711         info->is_precise = 0;
712         info->nattrs  = amd64_num_umasks(this, idx);
713         info->nattrs += amd64_num_mods(this, idx);
714
715         return PFM_SUCCESS;
716 }
717
/*
 * pfm_amd64_validate_table - sanity-check the event table of this PMU.
 *
 * Verifies that every event has a name/description, that numasks/ngrp
 * fields are mutually consistent, that unit masks have valid group
 * ids, that default unit masks are well-formed, and that combinable
 * unit masks do not have overlapping code bits.
 *
 * One diagnostic per defect is written to @fp. Returns PFM_SUCCESS
 * when the table is consistent, PFM_ERR_INVAL otherwise.
 */
int
pfm_amd64_validate_table(void *this, FILE *fp)
{
	pfmlib_pmu_t *pmu = this;
	const amd64_entry_t *pe = this_pe(this);
	const char *name =  pmu->name;
	unsigned int j, k;
	int i, ndfl;
	int error = 0;

	if (!pmu->atdesc) {
		fprintf(fp, "pmu: %s missing attr_desc\n", pmu->name);
		error++;
	}

	if (!pmu->supported_plm && pmu->type == PFM_PMU_TYPE_CORE) {
		fprintf(fp, "pmu: %s supported_plm not set\n", pmu->name);
		error++;
	}

	for(i=0; i < pmu->pme_count; i++) {

		if (!pe[i].name) {
			fprintf(fp, "pmu: %s event%d: :: no name (prev event was %s)\n", pmu->name, i,
			i > 1 ? pe[i-1].name : "??");
			error++;
		}

		if (!pe[i].desc) {
			fprintf(fp, "pmu: %s event%d: %s :: no description\n", name, i, pe[i].name);
			error++;

		}
		/* numasks and umasks pointer must agree */
		if (pe[i].numasks && pe[i].umasks == NULL) {
			fprintf(fp, "pmu: %s event%d: %s :: numasks but no umasks\n", pmu->name, i, pe[i].name);
			error++;
		}

		if (pe[i].numasks == 0 && pe[i].umasks) {
			fprintf(fp, "pmu: %s event%d: %s :: numasks=0 but umasks defined\n", pmu->name, i, pe[i].name);
			error++;
		}

		/* ngrp must be non-zero iff the event has unit masks */
		if (pe[i].numasks && pe[i].ngrp == 0) {
			fprintf(fp, "pmu: %s event%d: %s :: ngrp cannot be zero\n", name, i, pe[i].name);
			error++;
		}

		if (pe[i].numasks == 0 && pe[i].ngrp) {
			fprintf(fp, "pmu: %s event%d: %s :: ngrp must be zero\n", name, i, pe[i].name);
			error++;
		}

		if (pe[i].ngrp >= AMD64_MAX_GRP) {
			fprintf(fp, "pmu: %s event%d: %s :: ngrp too big (max=%d)\n", name, i, pe[i].name, AMD64_MAX_GRP);
			error++;
		}

		/*
		 * NOTE(review): ndfl counts earlier umasks with identical
		 * uflags and grpid as a DFL umask (i.e. duplicate defaults),
		 * and is forced to 1 for single-umask events — confirm
		 * intended semantics against upstream libpfm4.
		 */
		for(ndfl = 0, j= 0; j < pe[i].numasks; j++) {

			if (!pe[i].umasks[j].uname) {
				fprintf(fp, "pmu: %s event%d: %s umask%d :: no name\n", pmu->name, i, pe[i].name, j);
				error++;
			}

			if (!pe[i].umasks[j].udesc) {
				fprintf(fp, "pmu: %s event%d:%s umask%d: %s :: no description\n", name, i, pe[i].name, j, pe[i].umasks[j].uname);
				error++;
			}

			if (pe[i].ngrp && pe[i].umasks[j].grpid >= pe[i].ngrp) {
				fprintf(fp, "pmu: %s event%d: %s umask%d: %s :: invalid grpid %d (must be < %d)\n", name, i, pe[i].name, j, pe[i].umasks[j].uname, pe[i].umasks[j].grpid, pe[i].ngrp);
				error++;
			}

			if (pe[i].umasks[j].uflags & AMD64_FL_DFL) {
				for(k=0; k < j; k++)
					if ((pe[i].umasks[k].uflags == pe[i].umasks[j].uflags)
					    && (pe[i].umasks[k].grpid == pe[i].umasks[j].grpid))
						ndfl++;
				if (pe[i].numasks == 1)
					ndfl = 1;
			}
		}

		if (pe[i].numasks > 1 && ndfl) {
			fprintf(fp, "pmu: %s event%d: %s :: more than one default unit mask with same code\n", name, i, pe[i].name);
			error++;
		}

		/* if only one umask, then ought to be default */
		if (pe[i].numasks == 1 && ndfl != 1) {
			fprintf(fp, "pmu: %s event%d: %s, only one umask but no default\n", pmu->name, i, pe[i].name);
			error++;
		}

		if (pe[i].flags & AMD64_FL_NCOMBO) {
			fprintf(fp, "pmu: %s event%d: %s :: NCOMBO is unit mask only flag\n", name, i, pe[i].name);
			error++;
		}

		/* combinable umasks must not share code bits */
		for(j=0; j < pe[i].numasks; j++) {

			if (pe[i].umasks[j].uflags & AMD64_FL_NCOMBO)
				continue;

			for(k=j+1; k < pe[i].numasks; k++) {
				if (pe[i].umasks[k].uflags & AMD64_FL_NCOMBO)
					continue;
				if ((pe[i].umasks[j].ucode &  pe[i].umasks[k].ucode)) {
					fprintf(fp, "pmu: %s event%d: %s :: umask %s and %s have overlapping code bits\n", name, i, pe[i].name, pe[i].umasks[j].uname, pe[i].umasks[k].uname);
					error++;
				}
			}
		}
	}
	return error ? PFM_ERR_INVAL : PFM_SUCCESS;
}
836
/*
 * pfm_amd64_get_event_nattrs - total attribute count for event @pidx:
 * revision-valid unit masks plus modifiers.
 */
unsigned int
pfm_amd64_get_event_nattrs(void *this, int pidx)
{
	return amd64_num_umasks(this, pidx) + amd64_num_mods(this, pidx);
}
845
846 int pfm_amd64_get_num_events(void *this)
847 {
848         pfmlib_pmu_t *pmu = this;
849         int i, num = 0;
850
851         /*
852          * count actual number of events for specific PMU.
853          * Table may contain more events for the family than
854          * what a specific model actually supports.
855          */
856         for (i = 0; i < pmu->pme_count; i++)
857                 if (amd64_event_valid(this, i))
858                         num++;
859         return num;
860 }