diff --git a/kern/arch/x86/devarch.c b/kern/arch/x86/devarch.c
index bef8725..2a45f6e 100644
--- a/kern/arch/x86/devarch.c
+++ b/kern/arch/x86/devarch.c
@@ -1,4 +1,4 @@
-/* 
+/*
  * This file is part of the UCB release of Plan 9. It is subject to the license
  * terms in the LICENSE file found in the top-level directory of this
  * distribution and at http://akaros.cs.berkeley.edu/files/Plan9License. No
@@ -7,6 +7,7 @@
  * in the LICENSE file.
  */
 
+#include <ros/memops.h>
 #include <vfs.h>
 #include <kmalloc.h>
 #include <kref.h>
 #include <string.h>
 #include <stdio.h>
 #include <assert.h>
-#include <error.h>
+#include <err.h>
 #include <pmap.h>
 #include <umem.h>
 #include <smp.h>
 #include <ip.h>
 #include <time.h>
+#include <bitops.h>
 #include <core_set.h>
 #include <address_range.h>
+#include <arch/ros/perfmon.h>
+#include <arch/topology.h>
+#include <arch/perfmon.h>
 #include <arch/ros/msr-index.h>
 #include <arch/msr.h>
 #include <arch/devarch.h>
 
 #define REAL_MEM_SIZE (1024 * 1024)
 
+struct perf_context {
+       struct perfmon_session *ps;
+       size_t resp_size;
+       uint8_t *resp;
+};
+
 struct io_map {
        struct io_map *next;
        int reserved;
@@ -53,6 +64,7 @@ enum {
        Qgdb,
        Qrealmem,
        Qmsr,
+       Qperf,
 
        Qmax,
 };
@@ -71,22 +83,24 @@ static struct dirtab archdir[Qmax] = {
        {"gdb", {Qgdb, 0}, 0, 0660},
        {"realmem", {Qrealmem, 0}, 0, 0444},
        {"msr", {Qmsr, 0}, 0, 0666},
+       {"perf", {Qperf, 0}, 0, 0666},
 };
 /* Whitelist entries need to be ordered by start address, and never overlap.
  */
-#define MAX_COUNTERS 16
-#define MAX_FIX_COUNTERS 4
+#define MSR_MAX_VAR_COUNTERS 16
+#define MSR_MAX_FIX_COUNTERS 4
 
-static const struct address_range msr_rd_wlist[] = {
+static struct address_range msr_rd_wlist[] = {
        ADDRESS_RANGE(0x00000000, 0xffffffff),
 };
-static const struct address_range msr_wr_wlist[] = {
-       ADDRESS_RANGE(MSR_IA32_PERFCTR0, MSR_IA32_PERFCTR0 + MAX_COUNTERS - 1),
+static struct address_range msr_wr_wlist[] = {
+       ADDRESS_RANGE(MSR_IA32_PERFCTR0,
+                                 MSR_IA32_PERFCTR0 + MSR_MAX_VAR_COUNTERS - 1),
        ADDRESS_RANGE(MSR_ARCH_PERFMON_EVENTSEL0,
-                                 MSR_ARCH_PERFMON_EVENTSEL0 + MAX_COUNTERS - 1),
+                                 MSR_ARCH_PERFMON_EVENTSEL0 + MSR_MAX_VAR_COUNTERS - 1),
        ADDRESS_RANGE(MSR_IA32_PERF_CTL, MSR_IA32_PERF_CTL),
        ADDRESS_RANGE(MSR_CORE_PERF_FIXED_CTR0,
-                                 MSR_CORE_PERF_FIXED_CTR0 + MAX_FIX_COUNTERS - 1),
+                                 MSR_CORE_PERF_FIXED_CTR0 + MSR_MAX_FIX_COUNTERS - 1),
        ADDRESS_RANGE(MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_CORE_PERF_GLOBAL_OVF_CTRL),
 };
 int gdbactive = 0;
@@ -186,8 +200,8 @@ int iounused(int start, int end)
        struct io_map *map;
 
        for (map = iomap.map; map; map = map->next) {
-               if (((start >= map->start) && (start < map->end))
-                       || ((start <= map->start) && (end > map->start)))
+               if (((start >= map->start) && (start < map->end)) ||
+                   ((start <= map->start) && (end > map->start)))
                        return 0;
        }
        return 1;
@@ -283,7 +297,7 @@ static void checkport(int start, int end)
 
        if (iounused(start, end))
                return;
-       error(EPERM, NULL);
+       error(EPERM, ERROR_FIXME);
 }
 
 static struct chan *archattach(char *spec)
@@ -304,13 +318,184 @@ static int archstat(struct chan *c, uint8_t * dp, int n)
        return devstat(c, dp, n, archdir, Qmax, devgen);
 }
 
+static struct perf_context *arch_create_perf_context(void)
+{
+       ERRSTACK(1);
+       struct perf_context *pc = kzmalloc(sizeof(struct perf_context),
+                                          MEM_WAIT);
+
+       if (waserror()) {
+               kfree(pc);
+               nexterror();
+       }
+       pc->ps = perfmon_create_session();
+       poperror();
+
+       return pc;
+}
+
+static void arch_free_perf_context(struct perf_context *pc)
+{
+       if (likely(pc)) {
+               perfmon_close_session(pc->ps);
+               kfree(pc->resp);
+               kfree(pc);
+       }
+}
+
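+/*
+ * Decode a core set from the command stream: a little-endian u32 byte count
+ * followed by that many bytes of core-ID bitmap. Returns the updated read
+ * pointer.
+ */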
+static const uint8_t *arch_read_core_set(struct core_set *cset,
+                                         const uint8_t *kptr,
+                                         const uint8_t *ktop)
+{
+       int i, nb;
+       uint32_t n;
+
+       error_assert(EBADMSG, (kptr + sizeof(uint32_t)) <= ktop);
+       kptr = get_le_u32(kptr, &n);
+       error_assert(EBADMSG, (kptr + n) <= ktop);
+       core_set_init(cset);
+       nb = MIN((int) n * 8, num_cores);
+       for (i = 0; i < nb; i++) {
+               if (test_bit(i, (const unsigned long *) kptr))
+                       core_set_setcpu(cset, i);
+       }
+
+       return kptr + n;
+}
+
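+/*
+ * Handle a command written to Qperf: the first byte selects a PERFMON_CMD_*
+ * operation and is followed by a command-specific little-endian payload. Any
+ * response is serialized into pc->resp/pc->resp_size and returned by a later
+ * read of the perf file. Returns the number of bytes consumed.
+ */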
+static long arch_perf_write(struct perf_context *pc, const void *udata,
+                            long usize)
+{
+       ERRSTACK(1);
+       void *kdata;
+       const uint8_t *kptr, *ktop;
+
+       kfree(pc->resp);
+       pc->resp = NULL;
+       pc->resp_size = 0;
+
+       kdata = user_memdup_errno(current, udata, usize);
+       if (unlikely(!kdata))
+               return -1;
+       if (waserror()) {
+               kfree(kdata);
+               nexterror();
+       }
+       kptr = kdata;
+       ktop = kptr + usize;
+       error_assert(EBADMSG, (kptr + 1) <= ktop);
+       switch (*kptr++) {
+               case PERFMON_CMD_COUNTER_OPEN: {
+                       int ped;
+                       struct perfmon_event pev;
+                       struct core_set cset;
+
+                       error_assert(EBADMSG, (kptr + 3 * sizeof(uint64_t)) <= ktop);
+                       perfmon_init_event(&pev);
+                       kptr = get_le_u64(kptr, &pev.event);
+                       kptr = get_le_u64(kptr, &pev.flags);
+                       kptr = get_le_u64(kptr, &pev.trigger_count);
+                       kptr = arch_read_core_set(&cset, kptr, ktop);
+
+                       ped = perfmon_open_event(&cset, pc->ps, &pev);
+
+                       pc->resp_size = sizeof(uint32_t);
+                       pc->resp = kmalloc(pc->resp_size, MEM_WAIT);
+                       put_le_u32(pc->resp, (uint32_t) ped);
+                       break;
+               }
+               case PERFMON_CMD_COUNTER_STATUS: {
+                       int i;
+                       uint32_t ped;
+                       uint8_t *rptr;
+                       uint64_t *mvalues;
+                       struct perfmon_status *pef;
+
+                       error_assert(EBADMSG, (kptr + sizeof(uint32_t)) <= ktop);
+                       kptr = get_le_u32(kptr, &ped);
+
+                       pef = perfmon_get_event_status(pc->ps, (int) ped);
+
+                       mvalues = kzmalloc(num_cores * sizeof(uint64_t),
+                                          MEM_WAIT);
+                       for (i = 0; i < num_cores; i++)
+                               mvalues[i] = pef->cores_values[i];
+
+                       pc->resp_size = 3 * sizeof(uint64_t) + sizeof(uint32_t) +
+                               num_cores * sizeof(uint64_t);
+                       pc->resp = kmalloc(pc->resp_size, MEM_WAIT);
+
+                       rptr = put_le_u64(pc->resp, pef->ev.event);
+                       rptr = put_le_u64(rptr, pef->ev.flags);
+                       rptr = put_le_u64(rptr, pef->ev.trigger_count);
+                       rptr = put_le_u32(rptr, num_cores);
+                       for (i = 0; i < num_cores; i++)
+                               rptr = put_le_u64(rptr, mvalues[i]);
+                       kfree(mvalues);
+                       perfmon_free_event_status(pef);
+                       break;
+               }
+               case PERFMON_CMD_COUNTER_CLOSE: {
+                       uint32_t ped;
+
+                       error_assert(EBADMSG, (kptr + sizeof(uint32_t)) <= ktop);
+                       kptr = get_le_u32(kptr, &ped);
+
+                       perfmon_close_event(pc->ps, (int) ped);
+                       break;
+               }
+               case PERFMON_CMD_CPU_CAPS: {
+                       uint8_t *rptr;
+                       struct perfmon_cpu_caps pcc;
+
+                       kptr++;
+                       perfmon_get_cpu_caps(&pcc);
+
+                       pc->resp_size = 6 * sizeof(uint32_t);
+                       pc->resp = kmalloc(pc->resp_size, MEM_WAIT);
+
+                       rptr = put_le_u32(pc->resp, pcc.perfmon_version);
+                       rptr = put_le_u32(rptr, pcc.proc_arch_events);
+                       rptr = put_le_u32(rptr, pcc.bits_x_counter);
+                       rptr = put_le_u32(rptr, pcc.counters_x_proc);
+                       rptr = put_le_u32(rptr, pcc.bits_x_fix_counter);
+                       rptr = put_le_u32(rptr, pcc.fix_counters_x_proc);
+                       break;
+               }
+               default:
+                       error(EINVAL, "Invalid perfmon command: 0x%x", kptr[-1]);
+       }
+       poperror();
+       kfree(kdata);
+
+       return (long) (kptr - (const uint8_t *) kdata);
+}
+
 static struct chan *archopen(struct chan *c, int omode)
 {
-       return devopen(c, omode, archdir, Qmax, devgen);
+       c = devopen(c, omode, archdir, Qmax, devgen);
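+       /* Qperf opens get a per-chan perf_context, freed in archclose(). */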
+       switch ((uint32_t) c->qid.path) {
+               case Qperf:
+                       if (!perfmon_supported())
+                               error(ENODEV, "perf is not supported");
+                       assert(!c->aux);
+                       c->aux = arch_create_perf_context();
+                       break;
+       }
+
+       return c;
 }
 
-static void archclose(struct chan *unused)
+static void archclose(struct chan *c)
 {
+       switch ((uint32_t) c->qid.path) {
+               case Qperf:
+                       if (c->aux) {
+                               arch_free_perf_context((struct perf_context *) c->aux);
+                               c->aux = NULL;
+                       }
+                       break;
+       }
 }
 
 static long archread(struct chan *c, void *a, long n, int64_t offset)
@@ -339,7 +524,7 @@ static long archread(struct chan *c, void *a, long n, int64_t offset)
                        return n;
                case Qiow:
                        if (n & 1)
-                               error(EINVAL, NULL);
+                               error(EINVAL, ERROR_FIXME);
                        checkport(offset, offset + n);
                        sp = a;
                        for (port = offset; port < offset + n; port += 2)
@@ -347,7 +532,7 @@ static long archread(struct chan *c, void *a, long n, int64_t offset)
                        return n;
                case Qiol:
                        if (n & 3)
-                               error(EINVAL, NULL);
+                               error(EINVAL, ERROR_FIXME);
                        checkport(offset, offset + n);
                        lp = a;
                        for (port = offset; port < offset + n; port += 4)
@@ -359,39 +544,61 @@ static long archread(struct chan *c, void *a, long n, int64_t offset)
                        return readmem(offset, a, n, KADDR(0), REAL_MEM_SIZE);
                case Qmsr:
                        if (!address_range_find(msr_rd_wlist, ARRAY_SIZE(msr_rd_wlist),
-                                                                       (uintptr_t) offset))
-                               error(EPERM, NULL);
+                                               (uintptr_t) offset))
+                               error(EPERM, "MSR 0x%x not in read whitelist", offset);
                        core_set_init(&cset);
                        core_set_fill_available(&cset);
-                       msr_init_address(&msra);
                        msr_set_address(&msra, (uint32_t) offset);
-                       msr_init_value(&msrv);
-                       values = kzmalloc(num_cores * sizeof(uint64_t), KMALLOC_WAIT);
+                       values = kzmalloc(num_cores * sizeof(uint64_t),
+                                         MEM_WAIT);
                        if (!values)
-                               error(ENOMEM, NULL);
+                               error(ENOMEM, ERROR_FIXME);
                        msr_set_values(&msrv, values, num_cores);
 
                        err = msr_cores_read(&cset, &msra, &msrv);
 
-                       n = -1;
                        if (likely(!err)) {
                                if (n >= num_cores * sizeof(uint64_t)) {
                                        if (!memcpy_to_user_errno(current, a, values,
-                                                                                         num_cores * sizeof(uint64_t)))
+                                                                 num_cores * sizeof(uint64_t)))
                                                n = num_cores * sizeof(uint64_t);
+                                       else
+                                               n = -1;
                                } else {
                                        kfree(values);
-                                       error(ERANGE, NULL);
+                                       error(ERANGE, "Not enough space for MSR read");
                                }
+                       } else {
+                               switch (-err) {
+                               case (EFAULT):
+                                       error(-err, "read_msr() faulted on MSR 0x%x", offset);
+                               case (ERANGE):
+                                       error(-err, "Not enough space for MSR read");
+                               }
+                               error(-err, "MSR read failed");
                        }
                        kfree(values);
                        return n;
+               case Qperf: {
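+                       /* Return the response built by the last command written
+                        * to this chan (if any), starting at the given offset. */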
+                       struct perf_context *pc = (struct perf_context *) c->aux;
+
+                       assert(pc);
+                       if (pc->resp && ((size_t) offset < pc->resp_size)) {
+                               n = MIN(n, (long) pc->resp_size - (long) offset);
+                               if (memcpy_to_user_errno(current, a, pc->resp + offset, n))
+                                       n = -1;
+                       } else {
+                               n = 0;
+                       }
+
+                       return n;
+               }
                default:
-                       error(EINVAL, NULL);
+                       error(EINVAL, ERROR_FIXME);
        }
 
        if ((buf = kzmalloc(n, 0)) == NULL)
-               error(ENOMEM, NULL);
+               error(ENOMEM, ERROR_FIXME);
        p = buf;
        n = n / Linelen;
        offset = offset / Linelen;
@@ -403,7 +610,7 @@ static long archread(struct chan *c, void *a, long n, int64_t offset)
                                if (offset-- > 0)
                                        continue;
                                snprintf(p, n * Linelen, "%#8p %#8p %-12.12s\n", map->start,
-                                                map->end - 1, map->tag);
+                                        map->end - 1, map->tag);
                                p += Linelen;
                                n--;
                        }
@@ -449,7 +656,7 @@ static long archwrite(struct chan *c, void *a, long n, int64_t offset)
                        return n;
                case Qiow:
                        if (n & 1)
-                               error(EINVAL, NULL);
+                               error(EINVAL, ERROR_FIXME);
                        checkport(offset, offset + n);
                        sp = a;
                        for (port = offset; port < offset + n; port += 2)
@@ -457,7 +664,7 @@ static long archwrite(struct chan *c, void *a, long n, int64_t offset)
                        return n;
                case Qiol:
                        if (n & 3)
-                               error(EINVAL, NULL);
+                               error(EINVAL, ERROR_FIXME);
                        checkport(offset, offset + n);
                        lp = a;
                        for (port = offset; port < offset + n; port += 4)
@@ -465,35 +672,57 @@ static long archwrite(struct chan *c, void *a, long n, int64_t offset)
                        return n;
                case Qmsr:
                        if (!address_range_find(msr_wr_wlist, ARRAY_SIZE(msr_wr_wlist),
-                                                                       (uintptr_t) offset))
-                               error(EPERM, NULL);
+                                               (uintptr_t) offset))
+                               error(EPERM, "MSR 0x%x not in write whitelist", offset);
                        if (n != sizeof(uint64_t))
-                               error(EINVAL, NULL);
+                               error(EINVAL, "Tried to write more than a u64 (%p)", n);
                        if (memcpy_from_user_errno(current, &value, a, sizeof(value)))
                                return -1;
 
                        core_set_init(&cset);
                        core_set_fill_available(&cset);
-                       msr_init_address(&msra);
                        msr_set_address(&msra, (uint32_t) offset);
-                       msr_init_value(&msrv);
                        msr_set_value(&msrv, value);
 
                        err = msr_cores_write(&cset, &msra, &msrv);
-                       if (unlikely(err))
-                               error(-err, NULL);
+                       if (unlikely(err)) {
+                               switch (-err) {
+                               case (EFAULT):
+                                       error(-err, "write_msr() faulted on MSR 0x%x", offset);
+                               case (ERANGE):
+                                       error(-err, "Not enough space for MSR write");
+                               }
+                               error(-err, "MSR write failed");
+                       }
                        return sizeof(uint64_t);
+               case Qperf: {
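+                       /* Perf commands arrive as a single binary blob; see
+                        * arch_perf_write() for the wire format. */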
+                       struct perf_context *pc = (struct perf_context *) c->aux;
+
+                       assert(pc);
+
+                       return arch_perf_write(pc, a, n);
+               }
                default:
-                       error(EINVAL, NULL);
+                       error(EINVAL, ERROR_FIXME);
        }
        return 0;
 }
 
+static void archinit(void)
+{
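+       /* The MSR whitelists must be ordered and non-overlapping (see the
+        * comment above msr_rd_wlist); address_range_init() is expected to
+        * validate (and possibly sort) them at boot. */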
+       int ret;
+
+       ret = address_range_init(msr_rd_wlist, ARRAY_SIZE(msr_rd_wlist));
+       assert(!ret);
+       ret = address_range_init(msr_wr_wlist, ARRAY_SIZE(msr_wr_wlist));
+       assert(!ret);
+}
+
 struct dev archdevtab __devtab = {
        .name = "arch",
 
        .reset = devreset,
-       .init = devinit,
+       .init = archinit,
        .shutdown = devshutdown,
        .attach = archattach,
        .walk = archwalk,