Remove the BUILD_INFO_FILE variable
diff --git a/kern/arch/x86/devarch.c b/kern/arch/x86/devarch.c
index 777133c..2a45f6e 100644
--- a/kern/arch/x86/devarch.c
+++ b/kern/arch/x86/devarch.c
@@ -1,4 +1,4 @@
-/* 
+/*
  * This file is part of the UCB release of Plan 9. It is subject to the license
  * terms in the LICENSE file found in the top-level directory of this
  * distribution and at http://akaros.cs.berkeley.edu/files/Plan9License. No
@@ -7,31 +7,40 @@
  * in the LICENSE file.
  */
 
+#include <ros/memops.h>
 #include <vfs.h>
-#include <kfs.h>
-#include <slab.h>
 #include <kmalloc.h>
 #include <kref.h>
+#include <kthread.h>
 #include <string.h>
 #include <stdio.h>
 #include <assert.h>
-#include <error.h>
-#include <cpio.h>
+#include <err.h>
 #include <pmap.h>
+#include <umem.h>
 #include <smp.h>
 #include <ip.h>
 #include <time.h>
+#include <bitops.h>
+#include <core_set.h>
+#include <address_range.h>
+#include <arch/ros/perfmon.h>
+#include <arch/topology.h>
+#include <arch/perfmon.h>
+#include <arch/ros/msr-index.h>
+#include <arch/msr.h>
+#include <arch/devarch.h>
+
+#define REAL_MEM_SIZE (1024 * 1024)
+
+struct perf_context {
+       struct perfmon_session *ps;
+       size_t resp_size;
+       uint8_t *resp;
+};
 
-struct dev archdevtab;
-
-static char *devname(void)
-{
-       return archdevtab.name;
-}
-
-typedef struct IOMap IOMap;
-struct IOMap {
-       IOMap *next;
+struct io_map {
+       struct io_map *next;
        int reserved;
        char tag[13];
        uint32_t start;
@@ -40,10 +49,9 @@ struct IOMap {
 
 static struct {
        spinlock_t lock;
-       IOMap *map;
-       IOMap *free;
-       IOMap maps[32];                         // some initial free maps
-
+       struct io_map *map;
+       struct io_map *free;
+       struct io_map maps[32];                         // some initial free maps
        qlock_t ql;                                     // lock for reading map
 } iomap;
 
@@ -54,18 +62,18 @@ enum {
        Qiow,
        Qiol,
        Qgdb,
-       Qbase,
-       Qmapram,
        Qrealmem,
+       Qmsr,
+       Qperf,
 
-       Qmax = 16,
+       Qmax,
 };
 
-typedef long Rdwrfn(struct chan *, void *, long, int64_t);
-
-static Rdwrfn *readfn[Qmax];
-static Rdwrfn *writefn[Qmax];
+enum {
+       Linelen = 31,
+};
 
+struct dev archdevtab;
 static struct dirtab archdir[Qmax] = {
        {".", {Qdir, 0, QTDIR}, 0, 0555},
        {"ioalloc", {Qioalloc, 0}, 0, 0444},
@@ -73,135 +81,29 @@ static struct dirtab archdir[Qmax] = {
        {"iow", {Qiow, 0}, 0, 0666},
        {"iol", {Qiol, 0}, 0, 0666},
        {"gdb", {Qgdb, 0}, 0, 0660},
-       {"mapram", {Qmapram, 0}, 0, 0444},
-       {"realmodemem", {Qrealmem, 0}, 0, 0660},
+       {"realmem", {Qrealmem, 0}, 0, 0444},
+       {"msr", {Qmsr, 0}, 0, 0666},
+       {"perf", {Qperf, 0}, 0, 0666},
 };
-
-spinlock_t archwlock;                  /* the lock is only for changing archdir */
-int narchdir = Qbase;
-int gdbactive = 0;
-
-/* If we use these, put this in a header */
-int ioalloc(int port, int size, int align, char *tag);
-
-/*
- * Add a file to the #P listing.  Once added, you can't delete it.
- * You can't add a file with the same name as one already there,
- * and you get a pointer to the Dirtab entry so you can do things
- * like change the Qid version.  Changing the Qid path is disallowed.
+/* Whitelist entries need to be ordered by start address and must never overlap.
  */
-struct dirtab *addarchfile(char *name, int perm, Rdwrfn * rdfn, Rdwrfn * wrfn)
-{
-       int i;
-       struct dirtab d;
-       struct dirtab *dp;
-
-       memset(&d, 0, sizeof d);
-       strncpy(d.name, name, sizeof(d.name));
-       d.perm = perm;
-
-       spin_lock(&archwlock);
-       if (narchdir >= Qmax) {
-               spin_unlock(&archwlock);
-               return NULL;
-       }
-
-       for (i = 0; i < narchdir; i++)
-               if (strcmp(archdir[i].name, name) == 0) {
-                       spin_unlock(&archwlock);
-                       return NULL;
-               }
-
-       d.qid.path = narchdir;
-       archdir[narchdir] = d;
-       readfn[narchdir] = rdfn;
-       writefn[narchdir] = wrfn;
-       dp = &archdir[narchdir++];
-       spin_unlock(&archwlock);
-
-       return dp;
-}
-
-void ioinit(void)
-{
-       int i;
-       char *excluded = "";
-
-       panic("Akaros doesn't do IO port allocation yet.  Don't init.");
-       for (i = 0; i < ARRAY_SIZE(iomap.maps) - 1; i++)
-               iomap.maps[i].next = &iomap.maps[i + 1];
-       iomap.maps[i].next = NULL;
-       iomap.free = iomap.maps;
-       char *s;
-
-       s = excluded;
-       while (s && *s != '\0' && *s != '\n') {
-               char *ends;
-               int io_s, io_e;
+#define MSR_MAX_VAR_COUNTERS 16
+#define MSR_MAX_FIX_COUNTERS 4
 
-               io_s = (int)strtol(s, &ends, 0);
-               if (ends == NULL || ends == s || *ends != '-') {
-                       printd("ioinit: cannot parse option string\n");
-                       break;
-               }
-               s = ++ends;
-
-               io_e = (int)strtol(s, &ends, 0);
-               if (ends && *ends == ',')
-                       *ends++ = '\0';
-               s = ends;
-
-               ioalloc(io_s, io_e - io_s + 1, 0, "pre-allocated");
-       }
-}
-
-// Reserve a range to be ioalloced later.
-// This is in particular useful for exchangable cards, such
-// as pcmcia and cardbus cards.
-int ioreserve(int unused_int, int size, int align, char *tag)
-{
-       IOMap *map, **l;
-       int i, port;
-
-       spin_lock(&(&iomap)->lock);
-       // find a free port above 0x400 and below 0x1000
-       port = 0x400;
-       for (l = &iomap.map; *l; l = &(*l)->next) {
-               map = *l;
-               if (map->start < 0x400)
-                       continue;
-               i = map->start - port;
-               if (i > size)
-                       break;
-               if (align > 0)
-                       port = ((port + align - 1) / align) * align;
-               else
-                       port = map->end;
-       }
-       if (*l == NULL) {
-               spin_unlock(&(&iomap)->lock);
-               return -1;
-       }
-       map = iomap.free;
-       if (map == NULL) {
-               printd("ioalloc: out of maps");
-               spin_unlock(&(&iomap)->lock);
-               return port;
-       }
-       iomap.free = map->next;
-       map->next = *l;
-       map->start = port;
-       map->end = port + size;
-       map->reserved = 1;
-       strncpy(map->tag, tag, sizeof(map->tag));
-       map->tag[sizeof(map->tag) - 1] = 0;
-       *l = map;
-
-       archdir[0].qid.vers++;
-
-       spin_unlock(&(&iomap)->lock);
-       return map->start;
-}
+static struct address_range msr_rd_wlist[] = {
+       ADDRESS_RANGE(0x00000000, 0xffffffff),
+};
+static struct address_range msr_wr_wlist[] = {
+       ADDRESS_RANGE(MSR_IA32_PERFCTR0,
+                                 MSR_IA32_PERFCTR0 + MSR_MAX_VAR_COUNTERS - 1),
+       ADDRESS_RANGE(MSR_ARCH_PERFMON_EVENTSEL0,
+                                 MSR_ARCH_PERFMON_EVENTSEL0 + MSR_MAX_VAR_COUNTERS - 1),
+       ADDRESS_RANGE(MSR_IA32_PERF_CTL, MSR_IA32_PERF_CTL),
+       ADDRESS_RANGE(MSR_CORE_PERF_FIXED_CTR0,
+                                 MSR_CORE_PERF_FIXED_CTR0 + MSR_MAX_FIX_COUNTERS - 1),
+       ADDRESS_RANGE(MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_CORE_PERF_GLOBAL_OVF_CTRL),
+};
+int gdbactive = 0;
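+
The read whitelist above covers the entire 32-bit MSR space, while the write whitelist is limited to the performance counters and their control registers. address_range_init() and address_range_find() themselves are not part of this patch; given the requirement that entries be sorted by start address and never overlap, the lookup can be a plain binary search over inclusive ranges, roughly like the sketch below (the struct and function names here are illustrative stand-ins, not the kernel's).

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Illustrative stand-in for struct address_range: inclusive [start, end],
 * as suggested by the ADDRESS_RANGE(lo, hi) initializers above. */
struct range {
	uintptr_t start;
	uintptr_t end;
};

/* Binary search over a table sorted by start address with no overlaps. */
static bool range_lookup(const struct range *tab, size_t count, uintptr_t addr)
{
	size_t lo = 0, hi = count;

	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;

		if (addr < tab[mid].start)
			hi = mid;
		else if (addr > tab[mid].end)
			lo = mid + 1;
		else
			return true;	/* addr falls inside tab[mid] */
	}
	return false;
}
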
 
 //
 //  alloc some io port space and remember who it was
@@ -209,7 +111,7 @@ int ioreserve(int unused_int, int size, int align, char *tag)
 //
 int ioalloc(int port, int size, int align, char *tag)
 {
-       IOMap *map, **l;
+       struct io_map *map, **l;
        int i;
 
        spin_lock(&(&iomap)->lock);
@@ -264,8 +166,7 @@ int ioalloc(int port, int size, int align, char *tag)
        map->next = *l;
        map->start = port;
        map->end = port + size;
-       strncpy(map->tag, tag, sizeof(map->tag));
-       map->tag[sizeof(map->tag) - 1] = 0;
+       strlcpy(map->tag, tag, sizeof(map->tag));
        *l = map;
 
        archdir[0].qid.vers++;
@@ -276,7 +177,7 @@ int ioalloc(int port, int size, int align, char *tag)
 
 void iofree(int port)
 {
-       IOMap *map, **l;
+       struct io_map *map, **l;
 
        spin_lock(&(&iomap)->lock);
        for (l = &iomap.map; *l; l = &(*l)->next) {
@@ -296,16 +197,96 @@ void iofree(int port)
 
 int iounused(int start, int end)
 {
-       IOMap *map;
+       struct io_map *map;
 
        for (map = iomap.map; map; map = map->next) {
-               if (((start >= map->start) && (start < map->end))
-                       || ((start <= map->start) && (end > map->start)))
+               if (((start >= map->start) && (start < map->end)) ||
+                   ((start <= map->start) && (end > map->start)))
                        return 0;
        }
        return 1;
 }
 
+void ioinit(void)
+{
+       int i;
+       char *excluded = "";
+
+       panic("Akaros doesn't do IO port allocation yet.  Don't init.");
+       for (i = 0; i < ARRAY_SIZE(iomap.maps) - 1; i++)
+               iomap.maps[i].next = &iomap.maps[i + 1];
+       iomap.maps[i].next = NULL;
+       iomap.free = iomap.maps;
+       char *s;
+
+       s = excluded;
+       while (s && *s != '\0' && *s != '\n') {
+               char *ends;
+               int io_s, io_e;
+
+               io_s = (int)strtol(s, &ends, 0);
+               if (ends == NULL || ends == s || *ends != '-') {
+                       printd("ioinit: cannot parse option string\n");
+                       break;
+               }
+               s = ++ends;
+
+               io_e = (int)strtol(s, &ends, 0);
+               if (ends && *ends == ',')
+                       *ends++ = '\0';
+               s = ends;
+
+               ioalloc(io_s, io_e - io_s + 1, 0, "pre-allocated");
+       }
+}
+
+// Reserve a range to be ioalloced later.
+// This is particularly useful for exchangeable cards, such
+// as pcmcia and cardbus cards.
+int ioreserve(int unused_int, int size, int align, char *tag)
+{
+       struct io_map *map, **l;
+       int i, port;
+
+       spin_lock(&(&iomap)->lock);
+       // find a free port above 0x400 and below 0x1000
+       port = 0x400;
+       for (l = &iomap.map; *l; l = &(*l)->next) {
+               map = *l;
+               if (map->start < 0x400)
+                       continue;
+               i = map->start - port;
+               if (i > size)
+                       break;
+               if (align > 0)
+                       port = ((port + align - 1) / align) * align;
+               else
+                       port = map->end;
+       }
+       if (*l == NULL) {
+               spin_unlock(&(&iomap)->lock);
+               return -1;
+       }
+       map = iomap.free;
+       if (map == NULL) {
+               printd("ioalloc: out of maps");
+               spin_unlock(&(&iomap)->lock);
+               return port;
+       }
+       iomap.free = map->next;
+       map->next = *l;
+       map->start = port;
+       map->end = port + size;
+       map->reserved = 1;
+       strlcpy(map->tag, tag, sizeof(map->tag));
+       *l = map;
+
+       archdir[0].qid.vers++;
+
+       spin_unlock(&(&iomap)->lock);
+       return map->start;
+}
+
 static void checkport(int start, int end)
 {
        /* standard vga regs are OK */
@@ -316,52 +297,222 @@ static void checkport(int start, int end)
 
        if (iounused(start, end))
                return;
-       error(EPERM, NULL);
+       error(EPERM, ERROR_FIXME);
 }
 
 static struct chan *archattach(char *spec)
 {
-       return devattach(devname(), spec);
+       return devattach(archdevtab.name, spec);
 }
 
 struct walkqid *archwalk(struct chan *c, struct chan *nc, char **name,
                                                 int nname)
 {
-       return devwalk(c, nc, name, nname, archdir, narchdir, devgen);
+       return devwalk(c, nc, name, nname, archdir, Qmax, devgen);
 }
 
 static int archstat(struct chan *c, uint8_t * dp, int n)
 {
-       return devstat(c, dp, n, archdir, narchdir, devgen);
+       archdir[Qrealmem].length = REAL_MEM_SIZE;
+
+       return devstat(c, dp, n, archdir, Qmax, devgen);
 }
 
-static struct chan *archopen(struct chan *c, int omode)
+static struct perf_context *arch_create_perf_context(void)
+{
+       ERRSTACK(1);
+       struct perf_context *pc = kzmalloc(sizeof(struct perf_context),
+                                          MEM_WAIT);
+
+       if (waserror()) {
+               kfree(pc);
+               nexterror();
+       }
+       pc->ps = perfmon_create_session();
+       poperror();
+
+       return pc;
+}
+
+static void arch_free_perf_context(struct perf_context *pc)
 {
-       return devopen(c, omode, archdir, narchdir, devgen);
+       if (likely(pc)) {
+               perfmon_close_session(pc->ps);
+               kfree(pc->resp);
+               kfree(pc);
+       }
 }
 
-static void archclose(struct chan *unused)
+static const uint8_t *arch_read_core_set(struct core_set *cset,
+                                         const uint8_t *kptr,
+                                         const uint8_t *ktop)
 {
+       int i, nb;
+       uint32_t n;
+
+       error_assert(EBADMSG, (kptr + sizeof(uint32_t)) <= ktop);
+       kptr = get_le_u32(kptr, &n);
+       error_assert(EBADMSG, (kptr + n) <= ktop);
+       core_set_init(cset);
+       nb = MIN((int) n * 8, num_cores);
+       for (i = 0; i < nb; i++) {
+               if (test_bit(i, (const unsigned long *) kptr))
+                       core_set_setcpu(cset, i);
+       }
+
+       return kptr + n;
 }
 
-enum {
-       Linelen = 31,
-};
+static long arch_perf_write(struct perf_context *pc, const void *udata,
+                            long usize)
+{
+       ERRSTACK(1);
+       void *kdata;
+       const uint8_t *kptr, *ktop;
+
+       kfree(pc->resp);
+       pc->resp = NULL;
+       pc->resp_size = 0;
+
+       kdata = user_memdup_errno(current, udata, usize);
+       if (unlikely(!kdata))
+               return -1;
+       if (waserror()) {
+               kfree(kdata);
+               nexterror();
+       }
+       kptr = kdata;
+       ktop = kptr + usize;
+       error_assert(EBADMSG, (kptr + 1) <= ktop);
+       switch (*kptr++) {
+               case PERFMON_CMD_COUNTER_OPEN: {
+                       int ped;
+                       struct perfmon_event pev;
+                       struct core_set cset;
+
+                       error_assert(EBADMSG, (kptr + 3 * sizeof(uint64_t)) <= ktop);
+                       perfmon_init_event(&pev);
+                       kptr = get_le_u64(kptr, &pev.event);
+                       kptr = get_le_u64(kptr, &pev.flags);
+                       kptr = get_le_u64(kptr, &pev.trigger_count);
+                       kptr = arch_read_core_set(&cset, kptr, ktop);
+
+                       ped = perfmon_open_event(&cset, pc->ps, &pev);
+
+                       pc->resp_size = sizeof(uint32_t);
+                       pc->resp = kmalloc(pc->resp_size, MEM_WAIT);
+                       put_le_u32(pc->resp, (uint32_t) ped);
+                       break;
+               }
+               case PERFMON_CMD_COUNTER_STATUS: {
+                       int i;
+                       uint32_t ped;
+                       uint8_t *rptr;
+                       uint64_t *mvalues;
+                       struct perfmon_status *pef;
+
+                       error_assert(EBADMSG, (kptr + sizeof(uint32_t)) <= ktop);
+                       kptr = get_le_u32(kptr, &ped);
+
+                       pef = perfmon_get_event_status(pc->ps, (int) ped);
+
+                       mvalues = kzmalloc(num_cores * sizeof(mvalues[0]),
+                                          MEM_WAIT);
+                       for (i = 0; i < num_cores; i++)
+                               mvalues[i] = pef->cores_values[i];
+
+                       pc->resp_size = 3 * sizeof(uint64_t) + sizeof(uint32_t) +
+                               num_cores * sizeof(uint64_t);
+                       pc->resp = kmalloc(pc->resp_size, MEM_WAIT);
+
+                       rptr = put_le_u64(pc->resp, pef->ev.event);
+                       rptr = put_le_u64(rptr, pef->ev.flags);
+                       rptr = put_le_u64(rptr, pef->ev.trigger_count);
+                       rptr = put_le_u32(rptr, num_cores);
+                       for (i = 0; i < num_cores; i++)
+                               rptr = put_le_u64(rptr, mvalues[i]);
+                       kfree(mvalues);
+                       perfmon_free_event_status(pef);
+                       break;
+               }
+               case PERFMON_CMD_COUNTER_CLOSE: {
+                       uint32_t ped;
+
+                       error_assert(EBADMSG, (kptr + sizeof(uint32_t)) <= ktop);
+                       kptr = get_le_u32(kptr, &ped);
+
+                       perfmon_close_event(pc->ps, (int) ped);
+                       break;
+               }
+               case PERFMON_CMD_CPU_CAPS: {
+                       uint8_t *rptr;
+                       struct perfmon_cpu_caps pcc;
+
+                       kptr++;
+                       perfmon_get_cpu_caps(&pcc);
+
+                       pc->resp_size = 6 * sizeof(uint32_t);
+                       pc->resp = kmalloc(pc->resp_size, MEM_WAIT);
+
+                       rptr = put_le_u32(pc->resp, pcc.perfmon_version);
+                       rptr = put_le_u32(rptr, pcc.proc_arch_events);
+                       rptr = put_le_u32(rptr, pcc.bits_x_counter);
+                       rptr = put_le_u32(rptr, pcc.counters_x_proc);
+                       rptr = put_le_u32(rptr, pcc.bits_x_fix_counter);
+                       rptr = put_le_u32(rptr, pcc.fix_counters_x_proc);
+                       break;
+               }
+               default:
+                       error(EINVAL, "Invalid perfmon command: 0x%x", kptr[-1]);
+       }
+       poperror();
+       kfree(kdata);
+
+       return (long) (kptr - (const uint8_t *) kdata);
+}
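+
arch_perf_write() consumes a little-endian byte stream: one command byte, then command-specific fields, with core sets encoded as a 32-bit byte count followed by a bitmap in which bit i selects core i (matching test_bit() on x86). A userspace caller could build the PERFMON_CMD_COUNTER_OPEN request as in the sketch below; the PERFMON_CMD_* values come from the shared perfmon header (the userspace include path is an assumption), perf_fd is assumed to be an already-open descriptor on the perf file, and the helper names are mine, not the kernel's.

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <arch/ros/perfmon.h>	/* PERFMON_CMD_*; userspace include path assumed */

/* Append 'bytes' little-endian bytes of 'v'; mirrors the kernel's put_le_*(). */
static uint8_t *emit_le(uint8_t *p, uint64_t v, int bytes)
{
	for (int i = 0; i < bytes; i++)
		*p++ = (v >> (i * 8)) & 0xff;
	return p;
}

/* Open a counter on cores [0, ncores); returns the kernel's descriptor. */
static int perf_counter_open(int perf_fd, uint64_t event, uint64_t flags,
			     uint64_t trigger_count, int ncores)
{
	uint8_t msg[1 + 3 * 8 + 4 + 128], resp[4], *p = msg;
	uint32_t nbytes = (ncores + 7) / 8;	/* core bitmap length in bytes */

	if (nbytes > 128)
		return -1;
	memset(msg, 0, sizeof(msg));
	*p++ = PERFMON_CMD_COUNTER_OPEN;
	p = emit_le(p, event, 8);
	p = emit_le(p, flags, 8);
	p = emit_le(p, trigger_count, 8);
	p = emit_le(p, nbytes, 4);
	for (int i = 0; i < ncores; i++)
		p[i / 8] |= 1 << (i % 8);	/* bit i selects core i */
	p += nbytes;
	if (pwrite(perf_fd, msg, p - msg, 0) != p - msg)
		return -1;
	/* The reply is the event descriptor as a little-endian uint32_t. */
	if (pread(perf_fd, resp, sizeof(resp), 0) != sizeof(resp))
		return -1;
	return (int)(resp[0] | resp[1] << 8 | resp[2] << 16 |
		     (uint32_t)resp[3] << 24);
}

A PERFMON_CMD_COUNTER_CLOSE request uses the same framing with only the 32-bit descriptor appended after the command byte.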
+
+static struct chan *archopen(struct chan *c, int omode)
+{
+       c = devopen(c, omode, archdir, Qmax, devgen);
+       switch ((uint32_t) c->qid.path) {
+               case Qperf:
+                       if (!perfmon_supported())
+                               error(ENODEV, "perf is not supported");
+                       assert(!c->aux);
+                       c->aux = arch_create_perf_context();
+                       break;
+       }
+
+       return c;
+}
+
+static void archclose(struct chan *c)
+{
+       switch ((uint32_t) c->qid.path) {
+               case Qperf:
+                       if (c->aux) {
+                               arch_free_perf_context((struct perf_context *) c->aux);
+                               c->aux = NULL;
+                       }
+                       break;
+       }
+}
 
 static long archread(struct chan *c, void *a, long n, int64_t offset)
 {
        char *buf, *p;
-       int port;
+       int err, port;
+       uint64_t *values;
        uint16_t *sp;
        uint32_t *lp;
-       IOMap *map;
-       Rdwrfn *fn;
+       struct io_map *map;
+       struct core_set cset;
+       struct msr_address msra;
+       struct msr_value msrv;
 
        switch ((uint32_t) c->qid.path) {
-
                case Qdir:
-                       return devdirread(c, a, n, archdir, narchdir, devgen);
-
+                       return devdirread(c, a, n, archdir, Qmax, devgen);
                case Qgdb:
                        p = gdbactive ? "1" : "0";
                        return readstr(offset, a, n, p);
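
The PERFMON_CMD_COUNTER_STATUS branch above answers with the event's event, flags, and trigger_count as three little-endian 64-bit values, a little-endian 32-bit core count, and then one 64-bit counter value per core. A reader on the other side of the perf file could decode that reply roughly as follows (a sketch; the helper and function names are mine, not the kernel's):

#include <stddef.h>
#include <stdint.h>

/* Consume 'bytes' little-endian bytes into *v; mirrors the kernel's get_le_*(). */
static const uint8_t *take_le(const uint8_t *p, uint64_t *v, int bytes)
{
	*v = 0;
	for (int i = 0; i < bytes; i++)
		*v |= (uint64_t)*p++ << (i * 8);
	return p;
}

/* Decode a COUNTER_STATUS reply into vals[]; returns the core count or -1. */
static int perf_status_decode(const uint8_t *resp, size_t len,
			      uint64_t *vals, int max_vals)
{
	uint64_t event, flags, trigger, ncores;
	const uint8_t *p = resp;

	if (len < 3 * 8 + 4)
		return -1;
	p = take_le(p, &event, 8);
	p = take_le(p, &flags, 8);
	p = take_le(p, &trigger, 8);
	p = take_le(p, &ncores, 4);
	(void)event; (void)flags; (void)trigger;	/* not used by this sketch */
	if (ncores > (uint64_t)max_vals || len < 3 * 8 + 4 + ncores * 8)
		return -1;
	for (uint64_t i = 0; i < ncores; i++)
		p = take_le(p, &vals[i], 8);
	return (int)ncores;
}
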
@@ -371,37 +522,83 @@ static long archread(struct chan *c, void *a, long n, int64_t offset)
                        for (p = a; port < offset + n; port++)
                                *p++ = inb(port);
                        return n;
-
                case Qiow:
                        if (n & 1)
-                               error(EINVAL, NULL);
+                               error(EINVAL, ERROR_FIXME);
                        checkport(offset, offset + n);
                        sp = a;
                        for (port = offset; port < offset + n; port += 2)
                                *sp++ = inw(port);
                        return n;
-
                case Qiol:
                        if (n & 3)
-                               error(EINVAL, NULL);
+                               error(EINVAL, ERROR_FIXME);
                        checkport(offset, offset + n);
                        lp = a;
                        for (port = offset; port < offset + n; port += 4)
                                *lp++ = inl(port);
                        return n;
-
                case Qioalloc:
                        break;
+               case Qrealmem:
+                       return readmem(offset, a, n, KADDR(0), REAL_MEM_SIZE);
+               case Qmsr:
+                       if (!address_range_find(msr_rd_wlist, ARRAY_SIZE(msr_rd_wlist),
+                                               (uintptr_t) offset))
+                               error(EPERM, "MSR 0x%x not in read whitelist", offset);
+                       core_set_init(&cset);
+                       core_set_fill_available(&cset);
+                       msr_set_address(&msra, (uint32_t) offset);
+                       values = kzmalloc(num_cores * sizeof(uint64_t),
+                                         MEM_WAIT);
+                       if (!values)
+                               error(ENOMEM, ERROR_FIXME);
+                       msr_set_values(&msrv, values, num_cores);
+
+                       err = msr_cores_read(&cset, &msra, &msrv);
+
+                       if (likely(!err)) {
+                               if (n >= num_cores * sizeof(uint64_t)) {
+                                       if (!memcpy_to_user_errno(current, a, values,
+                                                                 num_cores * sizeof(uint64_t)))
+                                               n = num_cores * sizeof(uint64_t);
+                                       else
+                                               n = -1;
+                               } else {
+                                       kfree(values);
+                                       error(ERANGE, "Not enough space for MSR read");
+                               }
+                       } else {
+                               switch (-err) {
+                               case (EFAULT):
+                                       error(-err, "read_msr() faulted on MSR 0x%x", offset);
+                               case (ERANGE):
+                                       error(-err, "Not enough space for MSR read");
+                               };
+                               error(-err, "MSR read failed");
+                       }
+                       kfree(values);
+                       return n;
+               case Qperf: {
+                       struct perf_context *pc = (struct perf_context *) c->aux;
+
+                       assert(pc);
+                       if (pc->resp && ((size_t) offset < pc->resp_size)) {
+                               n = MIN(n, (long) pc->resp_size - (long) offset);
+                               if (memcpy_to_user_errno(current, a, pc->resp + offset, n))
+                                       n = -1;
+                       } else {
+                               n = 0;
+                       }
 
+                       return n;
+               }
                default:
-                       if (c->qid.path < narchdir && (fn = readfn[c->qid.path]))
-                               return fn(c, a, n, offset);
-                       error(EPERM, NULL);
-                       break;
+                       error(EINVAL, ERROR_FIXME);
        }
 
        if ((buf = kzmalloc(n, 0)) == NULL)
-               error(ENOMEM, NULL);
+               error(ENOMEM, ERROR_FIXME);
        p = buf;
        n = n / Linelen;
        offset = offset / Linelen;
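
For the Qmsr read case above, the MSR number is the file offset and the kernel replies with one host-endian uint64_t per core, rejecting buffers smaller than num_cores * 8 bytes with ERANGE. A userspace sketch follows; the msr file path and the way the core count is obtained are assumptions, not part of this patch.

#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>

/* Read 'msr' on every core; returns a malloc'ed array of ncores values. */
static uint64_t *msr_read_all(const char *msr_path, uint32_t msr, int ncores)
{
	size_t len = ncores * sizeof(uint64_t);
	uint64_t *vals = malloc(len);
	int fd = open(msr_path, O_RDONLY);

	if (!vals || fd < 0)
		goto fail;
	/* archread() takes the MSR number from the offset. */
	if (pread(fd, vals, len, msr) != (ssize_t)len)
		goto fail;
	close(fd);
	return vals;
fail:
	if (fd >= 0)
		close(fd);
	free(vals);
	return NULL;
}
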
@@ -413,15 +610,12 @@ static long archread(struct chan *c, void *a, long n, int64_t offset)
                                if (offset-- > 0)
                                        continue;
                                snprintf(p, n * Linelen, "%#8p %#8p %-12.12s\n", map->start,
-                                                map->end - 1, map->tag);
+                                        map->end - 1, map->tag);
                                p += Linelen;
                                n--;
                        }
                        spin_unlock(&(&iomap)->lock);
                        break;
-               case Qmapram:
-                       error(ENOSYS, NULL);
-                       break;
        }
 
        n = p - buf;
@@ -434,13 +628,15 @@ static long archread(struct chan *c, void *a, long n, int64_t offset)
 static long archwrite(struct chan *c, void *a, long n, int64_t offset)
 {
        char *p;
-       int port;
+       int port, err;
+       uint64_t value;
        uint16_t *sp;
        uint32_t *lp;
-       Rdwrfn *fn;
+       struct core_set cset;
+       struct msr_address msra;
+       struct msr_value msrv;
 
        switch ((uint32_t) c->qid.path) {
-
                case Qgdb:
                        p = a;
                        if (n != 1)
@@ -452,126 +648,95 @@ static long archwrite(struct chan *c, void *a, long n, int64_t offset)
                        else
                                error(EINVAL, "Gdb: must be 1 or 0");
                        return 1;
-
                case Qiob:
                        p = a;
                        checkport(offset, offset + n);
                        for (port = offset; port < offset + n; port++)
                                outb(port, *p++);
                        return n;
-
                case Qiow:
                        if (n & 1)
-                               error(EINVAL, NULL);
+                               error(EINVAL, ERROR_FIXME);
                        checkport(offset, offset + n);
                        sp = a;
                        for (port = offset; port < offset + n; port += 2)
                                outw(port, *sp++);
                        return n;
-
                case Qiol:
                        if (n & 3)
-                               error(EINVAL, NULL);
+                               error(EINVAL, ERROR_FIXME);
                        checkport(offset, offset + n);
                        lp = a;
                        for (port = offset; port < offset + n; port += 4)
                                outl(port, *lp++);
                        return n;
+               case Qmsr:
+                       if (!address_range_find(msr_wr_wlist, ARRAY_SIZE(msr_wr_wlist),
+                                               (uintptr_t) offset))
+                               error(EPERM, "MSR 0x%x not in write whitelist", offset);
+                       if (n != sizeof(uint64_t))
+                               error(EINVAL, "Tried to write more than a u64 (%p)", n);
+                       if (memcpy_from_user_errno(current, &value, a, sizeof(value)))
+                               return -1;
+
+                       core_set_init(&cset);
+                       core_set_fill_available(&cset);
+                       msr_set_address(&msra, (uint32_t) offset);
+                       msr_set_value(&msrv, value);
+
+                       err = msr_cores_write(&cset, &msra, &msrv);
+                       if (unlikely(err)) {
+                               switch (-err) {
+                               case (EFAULT):
+                                       error(-err, "write_msr() faulted on MSR 0x%x", offset);
+                               case (ERANGE):
+                                       error(-err, "Not enough space for MSR write");
+                               };
+                               error(-err, "MSR write failed");
+                       }
+                       return sizeof(uint64_t);
+               case Qperf: {
+                       struct perf_context *pc = (struct perf_context *) c->aux;
+
+                       assert(pc);
 
+                       return arch_perf_write(pc, a, n);
+               }
                default:
-                       if (c->qid.path < narchdir && (fn = writefn[c->qid.path]))
-                               return fn(c, a, n, offset);
-                       error(EPERM, NULL);
-                       break;
+                       error(EINVAL, ERROR_FIXME);
        }
        return 0;
 }
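
The Qmsr write path is the mirror image: exactly one uint64_t, applied to the MSR named by the file offset on every available core, and only for MSRs in msr_wr_wlist (the performance counters and their control registers). A minimal sketch, again with the file path as an assumption:

#include <stdint.h>
#include <unistd.h>
#include <fcntl.h>

/* Write 'val' to MSR 'msr' on every core; returns 0 on success. */
static int msr_write_all(const char *msr_path, uint32_t msr, uint64_t val)
{
	int fd = open(msr_path, O_WRONLY);
	int ret = -1;

	if (fd < 0)
		return -1;
	/* archwrite() rejects anything other than a single 8-byte value. */
	if (pwrite(fd, &val, sizeof(val), msr) == (ssize_t)sizeof(val))
		ret = 0;
	close(fd);
	return ret;
}
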
 
-struct dev archdevtab __devtab = {
-       "arch",
-
-       devreset,
-       devinit,
-       devshutdown,
-       archattach,
-       archwalk,
-       archstat,
-       archopen,
-       devcreate,
-       archclose,
-       archread,
-       devbread,
-       archwrite,
-       devbwrite,
-       devremove,
-       devwstat,
-};
-
-/*
- */
-void nop(void)
-{
-}
-
-static long cputyperead(struct chan *unused, void *a, long n, int64_t off)
+static void archinit(void)
 {
-       char buf[512], *s, *e;
-       int i, k;
-
-       error(ENOSYS, NULL);
-#if 0
-       e = buf + sizeof buf;
-       s = seprintf(buf, e, "%s %d\n", "AMD64", 0);
-       k = m->ncpuinfoe - m->ncpuinfos;
-       if (k > 4)
-               k = 4;
-       for (i = 0; i < k; i++)
-               s = seprintf(s, e, "%#8.8ux %#8.8ux %#8.8ux %#8.8ux\n",
-                                        m->cpuinfo[i][0], m->cpuinfo[i][1],
-                                        m->cpuinfo[i][2], m->cpuinfo[i][3]);
-       return readstr(off, a, n, buf);
-#endif
-}
+       int ret;
 
-static long rmemrw(int isr, void *a, long n, int64_t off)
-{
-       if (off < 0)
-               error(EINVAL, "offset must be >= 0");
-       if (n < 0)
-               error(EINVAL, "count must be >= 0");
-       if (isr) {
-               if (off >= MB)
-                       error(EINVAL, "offset must be < 1MB");
-               if (off + n >= MB)
-                       n = MB - off;
-               memmove(a, KADDR((uint32_t) off), n);
-       } else {
-               /* realmode buf page ok, allow vga framebuf's access */
-               if (off >= MB)
-                       error(EINVAL, "offset must be < 1MB");
-               if (off + n > MB && (off < 0xA0000 || off + n > 0xB0000 + 0x10000))
-                       error(EINVAL, "bad offset/count in write");
-               memmove(KADDR((uint32_t) off), a, n);
-       }
-       return n;
+       ret = address_range_init(msr_rd_wlist, ARRAY_SIZE(msr_rd_wlist));
+       assert(!ret);
+       ret = address_range_init(msr_wr_wlist, ARRAY_SIZE(msr_wr_wlist));
+       assert(!ret);
 }
 
-static long rmemread(struct chan *unused, void *a, long n, int64_t off)
-{
-       return rmemrw(1, a, n, off);
-}
-
-static long rmemwrite(struct chan *unused, void *a, long n, int64_t off)
-{
-       return rmemrw(0, a, n, off);
-}
-
-void archinit(void)
-{
-       spinlock_init(&archwlock);
-       addarchfile("cputype", 0444, cputyperead, NULL);
-       addarchfile("realmodemem", 0660, rmemread, rmemwrite);
-}
+struct dev archdevtab __devtab = {
+       .name = "arch",
+
+       .reset = devreset,
+       .init = archinit,
+       .shutdown = devshutdown,
+       .attach = archattach,
+       .walk = archwalk,
+       .stat = archstat,
+       .open = archopen,
+       .create = devcreate,
+       .close = archclose,
+       .read = archread,
+       .bread = devbread,
+       .write = archwrite,
+       .bwrite = devbwrite,
+       .remove = devremove,
+       .wstat = devwstat,
+};
 
 void archreset(void)
 {