Fix formatting: leading spaces to tabs, and fix continued-line alignment.
author: Dan Cross <crossd@gmail.com>
Tue, 8 Mar 2016 15:37:27 +0000 (10:37 -0500)
committer: Barret Rhoden <brho@cs.berkeley.edu>
Tue, 8 Mar 2016 19:58:19 +0000 (14:58 -0500)
Indent using tabs, not spaces.

In the event that a line must be broken due to length, the coding
standard says to break it so that we use tabs to advance the
continued line to the level of indentation of the broken line,
and then spaces to align to the opening parenthesis.

Signed-off-by: Dan Cross <crossd@gmail.com>
Signed-off-by: Barret Rhoden <brho@cs.berkeley.edu>
kern/arch/riscv/uaccess.h
kern/arch/x86/devarch.c
kern/arch/x86/kdebug.c
kern/arch/x86/msr.c
kern/arch/x86/perfmon.c
kern/arch/x86/uaccess.h
kern/drivers/dev/kprof.c
kern/drivers/net/udrvr/compat.h
kern/include/percpu.h
kern/lib/circular_buffer.c
kern/src/profiler.c

index 4ed0bb7..8b91715 100644 (file)
@@ -67,7 +67,7 @@ static inline int __get_user(void *dst, const void *src, unsigned int count)
 }
 
 static inline int copy_from_user(void *dst, const void *src,
-                                                                unsigned int count)
+                                 unsigned int count)
 {
 #warning "The copy_from_user() API is a stub and should be re-implemented"
 
index 9d878f3..a018210 100644 (file)
@@ -200,8 +200,8 @@ int iounused(int start, int end)
        struct io_map *map;
 
        for (map = iomap.map; map; map = map->next) {
-               if (((start >= map->start) && (start < map->end))
-                       || ((start <= map->start) && (end > map->start)))
+               if (((start >= map->start) && (start < map->end)) ||
+                   ((start <= map->start) && (end > map->start)))
                        return 0;
        }
        return 1;
@@ -322,7 +322,7 @@ static struct perf_context *arch_create_perf_context(void)
 {
        ERRSTACK(1);
        struct perf_context *pc = kzmalloc(sizeof(struct perf_context),
-                                                                          KMALLOC_WAIT);
+                                          KMALLOC_WAIT);
 
        if (waserror()) {
                kfree(pc);
@@ -344,8 +344,8 @@ static void arch_free_perf_context(struct perf_context *pc)
 }
 
 static const uint8_t *arch_read_core_set(struct core_set *cset,
-                                                                                const uint8_t *kptr,
-                                                                                const uint8_t *ktop)
+                                         const uint8_t *kptr,
+                                         const uint8_t *ktop)
 {
        int i, nb;
        uint32_t n;
@@ -364,7 +364,7 @@ static const uint8_t *arch_read_core_set(struct core_set *cset,
 }
 
 static long arch_perf_write(struct perf_context *pc, const void *udata,
-                                                       long usize)
+                            long usize)
 {
        ERRSTACK(1);
        void *kdata;
@@ -543,7 +543,7 @@ static long archread(struct chan *c, void *a, long n, int64_t offset)
                        return readmem(offset, a, n, KADDR(0), REAL_MEM_SIZE);
                case Qmsr:
                        if (!address_range_find(msr_rd_wlist, ARRAY_SIZE(msr_rd_wlist),
-                                                                       (uintptr_t) offset))
+                                               (uintptr_t) offset))
                                error(EPERM, "MSR 0x%x not in read whitelist", offset);
                        core_set_init(&cset);
                        core_set_fill_available(&cset);
@@ -558,7 +558,7 @@ static long archread(struct chan *c, void *a, long n, int64_t offset)
                        if (likely(!err)) {
                                if (n >= num_cores * sizeof(uint64_t)) {
                                        if (!memcpy_to_user_errno(current, a, values,
-                                                                                         num_cores * sizeof(uint64_t)))
+                                                                 num_cores * sizeof(uint64_t)))
                                                n = num_cores * sizeof(uint64_t);
                                        else
                                                n = -1;
@@ -608,7 +608,7 @@ static long archread(struct chan *c, void *a, long n, int64_t offset)
                                if (offset-- > 0)
                                        continue;
                                snprintf(p, n * Linelen, "%#8p %#8p %-12.12s\n", map->start,
-                                                map->end - 1, map->tag);
+                                        map->end - 1, map->tag);
                                p += Linelen;
                                n--;
                        }
@@ -670,7 +670,7 @@ static long archwrite(struct chan *c, void *a, long n, int64_t offset)
                        return n;
                case Qmsr:
                        if (!address_range_find(msr_wr_wlist, ARRAY_SIZE(msr_wr_wlist),
-                                                                       (uintptr_t) offset))
+                                               (uintptr_t) offset))
                                error(EPERM, "MSR 0x%x not in write whitelist", offset);
                        if (n != sizeof(uint64_t))
                                error(EINVAL, "Tried to write more than a u64 (%p)", n);
index 5b7f3b9..2d3ba0d 100644 (file)
@@ -92,8 +92,8 @@ static bool stab_table_valid(const char *stabstr, const char *stabstr_end)
 //
 static void
 stab_binsearch(const stab_t *stabs,
-           const stab_t *stab_end,
-           int *region_left, int *region_right,
+               const stab_t *stab_end,
+               int *region_left, int *region_right,
               int type, uintptr_t addr)
 {
        int l = *region_left, r = *region_right, any_matches = 0;
index 33216a4..7db2262 100644 (file)
@@ -50,7 +50,7 @@ static void msr_smp_read(void *opaque)
 }
 
 int msr_cores_read(const struct core_set *cset, const struct msr_address *msra,
-                                  struct msr_value *msrv)
+                   struct msr_value *msrv)
 {
        int err;
        struct smp_read_values srv;
@@ -99,7 +99,7 @@ static void msr_smp_write(void *opaque)
 }
 
 int msr_cores_write(const struct core_set *cset, const struct msr_address *msra,
-                                       const struct msr_value *msrv)
+                    const struct msr_value *msrv)
 {
        struct smp_write_values swv;
 
index eaa984a..6b07eef 100644 (file)
@@ -80,10 +80,10 @@ static void perfmon_enable_fix_event(int event, bool enable)
 
        if (enable)
                write_msr(MSR_CORE_PERF_GLOBAL_CTRL,
-                                 gctrl | ((uint64_t) 1 << (32 + event)));
+                         gctrl | ((uint64_t) 1 << (32 + event)));
        else
                write_msr(MSR_CORE_PERF_GLOBAL_CTRL,
-                                 gctrl & ~((uint64_t) 1 << (32 + event)));
+                         gctrl & ~((uint64_t) 1 << (32 + event)));
 }
 
 static bool perfmon_event_available(uint32_t event)
@@ -92,7 +92,7 @@ static bool perfmon_event_available(uint32_t event)
 }
 
 static uint64_t perfmon_get_fixevent_mask(const struct perfmon_event *pev,
-                                                                                 int eventno, uint64_t base)
+                                          int eventno, uint64_t base)
 {
        uint64_t m = 0;
 
@@ -133,7 +133,7 @@ static void perfmon_do_cores_alloc(void *opaque)
                        perfmon_enable_fix_event(i, TRUE);
 
                        write_msr(MSR_CORE_PERF_FIXED_CTR0 + i,
-                                         -(int64_t) pa->ev.trigger_count);
+                                 -(int64_t) pa->ev.trigger_count);
                        write_msr(MSR_CORE_PERF_FIXED_CTR_CTRL, tmp);
                }
        } else {
@@ -153,7 +153,7 @@ static void perfmon_do_cores_alloc(void *opaque)
 
                        write_msr(MSR_IA32_PERFCTR0 + i, -(int64_t) pa->ev.trigger_count);
                        write_msr(MSR_ARCH_PERFMON_EVENTSEL0 + i,
-                                         cctx->counters[i].event);
+                                 cctx->counters[i].event);
                } else {
                        i = -ENOSPC;
                }
@@ -184,7 +184,7 @@ static void perfmon_do_cores_free(void *opaque)
                        perfmon_enable_fix_event((int) ccno, FALSE);
 
                        write_msr(MSR_CORE_PERF_FIXED_CTR_CTRL,
-                                         fxctrl_value & ~(FIXCNTR_MASK << ccbitsh));
+                                 fxctrl_value & ~(FIXCNTR_MASK << ccbitsh));
                        write_msr(MSR_CORE_PERF_FIXED_CTR0 + ccno, 0);
                }
        } else {
@@ -214,15 +214,15 @@ static void perfmon_do_cores_status(void *opaque)
        spin_lock_irqsave(&cctx->lock);
        if (perfmon_is_fixed_event(&env->pa->ev))
                env->pef->cores_values[coreno] =
-                       read_msr(MSR_CORE_PERF_FIXED_CTR0 + ccno);
+                   read_msr(MSR_CORE_PERF_FIXED_CTR0 + ccno);
        else
                env->pef->cores_values[coreno] =
-                       read_msr(MSR_IA32_PERFCTR0 + ccno);
+                   read_msr(MSR_IA32_PERFCTR0 + ccno);
        spin_unlock_irqsave(&cctx->lock);
 }
 
 static void perfmon_setup_alloc_core_set(const struct perfmon_alloc *pa,
-                                                                                struct core_set *cset)
+                                         struct core_set *cset)
 {
        int i;
 
@@ -265,8 +265,8 @@ static struct perfmon_alloc *perfmon_create_alloc(const struct perfmon_event *pe
 {
        int i;
        struct perfmon_alloc *pa = kzmalloc(sizeof(struct perfmon_alloc) +
-                                                                               num_cores * sizeof(counter_t),
-                                                                               KMALLOC_WAIT);
+                                               num_cores * sizeof(counter_t),
+                                           KMALLOC_WAIT);
 
        kref_init(&pa->ref, perfmon_release_alloc, 1);
        pa->ev = *pev;
@@ -279,8 +279,8 @@ static struct perfmon_alloc *perfmon_create_alloc(const struct perfmon_event *pe
 static struct perfmon_status *perfmon_alloc_status(void)
 {
        struct perfmon_status *pef = kzmalloc(sizeof(struct perfmon_status) +
-                                                                                 num_cores * sizeof(uint64_t),
-                                                                                 KMALLOC_WAIT);
+                                                 num_cores * sizeof(uint64_t),
+                                             KMALLOC_WAIT);
 
        return pef;
 }
@@ -352,9 +352,9 @@ void perfmon_interrupt(struct hw_trapframe *hw_tf, void *data)
                if (status & ((uint64_t) 1 << i)) {
                        if (cctx->counters[i].event) {
                                profiler_add_hw_sample(
-                                       hw_tf, perfmon_make_sample_event(cctx->counters + i));
+                                   hw_tf, perfmon_make_sample_event(cctx->counters + i));
                                write_msr(MSR_IA32_PERFCTR0 + i,
-                                                 -(int64_t) cctx->counters[i].trigger_count);
+                                         -(int64_t) cctx->counters[i].trigger_count);
                        }
                }
        }
@@ -362,9 +362,9 @@ void perfmon_interrupt(struct hw_trapframe *hw_tf, void *data)
                if (status & ((uint64_t) 1 << (32 + i))) {
                        if (cctx->fixed_counters[i].event) {
                                profiler_add_hw_sample(
-                                       hw_tf, perfmon_make_sample_event(cctx->fixed_counters + i));
+                                   hw_tf, perfmon_make_sample_event(cctx->fixed_counters + i));
                                write_msr(MSR_CORE_PERF_FIXED_CTR0 + i,
-                                                 -(int64_t) cctx->fixed_counters[i].trigger_count);
+                                         -(int64_t) cctx->fixed_counters[i].trigger_count);
                        }
                }
        }
@@ -385,7 +385,7 @@ void perfmon_get_cpu_caps(struct perfmon_cpu_caps *pcc)
 }
 
 static int perfmon_install_session_alloc(struct perfmon_session *ps,
-                                                                                struct perfmon_alloc *pa)
+                                         struct perfmon_alloc *pa)
 {
        int i;
 
@@ -404,7 +404,7 @@ static int perfmon_install_session_alloc(struct perfmon_session *ps,
 }
 
 int perfmon_open_event(const struct core_set *cset, struct perfmon_session *ps,
-                                          const struct perfmon_event *pev)
+                       const struct perfmon_event *pev)
 {
        ERRSTACK(1);
        int i;
@@ -439,7 +439,7 @@ int perfmon_open_event(const struct core_set *cset, struct perfmon_session *ps,
 }
 
 static void perfmon_alloc_get(struct perfmon_session *ps, int ped, bool reset,
-                                                         struct perfmon_alloc **ppa)
+                              struct perfmon_alloc **ppa)
 {
        struct perfmon_alloc *pa;
 
@@ -468,7 +468,7 @@ void perfmon_close_event(struct perfmon_session *ps, int ped)
 }
 
 struct perfmon_status *perfmon_get_event_status(struct perfmon_session *ps,
-                                                                                               int ped)
+                                                int ped)
 {
        struct core_set cset;
        struct perfmon_status_env env;
@@ -491,11 +491,10 @@ void perfmon_free_event_status(struct perfmon_status *pef)
 
 static void perfmon_release_session(struct kref *kref)
 {
-       struct perfmon_session *ps = container_of(kref, struct perfmon_session,
-                                                                                         ref);
-       int i;
+       struct perfmon_session *ps =
+           container_of(kref, struct perfmon_session, ref);
 
-       for (i = 0; i < ARRAY_SIZE(ps->allocs); i++) {
+       for (int i = 0; i < ARRAY_SIZE(ps->allocs); i++) {
                struct perfmon_alloc *pa = ps->allocs[i];
 
                if (pa)
@@ -507,7 +506,7 @@ static void perfmon_release_session(struct kref *kref)
 struct perfmon_session *perfmon_create_session(void)
 {
        struct perfmon_session *ps = kzmalloc(sizeof(struct perfmon_session),
-                                                                                 KMALLOC_WAIT);
+                                             KMALLOC_WAIT);
 
        kref_init(&ps->ref, perfmon_release_session, 1);
        spinlock_init(&ps->lock);
index 80551e4..438d867 100644 (file)
@@ -27,16 +27,16 @@ struct extable_ip_fixup {
 
 #define __read_msr_asm(eax, edx, addr, err, errret)                                            \
        asm volatile(ASM_STAC "\n"                                                                                      \
-                                "1:            rdmsr\n"                                                                        \
-                                "                      mfence\n"                                                                       \
-                                "2: " ASM_CLAC "\n"                                                                    \
-                                ".section .fixup,\"ax\"\n"                                                             \
-                                "3:            mov %4,%0\n"                                                            \
-                                "      jmp 2b\n"                                                                                       \
-                                ".previous\n"                                                                                  \
-                                _ASM_EXTABLE(1b, 3b)                                                                   \
-                                : "=r" (err), "=d" (edx), "=a" (eax)                                   \
-                                : "c" (addr), "i" (errret), "0" (err))
+                    "1:                rdmsr\n"                                                                        \
+                    "                  mfence\n"                                                                       \
+                    "2: " ASM_CLAC "\n"                                                                        \
+                    ".section .fixup,\"ax\"\n"                                                         \
+                    "3:                mov %4,%0\n"                                                            \
+                    "  jmp 2b\n"                                                                                       \
+                    ".previous\n"                                                                                      \
+                    _ASM_EXTABLE(1b, 3b)                                                                       \
+                    : "=r" (err), "=d" (edx), "=a" (eax)                                       \
+                    : "c" (addr), "i" (errret), "0" (err))
 
 #define __write_msr_asm(val, addr, err, errret)                                                        \
        asm volatile(ASM_STAC "\n"                                                                                      \
@@ -98,19 +98,19 @@ static inline int __put_user(void *dst, const void *src, unsigned int count)
        switch (count) {
        case 1:
                __put_user_asm(*(const uint8_t *) src, (uint8_t *) dst, err, "b",
-                                          "b", "iq", -EFAULT);
+                              "b", "iq", -EFAULT);
                break;
        case 2:
                __put_user_asm(*(const uint16_t *) src, (uint16_t *) dst, err, "w",
-                                          "w", "ir", -EFAULT);
+                              "w", "ir", -EFAULT);
                break;
        case 4:
                __put_user_asm(*(const uint32_t *) src, (uint32_t *) dst, err, "l",
-                                          "k", "ir", -EFAULT);
+                              "k", "ir", -EFAULT);
                break;
        case 8:
                __put_user_asm(*(const uint64_t *) src, (uint64_t *) dst, err, "q",
-                                          "", "er", -EFAULT);
+                              "", "er", -EFAULT);
                break;
        default:
                __user_memcpy(dst, src, count, err, -EFAULT);
@@ -141,19 +141,19 @@ static inline int __get_user(void *dst, const void *src, unsigned int count)
        switch (count) {
        case 1:
                __get_user_asm(*(uint8_t *) dst, (const uint8_t *) src, err, "b",
-                                          "b", "=q", -EFAULT);
+                              "b", "=q", -EFAULT);
                break;
        case 2:
                __get_user_asm(*(uint16_t *) dst, (const uint16_t *) src, err, "w",
-                                          "w", "=r", -EFAULT);
+                              "w", "=r", -EFAULT);
                break;
        case 4:
                __get_user_asm(*(uint32_t *) dst, (const uint32_t *) src, err, "l",
-                                          "k", "=r", -EFAULT);
+                              "k", "=r", -EFAULT);
                break;
        case 8:
                __get_user_asm(*(uint64_t *) dst, (const uint64_t *) src, err, "q",
-                                          "", "=r", -EFAULT);
+                              "", "=r", -EFAULT);
                break;
        default:
                __user_memcpy(dst, src, count, err, -EFAULT);
@@ -163,7 +163,7 @@ static inline int __get_user(void *dst, const void *src, unsigned int count)
 }
 
 static inline int copy_from_user(void *dst, const void *src,
-                                                                unsigned int count)
+                                 unsigned int count)
 {
        int err = 0;
 
index 71889ec..51c215b 100644 (file)
@@ -171,7 +171,7 @@ static void kprof_fetch_profiler_data(void)
        kprof.pdata = ndata;
        while (kprof.psize < psize) {
                size_t csize = profiler_read(kprof.pdata + kprof.psize,
-                                                                        psize - kprof.psize);
+                                            psize - kprof.psize);
 
                if (csize == 0)
                        break;
@@ -229,7 +229,7 @@ static void kprof_init(void)
        kprof.psize = 0;
 
        kprof.alarms = kzmalloc(sizeof(struct alarm_waiter) * num_cores,
-                                                       KMALLOC_WAIT);
+                               KMALLOC_WAIT);
        if (!kprof.alarms)
                error(ENOMEM, ERROR_FIXME);
        if (waserror()) {
@@ -267,7 +267,7 @@ static void kprofclear(void)
 }
 
 static struct walkqid *kprof_walk(struct chan *c, struct chan *nc, char **name,
-                                                                int nname)
+                                  int nname)
 {
        return devwalk(c, nc, name, nname, kproftab, ARRAY_SIZE(kproftab), devgen);
 }
@@ -563,7 +563,7 @@ void kprof_tracedata_write(const char *pretty_buf, size_t len)
        spin_lock_irqsave(&ktrace_lock);
        if (unlikely(!ktrace_init_done)) {
                circular_buffer_init(&ktrace_data, sizeof(ktrace_buffer),
-                                                        ktrace_buffer);
+                                    ktrace_buffer);
                ktrace_init_done = TRUE;
        }
        circular_buffer_write(&ktrace_data, pretty_buf, len);
@@ -635,7 +635,7 @@ void trace_vprintk(bool btrace, const char *fmt, va_list args)
        if (likely(system_timing.tsc_freq))
                tsc2timespec(read_tsc(), &ts_now);
        snprintf(hdr, sizeof(hdr), "[%lu.%09lu]:cpu%d: ", ts_now.tv_sec,
-                        ts_now.tv_nsec, core_id_early());
+                ts_now.tv_nsec, core_id_early());
 
        pb.ptr = usrbuf + vsnprintf(usrbuf, usr_bufsz, fmt, args);
        pb.top = usrbuf + usr_bufsz;
index cdbb37f..9df9f06 100644 (file)
@@ -13,8 +13,8 @@
 #include <linux_compat.h>
 
 #include <arch/uaccess.h>      /* copy_from_user(), copy_to_user() */
-#define access_ok(type, addr, size)     1
 
+#define access_ok(type, addr, size)     1
 
 /*
  * Device file /dev/XXXX has a dentry and inode that is associated
@@ -82,9 +82,9 @@ static unsigned long pgprot_noncached(int vmprot)
 #define is_vm_hugetlb_page(vma)        0
 
 extern int map_upage_at_addr(struct proc *p, physaddr_t paddr, uintptr_t addr,
-    int pteprot, int dolock);
+                             int pteprot, int dolock);
 extern int get_user_page(struct proc *p, unsigned long uvastart, int write,
-     int force, struct page **plist);
+                         int force, struct page **plist);
 extern void put_page(struct page *pagep);
 extern void set_page_dirty_lock(struct page *pagep);
 
@@ -239,7 +239,7 @@ struct ib_ud_header {
 
 extern void sysfs_init(void);
 extern void sysfs_create(int devnum, const struct file_operations *verb_fops,
-    void *ptr);
+                         void *ptr);
 
 extern ssize_t check_old_abi(struct file *filp, const char __user *buf,
-    size_t count);
+                             size_t count);
index 2555bc8..ffccf4e 100644 (file)
@@ -55,7 +55,7 @@
                typeof(var) *__cv;                                                                                              \
                if (likely(percpu_base))                                                                                \
                        __cv = (typeof(var) *) (percpu_base + cpu * PERCPU_SIZE +       \
-                                                                       PERCPU_OFFSET(var));                            \
+                                                   PERCPU_OFFSET(var));                                \
                else                                                                                                                    \
                        __cv = &var;                                                                                            \
                __cv;                                                                                                                   \
index 87f000b..93dd174 100644 (file)
@@ -43,8 +43,8 @@ void circular_buffer_clear(struct circular_buffer *cb)
 }
 
 static bool circular_buffer_is_overlap(const struct circular_buffer *cb,
-                                                                          const char *rptr, const char *wptr,
-                                                                          size_t size)
+                                       const char *rptr, const char *wptr,
+                                       size_t size)
 {
        /* Check if the current write operation [wptr, wptr+size) is overwriting
         * the block at which rptr in pointing to.
@@ -53,7 +53,7 @@ static bool circular_buffer_is_overlap(const struct circular_buffer *cb,
 }
 
 static void circular_buffer_write_skip(struct circular_buffer *cb, char *wrptr,
-                                                                          size_t size)
+                                       size_t size)
 {
        /* Move the read pointer forward, so that the incoming write does not
         * overwrite the block the read pointer is looking at.
@@ -74,7 +74,7 @@ static void circular_buffer_write_skip(struct circular_buffer *cb, char *wrptr,
 }
 
 size_t circular_buffer_write(struct circular_buffer *cb,
-                                                        const char *data, size_t size)
+                             const char *data, size_t size)
 {
        /* Data is written and evetually discarded in atomic blocks, in order to
         * maintain the consistency of the information stored in the buffer.
@@ -107,7 +107,7 @@ size_t circular_buffer_write(struct circular_buffer *cb,
 }
 
 size_t circular_buffer_read(struct circular_buffer *cb, char *data, size_t size,
-                                                       size_t off)
+                            size_t off)
 {
        size_t asize = cb->size, rsize = 0;
        const char *rdptr = cb->rdptr;
index a50d5b4..c76c936 100644 (file)
@@ -32,7 +32,7 @@
 
 struct profiler_cpu_context {
        struct block *block;
-    int cpu;
+       int cpu;
        int tracing;
        size_t dropped_data_size;
 };
@@ -62,7 +62,7 @@ static inline char *vb_encode_uint64(char *data, uint64_t n)
 }
 
 static struct block *profiler_buffer_write(struct profiler_cpu_context *cpu_buf,
-                                                                                  struct block *b)
+                                           struct block *b)
 {
        if (b) {
                qibwrite(profiler_queue, b);
@@ -86,7 +86,7 @@ static char *profiler_cpu_buffer_write_reserve(
 
        if (unlikely((!b) || (b->lim - b->wp) < size)) {
                cpu_buf->block = b = profiler_buffer_write(cpu_buf, b);
-        if (unlikely(!b))
+               if (unlikely(!b))
                        return NULL;
        }
        *pb = b;
@@ -106,8 +106,8 @@ static inline size_t profiler_max_envelope_size(void)
 }
 
 static void profiler_push_kernel_trace64(struct profiler_cpu_context *cpu_buf,
-                                                                                const uintptr_t *trace, size_t count,
-                                                                                uint64_t info)
+                                         const uintptr_t *trace, size_t count,
+                                         uint64_t info)
 {
        size_t i, size = sizeof(struct proftype_kern_trace64) +
                count * sizeof(uint64_t);
@@ -137,8 +137,8 @@ static void profiler_push_kernel_trace64(struct profiler_cpu_context *cpu_buf,
 }
 
 static void profiler_push_user_trace64(struct profiler_cpu_context *cpu_buf,
-                                                                          struct proc *p, const uintptr_t *trace,
-                                                                          size_t count, uint64_t info)
+                                       struct proc *p, const uintptr_t *trace,
+                                       size_t count, uint64_t info)
 {
        size_t i, size = sizeof(struct proftype_user_trace64) +
                count * sizeof(uint64_t);
@@ -169,7 +169,7 @@ static void profiler_push_user_trace64(struct profiler_cpu_context *cpu_buf,
 }
 
 static void profiler_push_pid_mmap(struct proc *p, uintptr_t addr, size_t msize,
-                                                                  size_t offset, const char *path)
+                                   size_t offset, const char *path)
 {
        size_t i, plen = strlen(path) + 1,
                size = sizeof(struct proftype_pid_mmap64) + plen;
@@ -231,8 +231,8 @@ static void profiler_emit_current_system_status(void)
                struct proc *p = (struct proc *) opaque;
 
                profiler_notify_mmap(p, vmr->vm_base, vmr->vm_end - vmr->vm_base,
-                                                        vmr->vm_prot, vmr->vm_flags, vmr->vm_file,
-                                                        vmr->vm_foff);
+                                    vmr->vm_prot, vmr->vm_flags, vmr->vm_file,
+                                    vmr->vm_foff);
        }
 
        ERRSTACK(1);
@@ -279,7 +279,7 @@ static void alloc_cpu_buffers(void)
        qnonblock(profiler_queue, TRUE);
 
        profiler_percpu_ctx =
-               kzmalloc(sizeof(*profiler_percpu_ctx) * num_cores, KMALLOC_WAIT);
+           kzmalloc(sizeof(*profiler_percpu_ctx) * num_cores, KMALLOC_WAIT);
 
        for (i = 0; i < num_cores; i++) {
                struct profiler_cpu_context *b = &profiler_percpu_ctx[i];
@@ -289,7 +289,7 @@ static void alloc_cpu_buffers(void)
 }
 
 static long profiler_get_checked_value(const char *value, long k, long minval,
-                                                                          long maxval)
+                                       long maxval)
 {
        long lvalue = strtol(value, NULL, 0) * k;
 
@@ -417,7 +417,7 @@ void profiler_control_trace(int onoff)
        core_set_init(&cset);
        core_set_fill_available(&cset);
        smp_do_in_cores(&cset, profiler_core_trace_enable,
-                                       (void *) (uintptr_t) onoff);
+                       (void *) (uintptr_t) onoff);
 }
 
 static void profiler_core_flush(void *opaque)
@@ -458,7 +458,7 @@ void profiler_add_kernel_backtrace(uintptr_t pc, uintptr_t fp, uint64_t info)
                        trace[0] = pc;
                        if (likely(fp))
                                n = backtrace_list(pc, fp, trace + 1,
-                                                                  PROFILER_BT_DEPTH - 1) + 1;
+                                                  PROFILER_BT_DEPTH - 1) + 1;
 
                        profiler_push_kernel_trace64(cpu_buf, trace, n, info);
                }
@@ -479,7 +479,7 @@ void profiler_add_user_backtrace(uintptr_t pc, uintptr_t fp, uint64_t info)
                        trace[0] = pc;
                        if (likely(fp))
                                n = backtrace_user_list(pc, fp, trace + 1,
-                                                                               PROFILER_BT_DEPTH - 1) + 1;
+                                                       PROFILER_BT_DEPTH - 1) + 1;
 
                        profiler_push_user_trace64(cpu_buf, p, trace, n, info);
                }
@@ -491,10 +491,10 @@ void profiler_add_hw_sample(struct hw_trapframe *hw_tf, uint64_t info)
 {
        if (in_kernel(hw_tf))
                profiler_add_kernel_backtrace(get_hwtf_pc(hw_tf), get_hwtf_fp(hw_tf),
-                                                                         info);
+                                             info);
        else
                profiler_add_user_backtrace(get_hwtf_pc(hw_tf), get_hwtf_fp(hw_tf),
-                                                                       info);
+                                           info);
 }
 
 int profiler_size(void)
@@ -508,7 +508,7 @@ int profiler_read(void *va, int n)
 }
 
 void profiler_notify_mmap(struct proc *p, uintptr_t addr, size_t size, int prot,
-                                                 int flags, struct file *f, size_t offset)
+                          int flags, struct file *f, size_t offset)
 {
        if (kref_get_not_zero(&profiler_kref, 1)) {
                if (f && (prot & PROT_EXEC) && profiler_percpu_ctx) {