#include <kmalloc.h>
#include <profiler.h>
#include <stdio.h>
-#include <frontend.h>
#include <hashtable.h>
#include <bitmask.h>
#include <vfs.h>
return test_bit(sysc_num, p->strace->trace_set);
}
+/* Helper, copies len bytes from u_data to the trace->data, if there's room. */
+static void copy_tracedata_from_user(struct systrace_record *trace,
+ long u_data, size_t len)
+{
+ size_t copy_amt;
+
+ /* Clamp to however much space remains in the fixed-size trace buffer
+  * after datalen bytes already written. */
+ copy_amt = MIN(sizeof(trace->data) - trace->datalen, len);
+ /* NOTE(review): copy_from_user's return value is ignored; a faulting
+  * user pointer could leave stale bytes in the trace.  Presumably
+  * acceptable for best-effort tracing -- confirm. */
+ copy_from_user(trace->data + trace->datalen, (void*)u_data, copy_amt);
+ trace->datalen += copy_amt;
+}
+
+/* Helper, snprintfs to the trace, if there's room. */
+static void snprintf_to_trace(struct systrace_record *trace, const char *fmt,
+ ...)
+{
+ va_list ap;
+ int rc;
+
+ va_start(ap, fmt);
+ /* Print into whatever space is left after the datalen bytes already
+  * written into trace->data. */
+ rc = vsnprintf((char*)trace->data + trace->datalen,
+ sizeof(trace->data) - trace->datalen, fmt, ap);
+ va_end(ap);
+ /* Only advance datalen on success; snprintf_error() covers both a
+  * negative rc and truncation (rc >= space remaining). */
+ if (!snprintf_error(rc, sizeof(trace->data) - trace->datalen))
+ trace->datalen += rc;
+}
+
/* Starts a trace for p running sysc, attaching it to kthread. Pairs with
* systrace_finish_trace(). */
static void systrace_start_trace(struct kthread *kthread, struct syscall *sysc)
{
struct proc *p = current;
struct systrace_record *trace;
- uintreg_t data_arg;
- size_t data_len = 0;
kthread->strace = 0;
if (!should_strace(p, sysc))
switch (sysc->num) {
case SYS_write:
- data_arg = sysc->arg1;
- data_len = sysc->arg2;
+ copy_tracedata_from_user(trace, sysc->arg1, sysc->arg2);
break;
case SYS_openat:
- data_arg = sysc->arg1;
- data_len = sysc->arg2;
+ case SYS_chdir:
+ case SYS_rmdir:
+ case SYS_nmount:
+ copy_tracedata_from_user(trace, sysc->arg1, sysc->arg2);
+ break;
+ case SYS_stat:
+ case SYS_lstat:
+ case SYS_access:
+ case SYS_unlink:
+ case SYS_mkdir:
+ case SYS_wstat:
+ copy_tracedata_from_user(trace, sysc->arg0, sysc->arg1);
+ break;
+ case SYS_link:
+ case SYS_symlink:
+ case SYS_rename:
+ case SYS_nbind:
+ copy_tracedata_from_user(trace, sysc->arg0, sysc->arg1);
+ snprintf_to_trace(trace, " -> ");
+ copy_tracedata_from_user(trace, sysc->arg2, sysc->arg3);
+ break;
+ case SYS_nunmount:
+ copy_tracedata_from_user(trace, sysc->arg2, sysc->arg3);
break;
case SYS_exec:
trace->datalen = execargs_stringer(current,
sysc->arg3);
break;
}
- if (data_len) {
- trace->datalen = MIN(sizeof(trace->data), data_len);
- copy_from_user(trace->data, (void*)data_arg, trace->datalen);
- }
-
systrace_output(trace, p->strace, TRUE);
kthread->strace = trace;
{
struct proc *p = current;
struct systrace_record *trace;
- long data_arg;
- size_t data_len = 0;
if (!kthread->strace)
return;
if (!trace->datalen) {
switch (trace->syscallno) {
case SYS_read:
- data_arg = trace->arg1;
- data_len = retval < 0 ? 0 : retval;
+ if (retval <= 0)
+ break;
+ copy_tracedata_from_user(trace, trace->arg1, retval);
+ break;
+ case SYS_readlink:
+ if (retval <= 0)
+ break;
+ copy_tracedata_from_user(trace, trace->arg0, trace->arg1);
+ snprintf_to_trace(trace, " -> ");
+ copy_tracedata_from_user(trace, trace->arg2, trace->arg3);
break;
}
- trace->datalen = MIN(sizeof(trace->data), data_len);
- if (trace->datalen)
- copy_from_user(trace->data, (void*)data_arg, trace->datalen);
}
systrace_output(trace, p->strace, FALSE);
static void alloc_sysc_str(struct kthread *kth)
{
- kth->name = kmalloc(SYSCALL_STRLEN, MEM_WAIT);
+ kth->name = kmalloc(SYSCALL_STRLEN, MEM_ATOMIC);
+ /* MEM_ATOMIC may fail where MEM_WAIT could not, so bail and leave name
+  * NULL; users of kth->name must tolerate that.  NOTE(review):
+  * presumably this path can run in a context that must not block --
+  * confirm. */
+ if (!kth->name)
+ return;
kth->name[0] = 0;
}
static void free_sysc_str(struct kthread *kth)
{
char *str = kth->name;
+
+ /* Clear the pointer before freeing so anything still looking at
+  * kth->name sees NULL rather than a dangling pointer.  NOTE(review):
+  * assumes readers tolerate a NULL name -- confirm. */
kth->name = 0;
kfree(str);
}
+/* Formats the current kthread's name buffer with the syscall string, if the
+ * buffer was allocated (alloc_sysc_str()'s MEM_ATOMIC kmalloc can fail and
+ * leave name NULL).  A macro so __VA_ARGS__ forwards straight to snprintf. */
#define sysc_save_str(...) \
{ \
struct per_cpu_info *pcpui = &per_cpu_info[core_id()]; \
- snprintf(pcpui->cur_kthread->name, SYSCALL_STRLEN, __VA_ARGS__); \
+ \
+ if (pcpui->cur_kthread->name) \
+ snprintf(pcpui->cur_kthread->name, SYSCALL_STRLEN, __VA_ARGS__); \
}
#else
return 0;
}
+/* Copies an event_msg from userspace and sends it to ev_q on behalf of
+ * vcoreid.  Returns 0 on success, or -1 with errno set to EINVAL if the
+ * user pointer could not be read. */
+static int sys_send_event(struct proc *p, struct event_queue *ev_q,
+ struct event_msg *u_msg, uint32_t vcoreid)
+{
+ struct event_msg local_msg = {0};
+
+ /* Copy into a kernel-side buffer first; never hand user memory straight
+  * to send_event(). */
+ if (memcpy_from_user(p, &local_msg, u_msg, sizeof(struct event_msg))) {
+ set_errno(EINVAL);
+ return -1;
+ }
+ send_event(p, ev_q, &local_msg, vcoreid);
+ return 0;
+}
+
/* Puts the calling core into vcore context, if it wasn't already, via a
* self-IPI / active notification. Barring any weird unmappings, we just send
* ourselves a __notify. */
return 0;
}
-/* Initializes a process to run virtual machine contexts, returning the number
- * initialized, optionally setting errno */
-static int sys_vmm_setup(struct proc *p, unsigned int nr_guest_pcores,
- struct vmm_gpcore_init *gpcis, int flags)
+/* Adds nr_more_gpcs guest pcores (described by gpcis) to p's VMM, setting up
+ * the VMM struct on first use.  Returns nr_more_gpcs on success, -1 on error
+ * (raised through waserror/error). */
+static int sys_vmm_add_gpcs(struct proc *p, unsigned int nr_more_gpcs,
+ struct vmm_gpcore_init *gpcis)
{
- int ret;
ERRSTACK(1);
+ struct vmm *vmm = &p->vmm;
+
+ /* The qlock serializes VMM setup/modification against concurrent
+  * adders and ctl callers; waserror must unlock on the error path. */
+ qlock(&vmm->qlock);
if (waserror()) {
+ qunlock(&vmm->qlock);
poperror();
return -1;
}
- ret = vmm_struct_init(p, nr_guest_pcores, gpcis, flags);
+ __vmm_struct_init(p);
+ __vmm_add_gpcs(p, nr_more_gpcs, gpcis);
+ qunlock(&vmm->qlock);
poperror();
- return ret;
+ return nr_more_gpcs;
}
static int sys_vmm_poke_guest(struct proc *p, int guest_pcoreid)
return vmm_poke_guest(p, guest_pcoreid);
}
+/* Generic get/set control interface for a process's VMM.  cmd selects the
+ * operation; arg1-arg4 are cmd-specific (only arg1 is used so far).
+ * Returns the requested value for gets, 0 for sets, or -1 on error (raised
+ * through waserror/error, which sets errno). */
+static int sys_vmm_ctl(struct proc *p, int cmd, unsigned long arg1,
+ unsigned long arg2, unsigned long arg3,
+ unsigned long arg4)
+{
+ ERRSTACK(1);
+ int ret;
+ struct vmm *vmm = &p->vmm;
+
+ /* Protects against concurrent setters and for gets that are not atomic
+  * reads (say, multiple exec ctls). */
+ qlock(&vmm->qlock);
+ if (waserror()) {
+ qunlock(&vmm->qlock);
+ poperror();
+ return -1;
+ }
+ /* Lazily set up the VMM struct; called on every ctl, so presumably
+  * idempotent -- confirm. */
+ __vmm_struct_init(p);
+ switch (cmd) {
+ case VMM_CTL_GET_EXITS:
+ if (vmm->amd)
+ error(ENOTSUP, "AMD VMMs unsupported");
+ ret = vmx_ctl_get_exits(&vmm->vmx);
+ break;
+ case VMM_CTL_SET_EXITS:
+ if (arg1 & ~VMM_CTL_ALL_EXITS)
+ /* %lx for arg1: it is an unsigned long, and %x would be
+  * a varargs type mismatch on LP64. */
+ error(EINVAL, "Bad vmm_ctl_exits %lx (%x)", arg1,
+ VMM_CTL_ALL_EXITS);
+ if (vmm->amd)
+ error(ENOTSUP, "AMD VMMs unsupported");
+ ret = vmx_ctl_set_exits(&vmm->vmx, arg1);
+ break;
+ case VMM_CTL_GET_FLAGS:
+ ret = vmm->flags;
+ break;
+ case VMM_CTL_SET_FLAGS:
+ if (arg1 & ~VMM_CTL_ALL_FLAGS)
+ /* No trailing \n: the other error() messages here omit it. */
+ error(EINVAL, "Bad vmm_ctl flags. Got 0x%lx, allowed 0x%lx",
+ arg1, VMM_CTL_ALL_FLAGS);
+ vmm->flags = arg1;
+ ret = 0;
+ break;
+ default:
+ error(EINVAL, "Bad vmm_ctl cmd %d", cmd);
+ }
+ qunlock(&vmm->qlock);
+ poperror();
+ return ret;
+}
+
/* Pokes the ksched for the given resource for target_pid. If the target pid
* == 0, we just poke for the calling process. The common case is poking for
* self, so we avoid the lookup.
return retval;
}
-/* kept around til we remove the last ufe */
-#define ufe(which,a0,a1,a2,a3) \
- frontend_syscall_errno(p,APPSERVER_SYSCALL_##which,\
- (int)(a0),(int)(a1),(int)(a2),(int)(a3))
-
static intreg_t sys_fstat(struct proc *p, int fd, struct kstat *u_stat)
{
struct kstat *kbuf;
[SYS_provision] = {(syscall_t)sys_provision, "provision"},
[SYS_notify] = {(syscall_t)sys_notify, "notify"},
[SYS_self_notify] = {(syscall_t)sys_self_notify, "self_notify"},
+ [SYS_send_event] = {(syscall_t)sys_send_event, "send_event"},
[SYS_vc_entry] = {(syscall_t)sys_vc_entry, "vc_entry"},
[SYS_halt_core] = {(syscall_t)sys_halt_core, "halt_core"},
#ifdef CONFIG_ARSC_SERVER
[SYS_init_arsc] = {(syscall_t)sys_init_arsc, "init_arsc"},
#endif
[SYS_change_to_m] = {(syscall_t)sys_change_to_m, "change_to_m"},
- [SYS_vmm_setup] = {(syscall_t)sys_vmm_setup, "vmm_setup"},
+ [SYS_vmm_add_gpcs] = {(syscall_t)sys_vmm_add_gpcs, "vmm_add_gpcs"},
[SYS_vmm_poke_guest] = {(syscall_t)sys_vmm_poke_guest, "vmm_poke_guest"},
+ [SYS_vmm_ctl] = {(syscall_t)sys_vmm_ctl, "vmm_ctl"},
[SYS_poke_ksched] = {(syscall_t)sys_poke_ksched, "poke_ksched"},
[SYS_abort_sysc] = {(syscall_t)sys_abort_sysc, "abort_sysc"},
[SYS_abort_sysc_fd] = {(syscall_t)sys_abort_sysc_fd, "abort_sysc_fd"},