1 /* See COPYRIGHT for copyright information. */
2
3 #ifdef __SHARC__
4 #pragma nosharc
5 #endif
6
7 #include <ros/common.h>
8 #include <arch/types.h>
9 #include <arch/arch.h>
10 #include <arch/mmu.h>
11 #include <arch/console.h>
12 #include <ros/timer.h>
13 #include <error.h>
14
15 #include <elf.h>
16 #include <string.h>
17 #include <assert.h>
18 #include <process.h>
19 #include <schedule.h>
20 #include <pmap.h>
21 #include <mm.h>
22 #include <trap.h>
23 #include <syscall.h>
24 #include <kmalloc.h>
25 #include <stdio.h>
26 #include <resource.h>
27 #include <frontend.h>
28 #include <colored_caches.h>
29 #include <arch/bitmask.h>
30 #include <kfs.h> // eventually replace this with vfs.h
31
32
33 #ifdef __CONFIG_NETWORKING__
34 #include <arch/nic_common.h>
35 extern int (*send_frame)(const char *CT(len) data, size_t len);
36 extern char device_mac[6];
37 #endif
38
39 /************** Utility Syscalls **************/
40
41 static int sys_null(void)
42 {
43         return 0;
44 }
45
46 // Writes 0xdeadbeef to 'num_writes' entries of the well-known array in the kernel
47 // address space.  It's just #defined to be some random 4MB chunk (which ought
48 // to be boot_alloced or something).  Meant to grab exclusive access to cache
49 // lines, to simulate doing something useful.
50 static int sys_cache_buster(struct proc *p, uint32_t num_writes,
51                              uint32_t num_pages, uint32_t flags)
52 { TRUSTEDBLOCK /* zra: this is not really part of the kernel */
53         #define BUSTER_ADDR             0xd0000000  // around 512 MB deep
54         #define MAX_WRITES              1048576*8
55         #define MAX_PAGES               32
56         #define INSERT_ADDR     (UINFO + 2*PGSIZE) // should be free for these tests
57         uint32_t* buster = (uint32_t*)BUSTER_ADDR;
58         static spinlock_t buster_lock = SPINLOCK_INITIALIZER;
59         uint64_t ticks = -1;
60         page_t* a_page[MAX_PAGES];
61
62         /* Strided Accesses or Not (adjust to step by cachelines) */
63         uint32_t stride = 1;
64         if (flags & BUSTER_STRIDED) {
65                 stride = 16;
66                 num_writes *= 16;
67         }
68
69         /* Shared Accesses or Not (adjust to use per-core regions)
70          * Careful, since this gives 8MB to each core, starting around 512MB.
71          * Also, doesn't separate memory for core 0 if it's an async call.
72          */
73         if (!(flags & BUSTER_SHARED))
74                 buster = (uint32_t*)(BUSTER_ADDR + core_id() * 0x00800000);
75
76         /* Start the timer, if we're asked to print this info */
77         if (flags & BUSTER_PRINT_TICKS)
78                 ticks = start_timing();
79
80         /* Allocate num_pages (up to MAX_PAGES), to simulate doing some more
81          * realistic work.  Note we don't write to these pages, even if we pick
82          * unshared.  Mostly due to the inconvenience of having to match up the
83          * number of pages with the number of writes.  And it's unnecessary.
84          */
85         if (num_pages) {
86                 spin_lock(&buster_lock);
87                 for (int i = 0; i < MIN(num_pages, MAX_PAGES); i++) {
88                         upage_alloc(p, &a_page[i],1);
89                         page_insert(p->env_pgdir, a_page[i], (void*)INSERT_ADDR + PGSIZE*i,
90                                     PTE_USER_RW);
91                 }
92                 spin_unlock(&buster_lock);
93         }
94
95         if (flags & BUSTER_LOCKED)
96                 spin_lock(&buster_lock);
97         for (int i = 0; i < MIN(num_writes, MAX_WRITES); i=i+stride)
98                 buster[i] = 0xdeadbeef;
99         if (flags & BUSTER_LOCKED)
100                 spin_unlock(&buster_lock);
101
102         if (num_pages) {
103                 spin_lock(&buster_lock);
104                 for (int i = 0; i < MIN(num_pages, MAX_PAGES); i++) {
105                         page_remove(p->env_pgdir, (void*)(INSERT_ADDR + PGSIZE * i));
106                         page_decref(a_page[i]);
107                 }
108                 spin_unlock(&buster_lock);
109         }
110
111         /* Print info */
112         if (flags & BUSTER_PRINT_TICKS) {
113                 ticks = stop_timing(ticks);
114                 printk("%llu,", ticks);
115         }
116         return 0;
117 }
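
/* Quick reference for the layout above (derived from the #defines): without
 * BUSTER_SHARED, each core gets a private 8 MB window, e.g. core 3 writes at
 * 0xd0000000 + 3 * 0x00800000 = 0xd1800000.  With BUSTER_STRIDED, the loop
 * steps 16 words (64 bytes) at a time and num_writes is pre-scaled by 16, so
 * the same number of stores lands on distinct cache lines. */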
118
119 static int sys_cache_invalidate(void)
120 {
121         #ifdef __i386__
122                 wbinvd();
123         #endif
124         return 0;
125 }
126
127 /* sys_reboot(): called directly from dispatch table. */
128
129 // Print a string to the system console.
130 // The string is exactly 'len' characters long.
131 // Destroys the environment on memory errors.
132 static ssize_t sys_cputs(env_t* e, const char *DANGEROUS s, size_t len)
133 {
134         // Check that the user has permission to read memory [s, s+len).
135         // Destroy the environment if not.
136         char *COUNT(len) _s = user_mem_assert(e, s, len, PTE_USER_RO);
137
138         // Print the string supplied by the user.
139         printk("%.*s", len, _s);
140         return (ssize_t)len;
141 }
142
143 // Read a character from the system console.
144 // Returns the character.
145 static uint16_t sys_cgetc(env_t* e)
146 {
147         uint16_t c;
148
149         // The cons_getc() primitive doesn't wait for a character,
150         // but the sys_cgetc() system call does.
151         while ((c = cons_getc()) == 0)
152                 cpu_relax();
153
154         return c;
155 }
156
157 /* Returns the id of the cpu this syscall is executed on. */
158 static uint32_t sys_getcpuid(void)
159 {
160         return core_id();
161 }
162
163 // TODO: Temporary hack until thread-local storage is implemented on i386
164 static size_t sys_getvcoreid(env_t* e)
165 {
166         if(e->state == PROC_RUNNING_S)
167                 return 0;
168
169         size_t i;
170         for(i = 0; i < e->procinfo->num_vcores; i++)
171                 if(core_id() == e->procinfo->vcoremap[i].pcoreid)
172                         return i;
173
174         panic("virtual core id not found in sys_getvcoreid()!");
175 }
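
/* Illustrative vcoremap lookup (this is what the loop above walks): for a
 * 3-vcore process with vcoremap[] = { {pcoreid 4}, {pcoreid 7}, {pcoreid 2} },
 * a call issued from physical core 7 returns vcoreid 1.  Once TLS exists,
 * this O(num_vcores) scan should become a per-core lookup instead. */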
176
177 /************** Process management syscalls **************/
178
179 /* Returns the calling process's pid */
180 static pid_t sys_getpid(struct proc *p)
181 {
182         return p->pid;
183 }
184
185 /*
186  * Creates a process found at the user string 'path'.  Currently uses KFS.
187  * Not runnable by default, so its state needs to be changed so that the
188  * next call to schedule() will try to run it.
189  * TODO: once we have a decent VFS, consider splitting this up
190  * and once there's an mmap, can have most of this in process.c
191  */
192 static int sys_proc_create(struct proc *p, const char *DANGEROUS path)
193 {
194         int pid = 0;
195         char tpath[MAX_PATH_LEN];
196         /*
197          * There's a bunch of issues with reading in the path, which we'll
198          * need to sort properly in the VFS.  Main concerns are TOCTOU (copy-in),
199          * whether or not it's a big deal that the pointer could be into kernel
200          * space, and resolving both of these without knowing the length of the
201          * string. (TODO)
202          * Change this so that all syscalls with a pointer take a length.
203          *
204          * zra: I've added this user_mem_strlcpy, which I think eliminates
205          * the TOCTOU issue.  Adding a length arg to this call would allow a more
206          * efficient implementation, though, since only one call to user_mem_check
207          * would be required.
208          */
209         int ret = user_mem_strlcpy(p,tpath, path, MAX_PATH_LEN, PTE_USER_RO);
210         int kfs_inode = kfs_lookup_path(tpath);
211         if (kfs_inode < 0)
212                 return -EINVAL;
213         struct proc *new_p = kfs_proc_create(kfs_inode);
214         pid = new_p->pid;
215         proc_decref(new_p, 1); // let go of the reference created in proc_create()
216         return pid;
217 }
218
219 /* Makes process PID runnable.  Consider moving the functionality to process.c */
220 static error_t sys_proc_run(struct proc *p, unsigned pid)
221 {
222         struct proc *target = pid2proc(pid);
223         error_t retval = 0;
224
225         if (!target)
226                 return -EBADPROC;
227         // note we can get interrupted here. it's not bad.
228         spin_lock_irqsave(&p->proc_lock);
229         // make sure we have access and it's in the right state to be activated
230         if (!proc_controls(p, target)) {
231                 // don't decref here: the decref below drops the pid2proc() ref
232                 retval = -EPERM;
233         } else if (target->state != PROC_CREATED) {
234                 // likewise, skip the decref here to avoid a double decref of target
235                 retval = -EINVAL;
236         } else {
237                 __proc_set_state(target, PROC_RUNNABLE_S);
238                 schedule_proc(target);
239         }
240         spin_unlock_irqsave(&p->proc_lock);
241         proc_decref(target, 1);
242         return retval;
243 }
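
/* Typical flow for the two calls above (the user-side wrapper names are
 * hypothetical, not defined in this tree):
 *
 *      pid_t kid = proc_create_path("/bin/hello");  // SYS_proc_create: KFS lookup, PROC_CREATED
 *      if (kid > 0)
 *              proc_run(kid);                       // SYS_proc_run: -> PROC_RUNNABLE_S + schedule_proc()
 *
 * The child only becomes runnable here; it actually executes on a later
 * schedule() pass. */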
244
245 /* Destroy proc pid.  If this is called by the dying process, it will never
246  * return.  o/w it will return 0 on success, or an error.  Errors include:
247  * - EBADPROC: if there is no such process with pid
248  * - EPERM: if caller does not control pid */
249 static error_t sys_proc_destroy(struct proc *p, pid_t pid, int exitcode)
250 {
251         error_t r;
252         struct proc *p_to_die = pid2proc(pid);
253
254         if (!p_to_die) {
255                 set_errno(current_tf, ESRCH);
256                 return -1;
257         }
258         if (!proc_controls(p, p_to_die)) {
259                 proc_decref(p_to_die, 1);
260                 set_errno(current_tf, EPERM);
261                 return -1;
262         }
263         if (p_to_die == p) {
264                 // syscall code and pid2proc both have edible references, only need 1.
265                 p->exitcode = exitcode;
266                 proc_decref(p, 1);
267                 printd("[PID %d] proc exiting gracefully (code %d)\n", p->pid,exitcode);
268         } else {
269                 panic("Destroying other processes is not supported yet.");
270                 //printk("[%d] destroying proc %d\n", p->pid, p_to_die->pid);
271         }
272         proc_destroy(p_to_die);
273         return ESUCCESS;
274 }
275
276 static int sys_proc_yield(struct proc *p)
277 {
278         proc_yield(p);
279         return 0;
280 }
281
282 static ssize_t sys_run_binary(env_t* e, void *DANGEROUS binary_buf, size_t len,
283                               procinfo_t*DANGEROUS procinfo, size_t num_colors)
284 {
285         env_t* env = proc_create(NULL,0);
286         assert(env != NULL);
287
288         // let me know if you use this.  we need to sort process creation better.
289         printk("sys_run_binary() is deprecated.  Use at your own risk.\n");
290         if(memcpy_from_user(e,e->procinfo,procinfo,sizeof(*procinfo)))
291                 return -1;
292         proc_init_procinfo(e);
293
294         env_load_icode(env,e,binary_buf,len);
295         __proc_set_state(env, PROC_RUNNABLE_S);
296         schedule_proc(env);
297         if(num_colors > 0) {
298                 env->cache_colors_map = cache_colors_map_alloc();
299                 for(int i=0; i<num_colors; i++)
300                         cache_color_alloc(llc_cache, env->cache_colors_map);
301         }
302         proc_decref(env, 1);
303         proc_yield(e);
304         return 0;
305 }
306
307 static ssize_t sys_fork(env_t* e)
308 {
309         // TODO: right now we only support fork for single-core processes
310         if(e->state != PROC_RUNNING_S)
311         {
312                 set_errno(current_tf,EINVAL);
313                 return -1;
314         }
315
316         env_t* env = proc_create(NULL,0);
317         assert(env != NULL);
318
319         env->heap_top = e->heap_top;
320         env->ppid = e->pid;
321         env->env_tf = *current_tf;
322
323         env->cache_colors_map = cache_colors_map_alloc();
324         for(int i=0; i < llc_cache->num_colors; i++)
325                 if(GET_BITMASK_BIT(e->cache_colors_map,i))
326                         cache_color_alloc(llc_cache, env->cache_colors_map);
327
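        /* Helper handed to env_user_mem_walk() below: for a present PTE it
         * allocates a fresh page in the child and copies the parent's data;
         * for a paged-out PTE it duplicates the page-fault info so the child
         * will fault the page in from the same backing file on first touch. */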
328         int copy_page(env_t* e, pte_t* pte, void* va, void* arg)
329         {
330                 env_t* env = (env_t*)arg;
331
332                 if(PAGE_PRESENT(*pte))
333                 {
334                         page_t* pp;
335                         if(upage_alloc(env,&pp,0))
336                                 return -1;
337                         if(page_insert(env->env_pgdir,pp,va,*pte & PTE_PERM))
338                         {
339                                 page_decref(pp);
340                                 return -1;
341                         }
342
343                         pagecopy(page2kva(pp),ppn2kva(PTE2PPN(*pte)));
344                 }
345                 else // PAGE_PAGED_OUT(*pte)
346                 {
347                         pte_t* newpte = pgdir_walk(env->env_pgdir,va,1);
348                         if(!newpte)
349                                 return -1;
350
351                         struct file* file = PTE2PFAULT_INFO(*pte)->file;
352                         pfault_info_t* newpfi = pfault_info_alloc(file);
353                         if(!newpfi)
354                                 return -1;
355
356                         *newpfi = *PTE2PFAULT_INFO(*pte);
357                         *newpte = PFAULT_INFO2PTE(newpfi);
358                 }
359
360                 return 0;
361         }
362
363         // TODO: (PC) this won't work.  Needs revisiting.
364         // copy procdata and procinfo
365         memcpy(env->procdata,e->procdata,sizeof(struct procdata));
366         memcpy(env->procinfo,e->procinfo,sizeof(struct procinfo));
367         env->procinfo->pid = env->pid;
368         env->procinfo->ppid = env->ppid;
369
370         // copy all memory below procdata
371         if(env_user_mem_walk(e,0,UDATA,&copy_page,env))
372         {
373                 proc_decref(env,2);
374                 set_errno(current_tf,ENOMEM);
375                 return -1;
376         }
377
378         __proc_set_state(env, PROC_RUNNABLE_S);
379         schedule_proc(env);
380
381         // don't decref the new process.
382         // that will happen when the parent waits for it.
383
384         printd("[PID %d] fork PID %d\n",e->pid,env->pid);
385
386         return env->pid;
387 }
388
389 intreg_t sys_exec(struct proc* p, int fd, procinfo_t* pi)
390 {
391         if(p->state != PROC_RUNNING_S)
392                 return -1;
393
394         int ret = -1;
395         struct file* f = file_open_from_fd(p,fd);
396         if(f == NULL) {
397                 set_errno(current_tf, EBADF);
398                 goto out;
399         }
400
401         // TODO: don't copy procinfo from the user (PC)
402         if(memcpy_from_user(p,p->procinfo,pi,sizeof(procinfo_t))) {
403                 proc_destroy(p);
404                 goto out;
405         }
406         proc_init_procinfo(p);
407         // TODO: don't do this either (PC)
408         memset(p->procdata, 0, sizeof(procdata_t));
409
410         env_user_mem_free(p,0,USTACKTOP);
411
412         if(load_elf(p,f))
413         {
414                 proc_destroy(p);
415                 goto out;
416         }
417         file_decref(f);
418         *current_tf = p->env_tf;
419         ret = 0;
420
421         printd("[PID %d] exec fd %d\n",p->pid,fd);
422
423 out:
424         return ret;
425 }
426
427 static ssize_t sys_trywait(env_t* e, pid_t pid, int* status)
428 {
429         struct proc* p = pid2proc(pid);
430
431         // TODO: this syscall is racy, so we only support it for single-core procs
432         if(e->state != PROC_RUNNING_S)
433                 return -1;
434
435         // TODO: need to use errno properly.  sadly, ROS error codes conflict..
436
437         if(p)
438         {
439                 ssize_t ret;
440
441                 if(current->pid == p->ppid)
442                 {
443                         if(p->state == PROC_DYING)
444                         {
445                                 memcpy_to_user(e,status,&p->exitcode,sizeof(int));
446                                 printd("[PID %d] waited for PID %d (code %d)\n",
447                                        e->pid,p->pid,p->exitcode);
448                                 ret = 0;
449                         }
450                         else // not dead yet
451                         {
452                                 set_errno(current_tf,0);
453                                 ret = -1;
454                         }
455                 }
456                 else // not a child of the calling process
457                 {
458                         set_errno(current_tf,1);
459                         ret = -1;
460                 }
461
462                 // if the wait succeeded, decref twice
463                 proc_decref(p,1 + (ret == 0));
464                 return ret;
465         }
466
467         set_errno(current_tf,1);
468         return -1;
469 }
470
471 /************** Memory Management Syscalls **************/
472
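/* mmap takes six arguments but the syscall path below only carries five
 * register args (a1..a5 in syscall()), so the last three (presumably flags,
 * fd, and offset) arrive packed in user memory via a456 and are copied in. */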
473 static void *sys_mmap(struct proc* p, uintreg_t a1, uintreg_t a2, uintreg_t a3,
474                       uintreg_t* a456)
475 {
476         uintreg_t _a456[3];
477         if(memcpy_from_user(p,_a456,a456,3*sizeof(uintreg_t)))
478                 sys_proc_destroy(p,p->pid,-1);
479         return mmap(p,a1,a2,a3,_a456[0],_a456[1],_a456[2]);
480 }
481
482 static intreg_t sys_mprotect(struct proc* p, void* addr, size_t len, int prot)
483 {
484         return mprotect(p, addr, len, prot);
485 }
486
487 static intreg_t sys_munmap(struct proc* p, void* addr, size_t len)
488 {
489         return munmap(p, addr, len);
490 }
491
492 static void* sys_brk(struct proc *p, void* addr) {
493         ssize_t range;
494
495         spin_lock_irqsave(&p->proc_lock);
496
497         if((addr < p->procinfo->heap_bottom) || (addr >= (void*)BRK_END))
498                 goto out;
499
500         uintptr_t real_heap_top = ROUNDUP((uintptr_t)p->heap_top,PGSIZE);
501         uintptr_t real_new_heap_top = ROUNDUP((uintptr_t)addr,PGSIZE);
502         range = real_new_heap_top - real_heap_top;
503
504         if (range > 0) {
505                 if(__do_mmap(p, real_heap_top, range, PROT_READ | PROT_WRITE,
506                              MAP_FIXED | MAP_ANONYMOUS, NULL, 0) == MAP_FAILED)
507                         goto out;
508         }
509         else if (range < 0) {
510                 if(__munmap(p, (void*)real_new_heap_top, -range))
511                         goto out;
512         }
513         p->heap_top = addr;
514
515 out:
516         spin_unlock_irqsave(&p->proc_lock);
517         return p->heap_top;
518 }
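
/* Worked example for the rounding above (illustrative numbers): with
 * PGSIZE = 4096, heap_top = 0x8000100 and a brk request of addr = 0x8003000,
 * real_heap_top rounds up to 0x8001000 and real_new_heap_top to 0x8003000,
 * so __do_mmap() maps the 0x2000-byte gap; a shrinking request instead goes
 * through __munmap() on the now-unused tail. */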
519
520 static ssize_t sys_shared_page_alloc(env_t* p1,
521                                      void**DANGEROUS _addr, pid_t p2_id,
522                                      int p1_flags, int p2_flags
523                                     )
524 {
525         //if (!VALID_USER_PERMS(p1_flags)) return -EPERM;
526         //if (!VALID_USER_PERMS(p2_flags)) return -EPERM;
527
528         void * COUNT(1) * COUNT(1) addr = user_mem_assert(p1, _addr, sizeof(void *),
529                                                       PTE_USER_RW);
530         struct proc *p2 = pid2proc(p2_id);
531         if (!p2)
532                 return -EBADPROC;
533
534         page_t* page;
535         error_t e = upage_alloc(p1, &page,1);
536         if (e < 0) {
537                 proc_decref(p2, 1);
538                 return e;
539         }
540
541         void* p2_addr = page_insert_in_range(p2->env_pgdir, page,
542                         (void*SNT)UTEXT, (void*SNT)UTOP, p2_flags);
543         if (p2_addr == NULL) {
544                 page_free(page);
545                 proc_decref(p2, 1);
546                 return -EFAIL;
547         }
548
549         void* p1_addr = page_insert_in_range(p1->env_pgdir, page,
550                         (void*SNT)UTEXT, (void*SNT)UTOP, p1_flags);
551         if(p1_addr == NULL) {
552                 page_remove(p2->env_pgdir, p2_addr);
553                 page_free(page);
554                 proc_decref(p2, 1);
555                 return -EFAIL;
556         }
557         *addr = p1_addr;
558         proc_decref(p2, 1);
559         return ESUCCESS;
560 }
561
562 static int sys_shared_page_free(env_t* p1, void*DANGEROUS addr, pid_t p2)
563 {
564         return -1;
565 }
566
567
568 /************** Resource Request Syscalls **************/
569
570 /* sys_resource_req(): called directly from dispatch table. */
571
572 /************** Platform Specific Syscalls **************/
573
574 //Read a buffer over the serial port
575 static ssize_t sys_serial_read(env_t* e, char *DANGEROUS _buf, size_t len)
576 {
577         if (len == 0)
578                 return 0;
579
580         #ifdef __CONFIG_SERIAL_IO__
581                 char *COUNT(len) buf = user_mem_assert(e, _buf, len, PTE_USER_RW);
582                 size_t bytes_read = 0;
583                 int c;
584                 while((c = serial_read_byte()) != -1) {
585                         buf[bytes_read++] = (uint8_t)c;
586                         if(bytes_read == len) break;
587                 }
588                 return (ssize_t)bytes_read;
589         #else
590                 return -EINVAL;
591         #endif
592 }
593
594 //Write a buffer over the serial port
595 static ssize_t sys_serial_write(env_t* e, const char *DANGEROUS buf, size_t len)
596 {
597         if (len == 0)
598                 return 0;
599         #ifdef __CONFIG_SERIAL_IO__
600                 char *COUNT(len) _buf = user_mem_assert(e, buf, len, PTE_USER_RO);
601                 for(int i = 0; i < len; i++)
602                         serial_send_byte(_buf[i]);
603                 return (ssize_t)len;
604         #else
605                 return -EINVAL;
606         #endif
607 }
608
609 #ifdef __CONFIG_NETWORKING__
610 // This is not a syscall we want.  It's hacky, and is only here to exercise the syscall path until we get a real network stack.
611 static ssize_t sys_eth_read(env_t* e, char *DANGEROUS buf)
612 {
613         if (eth_up) {
614
615                 uint32_t len;
616                 char *ptr;
617
618                 spin_lock(&packet_buffers_lock);
619
620                 if (num_packet_buffers == 0) {
621                         spin_unlock(&packet_buffers_lock);
622                         return 0;
623                 }
624
625                 ptr = packet_buffers[packet_buffers_head];
626                 len = packet_buffers_sizes[packet_buffers_head];
627
628                 num_packet_buffers--;
629                 packet_buffers_head = (packet_buffers_head + 1) % MAX_PACKET_BUFFERS;
630
631                 spin_unlock(&packet_buffers_lock);
632
633                 char* _buf = user_mem_assert(e, buf, len, PTE_U);
634
635                 memcpy(_buf, ptr, len);
636
637                 kfree(ptr);
638
639                 return len;
640         }
641         else
642                 return -EINVAL;
643 }
644
645 // This is not a syscall we want.  It's hacky, and is only here to exercise the syscall path until we get a real network stack.
646 static ssize_t sys_eth_write(env_t* e, const char *DANGEROUS buf, size_t len)
647 {
648         if (eth_up) {
649
650                 if (len == 0)
651                         return 0;
652
653                 // HACK TO BYPASS HACK (at least bounds-check the user buffer)
654                 int just_sent = send_frame(user_mem_assert(e, buf, len, PTE_U), len);
655
656                 if (just_sent < 0) {
657                         printk("Packet send fail\n");
658                         return 0;
659                 }
660
661                 return just_sent;
662
663                 // END OF RECURSIVE HACK
664 /*
665                 char *COUNT(len) _buf = user_mem_assert(e, buf, len, PTE_U);
666                 int total_sent = 0;
667                 int just_sent = 0;
668                 int cur_packet_len = 0;
669                 while (total_sent != len) {
670                         cur_packet_len = ((len - total_sent) > MTU) ? MTU : (len - total_sent);
671                         char dest_mac[6] = APPSERVER_MAC_ADDRESS;
672                         char* wrap_buffer = eth_wrap(_buf + total_sent, cur_packet_len, device_mac, dest_mac, APPSERVER_PORT);
673                         just_sent = send_frame(wrap_buffer, cur_packet_len + sizeof(struct ETH_Header));
674
675                         if (just_sent < 0)
676                                 return 0; // This should be an error code of its own
677
678                         if (wrap_buffer)
679                                 kfree(wrap_buffer);
680
681                         total_sent += cur_packet_len;
682                 }
683
684                 return (ssize_t)len;
685 */
686         }
687         else
688                 return -EINVAL;
689 }
690
691 static ssize_t sys_eth_get_mac_addr(env_t* e, char *DANGEROUS buf) 
692 {
693         if (eth_up) {
694                 char* _buf = user_mem_assert(e, buf, 6, PTE_U);
695                 memcpy(_buf, device_mac, 6);
696                 return 0;
697         }
698         else
699                 return -EINVAL;
700 }
701
702 static int sys_eth_recv_check(env_t* e) 
703 {
704         if (num_packet_buffers != 0) 
705                 return 1;
706         else
707                 return 0;
708 }
709
710 #endif // Network
711
712 // Syscalls below here are serviced by the appserver for now.
713 #define ufe(which,a0,a1,a2,a3) \
714         frontend_syscall_errno(p,APPSERVER_SYSCALL_##which,\
715                            (int)(a0),(int)(a1),(int)(a2),(int)(a3))
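
/* For example, ufe(write,fd,PADDR(kbuf),len,0) in sys_write() below expands
 * (with int casts) to frontend_syscall_errno(p, APPSERVER_SYSCALL_write, fd,
 * PADDR(kbuf), len, 0).  Buffers are passed to the appserver by physical
 * address, which is why every wrapper stages user data in a kernel buffer. */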
716
717 intreg_t sys_write(struct proc* p, int fd, const void* buf, int len)
718 {
719         void* kbuf = user_memdup_errno(p,buf,len);
720         if(kbuf == NULL)
721                 return -1;
722         int ret = ufe(write,fd,PADDR(kbuf),len,0);
723         user_memdup_free(p,kbuf);
724         return ret;
725 }
726
727 intreg_t sys_read(struct proc* p, int fd, void* buf, int len)
728 {
729         void* kbuf = kmalloc_errno(len);
730         if(kbuf == NULL)
731                 return -1;
732         int ret = ufe(read,fd,PADDR(kbuf),len,0);
733         if(ret != -1 && memcpy_to_user_errno(p,buf,kbuf,len))
734                 ret = -1;
735         user_memdup_free(p,kbuf);
736         return ret;
737 }
738
739 intreg_t sys_pwrite(struct proc* p, int fd, const void* buf, int len, int offset)
740 {
741         void* kbuf = user_memdup_errno(p,buf,len);
742         if(kbuf == NULL)
743                 return -1;
744         int ret = ufe(pwrite,fd,PADDR(kbuf),len,offset);
745         user_memdup_free(p,kbuf);
746         return ret;
747 }
748
749 intreg_t sys_pread(struct proc* p, int fd, void* buf, int len, int offset)
750 {
751         void* kbuf = kmalloc_errno(len);
752         if(kbuf == NULL)
753                 return -1;
754         int ret = ufe(pread,fd,PADDR(kbuf),len,offset);
755         if(ret != -1 && memcpy_to_user_errno(p,buf,kbuf,len))
756                 ret = -1;
757         user_memdup_free(p,kbuf);
758         return ret;
759 }
760
761 intreg_t sys_open(struct proc* p, const char* path, int oflag, int mode)
762 {
763         char* fn = user_strdup_errno(p,path,PGSIZE);
764         if(fn == NULL)
765                 return -1;
766         int ret = ufe(open,PADDR(fn),oflag,mode,0);
767         user_memdup_free(p,fn);
768         return ret;
769 }
770 intreg_t sys_close(struct proc* p, int fd)
771 {
772         return ufe(close,fd,0,0,0);
773 }
774
775 #define NEWLIB_STAT_SIZE 64
776 intreg_t sys_fstat(struct proc* p, int fd, void* buf)
777 {
778         int *kbuf = kmalloc(NEWLIB_STAT_SIZE, 0);
779         int ret = ufe(fstat,fd,PADDR(kbuf),0,0);
780         if(ret != -1 && memcpy_to_user_errno(p,buf,kbuf,NEWLIB_STAT_SIZE))
781                 ret = -1;
782         kfree(kbuf);
783         return ret;
784 }
785
786 intreg_t sys_stat(struct proc* p, const char* path, void* buf)
787 {
788         char* fn = user_strdup_errno(p,path,PGSIZE);
789         if(fn == NULL)
790                 return -1;
791
792         int *kbuf = kmalloc(NEWLIB_STAT_SIZE, 0);
793         int ret = ufe(stat,PADDR(fn),PADDR(kbuf),0,0);
794         if(ret != -1 && memcpy_to_user_errno(p,buf,kbuf,NEWLIB_STAT_SIZE))
795                 ret = -1;
796
797         user_memdup_free(p,fn);
798         kfree(kbuf);
799         return ret;
800 }
801
802 intreg_t sys_lstat(struct proc* p, const char* path, void* buf)
803 {
804         char* fn = user_strdup_errno(p,path,PGSIZE);
805         if(fn == NULL)
806                 return -1;
807
808         int *kbuf = kmalloc(NEWLIB_STAT_SIZE, 0);
809         int ret = ufe(lstat,PADDR(fn),PADDR(kbuf),0,0);
810         if(ret != -1 && memcpy_to_user_errno(p,buf,kbuf,NEWLIB_STAT_SIZE))
811                 ret = -1;
812
813         user_memdup_free(p,fn);
814         kfree(kbuf);
815         return ret;
816 }
817
818 intreg_t sys_fcntl(struct proc* p, int fd, int cmd, int arg)
819 {
820         return ufe(fcntl,fd,cmd,arg,0);
821 }
822
823 intreg_t sys_access(struct proc* p, const char* path, int type)
824 {
825         char* fn = user_strdup_errno(p,path,PGSIZE);
826         if(fn == NULL)
827                 return -1;
828         int ret = ufe(access,PADDR(fn),type,0,0);
829         user_memdup_free(p,fn);
830         return ret;
831 }
832
833 intreg_t sys_umask(struct proc* p, int mask)
834 {
835         return ufe(umask,mask,0,0,0);
836 }
837
838 intreg_t sys_chmod(struct proc* p, const char* path, int mode)
839 {
840         char* fn = user_strdup_errno(p,path,PGSIZE);
841         if(fn == NULL)
842                 return -1;
843         int ret = ufe(chmod,PADDR(fn),mode,0,0);
844         user_memdup_free(p,fn);
845         return ret;
846 }
847
848 intreg_t sys_lseek(struct proc* p, int fd, int offset, int whence)
849 {
850         return ufe(lseek,fd,offset,whence,0);
851 }
852
853 intreg_t sys_link(struct proc* p, const char* _old, const char* _new)
854 {
855         char* oldpath = user_strdup_errno(p,_old,PGSIZE);
856         if(oldpath == NULL)
857                 return -1;
858
859         char* newpath = user_strdup_errno(p,_new,PGSIZE);
860         if(newpath == NULL)
861         {
862                 user_memdup_free(p,oldpath);
863                 return -1;
864         }
865
866         int ret = ufe(link,PADDR(oldpath),PADDR(newpath),0,0);
867         user_memdup_free(p,oldpath);
868         user_memdup_free(p,newpath);
869         return ret;
870 }
871
872 intreg_t sys_unlink(struct proc* p, const char* path)
873 {
874         char* fn = user_strdup_errno(p,path,PGSIZE);
875         if(fn == NULL)
876                 return -1;
877         int ret = ufe(unlink,PADDR(fn),0,0,0);
878         user_memdup_free(p,fn);
879         return ret;
880 }
881
882 intreg_t sys_chdir(struct proc* p, const char* path)
883 {
884         char* fn = user_strdup_errno(p,path,PGSIZE);
885         if(fn == NULL)
886                 return -1;
887         int ret = ufe(chdir,PADDR(fn),0,0,0);
888         user_memdup_free(p,fn);
889         return ret;
890 }
891
892 intreg_t sys_getcwd(struct proc* p, char* pwd, int size)
893 {
894         void* kbuf = kmalloc_errno(size);
895         if(kbuf == NULL)
896                 return -1;
897         int ret = ufe(read,PADDR(kbuf),size,0,0); // XXX: presumably should forward a getcwd-style request, not 'read'
898         if(ret != -1 && memcpy_to_user_errno(p,pwd,kbuf,strnlen(kbuf,size)))
899                 ret = -1;
900         user_memdup_free(p,kbuf);
901         return ret;
902 }
903
904 intreg_t sys_gettimeofday(struct proc* p, int* buf)
905 {
906         static spinlock_t gtod_lock = SPINLOCK_INITIALIZER;
907         static int t0 = 0;
908
909         spin_lock(&gtod_lock);
910         if(t0 == 0)
911 #ifdef __CONFIG_APPSERVER__
912                 t0 = ufe(time,0,0,0,0);
913 #else
914                 // Nanwan's birthday, bitches!!
915                 t0 = 1242129600;
916 #endif 
917         spin_unlock(&gtod_lock);
918
919         long long dt = read_tsc();
920         int kbuf[2] = {t0+dt/system_timing.tsc_freq,
921             (dt%system_timing.tsc_freq)*1000000/system_timing.tsc_freq};
922
923         return memcpy_to_user_errno(p,buf,kbuf,sizeof(kbuf));
924 }
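
/* Illustrative conversion: with tsc_freq = 1,000,000,000 and dt =
 * 2,500,000,000 cycles since boot, kbuf becomes { t0 + 2, 500000 }: whole
 * seconds since the (possibly hard-coded) epoch t0, plus microseconds. */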
925
926 #define SIZEOF_STRUCT_TERMIOS 60
927 intreg_t sys_tcgetattr(struct proc* p, int fd, void* termios_p)
928 {
929         int* kbuf = kmalloc(SIZEOF_STRUCT_TERMIOS,0);
930         int ret = ufe(tcgetattr,fd,PADDR(kbuf),0,0);
931         if(ret != -1 && memcpy_to_user_errno(p,termios_p,kbuf,SIZEOF_STRUCT_TERMIOS))
932                 ret = -1;
933         kfree(kbuf);
934         return ret;
935 }
936
937 intreg_t sys_tcsetattr(struct proc* p, int fd, int optional_actions, const void* termios_p)
938 {
939         void* kbuf = user_memdup_errno(p,termios_p,SIZEOF_STRUCT_TERMIOS);
940         if(kbuf == NULL)
941                 return -1;
942         int ret = ufe(tcsetattr,fd,optional_actions,PADDR(kbuf),0);
943         user_memdup_free(p,kbuf);
944         return ret;
945 }
946 /************** Syscall Invocation **************/
947
948 /* Executes the given syscall.
949  *
950  * Note tf is passed in, which points to the tf of the context on the kernel
951  * stack.  If any syscall needs to block, it needs to save this info, as well as
952  * any silly state.
953  *
954  * TODO: Build a dispatch table instead of switching on the syscallno
955  * Dispatches to the correct kernel function, passing the arguments.
956  */
957 intreg_t syscall(struct proc *p, uintreg_t syscallno, uintreg_t a1,
958                  uintreg_t a2, uintreg_t a3, uintreg_t a4, uintreg_t a5)
959 {
960         // Initialize the return value and error code returned to 0
961         proc_set_syscall_retval(current_tf, 0);
962         set_errno(current_tf,0);
963
964         typedef intreg_t (*syscall_t)(struct proc*,uintreg_t,uintreg_t,
965                                       uintreg_t,uintreg_t,uintreg_t);
966
967         const static syscall_t syscall_table[] = {
968                 [SYS_null] = (syscall_t)sys_null,
969                 [SYS_cache_buster] = (syscall_t)sys_cache_buster,
970                 [SYS_cache_invalidate] = (syscall_t)sys_cache_invalidate,
971                 [SYS_reboot] = (syscall_t)reboot,
972                 [SYS_cputs] = (syscall_t)sys_cputs,
973                 [SYS_cgetc] = (syscall_t)sys_cgetc,
974                 [SYS_getcpuid] = (syscall_t)sys_getcpuid,
975                 [SYS_getvcoreid] = (syscall_t)sys_getvcoreid,
976                 [SYS_getpid] = (syscall_t)sys_getpid,
977                 [SYS_proc_create] = (syscall_t)sys_proc_create,
978                 [SYS_proc_run] = (syscall_t)sys_proc_run,
979                 [SYS_proc_destroy] = (syscall_t)sys_proc_destroy,
980                 [SYS_yield] = (syscall_t)sys_proc_yield,
981                 [SYS_run_binary] = (syscall_t)sys_run_binary,
982                 [SYS_fork] = (syscall_t)sys_fork,
983                 [SYS_exec] = (syscall_t)sys_exec,
984                 [SYS_trywait] = (syscall_t)sys_trywait,
985                 [SYS_mmap] = (syscall_t)sys_mmap,
986                 [SYS_munmap] = (syscall_t)sys_munmap,
987                 [SYS_mprotect] = (syscall_t)sys_mprotect,
988                 [SYS_brk] = (syscall_t)sys_brk,
989                 [SYS_shared_page_alloc] = (syscall_t)sys_shared_page_alloc,
990                 [SYS_shared_page_free] = (syscall_t)sys_shared_page_free,
991                 [SYS_resource_req] = (syscall_t)resource_req,
992         #ifdef __CONFIG_SERIAL_IO__
993                 [SYS_serial_read] = (syscall_t)sys_serial_read,
994                 [SYS_serial_write] = (syscall_t)sys_serial_write,
995         #endif
996         #ifdef __CONFIG_NETWORKING__
997                 [SYS_eth_read] = (syscall_t)sys_eth_read,
998                 [SYS_eth_write] = (syscall_t)sys_eth_write,
999                 [SYS_eth_get_mac_addr] = (syscall_t)sys_eth_get_mac_addr,
1000                 [SYS_eth_recv_check] = (syscall_t)sys_eth_recv_check,
1001         #endif
1002                 // Syscalls serviced by the appserver for now.
1003                 [SYS_read] = (syscall_t)sys_read,
1004                 [SYS_write] = (syscall_t)sys_write,
1005                 [SYS_open] = (syscall_t)sys_open,
1006                 [SYS_close] = (syscall_t)sys_close,
1007                 [SYS_fstat] = (syscall_t)sys_fstat,
1008                 [SYS_stat] = (syscall_t)sys_stat,
1009                 [SYS_lstat] = (syscall_t)sys_lstat,
1010                 [SYS_fcntl] = (syscall_t)sys_fcntl,
1011                 [SYS_access] = (syscall_t)sys_access,
1012                 [SYS_umask] = (syscall_t)sys_umask,
1013                 [SYS_chmod] = (syscall_t)sys_chmod,
1014                 [SYS_lseek] = (syscall_t)sys_lseek,
1015                 [SYS_link] = (syscall_t)sys_link,
1016                 [SYS_unlink] = (syscall_t)sys_unlink,
1017                 [SYS_chdir] = (syscall_t)sys_chdir,
1018                 [SYS_getcwd] = (syscall_t)sys_getcwd,
1019                 [SYS_gettimeofday] = (syscall_t)sys_gettimeofday,
1020                 [SYS_tcgetattr] = (syscall_t)sys_tcgetattr,
1021                 [SYS_tcsetattr] = (syscall_t)sys_tcsetattr
1022         };
1023
1024         const int max_syscall = sizeof(syscall_table)/sizeof(syscall_table[0]);
1025
1026         //printk("Incoming syscall on core: %d number: %d\n    a1: %x\n   "
1027         //       " a2: %x\n    a3: %x\n    a4: %x\n    a5: %x\n", core_id(),
1028         //       syscallno, a1, a2, a3, a4, a5);
1029
1030         if(syscallno >= max_syscall || syscall_table[syscallno] == NULL)
1031                 panic("Invalid syscall number %d for proc %d!", syscallno, p->pid);
1032
1033         return syscall_table[syscallno](p,a1,a2,a3,a4,a5);
1034 }
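
/* Extending the interface is just a matter of adding a designated initializer
 * to the table above, e.g. (hypothetical entry; SYS_frob/sys_frob don't exist):
 *
 *      [SYS_frob] = (syscall_t)sys_frob,
 *
 * Slots that are never assigned stay NULL, which is exactly what the NULL
 * check before the dispatch catches. */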
1035
1036 intreg_t syscall_async(struct proc *p, syscall_req_t *call)
1037 {
1038         return syscall(p, call->num, call->args[0], call->args[1],
1039                        call->args[2], call->args[3], call->args[4]);
1040 }
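
/* process_generic_syscalls() below drains the shared request ring: each
 * request is consumed at req_cons, run synchronously through syscall_async(),
 * and its response is written back into the same slot before rsp_prod_pvt is
 * advanced and RING_PUSH_RESPONSES() makes it visible to userspace. */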
1041
1042 /* You should already have a refcnt'd ref to p before calling this */
1043 intreg_t process_generic_syscalls(struct proc *p, size_t max)
1044 {
1045         size_t count = 0;
1046         syscall_back_ring_t* sysbr = &p->syscallbackring;
1047
1048         /* make sure the proc is still alive, and keep it from dying out from under us.
1049          * incref will return ESUCCESS on success.  This might need some thought
1050          * regarding when the incref should have happened (like by whoever passed us
1051          * the *p). */
1052         // TODO: ought to be unnecessary, if you called this right, kept here for
1053         // now in case anyone actually uses the ARSCs.
1054         proc_incref(p, 1);
1055
1056         // max is the most we'll process.  max = 0 means do as many as possible
1057         while (RING_HAS_UNCONSUMED_REQUESTS(sysbr) && ((!max)||(count < max)) ) {
1058                 if (!count) {
1059                         // ASSUME: one queue per process
1060                         // only switch cr3 for the very first request for this queue
1061                         // need to switch to the right context, so we can handle the user pointer
1062                         // that points to a data payload of the syscall
1063                         lcr3(p->env_cr3);
1064                 }
1065                 count++;
1066                 //printk("DEBUG PRE: sring->req_prod: %d, sring->rsp_prod: %d\n",
1067                 //         sysbr->sring->req_prod, sysbr->sring->rsp_prod);
1068                 // might want to think about 0-ing this out, if we aren't
1069                 // going to explicitly fill in all fields
1070                 syscall_rsp_t rsp;
1071                 // this assumes we get our answer immediately for the syscall.
1072                 syscall_req_t* req = RING_GET_REQUEST(sysbr, ++(sysbr->req_cons));
1073                 rsp.retval = syscall_async(p, req);
1074                 // write response into the slot it came from
1075                 memcpy(req, &rsp, sizeof(syscall_rsp_t));
1076                 // update our counter for what we've produced (assumes we went in order!)
1077                 (sysbr->rsp_prod_pvt)++;
1078                 RING_PUSH_RESPONSES(sysbr);
1079                 //printk("DEBUG POST: sring->req_prod: %d, sring->rsp_prod: %d\n",
1080                 //         sysbr->sring->req_prod, sysbr->sring->rsp_prod);
1081         }
1082         // load sane page tables (and don't rely on decref to do it for you).
1083         lcr3(boot_cr3);
1084         proc_decref(p, 1);
1085         return (intreg_t)count;
1086 }
1087