/* kern/src/frontend.c - glue for forwarding kernel requests to the frontend appserver */
#ifdef __SHARC__
#pragma nosharc
#endif

#ifdef __DEPUTY__
#pragma nodeputy
#endif

#include <atomic.h>
#include <process.h>
#include <kmalloc.h>
#include <pmap.h>
#include <frontend.h>
#include <syscall.h>
#include <smp.h>
#include <slab.h>
#include <arch/arch.h>

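/* Mailbox shared with the frontend appserver: frontend_syscall() below writes
 * the syscall number, arguments, and pid into slots 1-6, rings the doorbell by
 * writing 0x80 into slot 0, then polls slot 7 for completion; the return value
 * and errno come back in slots 1 and 2. */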
volatile int magic_mem[10];

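/* Tell the frontend server about process creation; frontend_proc_free() below
 * does the same for teardown. */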
void
frontend_proc_init(struct proc *SAFE p)
{
#ifdef __CONFIG_APPSERVER__
	pid_t parent_id = p->ppid, id = p->pid;
	int32_t errno;
	if(frontend_syscall(parent_id,APPSERVER_SYSCALL_proc_init,id,0,0,0,&errno))
		panic("Front-end server couldn't initialize new process!");
#endif
}

void
frontend_proc_free(struct proc *SAFE p)
{
#ifdef __CONFIG_APPSERVER__
	int32_t errno;
	if(frontend_syscall(0,APPSERVER_SYSCALL_proc_free,p->pid,0,0,0,&errno))
		panic("Front-end server couldn't free process!");
#endif
}

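/* Copy len bytes from user address va into a freshly kmalloc'd kernel buffer.
 * Returns the buffer, or an ERR_PTR on allocation or copy failure. */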
void* user_memdup(struct proc* p, const void* va, int len)
{
	void* kva = NULL;
	if(len < 0 || (kva = kmalloc(len,0)) == NULL)
		return ERR_PTR(-ENOMEM);
	if(memcpy_from_user(p,kva,va,len))
	{
		kfree(kva);
		return ERR_PTR(-EINVAL);
	}

	return kva;
}

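/* Same as user_memdup(), but reports failure via errno on the current
 * trapframe and returns NULL instead of an ERR_PTR. */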
void* user_memdup_errno(struct proc* p, const void* va, int len)
{
	void* kva = user_memdup(p,va,len);
	if(IS_ERR(kva))
	{
		set_errno(current_tf,-PTR_ERR(kva));
		return NULL;
	}
	return kva;
}

void user_memdup_free(struct proc* p, void* va)
{
	kfree(va);
}

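/* Duplicate a NUL-terminated user string of at most max characters.  Scans
 * one page-aligned chunk at a time to find the terminator, then duplicates
 * the exact length with user_memdup().  Returns an ERR_PTR if the string is
 * unterminated within max bytes or the copy fails. */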
char* user_strdup(struct proc* p, const char* va0, int max)
{
	max++;
	char* kbuf = (char*)kmalloc(PGSIZE,0);
	if(kbuf == NULL)
		return ERR_PTR(-ENOMEM);

	int pos = 0, len = 0;
	const char* va = va0;
	while(max > 0 && len == 0)
	{
		int thislen = MIN(PGSIZE-(uintptr_t)va%PGSIZE,max);
		if(memcpy_from_user(p,kbuf,va,thislen))
		{
			kfree(kbuf);
			return ERR_PTR(-EINVAL);
		}

		const char* nullterm = memchr(kbuf,0,thislen);
		if(nullterm)
			len = pos+(nullterm-kbuf)+1;

		pos += thislen;
		va += thislen;
		max -= thislen;
	}

	kfree(kbuf);
	return len ? user_memdup(p,va0,len) : ERR_PTR(-EINVAL);
}

char* user_strdup_errno(struct proc* p, const char* va, int max)
{
	void* kva = user_strdup(p,va,max);
	if(IS_ERR(kva))
	{
		set_errno(current_tf,-PTR_ERR(kva));
		return NULL;
	}
	return kva;
}

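/* memcpy_to_user() wrapper that sets errno to EINVAL and returns -1 on
 * failure. */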
int memcpy_to_user_errno(struct proc* p, void* dst, const void* src, int len)
{
	if(memcpy_to_user(p,dst,src,len))
	{
		set_errno(current_tf,EINVAL);
		return -1;
	}
	return 0;
}

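/* kmalloc() wrapper that sets errno to ENOMEM on failure. */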
void* kmalloc_errno(int len)
{
	void* kva = NULL;
	if(len < 0 || (kva = kmalloc(len,0)) == NULL)
		set_errno(current_tf,ENOMEM);
	return kva;
}

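/* Slab cache for struct file objects; file_init() sets it up. */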
struct kmem_cache* struct_file_cache;
void file_init()
{
	struct_file_cache = kmem_cache_create("struct_file",
	                                      sizeof(struct file), 8, 0, 0, 0);
}

/* Read the page at pgoff of file f into physical page pa.  Anything in the
 * page past the EOF is zeroed.  Returns the frontend's pread result (bytes
 * read, or negative on error). */
error_t file_read_page(struct file* f, physaddr_t pa, size_t pgoff)
{
	int ret = frontend_syscall(0,APPSERVER_SYSCALL_pread,f->fd,pa,PGSIZE,
	                           pgoff*PGSIZE,NULL);
	if(ret >= 0)
		memset(KADDR(pa)+ret,0,PGSIZE-ret);
	return ret;
}

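/* Wrap fd from process p in a new struct file by asking the appserver to dup
 * it (APPSERVER_SYSCALL_kdup).  Returns NULL on failure. */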
struct file* file_open_from_fd(struct proc* p, int fd)
{
	struct file* f = NULL;
	if(!(f = kmem_cache_alloc(struct_file_cache,0)))
		goto out;

	f->fd = frontend_syscall(p->pid,APPSERVER_SYSCALL_kdup,fd,0,0,0,NULL);
	if(f->fd == -1)
	{
		kmem_cache_free(struct_file_cache,f);
		f = NULL;
		goto out;
	}
	spinlock_init(&f->lock);
	f->refcnt = 1;

out:
	return f;
}

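/* Open path on the frontend server with the given flags and mode, returning a
 * refcounted struct file, or NULL on failure. */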
struct file* file_open(const char* path, int oflag, int mode)
{
	struct file* f = NULL;
	// path is a kernel pointer, but it may still live below KERNBASE.
	// if so, copy it into a kmalloc'd buffer so PADDR() below is valid.
	char* malloced = NULL;
	if((uintptr_t)path < KERNBASE)
	{
		size_t len = strlen(path)+1;
		malloced = kmalloc(len,0);
		if(!malloced)
			goto out;
		path = memcpy(malloced,path,len);
	}

	if(!(f = kmem_cache_alloc(struct_file_cache,0)))
		goto out;

	f->fd = frontend_syscall(0,APPSERVER_SYSCALL_open,PADDR(path),
	                         oflag,mode,0,NULL);
	if(f->fd == -1)
	{
		kmem_cache_free(struct_file_cache,f);
		f = NULL;
		goto out;
	}
	spinlock_init(&f->lock);
	f->refcnt = 1;

out:
	if(malloced)
		kfree(malloced);
	return f;
}

void file_incref(struct file* f)
{
	spin_lock(&f->lock);
	f->refcnt++;
	spin_unlock(&f->lock);
}

void file_decref(struct file* f)
{
	// if you decref too many times, you'll clobber memory :(
	spin_lock(&f->lock);
	if(--f->refcnt == 0)
	{
		int ret = frontend_syscall(0,APPSERVER_SYSCALL_close,f->fd,0,0,0,NULL);
		assert(ret == 0);
		kmem_cache_free(struct_file_cache,f);
	}
	else
		spin_unlock(&f->lock);
}

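/* frontend_syscall() wrapper that also sets errno on the current trapframe
 * when the server reports an error (and a process is given). */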
int frontend_syscall_errno(struct proc* p, int n, int a0, int a1, int a2, int a3)
{
	int errno, ret = frontend_syscall(p->pid,n,a0,a1,a2,a3,&errno);
	if(errno && p)
		set_errno(current_tf,errno);
	return ret;
}

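/* Issue a syscall to the frontend appserver on behalf of pid via the
 * magic_mem mailbox.  Returns the server's return value; if errno is
 * non-NULL, the server's errno is stored there.  Without appserver support
 * this fails with ENOSYS. */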
int32_t frontend_syscall(pid_t pid, int32_t syscall_num,
                         uint32_t arg0, uint32_t arg1,
                         uint32_t arg2, uint32_t arg3, int32_t* errno)
{
#ifndef __CONFIG_APPSERVER__
	warn("No appserver support, requested syscall %d for proc %d", syscall_num,
	     pid);
	if(errno)
		*errno = ENOSYS;
	return -1;
#endif

#ifdef __i386__
	if (!irq_is_enabled())
		warn("IRQ is disabled in frontend_syscall %d for proc %d\n", syscall_num, pid);
#endif

	static spinlock_t lock = SPINLOCK_INITIALIZER;
	int32_t ret;

	// only one frontend request at a time.  interrupt handlers could also try
	// to do frontend requests, which would deadlock on this lock, so disable
	// them.
	spin_lock(&lock);

	// write the syscall into the magic memory mailbox and ring the doorbell
	magic_mem[7] = 0;
	magic_mem[1] = syscall_num;
	magic_mem[2] = arg0;
	magic_mem[3] = arg1;
	magic_mem[4] = arg2;
	magic_mem[5] = arg3;
	magic_mem[6] = pid;
	magic_mem[0] = 0x80;

	// spin until the front-end posts its response
	while(magic_mem[7] == 0)
		;

	ret = magic_mem[1];
	if(errno)
		*errno = magic_mem[2];

	spin_unlock(&lock);

	return ret;
}

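/* Kernel-message handler: ask the appserver to exit with 'code', then spin
 * forever so this core does no further work. */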
void __diediedie(trapframe_t* tf, uint32_t srcid, uint32_t code, uint32_t a1, uint32_t a2)
{
	int32_t errno;
	frontend_syscall(0,APPSERVER_SYSCALL_exit,(int)code,0,0,0,&errno);
	while(1);
}

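/* Shut the machine down via the appserver: send __diediedie() to every other
 * core, then run it locally. */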
void appserver_die(int code)
{
	int i;
	for(i = 0; i < num_cpus; i++)
		if(i != core_id())
			while(send_kernel_message(i,(amr_t)&__diediedie,(void*)code,0,0,
			                          KMSG_IMMEDIATE));

	// just in case.
	__diediedie(0,0,code,0,0);
}