/* See COPYRIGHT for copyright information. */
#include <ros/common.h>
#include <ros/ring_syscall.h>
#include <arch/types.h>
#include <arch/arch.h>
#include <assert.h>
#include <mm.h>
#include <pmap.h>
#include <process.h>
#include <smp.h>
#include <syscall.h>
#include <kref.h>
#include <hashtable.h>
#include <arsc_server.h>
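
/* Processes that have set up an async syscall ring (see sys_init_arsc()).
 * arsc_proc_lock protects insertions into the list. */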
struct proc_list arsc_proc_list = TAILQ_HEAD_INITIALIZER(arsc_proc_list);
spinlock_t arsc_proc_lock = SPINLOCK_INITIALIZER_IRQSAVE;
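
/* Defined below; forward declaration since arsc_server() calls it first. */
static intreg_t process_generic_syscalls(struct proc *p, size_t max);

/* Unbundle a queued request and run it through the normal syscall path. */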
intreg_t inline syscall_async(struct proc *p, syscall_req_t *call)
{
	struct syscall *sc = call->sc;

	return syscall(p, sc->num, sc->arg0, sc->arg1,
	               sc->arg2, sc->arg3, sc->arg4, sc->arg5);
}
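
/* Set up the async syscall ring for p: mmap a shared page into the process,
 * wire up the kernel's back ring, and put p on the list the arsc server
 * polls.  Returns the user VA of the ring. */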
syscall_sring_t *sys_init_arsc(struct proc *p)
{
	syscall_sring_t *sring;
	void *va;

	kref_get(&p->p_kref, 1);	/* we're storing an external ref here */
	/* TODO: need to pin this page in the future when swapping happens */
	va = do_mmap(p, MMAP_LOWEST_VA, SYSCALLRINGSIZE, PROT_READ | PROT_WRITE,
	             MAP_ANONYMOUS | MAP_POPULATE, NULL, 0);
	pte_t *pte = pgdir_walk(p->env_pgdir, (void*)va, 0);
	assert(pte);	/* MAP_POPULATE should have mapped the page already */
	sring = (syscall_sring_t*)(ppn2kva(PTE2PPN(*pte)));
	/* make sure we were able to allocate the shared ring */
	assert(sring != NULL);
	p->procdata->syscallring = sring;
	/* Initialize the generic syscall ring buffer */
	SHARED_RING_INIT(sring);
	BACK_RING_INIT(&p->syscallbackring, sring, SYSCALLRINGSIZE);
	spin_lock_irqsave(&arsc_proc_lock);
	TAILQ_INSERT_TAIL(&arsc_proc_list, p, proc_arsc_link);
	spin_unlock_irqsave(&arsc_proc_lock);
	return (syscall_sring_t*)va;
}
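
/* Main loop of the async syscall server (the signature matches a kernel
 * message handler's; the args are unused).  Spins until processes register,
 * then repeatedly walks the list, handling up to MAX_ASRC_BATCH ring
 * syscalls per process and dropping processes that are dying. */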
void arsc_server(uint32_t srcid, long a0, long a1, long a2)
{
	struct proc *p = NULL;

	TAILQ_INIT(&arsc_proc_list);
	while (1) {
		/* Wait for at least one process to register a ring */
		while (TAILQ_EMPTY(&arsc_proc_list))
			cpu_relax();
		TAILQ_FOREACH(p, &arsc_proc_list, proc_arsc_link) {
			/* Probably want to try to process a dying process's syscalls.
			 * If not, just move it to an else case. */
			process_generic_syscalls(p, MAX_ASRC_BATCH);
			if (p->state == PROC_DYING) {
				/* TODO: this removal should probably hold arsc_proc_lock */
				TAILQ_REMOVE(&arsc_proc_list, p, proc_arsc_link);
				proc_decref(p);	/* drop the external ref from sys_init_arsc() */
				/* Need to break out, so the TAILQ_FOREACH doesn't flip out.
				 * It's not fair, but we're not dealing with that yet anyway. */
				break;
			}
		}
	}
}
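
/* Consume up to 'max' unconsumed requests from p's syscall ring (max == 0
 * means no limit), run each one synchronously in p's address space, mark it
 * RES_ready, and push the responses.  Returns the number processed. */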
static intreg_t process_generic_syscalls(struct proc *p, size_t max)
{
	size_t count = 0;
	syscall_back_ring_t *sysbr = &p->syscallbackring;
	struct per_cpu_info *pcpui = &per_cpu_info[core_id()];
	struct proc *old_proc;

	// looking at a process not initialized to perform arsc
	if (!p->procdata->syscallring)
		return 0;
	/* Bail out if there is nothing to do */
	if (!RING_HAS_UNCONSUMED_REQUESTS(sysbr))
		return 0;
	/* Switch to the address space of the process, so we can handle the user
	 * pointers in its requests */
	old_proc = switch_to(p);
	// max is the most we'll process.  max == 0 means do as many as possible
	// TODO: check for initialization of the ring.
	while (RING_HAS_UNCONSUMED_REQUESTS(sysbr) && (!max || count < max)) {
		// ASSUME: one queue per process
		count++;
		//printk("DEBUG PRE: sring->req_prod: %d, sring->rsp_prod: %d\n",
		//       sysbr->sring->req_prod, sysbr->sring->rsp_prod);
		// might want to think about 0-ing this out, if we aren't
		// going to explicitly fill in all fields
		// this assumes we get our answer immediately for the syscall
		syscall_req_t *req = RING_GET_REQUEST(sysbr, ++sysbr->req_cons);
		pcpui->cur_kthread->sysc = req->sc;
		run_local_syscall(req->sc);	// TODO: a blocking call will block arsc as well
		// need to keep the slot in the ring buffer if the syscall is blocked
		(sysbr->rsp_prod_pvt)++;
		req->status = RES_ready;
		RING_PUSH_RESPONSES(sysbr);
		//printk("DEBUG POST: sring->req_prod: %d, sring->rsp_prod: %d\n",
		//       sysbr->sring->req_prod, sysbr->sring->rsp_prod);
	}
	/* switch back to whatever context we were in before */
	switch_back(p, old_proc);
	return (intreg_t)count;
}