3 #include <ros/common.h>
4 #include <ros/syscall.h>
5 #include <ros/ring_syscall.h>
6 #include <ros/sysevent.h>
10 #include <sys/param.h>
11 #include <arch/atomic.h>
// Descriptor free-pools handed out by get_sys_desc()/get_async_desc() and
// primed in init_arc().  (See the TODO there: the pools may no longer be
// strictly necessary.)
14 syscall_desc_pool_t syscall_desc_pool;
15 async_desc_pool_t async_desc_pool;
// The async (group) call currently being assembled; set/cleared by
// get_all_desc().
16 async_desc_t* current_async_desc;
// The single global ring-based channel to the kernel for async syscalls.
18 struct arsc_channel global_ac;
// Initialize an arsc channel: init its lock, ask the kernel for the shared
// ring page (sys_init_arsc), build the userspace front ring over that page,
// and prime the global descriptor pools.
// NOTE(review): this view of the file is missing interior lines (original
// numbering is non-contiguous), e.g. the function braces.
20 void init_arc(struct arsc_channel* ac)
22 // Set up the front ring for the general syscall ring
23 // and the back ring for the general sysevent ring
24 mcs_lock_init(&ac->aclock);
// The kernel returns the shared page backing the syscall ring.
25 ac->ring_page = (syscall_sring_t*)sys_init_arsc();
27 FRONT_RING_INIT(&ac->sysfr, ac->ring_page, SYSCALLRINGSIZE);
28 //BACK_RING_INIT(&syseventbackring, &(__procdata.syseventring), SYSEVENTRINGSIZE);
29 //TODO: eventually rethink the desc pools; they are here but no longer necessary
30 POOL_INIT(&syscall_desc_pool, MAX_SYSCALLS);
31 POOL_INIT(&async_desc_pool, MAX_ASYNCCALLS);
34 // Wait on all syscalls within this async call. TODO - timeout or something?
// Drains desc->syslist: waits on each member syscall, accumulates the most
// negative error seen (MIN), returns each syscall desc to its pool, runs the
// group's cleanup hook, and finally frees the group desc itself.
// NOTE(review): locals `d`, `err`, and `retval` (and the final return) are
// declared on lines not visible in this view — confirm against the full file.
35 int waiton_group_call(async_desc_t* desc, async_rsp_t* rsp)
37 syscall_rsp_t syscall_rsp;
46 while (!(TAILQ_EMPTY(&desc->syslist))) {
// Always wait on the head; entries are removed as they complete.
47 d = TAILQ_FIRST(&desc->syslist);
48 err = waiton_syscall(d);
49 // TODO: processing the retval out of rsp here. might be specific to
50 // the async call. do we want to accumulate? return any negative
51 // values? depends what we want from the return value, so we might
52 // have to pass in a function that is used to do the processing and
53 // pass the answer back out in rsp.
54 //rsp->retval += syscall_rsp.retval; // For example
// Keep the smallest (most negative) result as the group's return value.
55 retval = MIN(retval, err);
56 // remove from the list and free the syscall desc
57 TAILQ_REMOVE(&desc->syslist, d, next);
58 POOL_PUT(&syscall_desc_pool, d);
60 // run a cleanup function for this desc, if available
// NOTE(review): presumably guarded by a NULL check on desc->cleanup on a
// line not visible here — confirm; an unguarded call would fault when no
// cleanup hook was registered.
62 desc->cleanup(desc->data);
63 // free the asynccall desc
64 POOL_PUT(&async_desc_pool, desc);
68 // Finds a free async_desc_t, on which you can wait for a series of syscalls
// Pops a group descriptor from async_desc_pool, zeroes it, and initializes
// its (empty) syscall list.  NOTE(review): the pool-empty (NULL) check and
// the return statement are on lines not visible here — confirm.
69 async_desc_t* get_async_desc(void)
71 async_desc_t* desc = POOL_GET(&async_desc_pool);
73 // Clear out any data that was in the old desc
74 memset(desc, 0, sizeof(*desc));
75 TAILQ_INIT(&desc->syslist);
80 // Finds a free sys_desc_t, on which you can wait for a specific syscall, and
81 // binds it to the group desc.
// Pops a syscall descriptor from syscall_desc_pool, zeroes it, and appends
// it to the group's syslist so waiton_group_call() will reap it.
// NOTE(review): the pool-empty (NULL) check and the return statement are on
// lines not visible here — confirm.
82 syscall_desc_t* get_sys_desc(async_desc_t* desc)
84 syscall_desc_t* d = POOL_GET(&syscall_desc_pool);
86 // Clear out any data that was in the old desc
87 memset(d, 0, sizeof(*d));
88 TAILQ_INSERT_TAIL(&desc->syslist, d, next);
93 // Gets an async and a sys desc, with the sys bound to async. Also sets
94 // current_async_desc. This is meant as an easy wrapper when there is only one
95 // syscall for an async call.
// Returns via out-params; on partial failure (async ok, syscall pool empty)
// it rolls back the async desc so nothing leaks from the pools.
96 int get_all_desc(async_desc_t** a_desc, syscall_desc_t** s_desc)
98 assert(a_desc && s_desc);
// Grab the group descriptor first; it owns the list the syscall desc joins.
99 if ((current_async_desc = get_async_desc()) == NULL){
103 *a_desc = current_async_desc;
// NOTE(review): the success path (presumably `return 0;`) is on a line not
// visible here — confirm against the full file.
104 if ((*s_desc = get_sys_desc(current_async_desc)))
106 // in case we could get an async, but not a syscall desc, then clean up.
107 POOL_PUT(&async_desc_pool, current_async_desc);
108 current_async_desc = NULL;
113 // This runs one syscall instead of a group.
115 // TODO: right now there is one channel (remote), in the future, the caller
116 // may specify local which will cause it to give up the core to do the work.
117 // creation of additional remote channel also allows the caller to prioritize
118 // work, because the default policy for the kernel is to roundrobin between them.
// Produces one request onto the channel's shared front ring under the MCS
// lock: bump req_prod_pvt, copy *req into the ring slot, mark it REQ_ready,
// and push so the kernel can consume it.  The heap-allocated desc (recording
// the channel and ring index) is handed back through desc_ptr2 — presumably
// on a line not visible here; caller waits on it via waiton_syscall().
119 int async_syscall(arsc_channel_t* chan, syscall_req_t* req, syscall_desc_t** desc_ptr2)
121 // Note that this assumes one global frontring (TODO)
122 // abort if there is no room for our request. ring size is currently 64.
123 // we could spin til it's free, but that could deadlock if this same thread
124 // is supposed to consume the requests it is waiting on later.
// NOTE(review): malloc result is not NULL-checked before the dereference on
// the next line (no check visible in this view) — confirm/fix in full file.
125 syscall_desc_t* desc = malloc(sizeof (syscall_desc_t));
126 desc->channel = chan;
127 syscall_front_ring_t *fr = &(desc->channel->sysfr);
128 //TODO: can do it locklessly using CAS, but could change with local async calls
129 struct mcs_lock_qnode local_qn = {0};
130 mcs_lock_lock(&(chan->aclock), &local_qn);
135 // req_prod_pvt comes in as the previously produced item. need to
136 // increment to the next available spot, which is the one we'll work on.
137 // at some point, we need to listen for the responses.
138 desc->idx = ++(fr->req_prod_pvt);
139 syscall_req_t* r = RING_GET_REQUEST(fr, desc->idx);
140 // CAS on the req->status perhaps
// Stage the request as "allocated" before copying it into the shared slot.
141 req->status = REQ_alloc;
143 memcpy(r, req, sizeof(syscall_req_t));
// Only after the full copy is the slot marked ready for the kernel.
144 r->status = REQ_ready;
145 // push our updates to syscallfrontring.req_prod_pvt
146 // note: it is ok to push without protection since it is atomic and kernel
147 // won't process any requests until they are marked REQ_ready (also atomic)
148 RING_PUSH_REQUESTS(fr);
149 //cprintf("DEBUG: sring->req_prod: %d, sring->rsp_prod: %d\n",
150 mcs_lock_unlock(&desc->channel->aclock, &local_qn);
154 // Default convenience wrapper before other methods of posting calls are available
// Packs a syscall number plus up to six long arguments (via varargs) into a
// heap-allocated struct syscall, posts it on the default channel with
// async_syscall(), and hands back the resulting desc.
// NOTE(review): va_start/va_end, the assignment of `num` into p_sysc, and
// the return statement are on lines not visible in this view — confirm.
156 syscall_desc_t* arc_call(long int num, ...)
160 struct syscall *p_sysc = malloc(sizeof (struct syscall));
161 syscall_desc_t* desc;
162 if (p_sysc == NULL) {
167 p_sysc->arg0 = va_arg(vl,long int);
168 p_sysc->arg1 = va_arg(vl,long int);
169 p_sysc->arg2 = va_arg(vl,long int);
170 p_sysc->arg3 = va_arg(vl,long int);
171 p_sysc->arg4 = va_arg(vl,long int);
172 p_sysc->arg5 = va_arg(vl,long int);
// Request starts in the REQ_alloc state, carrying the syscall struct payload.
174 syscall_req_t arc = {REQ_alloc,NULL, NULL, p_sysc};
175 async_syscall(&SYS_CHANNEL, &arc, &desc);
// BUG(review): format string has two conversions (%d and %p) but only one
// argument, and %d is given a pointer — undefined behavior per C11 7.21.6.1.
// Should be e.g. printf("syscall pushed at %p\n", (void*)desc);
176 printf ( "%d pushed at %p \n", desc);
180 // consider a timeout too
181 // Wait until arsc returns, caller provides rsp buffer.
182 // eventually change this to return ret_val, set errno
184 // What if someone calls waiton the same desc several times?
// Spins on the ring slot at desc->idx until the kernel marks the syscall
// done, then runs the response's cleanup hook, publishes errno/retval from
// the response, and advances the ring's consumer index.
// NOTE(review): `retval` and the return statement are on lines not visible
// in this view — confirm against the full file.
185 int waiton_syscall(syscall_desc_t* desc)
188 if (desc == NULL || desc->channel == NULL){
192 // Make sure we were given a desc with a non-NULL frontring. This could
193 // happen if someone forgot to check the error code on the paired syscall.
194 syscall_front_ring_t *fr = &desc->channel->sysfr;
200 printf("waiting %d\n", vcore_id());
201 syscall_rsp_t* rsp = RING_GET_RESPONSE(fr, desc->idx);
203 // ignoring the ring push response from the kernel side now
// NOTE(review): if sc->flags is a bitfield, equality against SC_DONE can
// spin forever when other flag bits are also set — confirm whether this
// should test (flags & SC_DONE) instead.
204 while (atomic_read(&rsp->sc->flags) != SC_DONE)
206 // memcpy(rsp, rsp_inring, sizeof(*rsp));
208 // run a cleanup function for this desc, if available
// NOTE(review): presumably guarded by a NULL check on rsp->cleanup on a
// line not visible here — confirm.
210 rsp->cleanup(rsp->data);
// Publish the kernel's result to the caller via errno + retval.
212 errno = RSP_ERRNO(rsp);
215 retval = RSP_RESULT(rsp);
// Consume the response slot; atomic since the kernel produces concurrently.
216 atomic_inc((atomic_t*) &(fr->rsp_cons));