/* akaros/user/parlib/asynccall.c */
   1#include <stdlib.h>
   2
   3#include <parlib/common.h>
   4#include <parlib/assert.h>
   5#include <parlib/stdio.h>
   6#include <ros/syscall.h>
   7#include <ros/ring_syscall.h>
   8#include <ros/sysevent.h>
   9#include <parlib/arc.h>
  10#include <errno.h>
  11#include <parlib/arch/arch.h>
  12#include <sys/param.h>
  13#include <parlib/arch/atomic.h>
  14#include <parlib/vcore.h>
  15
  16syscall_desc_pool_t syscall_desc_pool;
  17async_desc_pool_t async_desc_pool;
  18async_desc_t* current_async_desc;
  19
  20struct arsc_channel global_ac;
  21
  22void init_arc(struct arsc_channel* ac)
  23{
  24        // Set up the front ring for the general syscall ring
  25        // and the back ring for the general sysevent ring
  26        mcs_lock_init(&ac->aclock);
  27        ac->ring_page = (syscall_sring_t*)sys_init_arsc();
  28
  29        FRONT_RING_INIT(&ac->sysfr, ac->ring_page, SYSCALLRINGSIZE);
  30        //BACK_RING_INIT(&syseventbackring, &(__procdata.syseventring), SYSEVENTRINGSIZE);
  31        //TODO: eventually rethink about desc pools, they are here but no longer
  32        //necessary
  33        POOL_INIT(&syscall_desc_pool, MAX_SYSCALLS);
  34        POOL_INIT(&async_desc_pool, MAX_ASYNCCALLS);
  35}
  36
  37// Wait on all syscalls within this async call.  TODO - timeout or something?
  38int waiton_group_call(async_desc_t* desc, async_rsp_t* rsp)
  39{
  40        syscall_rsp_t syscall_rsp;
  41        syscall_desc_t* d;
  42        int retval = 0;
  43        int err = 0;
  44        if (!desc) {
  45                errno = EINVAL;
  46                return -1;
  47        }
  48
  49        while (!(TAILQ_EMPTY(&desc->syslist))) {
  50                d = TAILQ_FIRST(&desc->syslist);
  51                err = waiton_syscall(d);
  52                // TODO: processing the retval out of rsp here.  might be
  53                // specific to the async call.  do we want to accumulate?
  54                // return any negative values?  depends what we want from the
  55                // return value, so we might have to pass in a function that is
  56                // used to do the processing and pass the answer back out in
  57                // rsp.
  58                //rsp->retval += syscall_rsp.retval; // For example
  59                retval = MIN(retval, err);
  60                // remove from the list and free the syscall desc
  61                TAILQ_REMOVE(&desc->syslist, d, next);
  62                POOL_PUT(&syscall_desc_pool, d);
  63        }
  64        // run a cleanup function for this desc, if available
  65        if (desc->cleanup)
  66                desc->cleanup(desc->data);
  67        // free the asynccall desc
  68        POOL_PUT(&async_desc_pool, desc);
  69        return err;
  70}
  71
  72// Finds a free async_desc_t, on which you can wait for a series of syscalls
  73async_desc_t* get_async_desc(void)
  74{
  75        async_desc_t* desc = POOL_GET(&async_desc_pool);
  76        if (desc) {
  77                // Clear out any data that was in the old desc
  78                memset(desc, 0, sizeof(*desc));
  79                TAILQ_INIT(&desc->syslist);
  80        }
  81        return desc;
  82}
  83
  84// Finds a free sys_desc_t, on which you can wait for a specific syscall, and
  85// binds it to the group desc.
  86syscall_desc_t* get_sys_desc(async_desc_t* desc)
  87{
  88        syscall_desc_t* d = POOL_GET(&syscall_desc_pool);
  89        if (d) {
  90                // Clear out any data that was in the old desc
  91                memset(d, 0, sizeof(*d));
  92        TAILQ_INSERT_TAIL(&desc->syslist, d, next);
  93        }
  94        return d;
  95}
  96
  97// Gets an async and a sys desc, with the sys bound to async.  Also sets
  98// current_async_desc.  This is meant as an easy wrapper when there is only one
  99// syscall for an async call.
 100int get_all_desc(async_desc_t** a_desc, syscall_desc_t** s_desc)
 101{
 102        assert(a_desc && s_desc);
 103        if ((current_async_desc = get_async_desc()) == NULL){
 104                errno = EBUSY;
 105                return -1;
 106        }
 107        *a_desc = current_async_desc;
 108        if ((*s_desc = get_sys_desc(current_async_desc)))
 109                return 0;
 110        // in case we could get an async, but not a syscall desc, then clean up.
 111        POOL_PUT(&async_desc_pool, current_async_desc);
 112        current_async_desc = NULL;
 113        errno = EBUSY;
 114        return -1;
 115}
 116
 117// This runs one syscall instead of a group. 
 118
 119// TODO: right now there is one channel (remote), in the future, the caller
 120// may specify local which will cause it to give up the core to do the work.
 121// creation of additional remote channel also allows the caller to prioritize
 122// work, because the default policy for the kernel is to roundrobin between
 123// them.
 124int async_syscall(arsc_channel_t* chan, syscall_req_t* req, syscall_desc_t** desc_ptr2)
 125{
 126        // Note that this assumes one global frontring (TODO)
 127        // abort if there is no room for our request.  ring size is currently
 128        // 64.  we could spin til it's free, but that could deadlock if this
 129        // same thread is supposed to consume the requests it is waiting on
 130        // later.
 131        syscall_desc_t* desc = malloc(sizeof (syscall_desc_t));
 132        desc->channel = chan;
 133        syscall_front_ring_t *fr = &(desc->channel->sysfr);
 134        //TODO: can do it locklessly using CAS, but could change with local
 135        //async calls
 136        struct mcs_lock_qnode local_qn = {0};
 137        mcs_lock_lock(&(chan->aclock), &local_qn);
 138        if (RING_FULL(fr)) {
 139                errno = EBUSY;
 140                return -1;
 141        }
 142        // req_prod_pvt comes in as the previously produced item.  need to
 143        // increment to the next available spot, which is the one we'll work on.
 144        // at some point, we need to listen for the responses.
 145        desc->idx = ++(fr->req_prod_pvt);
 146        syscall_req_t* r = RING_GET_REQUEST(fr, desc->idx);
 147        // CAS on the req->status perhaps
 148        req->status = REQ_alloc;
 149
 150        memcpy(r, req, sizeof(syscall_req_t));
 151        r->status = REQ_ready;
 152        // push our updates to syscallfrontring.req_prod_pvt
 153        // note: it is ok to push without protection since it is atomic and
 154        // kernel won't process any requests until they are marked REQ_ready
 155        // (also atomic)
 156        RING_PUSH_REQUESTS(fr);
 157        //cprintf("DEBUG: sring->req_prod: %d, sring->rsp_prod: %d\n", 
 158        mcs_lock_unlock(&desc->channel->aclock, &local_qn);
 159        *desc_ptr2 = desc;
 160        return 0;
 161}
// Default convenience wrapper until other methods of posting calls are
// available.
 164syscall_desc_t* arc_call(long int num, ...)
 165{
 166        va_list vl;
 167        va_start(vl,num);
 168        struct syscall *p_sysc = malloc(sizeof (struct syscall));
 169        syscall_desc_t* desc;
 170        if (p_sysc == NULL) {
 171                errno = ENOMEM;
 172                return 0;
 173        }
 174        p_sysc->num = num;
 175        p_sysc->arg0 = va_arg(vl,long int);
 176        p_sysc->arg1 = va_arg(vl,long int);
 177        p_sysc->arg2 = va_arg(vl,long int);
 178        p_sysc->arg3 = va_arg(vl,long int);
 179        p_sysc->arg4 = va_arg(vl,long int);
 180        p_sysc->arg5 = va_arg(vl,long int);
 181        va_end(vl);
 182        syscall_req_t arc = {REQ_alloc,NULL, NULL, p_sysc};
 183        async_syscall(&SYS_CHANNEL, &arc, &desc);
 184        printf ( "%d pushed at %p \n", desc);
 185        return desc;
 186}
 187
 188// consider a timeout too
 189// Wait until arsc returns, caller provides rsp buffer.
 190// eventually change this to return ret_val, set errno
 191
 192// What if someone calls waiton the same desc several times?
// consider a timeout too
// Wait until arsc returns, caller provides rsp buffer.
// eventually change this to return ret_val, set errno

// What if someone calls waiton the same desc several times?
//
// Spins until the kernel marks the posted syscall complete, runs the
// response's cleanup hook if any, and consumes the response slot.
// Returns the syscall's result, or -1 with errno set from the response.
int waiton_syscall(syscall_desc_t* desc)
{
        int retval = 0;
        if (desc == NULL || desc->channel == NULL){
                errno = EFAULT;
                return -1;
        }
        // Make sure we were given a desc with a non-NULL frontring.  This could
        // happen if someone forgot to check the error code on the paired
        // syscall.
        syscall_front_ring_t *fr =  &desc->channel->sysfr;
        
        // NOTE(review): fr is the address of a struct member of a pointer we
        // already NULL-checked, so it can never be NULL here -- this check is
        // dead code.
        if (!fr){
                errno = EFAULT;
                return -1;
        }
        printf("waiting %d\n", vcore_id());
        syscall_rsp_t* rsp = RING_GET_RESPONSE(fr, desc->idx);

        // ignoring the ring push response from the kernel side now
        // Busy-wait for the kernel to flag the syscall done; cpu_relax hints
        // the core while spinning.
        while (atomic_read(&rsp->sc->flags) != SC_DONE)
                cpu_relax();
        // memcpy(rsp, rsp_inring, sizeof(*rsp));
        
        // run a cleanup function for this desc, if available
        if (rsp->cleanup)
                rsp->cleanup(rsp->data);
        // Translate the response: a nonzero errno in the rsp wins over the
        // result value.
        if (RSP_ERRNO(rsp)){
                errno = RSP_ERRNO(rsp);
                retval = -1;
        } else 
                retval =  RSP_RESULT(rsp); 
        // Consume the response slot atomically, since multiple waiters may
        // race on rsp_cons.
        atomic_inc((atomic_t*) &(fr->rsp_cons));
        return retval;
}
 228
 229
 230