Removed CONFIG_OSDI and EXPER_TRADPROC
author     Barret Rhoden <brho@cs.berkeley.edu>
           Thu, 27 May 2010 23:30:42 +0000 (16:30 -0700)
committer  Kevin Klues <klueska@cs.berkeley.edu>
           Thu, 3 Nov 2011 00:35:47 +0000 (17:35 -0700)
Everything these options enabled still exists on the OSDI branch, but we
don't want to maintain or wade through that code anymore.
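
For reference, both options followed the tree's usual config pattern: an
option is defined as a -D flag in Makeconfig and enabled per-developer in
Makelocal.  Both lines below are copied from the files in this diff:

    CONFIG_OSDI:=                      -D__CONFIG_OSDI__    (Makeconfig)
    #CFLAGS += $(CONFIG_OSDI)                                (Makelocal.template)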

22 files changed:
Makeconfig
Makelocal.template
kern/arch/i686/e1000.c
kern/arch/i686/ne2k.c
kern/arch/i686/nic_common.c
kern/arch/i686/nic_common.h
kern/arch/i686/perfmon.c
kern/arch/i686/rl8168.c
kern/arch/i686/smp_boot.c
kern/arch/sparc/smp.c
kern/include/env.h
kern/include/process.h
kern/include/ros/bits/syscall.h
kern/include/smp.h
kern/src/manager.c
kern/src/mm.c
kern/src/process.c
kern/src/resource.c
kern/src/smp.c
kern/src/syscall.c
kern/src/timer.c
user/parlib/pthread.c

diff --git a/Makeconfig b/Makeconfig
index 77ab4bd..c55d21e 100644
@@ -3,7 +3,6 @@
 # To enable any of these options, add a line like the following to your Makelocal
 # CFLAGS += $(CONFIG_APPSERVER)
 CONFIG_APPSERVER:=                 -D__CONFIG_APPSERVER__
-CONFIG_OSDI:=                      -D__CONFIG_OSDI__
 
 # Kernel configuration parameters
 # By default, each of these options will be turned off
@@ -23,7 +22,6 @@ CONFIG_E1000_MMIO_HACK:=           -D__CONFIG_E1000_MMIO_HACK__
 CONFIG_E1000_ON_BOXBORO:=          -DE1000_MMIO_ADDR=0x9bb20000
 CONFIG_E1000_ON_S142:=             -DE1000_MMIO_ADDR=0xfbee0000
 CONFIG_DISABLE_MPTABLES:=          -D__CONFIG_DISABLE_MPTABLES__
-CONFIG_EXPER_TRADPROC:=            -D__CONFIG_EXPER_TRADPROC__
 CONFIG_MONITOR_ON_INT:=            -D__CONFIG_MONITOR_ON_INT__
 CONFIG_DISABLE_SMT:=               -D__CONFIG_DISABLE_SMT__
 CONFIG_BOXBORO:=                   -D__CONFIG_BOXBORO__
diff --git a/Makelocal.template b/Makelocal.template
index 076b7b4..4ec5100 100644
@@ -1,6 +1,5 @@
 # General Purpose configuration parameters
 #CFLAGS += $(CONFIG_APPSERVER)
-#CFLAGS += $(CONFIG_OSDI)
 
 # Kernel configuration parameters
 #KERN_CFLAGS += $(CONFIG_KFS)
 #KERN_CFLAGS += $(CONFIG_SEQLOCK_DEBUG)
 #KERN_CFLAGS += $(CONFIG_SPINLOCK_DEBUG)
 #KERN_CFLAGS += $(CONFIG_PAGE_COLORING)
-#KERN_CFLAGS += $(CONFIG_APPSERVER)
 #KERN_CFLAGS += $(CONFIG_DEMAND_PAGING)
 #KERN_CFLAGS += $(CONFIG_NOMTRRS)
 #KERN_CFLAGS += $(CONFIG_E1000_MMIO_HACK)
 #KERN_CFLAGS += $(CONFIG_E1000_ON_BOXBORO)
 #KERN_CFLAGS += $(CONFIG_E1000_ON_S142)
 #KERN_CFLAGS += $(CONFIG_DISABLE_MPTABLES)
-#KERN_CFLAGS += $(CONFIG_EXPER_TRADPROC)
 #KERN_CFLAGS += $(CONFIG_MONITOR_ON_INT)
 #KERN_CFLAGS += $(CONFIG_DISABLE_SMT)
 #KERN_CFLAGS += $(CONFIG_BOXBORO)
 
 #KERN_CFLAGS += -DDEVELOPER_NAME=waterman
 #KERN_CFLAGS += -DDEVELOPER_NAME=brho
-# This manager runs the OSDI microbenchmarks from KFS
-#KERN_CFLAGS += -DDEVELOPER_NAME=tests
 
 # Userspace configuration parameters
 #USER_CFLAGS += $(CONFIG_SYSCALL_TRAP)
diff --git a/kern/arch/i686/e1000.c b/kern/arch/i686/e1000.c
index deb2b10..bf3ff96 100644
@@ -655,20 +655,16 @@ void e1000_handle_rx_packet() {
                status =  rx_des_kva[rx_des_loop_cur].status;
 
                if (status == 0x0) {
-#ifndef __CONFIG_OSDI__
-                       panic("ERROR: E1000: Packet owned by hardware has 0 status value\n");
-#else /* OSDI */
                        warn("ERROR: E1000: Packet owned by hardware has 0 status value\n");
                        /* It's possible we are processing a packet that is a fragment
                         * before the entire packet arrives.  The code currently assumes
                         * that all of the packets fragments are there, so it assumes the
                         * next one is ready.  We'll spin until it shows up...  This could
                         * deadlock, and sucks in general, but will help us diagnose the
-                        * driver's issues.  */
+                        * driver's issues.  TODO: determine root cause and fix this shit.*/
                        while(rx_des_kva[rx_des_loop_cur].status == 0x0)
                                cpu_relax();
                        status = rx_des_kva[rx_des_loop_cur].status;
-#endif /* __CONFIG_OSDI__ */
                }
        
                fragment_size = rx_des_kva[rx_des_loop_cur].length;
@@ -710,36 +706,6 @@ void e1000_handle_rx_packet() {
 
        } while ((status & E1000_RXD_STAT_EOP) == 0);
 
-#ifdef __CONFIG_OSDI__
-       struct packetizer_packet *p = (struct packetizer_packet*)rx_buffer;
-       if(ntohs(p->ethertype) == PACKETIZER_ETH_TYPE) {
-               assert(fillmeup_data.proc != NULL);
-               assert(fillmeup_data.bufs != NULL);
-               struct proc *proc = fillmeup_data.proc;
-
-               int32_t lw;
-               uint32_t backupcr3;
-               memcpy_from_user(proc, &lw, fillmeup_data.last_written, sizeof(lw));
-               lw = (lw + 1) % (fillmeup_data.num_bufs);
-               memcpy_to_user(proc, &fillmeup_data.bufs[PACKETIZER_MAX_PAYLOAD * lw], 
-                              p->payload, ntohl(p->payload_size));
-                       
-               // memcpy_to_user(proc, fillmeup_data.last_written, &lw, sizeof(lw));
-               backupcr3 = rcr3();
-               lcr3(proc->env_cr3);
-               *(fillmeup_data.last_written) = lw;
-               lcr3(backupcr3);
-               //print_packetizer_packet(p);
-               proc_notify(fillmeup_data.proc, NE_ETC_ETC_ETC, 0);
-
-               // Advance the tail pointer                             
-               e1000_rx_index = rx_des_loop_cur;
-               e1000_wr32(E1000_RDT, e1000_rx_index);
-               kfree(rx_buffer);
-               return;
-       }
-#endif
-
 #ifdef __CONFIG_APPSERVER__
        // Treat as a syscall frontend response packet if eth_type says so
        // Will eventually go away, so not too worried about elegance here...
diff --git a/kern/arch/i686/ne2k.c b/kern/arch/i686/ne2k.c
index f59e836..5576550 100644
@@ -391,32 +391,6 @@ void ne2k_handle_rx_packet() {
                return;
        }
 
-#ifdef __CONFIG_OSDI__
-       struct packetizer_packet *p = (struct packetizer_packet*)rx_buffer;
-       if(ntohs(p->ethertype) == PACKETIZER_ETH_TYPE) {
-               assert(fillmeup_data.proc != NULL);
-               assert(fillmeup_data.bufs != NULL);
-               struct proc *proc = fillmeup_data.proc;
-
-               int32_t lw;
-               uint32_t backupcr3;
-               memcpy_from_user(proc, &lw, fillmeup_data.last_written, sizeof(lw));
-               lw = (lw + 1) % (fillmeup_data.num_bufs);
-               memcpy_to_user(proc, &fillmeup_data.bufs[PACKETIZER_MAX_PAYLOAD * lw], 
-                              p->payload, ntohl(p->payload_size));
-               // memcpy_to_user(proc, fillmeup_data.last_written, &lw, sizeof(lw));
-               backupcr3 = rcr3();
-               lcr3(proc->env_cr3);
-               *(fillmeup_data.last_written) = lw;
-               lcr3(backupcr3);
-               //print_packetizer_packet(p);
-               proc_notify(fillmeup_data.proc, NE_ETC_ETC_ETC, 0);
-
-               kfree(rx_buffer);
-               return;
-       }
-#endif
-
 #ifdef __CONFIG_APPSERVER__
        // Treat as a syscall frontend response packet if eth_type says so
        // Will eventually go away, so not too worried about elegance here...
diff --git a/kern/arch/i686/nic_common.c b/kern/arch/i686/nic_common.c
index 9c130f2..7156e62 100644
 #include <kmalloc.h>
 #include <stdio.h>
 
-#ifdef __CONFIG_OSDI__
-struct fillmeup fillmeup_data;
-#endif
-
 // Global send_frame function pointer
 // Means we can only have one network card per system right now...
 int (*send_frame)(const char *data, size_t len);
diff --git a/kern/arch/i686/nic_common.h b/kern/arch/i686/nic_common.h
index d96c0fa..b7bfd7c 100644
@@ -20,42 +20,6 @@ static inline uint32_t htonl(uint32_t x)
 #define ntohs htons
 #define ntohl htonl
 
-#ifdef __CONFIG_OSDI__
-#define PACKETIZER_ETH_TYPE 0xabcd
-#define PACKETIZER_MAX_PAYLOAD 1024
-struct packetizer_packet
-{
-    uint8_t dst_mac[6];
-    uint8_t src_mac[6];
-    uint16_t ethertype;
-    uint16_t seqno;
-    uint32_t payload_size;
-    char payload[PACKETIZER_MAX_PAYLOAD];
-};
-
-static void print_packetizer_packet(struct packetizer_packet *p)
-{
-       printk("packetizer_packet:\n");
-       printk("  dst_mac: %02x:%02x:%02x:%02x:%02x:%02x\n", 
-               p->dst_mac[0],p->dst_mac[1],p->dst_mac[2],
-               p->dst_mac[3],p->dst_mac[4],p->dst_mac[5]);
-       printk("  src_mac: %02x:%02x:%02x:%02x:%02x:%02x\n", 
-               p->src_mac[0],p->src_mac[1],p->src_mac[2],
-               p->src_mac[3],p->src_mac[4],p->src_mac[5]);
-       printk("  ethertype: 0x%02x\n", ntohs(p->ethertype));
-       printk("  seqno: %u\n", ntohs(p->seqno));
-       printk("  payload_size: %u\n", ntohl(p->payload_size));
-}
-
-struct fillmeup {
-       struct proc *proc;
-       uint8_t *bufs;
-       uint16_t num_bufs;
-       int32_t *last_written;
-};
-extern struct fillmeup fillmeup_data;
-#endif
-
 // Packet sizes
 #define MTU              1500
 #define MAX_FRAME_SIZE   (MTU + 14)
diff --git a/kern/arch/i686/perfmon.c b/kern/arch/i686/perfmon.c
index ad5ad8b..4fe6cf8 100644
@@ -8,12 +8,12 @@ static void setup_counter(int index, uint8_t mask, uint8_t event) {
 }
 
 void perfmon_init() {
-#ifdef __CONFIG_OSDI__
-       //setting up to collect cache miss behavior specifically for OSDI
+#if 0
+       // Examples of how to set up for cache misses:
        setup_counter(0, LLCACHE_REF_MASK, LLCACHE_EVENT);
        setup_counter(1, LLCACHE_MISS_MASK, LLCACHE_EVENT);
-
-  //enable user level access to the performance counters
-  lcr4(rcr4() | CR4_PCE);
 #endif
+
+       /* Enable user level access to the performance counters */
+       lcr4(rcr4() | CR4_PCE);
 }
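
Aside on the perfmon change above: once CR4.PCE is set, user-level code may
execute RDPMC directly.  A minimal sketch of a userspace reader, assuming an
x86 toolchain with GCC-style inline asm (this helper is hypothetical, not
part of this tree):

    #include <stdint.h>

    /* Read performance-monitoring counter `idx` via RDPMC.  This faults
     * (#GP) from user level unless the kernel set CR4.PCE, which
     * perfmon_init() above now does unconditionally. */
    static inline uint64_t read_pmc(uint32_t idx)
    {
        uint32_t lo, hi;
        asm volatile("rdpmc" : "=a"(lo), "=d"(hi) : "c"(idx));
        return ((uint64_t)hi << 32) | lo;
    }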
diff --git a/kern/arch/i686/rl8168.c b/kern/arch/i686/rl8168.c
index ecafb14..b2f1910 100644
@@ -512,34 +512,6 @@ void rl8168_handle_rx_packet() {
                
        } while (!(current_command & DES_LS_MASK));
 
-#ifdef __CONFIG_OSDI__
-       struct packetizer_packet *p = (struct packetizer_packet*)rx_buffer;
-       if(ntohs(p->ethertype) == PACKETIZER_ETH_TYPE) {
-               assert(fillmeup_data.proc != NULL);
-               assert(fillmeup_data.bufs != NULL);
-               struct proc *proc = fillmeup_data.proc;
-
-               int32_t lw;
-               uint32_t backupcr3;
-               memcpy_from_user(proc, &lw, fillmeup_data.last_written, sizeof(lw));
-               lw = (lw + 1) % (fillmeup_data.num_bufs);
-               memcpy_to_user(proc, &fillmeup_data.bufs[PACKETIZER_MAX_PAYLOAD * lw], 
-                              p->payload, ntohl(p->payload_size));
-                       
-               // memcpy_to_user(proc, fillmeup_data.last_written, &lw, sizeof(lw));
-               backupcr3 = rcr3();
-               lcr3(proc->env_cr3);
-               *(fillmeup_data.last_written) = lw;
-               lcr3(backupcr3);
-               //print_packetizer_packet(p);
-               proc_notify(fillmeup_data.proc, NE_ETC_ETC_ETC, 0);
-
-               rx_des_cur = rx_des_loop_cur;
-               kfree(rx_buffer);
-               return;
-       }
-#endif
-
 #ifdef __CONFIG_APPSERVER__
        // Treat as a syscall frontend response packet if eth_type says so
        // Will eventually go away, so not too worried about elegance here...
diff --git a/kern/arch/i686/smp_boot.c b/kern/arch/i686/smp_boot.c
index d17c9c8..78ad1ed 100644
@@ -327,15 +327,4 @@ void smp_percpu_init(void)
        
        /* need to init perfctr before potentiall using it in timer handler */
        perfmon_init();
-
-#ifdef __CONFIG_EXPER_TRADPROC__
-       per_cpu_info[coreid].ticks = 0;
-       spinlock_init(&per_cpu_info[coreid].runqueue_lock);
-       TAILQ_INIT(&per_cpu_info[coreid].runqueue);
-       /* set a per-core timer interrupt to go off and call local_schedule every
-        * TIMER_uSEC microseconds.  The handler is registered independently of
-        * EXPER_TRADPROC, in line with what sparc does. */
-       set_core_timer(TIMER_uSEC);
-#endif
-
 }
diff --git a/kern/arch/sparc/smp.c b/kern/arch/sparc/smp.c
index c9acbf6..21c33e8 100644
@@ -157,10 +157,4 @@ void smp_percpu_init(void)
        STAILQ_INIT(&per_cpu_info[coreid].immed_amsgs);
        spinlock_init(&per_cpu_info[coreid].routine_amsg_lock);
        STAILQ_INIT(&per_cpu_info[coreid].routine_amsgs);
-#ifdef __CONFIG_EXPER_TRADPROC__
-       per_cpu_info[coreid].ticks = 0;
-       spinlock_init(&per_cpu_info[coreid].runqueue_lock);
-       TAILQ_INIT(&per_cpu_info[coreid].runqueue);
-       set_core_timer(TIMER_uSEC);
-#endif
 }
diff --git a/kern/include/env.h b/kern/include/env.h
index 82723c7..fb401ba 100644
@@ -63,13 +63,6 @@ struct Env {
        // The front ring pointers for pushing asynchronous system events out to the user
        // Note this is the actual frontring, not a pointer to it somewhere else
        sysevent_front_ring_t syseventfrontring;
-
-#ifdef __CONFIG_EXPER_TRADPROC__
-       /* need to call them Envs due to circular crap */
-       struct Env *true_proc;                                  /* original "parent" proc */
-       uint32_t vcoreid;                                               /* vcore this proc represents */
-       struct Env *vcore_procs[MAX_NUM_CPUS];  /* only used by the parent */
-#endif /* __CONFIG_EXPER_TRADPROC__ */
 };
 
 /* Process Flags */
diff --git a/kern/include/process.h b/kern/include/process.h
index 389153d..99ea1bf 100644
@@ -176,9 +176,4 @@ void print_idlecoremap(void);
 void print_allpids(void);
 void print_proc_info(pid_t pid);
 
-#ifdef __CONFIG_EXPER_TRADPROC__
-bool is_real_proc(struct proc *p);
-int fake_proc_alloc(struct proc **pp, struct proc *parent, uint32_t vcoreid);
-#endif
-
 #endif // !ROS_KERN_PROCESS_H
diff --git a/kern/include/ros/bits/syscall.h b/kern/include/ros/bits/syscall.h
index 5729b26..29f571b 100644
@@ -65,7 +65,6 @@
 
 /* Syscalls we plan to remove someday */
 #define SYS_cache_buster        200 
-#define SYS_fillmeup            201
 
 /* For Buster Measurement Flags */
 #define BUSTER_SHARED                  0x0001
diff --git a/kern/include/smp.h b/kern/include/smp.h
index a543a29..7c1a52c 100644
@@ -39,11 +39,6 @@ struct per_cpu_info {
        struct kernel_msg_list NTPTV(a0t) NTPTV(a1t) NTPTV(a2t) immed_amsgs;
        spinlock_t routine_amsg_lock;
        struct kernel_msg_list NTPTV(a0t) NTPTV(a1t) NTPTV(a2t) routine_amsgs;
-#ifdef __CONFIG_EXPER_TRADPROC__
-       unsigned int ticks; /* how many times the tick went off.  can roll over */
-       spinlock_t runqueue_lock;
-       struct proc_list runqueue;
-#endif /* __CONFIG_EXPER_TRADPROC__ */
 }__attribute__((aligned(HW_CACHE_ALIGN)));
 
 typedef struct per_cpu_info NTPTV(t) NTPTV(a0t) NTPTV(a1t) NTPTV(a2t) per_cpu_info_t;
@@ -65,14 +60,4 @@ int smp_call_function_single(uint32_t dest, poly_isr_t handler, TV(t) data,
                              handler_wrapper_t** wait_wrapper);
 int smp_call_wait(handler_wrapper_t*SAFE wrapper);
 
-#ifdef __CONFIG_EXPER_TRADPROC__
-
-#define TIMER_uSEC 10000
-
-void local_schedule(void);
-void local_schedule_proc(uint32_t core, struct proc *p);
-void load_balance(void);
-
-#endif /* __CONFIG_EXPER_TRADPROC__ */
-
 #endif /* !ROS_INC_SMP_H */
diff --git a/kern/src/manager.c b/kern/src/manager.c
index d129ee4..1fd9f47 100644
@@ -102,8 +102,8 @@ void manager_brho(void)
                case 0:
                        /* 124 is half of the available boxboro colors (with the kernel
                         * getting 8) */
-                       quick_proc_color_run("msr_dumb_while", p, 124);
-                       //quick_proc_run("msr_dumb_while", p);
+                       //quick_proc_color_run("msr_dumb_while", p, 124);
+                       quick_proc_run("mhello", p);
                        #if 0
                        // this is how you can transition to a parallel process manually
                        // make sure you don't proc run first
@@ -241,383 +241,6 @@ void manager_pearce()
 
 }
 
-#ifdef __CONFIG_OSDI__
-/* Manager for Micro benchmarks, OSDI, etc */
-struct proc *mgr_p1 = 0;
-struct proc *mgr_p2 = 0;
-static void exper_1_part2(struct proc **pp);
-static void exper_2_part2(struct proc **pp);
-static void exper_3_part2(struct proc **pp);
-static void exper_4_part2(struct proc **pp);
-static void exper_5_part2(struct proc **pp);
-static void exper_6_part2(struct proc **pp);
-static void exper_7_part2(struct proc **pp);
-static void exper_8_part2(struct proc **pp);
-static void exper_9_part2(struct proc **pp);
-
-void manager_tests(void)
-{
-       static uint8_t RACY progress = 0;
-
-       printk("Test Progress: %d\n", progress);
-       /* 10 runs of every experiment.  Finishing/Part2 is harmless on a null
-        * pointer.  We need to clean up/ finish/ part2 after each quick_proc_run,
-        * since we leave the monitor and only enter on another run (with
-        * progress++).  That's why we run a part2 in the first case: of the next
-        * experiment. */
-       switch (progress++) {
-               /* Experiment 1: get max vcores */
-               case 0:
-                       printk("************* Starting experiment 1 ************** \n");
-               case 1:
-               case 2:
-               case 3:
-               case 4:
-               case 5:
-               case 6:
-               case 7:
-               case 8:
-               case 9:
-                       exper_1_part2(&mgr_p1);
-                       quick_proc_run("msr_get_cores", mgr_p1);
-                       break;
-               /* Experiment 2: get a single vcore */
-               case 10:
-                       exper_1_part2(&mgr_p1);
-                       printk("************* Starting experiment 2 ************** \n");
-               case 11:
-               case 12:
-               case 13:
-               case 14:
-               case 15:
-               case 16:
-               case 17:
-               case 18:
-               case 19:
-                       exper_2_part2(&mgr_p1);
-                       quick_proc_run("msr_get_singlecore", mgr_p1);
-                       break;
-               /* Experiment 3: kill a _M */
-               case 20: /* leftover from exp 2 */
-                       exper_2_part2(&mgr_p1);
-                       printk("************* Starting experiment 3 ************** \n");
-               case 21:
-               case 22:
-               case 23:
-               case 24:
-               case 25:
-               case 26:
-               case 27:
-               case 28:
-               case 29:
-                       exper_3_part2(&mgr_p1);
-                       quick_proc_run("msr_dumb_while", mgr_p1);
-                       break;
-               /* Experiment 4: _S create and death*/
-               case 30: /* leftover from exp 3 */
-                       exper_3_part2(&mgr_p1);
-                       printk("************* Starting experiment 4 ************** \n");
-               case 31:
-               case 32:
-               case 33:
-               case 34:
-               case 35:
-               case 36:
-               case 37:
-               case 38:
-               case 39:
-                       exper_4_part2(&mgr_p1);
-                       printk("[T]:004:S:%llu\n", read_tsc());
-                       quick_proc_run("tsc_spitter", mgr_p1);
-                       break;
-               /* Experiment 5: raw preempt, entire process*/
-               case 40:
-                       exper_4_part2(&mgr_p1);
-                       printk("************* Starting experiment 5 ************** \n");
-               case 41:
-               case 42:
-               case 43:
-               case 44:
-               case 45:
-               case 46:
-               case 47:
-               case 48:
-               case 49:
-                       exper_5_part2(&mgr_p1);
-                       quick_proc_run("msr_nice_while", mgr_p1);
-                       break;
-               /* Experiment 6: preempt-warn, entire process */
-               case 50:
-                       exper_5_part2(&mgr_p1);
-                       printk("************* Starting experiment 6 ************** \n");
-               case 51:
-               case 52:
-               case 53:
-               case 54:
-               case 55:
-               case 56:
-               case 57:
-               case 58:
-               case 59:
-                       exper_6_part2(&mgr_p1);
-                       quick_proc_run("msr_nice_while", mgr_p1);
-                       break;
-               /* Experiment 7: preempt-raw, single core */
-               case 60:
-                       exper_6_part2(&mgr_p1);
-                       printk("************* Starting experiment 7 ************** \n");
-               case 61:
-               case 62:
-               case 63:
-               case 64:
-               case 65:
-               case 66:
-               case 67:
-               case 68:
-               case 69:
-                       exper_7_part2(&mgr_p1);
-                       quick_proc_run("msr_nice_while", mgr_p1);
-                       break;
-               /* Experiment 8: preempt-warn, single core */
-               case 70:
-                       exper_7_part2(&mgr_p1);
-                       printk("************* Starting experiment 8 ************** \n");
-               case 71:
-               case 72:
-               case 73:
-               case 74:
-               case 75:
-               case 76:
-               case 77:
-               case 78:
-               case 79:
-                       exper_8_part2(&mgr_p1);
-                       quick_proc_run("msr_nice_while", mgr_p1);
-                       break;
-               /* Experiment 9: single notification time */
-               case 80:
-                       exper_8_part2(&mgr_p1);
-                       printk("************* Starting experiment 9 ************** \n");
-               case 81:
-               case 82:
-               case 83:
-               case 84:
-               case 85:
-               case 86:
-               case 87:
-               case 88:
-               case 89:
-                       exper_9_part2(&mgr_p1);
-                       quick_proc_run("msr_dumb_while", mgr_p1);
-                       break;
-               /* Experiment 10: cycling vcore */
-               case 90:
-                       exper_9_part2(&mgr_p1);
-                       printk("************* Starting experiment 10 ************* \n");
-                       quick_proc_run("msr_dumb_while", mgr_p1);
-                       break;
-               case 91:
-                       quick_proc_run("msr_cycling_vcores", mgr_p2);
-                       break;
-               case 92:
-                       printk("Will go on forever.  Udelaying for two minutes.\n");
-                       udelay(120000000);
-                       proc_incref(mgr_p1, 1);
-                       proc_destroy(mgr_p1);
-                       proc_decref(mgr_p1, 1);
-                       proc_incref(mgr_p2, 1);
-                       proc_destroy(mgr_p2);
-                       proc_decref(mgr_p2, 1);
-                       printk("Done with the tests!");
-                       monitor(0);
-                       break;
-               default:
-                       printd("Manager Progress: %d\n", progress);
-                       schedule();
-       }
-       monitor(0);
-       return;
-}
-
-/* OSDI experiment "bottom halves" */
-/* Experiment 1: get max vcores */
-static void exper_1_part2(struct proc **pp)
-{
-       while (*pp) /* make sure the previous run is over */
-               cpu_relax();
-}
-
-/* Experiment 2: get a single vcore */
-static void exper_2_part2(struct proc **pp)
-{
-       while (*pp) /* make sure the previous run is over */
-               cpu_relax();
-}
-
-/* Experiment 3: kill a _M */
-static void exper_3_part2(struct proc **pp)
-{
-       uint64_t begin = 0, diff = 0;
-
-       if (*pp) { /* need to kill, etc */
-               proc_incref(*pp, 1);
-               begin = start_timing(); 
-               proc_destroy(*pp);
-               proc_decref(*pp, 1);
-               wmb();
-               while (*pp) /* toggled in proc_free */
-                       cpu_relax();
-               diff = stop_timing(begin);      
-               printk("Took %llu usec (%llu nsec) to kill.\n",
-                      diff * 1000000 / system_timing.tsc_freq,
-                      diff * 1000000000 / system_timing.tsc_freq);
-               printk("[T]:003:%llu:%llu\n",
-                      diff * 1000000 / system_timing.tsc_freq,
-                      diff * 1000000000 / system_timing.tsc_freq);
-       }
-}
-
-/* Experiment 4: _S create and death*/
-static void exper_4_part2(struct proc **pp)
-{
-       while (*pp) /* make sure the previous run is over */
-               cpu_relax();
-}
-
-/* Experiment 5: raw preempt, entire process*/
-static void exper_5_part2(struct proc **pp)
-{
-       uint64_t begin = 0, diff = 0;
-       uint32_t end_refcnt = 0;
-       bool self_ipi_pending = FALSE;
-
-       if (*pp) {
-               proc_incref(*pp, 1);
-               spin_lock(&(*pp)->proc_lock);
-               end_refcnt = (*pp)->env_refcnt - (*pp)->procinfo->num_vcores;
-               begin = start_timing();
-               self_ipi_pending = __proc_preempt_all(*pp);
-               spin_unlock(&(*pp)->proc_lock);
-               __proc_kmsg_pending(*pp, self_ipi_pending);
-               spin_on((*pp)->env_refcnt != end_refcnt);
-               diff = stop_timing(begin);
-               printk("Took %llu usec (%llu nsec) to raw preempt all.\n",
-                      diff * 1000000 / system_timing.tsc_freq,
-                      diff * 1000000000 / system_timing.tsc_freq);
-               printk("[T]:005:%llu:%llu\n",
-                      diff * 1000000 / system_timing.tsc_freq,
-                      diff * 1000000000 / system_timing.tsc_freq);
-               proc_destroy(*pp);
-               proc_decref(*pp, 1);
-               while (*pp) /* toggled in proc_free */
-                       cpu_relax();
-       }
-}
-
-/* Experiment 6: preempt-warn, entire process */
-static void exper_6_part2(struct proc **pp)
-{
-       uint64_t begin = 0, diff = 0;
-
-       if (*pp) {
-               proc_incref(*pp, 1);
-               spin_lock(&(*pp)->proc_lock);
-               begin = start_timing();
-               __proc_preempt_warnall(*pp, 1000000);
-               spin_unlock(&(*pp)->proc_lock);
-               spin_on((*pp)->procinfo->num_vcores > 1);
-               diff = stop_timing(begin);
-               printk("Took %llu usec (%llu nsec) to warn preempt all.\n",
-                      diff * 1000000 / system_timing.tsc_freq,
-                      diff * 1000000000 / system_timing.tsc_freq);
-               printk("[T]:006:%llu:%llu\n",
-                      diff * 1000000 / system_timing.tsc_freq,
-                      diff * 1000000000 / system_timing.tsc_freq);
-               proc_destroy(*pp);
-               proc_decref(*pp, 1);
-               while (*pp) /* toggled in proc_free */
-                       cpu_relax();
-       }
-}
-
-/* Experiment 7: preempt-raw, single core */
-static void exper_7_part2(struct proc **pp)
-{
-       uint64_t begin = 0, diff = 0;
-       bool self_ipi_pending = FALSE;
-       uint32_t vcoreid, pcoreid = 7; // some core available on all systems
-
-       if (*pp) {
-               proc_incref(*pp, 1);
-               spin_lock(&(*pp)->proc_lock);
-               assert((*pp)->procinfo->pcoremap[pcoreid].valid);
-               begin = start_timing();
-               self_ipi_pending = __proc_preempt_core(*pp, pcoreid);
-               spin_unlock(&(*pp)->proc_lock);
-               __proc_kmsg_pending(*pp, self_ipi_pending);
-               spin_on((*pp)->procinfo->pcoremap[pcoreid].valid);
-               diff = stop_timing(begin);
-               printk("Took %llu usec (%llu nsec) to raw-preempt one core.\n",
-                      diff * 1000000 / system_timing.tsc_freq,
-                      diff * 1000000000 / system_timing.tsc_freq);
-               printk("[T]:007:%llu:%llu\n",
-                      diff * 1000000 / system_timing.tsc_freq,
-                      diff * 1000000000 / system_timing.tsc_freq);
-               proc_destroy(*pp);
-               proc_decref(*pp, 1);
-               while (*pp) /* toggled in proc_free */
-                       cpu_relax();
-       }
-}
-
-/* Experiment 8: preempt-warn, single core */
-static void exper_8_part2(struct proc **pp)
-{
-       uint64_t begin = 0, diff = 0;
-       uint32_t vcoreid, pcoreid = 7; // some core available on all systems
-
-       if (*pp) {
-               proc_incref(*pp, 1);
-               spin_lock(&(*pp)->proc_lock);
-               vcoreid = (*pp)->procinfo->pcoremap[pcoreid].vcoreid;
-               assert((*pp)->procinfo->pcoremap[pcoreid].valid);
-               begin = start_timing();
-               __proc_preempt_warn(*pp, vcoreid, 1000000); // 1 sec
-               spin_unlock(&(*pp)->proc_lock);
-               spin_on((*pp)->procinfo->pcoremap[pcoreid].valid);
-               diff = stop_timing(begin);
-               printk("Took %llu usec (%llu nsec) to warn-preempt one core.\n",
-                      diff * 1000000 / system_timing.tsc_freq,
-                      diff * 1000000000 / system_timing.tsc_freq);
-               printk("[T]:008:%llu:%llu\n",
-                      diff * 1000000 / system_timing.tsc_freq,
-                      diff * 1000000000 / system_timing.tsc_freq);
-               proc_destroy(*pp);
-               proc_decref(*pp, 1);
-               while (*pp) /* toggled in proc_free */
-                       cpu_relax();
-       }
-}
-
-/* Experiment 9: single notification time */
-static void exper_9_part2(struct proc **pp)
-{
-       struct notif_event ne = {0};
-
-       if (*pp) {
-               ne.ne_type = NE_ALARM;
-               proc_incref(*pp, 1);
-               printk("[T]:009:B:%llu\n", read_tsc());
-               proc_notify(*pp, NE_ALARM, &ne); 
-               proc_destroy(*pp);
-               proc_decref(*pp, 1);
-               while (*pp) /* toggled in proc_free */
-                       cpu_relax();
-       }
-}
-
-#endif /* __CONFIG_OSDI__ */
-
 #ifdef __sparc_v8__
 
 static char*
diff --git a/kern/src/mm.c b/kern/src/mm.c
index 032178a..988f417 100644
 void *mmap(struct proc *p, uintptr_t addr, size_t len, int prot, int flags,
            int fd, size_t offset)
 {
-#ifdef __CONFIG_EXPER_TRADPROC__
-if (!is_real_proc(p))
-       p = p->true_proc;
-#endif /* __CONFIG_EXPER_TRADPROC__ */
-
        printd("mmap(addr %x, len %x, prot %x, flags %x, fd %x, off %x)\n", addr,
               len, prot, flags, fd, offset);
        if (fd >= 0 && (flags & MAP_SHARED)) {
@@ -58,11 +53,6 @@ if (!is_real_proc(p))
 void *do_mmap(struct proc *p, uintptr_t addr, size_t len, int prot, int flags,
               struct file* file, size_t offset)
 {
-#ifdef __CONFIG_EXPER_TRADPROC__
-if (!is_real_proc(p))
-       p = p->true_proc;
-#endif /* __CONFIG_EXPER_TRADPROC__ */
-
        // TODO: grab the appropriate mm_lock
        spin_lock(&p->proc_lock);
        void* ret = __do_mmap(p,addr,len,prot,flags,file,offset);
@@ -73,11 +63,6 @@ if (!is_real_proc(p))
 void *__do_mmap(struct proc *p, uintptr_t addr, size_t len, int prot, int flags,
                 struct file* file, size_t offset)
 {
-#ifdef __CONFIG_EXPER_TRADPROC__
-if (!is_real_proc(p))
-       p = p->true_proc;
-#endif /* __CONFIG_EXPER_TRADPROC__ */
-
        int num_pages = ROUNDUP(len, PGSIZE) / PGSIZE;
 
 #ifndef __CONFIG_DEMAND_PAGING__
@@ -168,11 +153,6 @@ if (!is_real_proc(p))
 
 int mprotect(struct proc* p, void* addr, size_t len, int prot)
 {
-#ifdef __CONFIG_EXPER_TRADPROC__
-if (!is_real_proc(p))
-       p = p->true_proc;
-#endif /* __CONFIG_EXPER_TRADPROC__ */
-
        printd("mprotect(addr %x, len %x, prot %x)\n",addr,len,prot);
        if((uintptr_t)addr % PGSIZE || (len == 0 && (prot & PROT_UNMAP)))
        {
@@ -197,11 +177,6 @@ if (!is_real_proc(p))
 
 int __mprotect(struct proc* p, void* addr, size_t len, int prot)
 {
-#ifdef __CONFIG_EXPER_TRADPROC__
-if (!is_real_proc(p))
-       p = p->true_proc;
-#endif /* __CONFIG_EXPER_TRADPROC__ */
-
        int newperm = (prot & PROT_WRITE) ? PTE_USER_RW :
                      (prot & (PROT_READ|PROT_EXEC)) ? PTE_USER_RO : 0;
 
@@ -253,31 +228,16 @@ if (!is_real_proc(p))
 
 int munmap(struct proc* p, void* addr, size_t len)
 {
-#ifdef __CONFIG_EXPER_TRADPROC__
-if (!is_real_proc(p))
-       p = p->true_proc;
-#endif /* __CONFIG_EXPER_TRADPROC__ */
-
        return mprotect(p, addr, len, PROT_UNMAP);
 }
 
 int __munmap(struct proc* p, void* addr, size_t len)
 {
-#ifdef __CONFIG_EXPER_TRADPROC__
-if (!is_real_proc(p))
-       p = p->true_proc;
-#endif /* __CONFIG_EXPER_TRADPROC__ */
-
        return __mprotect(p, addr, len, PROT_UNMAP);
 }
 
 int handle_page_fault(struct proc* p, uintptr_t va, int prot)
 {
-#ifdef __CONFIG_EXPER_TRADPROC__
-if (!is_real_proc(p))
-       p = p->true_proc;
-#endif /* __CONFIG_EXPER_TRADPROC__ */
-
        va = ROUNDDOWN(va,PGSIZE);
 
        if(prot != PROT_READ && prot != PROT_WRITE && prot != PROT_EXEC)
@@ -291,11 +251,6 @@ if (!is_real_proc(p))
        
 int __handle_page_fault(struct proc* p, uintptr_t va, int prot)
 {
-#ifdef __CONFIG_EXPER_TRADPROC__
-if (!is_real_proc(p))
-       p = p->true_proc;
-#endif /* __CONFIG_EXPER_TRADPROC__ */
-
        int ret = -1;
        // find offending PTE
        pte_t* ppte = pgdir_walk(p->env_pgdir,(void*)va,0);
diff --git a/kern/src/process.c b/kern/src/process.c
index 8c8921e..4868963 100644
@@ -46,11 +46,6 @@ uint32_t num_mgmtcores = 1;
 void put_idle_core(uint32_t coreid)
 {
        spin_lock(&idle_lock);
-#ifdef __CONFIG_EXPER_TRADPROC__ /* often a good check, but hurts performance */
-       for (int i = 0; i < num_idlecores; i++)
-               if (idlecoremap[i] == coreid)
-                       warn("Core %d added to the freelist twice!", coreid);
-#endif /* __CONFIG_EXPER_TRADPROC__ */
        idlecoremap[num_idlecores++] = coreid;
        spin_unlock(&idle_lock);
 }
@@ -241,67 +236,6 @@ proc_init_procinfo(struct proc* p)
 #endif /* __CONFIG_DISABLE_SMT__ */
 }
 
-#ifdef __CONFIG_EXPER_TRADPROC__
-bool is_real_proc(struct proc *p)
-{
-       // the real proc has no true proc pointer
-       return !p->true_proc;
-}
-
-/* Make a _S process to represent a vcore in a traditional threading/scheduling
- * model.  Should be able to proc_run this once it's done.  Hold the parent's
- * lock when you call this. */
-int fake_proc_alloc(struct proc **pp, struct proc *parent, uint32_t vcoreid)
-{
-       error_t r;
-       struct proc *p;
-
-       if (!(p = kmem_cache_alloc(proc_cache, 0)))
-               return -ENOMEM;
-
-       spinlock_init(&p->proc_lock);
-       p->pid = parent->pid;
-       p->ppid = parent->ppid;
-       p->exitcode = 0;
-       p->state = PROC_RUNNING_M;
-       p->env_refcnt = 2;
-       p->env_entry = parent->env_entry;
-       p->cache_colors_map = parent->cache_colors_map;
-       p->next_cache_color = parent->next_cache_color;
-       p->heap_top = (void*)0xdeadbeef; // shouldn't use this.  poisoning.
-       p->env_pgdir = parent->env_pgdir;
-       p->env_cr3 = parent->env_cr3;
-       p->procinfo = parent->procinfo;
-       p->procdata = parent->procdata;
-       /* Don't use ARSCs, they aren't turned on */
-       // p->syscallbackring = not happening
-       p->true_proc = parent;
-       p->vcoreid = vcoreid;
-       /* there is a slight race on the old vcore mapping.  for a brief period, it
-        * is unmapped, but still tracked by the parent.  it's between the unmapping
-        * and the freeing (where the vcore_procs[i] is cleared, which we need to
-        * hold on to until the fake_proc has abandoned core.  a brief spin should
-        * be okay. */
-       spin_on(parent->vcore_procs[vcoreid]);
-       assert(!parent->vcore_procs[vcoreid]);
-       /* map us to the true parent vcoremap */
-       parent->vcore_procs[vcoreid] = p;
-       parent->env_refcnt++;
-
-       memset(&p->env_ancillary_state, 0, sizeof(p->env_ancillary_state));
-       /* env_tf is 0'd in init_trapframe */
-       struct preempt_data *vcpd = &p->procdata->vcore_preempt_data[vcoreid];
-       proc_init_trapframe(&p->env_tf, vcoreid, p->env_entry,
-                           vcpd->transition_stack);
-
-       *pp = p;
-       atomic_inc(&num_envs);
-
-       printd("[%08x] fake process %08x\n", current ? current->pid : 0, p->pid);
-       return 0;
-}
-#endif /* __CONFIG_EXPER_TRADPROC__ */
-
 /* Allocates and initializes a process, with the given parent.  Currently
  * writes the *p into **pp, and returns 0 on success, < 0 for an error.
  * Errors include:
@@ -370,12 +304,6 @@ static error_t proc_alloc(struct proc *SAFE*SAFE pp, pid_t parent_id)
        *pp = p;
        atomic_inc(&num_envs);
 
-#ifdef __CONFIG_EXPER_TRADPROC__
-       p->true_proc = 0;
-       p->vcoreid = 0;
-       memset(p->vcore_procs, 0, sizeof(p->vcore_procs));
-#endif /* __CONFIG_EXPER_TRADPROC__ */
-
        frontend_proc_init(p);
 
        printd("[%08x] new process %08x\n", current ? current->pid : 0, p->pid);
@@ -411,35 +339,6 @@ static void __proc_free(struct proc *p)
        // All parts of the kernel should have decref'd before __proc_free is called
        assert(p->env_refcnt == 0);
 
-#ifdef __CONFIG_EXPER_TRADPROC__
-       if (!is_real_proc(p)) {
-               printd("Fake proc on core %d unmapping from parent\n", core_id());
-               p->true_proc->vcore_procs[p->vcoreid] = 0; /* unmap self */
-               proc_decref(p->true_proc, 1); // might deadlock
-               kmem_cache_free(proc_cache, p);
-               return;
-       } else {
-               /* make sure the kids are dead before spinning */
-               if (current && !is_real_proc(current)) {
-                       __abandon_core();
-               }
-               /* spin til my peeps are dead */
-               for (int i = 0; i < MAX_NUM_CPUS; i++) {
-                       for (int j = 0; p->vcore_procs[i]; j++) {
-                               cpu_relax();
-                               if (j == 10000) {
-                                       printk("Core %d stalled while waiting on peep %d\n",
-                                              core_id(), i);
-                                       //send_kernel_message(p->procinfo->vcoremap[i].pcoreid,
-                                       //                    __death, 0, 0, 0, KMSG_ROUTINE);
-                               }
-                       }
-               }
-       }
-       assert(is_real_proc(p));
-       printd("Core %d really trying to free proc %d (%p)\n", core_id(), p->pid, p);
-#endif /* __CONFIG_EXPER_TRADPROC__ */
-
        frontend_proc_free(p);
 
        // Free any colors allocated to this process
@@ -469,16 +368,6 @@ static void __proc_free(struct proc *p)
 
        /* Dealloc the struct proc */
        kmem_cache_free(proc_cache, p);
-
-#ifdef __CONFIG_OSDI__ /* for experiment coordination */
-       extern struct proc *mgr_p1, *mgr_p2;
-       /* Signal to the monitor we're done */
-       if (p == mgr_p1)
-               mgr_p1 = 0;
-       if (p == mgr_p2)
-               mgr_p2 = 0;
-       printk("[T]:004:E:%llu\n", read_tsc());
-#endif /* __CONFIG_EXPER_TRADPROC__ */
 }
 
 /* Whether or not actor can control target.  Note we currently don't need
@@ -507,40 +396,6 @@ void proc_run(struct proc *p)
        bool self_ipi_pending = FALSE;
        spin_lock(&p->proc_lock);
 
-#ifdef __CONFIG_EXPER_TRADPROC__
-       /* this filth is so the state won't affect how it's run.  whenever we call
-        * proc_run, we think we are RUNNABLE_S.  prob issues with DYING. */
-       switch (p->state) {
-               case (PROC_DYING):
-                       spin_unlock(&p->proc_lock);
-                       printk("Process %d not starting due to async death\n", p->pid);
-                       if (!management_core())
-                               smp_idle(); // this never returns
-                       return;
-               case (PROC_RUNNABLE_S):
-                       assert(current != p);
-                       __proc_set_state(p, PROC_RUNNING_S);
-                       __seq_start_write(&p->procinfo->coremap_seqctr);
-                       p->procinfo->num_vcores = 0;
-                       __map_vcore(p, p->vcoreid, core_id());
-                       __seq_end_write(&p->procinfo->coremap_seqctr);
-                       // fallthru
-               case (PROC_RUNNING_M):
-                       if (p == current)
-                               p->env_refcnt--; // TODO: (REF) use incref
-                       spin_unlock(&p->proc_lock);
-                       // TODO: HSS!!
-                       // restore fp state from the preempt slot?
-                       disable_irq();
-                       __proc_startcore(p, &p->env_tf);
-                       break;
-               default:
-                       panic("Weird state(%s) in %s()", procstate2str(p->state),
-                             __FUNCTION__);
-       }
-       return;
-#endif /* __CONFIG_EXPER_TRADPROC__ */
-
        switch (p->state) {
                case (PROC_DYING):
                        spin_unlock(&p->proc_lock);
@@ -702,23 +557,10 @@ void proc_destroy(struct proc *p)
 {
        bool self_ipi_pending = FALSE;
 
-#ifdef __CONFIG_EXPER_TRADPROC__
-       /* in case a fake proc tries to kill themselves directly */
-       if (!is_real_proc(p)) {
-               printd("Trying to destroy a fake proc, will kill true proc\n");
-               proc_destroy(p->true_proc);
-               return;
-       }
-#endif /* __CONFIG_EXPER_TRADPROC__ */
-
        spin_lock(&p->proc_lock);
 
        /* TODO: (DEATH) look at this again when we sort the __death IPI */
-#ifdef __CONFIG_EXPER_TRADPROC__
-       if ((current == p) || (current && (current->true_proc == p)))
-#else
        if (current == p)
-#endif /* __CONFIG_EXPER_TRADPROC__ */
                self_ipi_pending = TRUE;
 
        switch (p->state) {
@@ -847,19 +689,10 @@ void proc_yield(struct proc *SAFE p, bool being_nice)
        uint32_t vcoreid = get_vcoreid(p, core_id());
        struct vcore *vc = &p->procinfo->vcoremap[vcoreid];
 
-#ifdef __CONFIG_OSDI__
-       bool new_idle_core = FALSE;
-#endif /* __CONFIG_OSDI__ */
-
        /* no reason to be nice, return */
        if (being_nice && !vc->preempt_pending)
                return;
 
-#ifdef __CONFIG_EXPER_TRADPROC__
-       if (p->state & (PROC_RUNNING_M | PROC_DYING))
-               return;
-#endif /* __CONFIG_EXPER_TRADPROC__ */
-
        spin_lock(&p->proc_lock); /* horrible scalability.  =( */
 
        /* fate is sealed, return and take the preempt message on the way out.
@@ -884,8 +717,7 @@ void proc_yield(struct proc *SAFE p, bool being_nice)
                        printd("[K] Process %d (%p) is yielding on vcore %d\n", p->pid, p,
                               get_vcoreid(p, core_id()));
                        /* TODO: (RMS) the Scheduler cannot handle the Runnable Ms (RMS), so
-                        * don't yield the last vcore.  It's ghetto and for OSDI, but it
-                        * needs to be fixed for all builds, not just CONFIG_OSDI. */
+                        * don't yield the last vcore. */
                        if (p->procinfo->num_vcores == 1) {
                                spin_unlock(&p->proc_lock);
                                return;
@@ -899,9 +731,6 @@ void proc_yield(struct proc *SAFE p, bool being_nice)
                        __seq_end_write(&p->procinfo->coremap_seqctr);
                        // add to idle list
                        put_idle_core(core_id());
-#ifdef __CONFIG_OSDI__
-                       new_idle_core = TRUE;
-#endif /* __CONFIG_OSDI__ */
                        // last vcore?  then we really want 1, and to yield the gang
                        // TODO: (RMS) will actually do this.
                        if (p->procinfo->num_vcores == 0) {
@@ -917,21 +746,8 @@ void proc_yield(struct proc *SAFE p, bool being_nice)
        }
        spin_unlock(&p->proc_lock);
        proc_decref(p, 1); // need to eat the ref passed in.
-#ifdef __CONFIG_OSDI__
-       /* If there was a change to the idle cores, try and give our core to someone who was
-        * preempted.  core_request likely won't return.  if that happens, p's
-        * context ought to be cleaned up in the proc_startcore of the new guy. (if
-        * we actually yielded)
-        * TODO: (RMS) do this more intelligently e.g.: kick_scheduler(); */
-       extern struct proc *victim;
-       if (new_idle_core && victim) {
-               /* this ghetto victim pointer is not an edible reference, and core
-                * request will eat it when it doesn't return. */
-               proc_incref(victim, 1);
-               core_request(victim);
-               proc_decref(victim, 1);
-       }
-#endif /* __CONFIG_OSDI__ */
+       /* TODO: (RMS) If there was a change to the idle cores, try and give our
+        * core to someone who was preempted. */
        /* Clean up the core and idle.  For mgmt cores, they will ultimately call
         * manager, which will call schedule() and will repick the yielding proc. */
        abandon_core();
@@ -1200,9 +1016,6 @@ uint32_t proc_get_vcoreid(struct proc *SAFE p, uint32_t pcoreid)
  * WARNING: You must hold the proc_lock before calling this! */
 bool __proc_give_cores(struct proc *SAFE p, uint32_t *pcorelist, size_t num)
 { TRUSTEDBLOCK
-#ifdef __CONFIG_EXPER_TRADPROC__
-       assert(is_real_proc(p));
-#endif /* __CONFIG_EXPER_TRADPROC__ */
        bool self_ipi_pending = FALSE;
        uint32_t free_vcoreid = 0;
        switch (p->state) {
@@ -1225,9 +1038,6 @@ bool __proc_give_cores(struct proc *SAFE p, uint32_t *pcorelist, size_t num)
                                        assert(p->procinfo->vcoremap[i].valid);
                        }
                        // add new items to the vcoremap
-#ifdef __CONFIG_EXPER_TRADPROC__
-                       __proc_set_state(p, PROC_RUNNING_M);
-#endif /* __CONFIG_EXPER_TRADPROC__ */
                        __seq_start_write(&p->procinfo->coremap_seqctr);
                        for (int i = 0; i < num; i++) {
                                // find the next free slot, which should be the next one
@@ -1236,12 +1046,6 @@ bool __proc_give_cores(struct proc *SAFE p, uint32_t *pcorelist, size_t num)
                                       pcorelist[i]);
                                __map_vcore(p, free_vcoreid, pcorelist[i]);
                                p->procinfo->num_vcores++;
-#ifdef __CONFIG_EXPER_TRADPROC__
-                               struct proc *fake_proc;
-                               /* every vcore is a fake proc */
-                               fake_proc_alloc(&fake_proc, p, free_vcoreid);
-                               local_schedule_proc(pcorelist[i], fake_proc);
-#endif /* __CONFIG_EXPER_TRADPROC__ */
                        }
                        __seq_end_write(&p->procinfo->coremap_seqctr);
                        break;
@@ -1249,9 +1053,7 @@ bool __proc_give_cores(struct proc *SAFE p, uint32_t *pcorelist, size_t num)
                        /* Up the refcnt, since num cores are going to start using this
                         * process and have it loaded in their 'current'. */
                        // TODO: (REF) use proc_incref once we have atomics
-#ifndef __CONFIG_EXPER_TRADPROC__ // the refcnt is done in fake_proc_alloc
                        p->env_refcnt += num;
-#endif /* __CONFIG_EXPER_TRADPROC__ */
                        __seq_start_write(&p->procinfo->coremap_seqctr);
                        for (int i = 0; i < num; i++) {
                                free_vcoreid = get_free_vcoreid(p, free_vcoreid);
@@ -1259,14 +1061,8 @@ bool __proc_give_cores(struct proc *SAFE p, uint32_t *pcorelist, size_t num)
                                       pcorelist[i]);
                                __map_vcore(p, free_vcoreid, pcorelist[i]);
                                p->procinfo->num_vcores++;
-#ifdef __CONFIG_EXPER_TRADPROC__
-                               struct proc *fake_proc;
-                               fake_proc_alloc(&fake_proc, p, free_vcoreid);
-                               local_schedule_proc(pcorelist[i], fake_proc);
-#else
                                send_kernel_message(pcorelist[i], __startcore, p, 0, 0,
                                                    KMSG_ROUTINE);
-#endif /* __CONFIG_EXPER_TRADPROC__ */
                                if (pcorelist[i] == core_id())
                                        self_ipi_pending = TRUE;
                        }
@@ -1307,10 +1103,6 @@ bool __proc_take_cores(struct proc *SAFE p, uint32_t *pcorelist,
                        size_t num, amr_t message, TV(a0t) arg0,
                        TV(a1t) arg1, TV(a2t) arg2)
 { TRUSTEDBLOCK
-#ifdef __CONFIG_EXPER_TRADPROC__
-       assert(is_real_proc(p));
-       assert(0);
-#endif /* __CONFIG_EXPER_TRADPROC__ */
        uint32_t vcoreid, pcoreid;
        bool self_ipi_pending = FALSE;
        switch (p->state) {
@@ -1362,9 +1154,6 @@ bool __proc_take_cores(struct proc *SAFE p, uint32_t *pcorelist,
 bool __proc_take_allcores(struct proc *SAFE p, amr_t message,
                           TV(a0t) arg0, TV(a1t) arg1, TV(a2t) arg2)
 {
-#ifdef __CONFIG_EXPER_TRADPROC__
-       assert(is_real_proc(p));
-#endif /* __CONFIG_EXPER_TRADPROC__ */
        uint32_t active_vcoreid = 0, pcoreid;
        bool self_ipi_pending = FALSE;
        switch (p->state) {
@@ -1382,13 +1171,6 @@ bool __proc_take_allcores(struct proc *SAFE p, amr_t message,
        assert(num_idlecores + p->procinfo->num_vcores <= num_cpus); // sanity
        spin_unlock(&idle_lock);
        __seq_start_write(&p->procinfo->coremap_seqctr);
-#ifdef __CONFIG_EXPER_TRADPROC__
-       /* Decref each child, so they will free themselves when they unmap */
-       for (int i = 0; i < MAX_NUM_CPUS; i++) {
-               if (p->vcore_procs[i])
-                       proc_decref(p->vcore_procs[i], 1);
-       }
-#endif /* __CONFIG_EXPER_TRADPROC__ */
        for (int i = 0; i < p->procinfo->num_vcores; i++) {
                // find next active vcore
                active_vcoreid = get_busy_vcoreid(p, active_vcoreid);
@@ -1726,27 +1508,6 @@ void print_proc_info(pid_t pid)
        for (int i = 0; i < MAX_NUM_RESOURCES; i++)
                printk("\tRes type: %02d, amt wanted: %08d, amt granted: %08d\n", i,
                       p->resources[i].amt_wanted, p->resources[i].amt_granted);
-#ifdef __CONFIG_EXPER_TRADPROC__
-       void print_chain(struct proc *p)
-       {
-               if (!is_real_proc(p)) {
-                       printk("P is not a true_proc, parent is %p\n", p->true_proc);
-                       print_chain(p);
-               } else {
-                       printk("P is a true_proc\n");
-                       for (int i = 0; i < p->procinfo->num_vcores; i++) {
-                               printk("%p's child %d is %p\n", p, i, p->vcore_procs[i]);
-                               if (p->vcore_procs[i])
-                                       for (int j = 0; j < MAX_NUM_CPUS; j++)
-                                               if (p->vcore_procs[i]->vcore_procs[j])
-                                                       printk("Crap, child %p has its own child %p!!\n",
-                                                              p->vcore_procs[i],
-                                                              p->vcore_procs[i]->vcore_procs[j]);
-                       }
-               }
-       }
-       print_chain(p);
-#endif /* __CONFIG_EXPER_TRADPROC__ */
        /* No one cares, and it clutters the terminal */
        //printk("Vcore 0's Last Trapframe:\n");
        //print_trapframe(&p->env_tf);
diff --git a/kern/src/resource.c b/kern/src/resource.c
index 0cf07eb..bdd2b30 100644
 #include <schedule.h>
 #include <hashtable.h>
 
-#ifdef __CONFIG_OSDI__
-/* Whoever was preempted from.  Ghetto hacks in yield will use this to give the
- * core back.  Assumes only one proc is getting preempted, which is true for
- * OSDI. */
-struct proc *victim = NULL;
-#endif
-
 /* This deals with a request for more cores.  The request is already stored in
  * the proc's amt_wanted (it is compared to amt_granted). 
  *
@@ -101,65 +94,10 @@ ssize_t core_request(struct proc *p)
                }
                num_granted = amt_new;
        } else {
-#ifdef __CONFIG_OSDI__
-               /* take what we can from the idlecoremap, then if enough aren't
-                * available, take what we can now. */  
-               num_granted = num_idlecores;
-               for (int i = 0; i < num_granted; i++) {
-                       corelist[i] = idlecoremap[num_idlecores-1];
-                       num_idlecores--;
-               }
-#else
+               /* In this case, you might want to preempt or do other fun things... */
                num_granted = 0;
-#endif /* __CONFIG_OSDI__ */
        }
        spin_unlock(&idle_lock);
-#ifdef __CONFIG_OSDI__
-       /* Ghetto, using the SOFT flag to mean "take this from someone else" */
-       if (p->resources[RES_CORES].flags & REQ_SOFT) {
-               /* And take whatever else we can from whoever is using other cores */
-               size_t num_to_preempt = amt_new - num_granted;
-               size_t num_preempted = 0;
-               
-               printd("Attempted to preempt %d cores for proc %d (%p)\n",
-                      num_to_preempt, p->pid, p);
-               /* Find and preempt some cores.  Note the skipping of core 0.  Also note
-                * this is a horrible way to do it.  A reasonably smart scheduler can
-                * check its pcoremap. */
-               for (int i = 1; i < num_cpus; i++) {
-                       victim = per_cpu_info[i].cur_proc;
-                       /* victim is a core with a current proc that isn't us */
-                       if (victim && victim != p) {
-                               printd("Preempting pcore %d from proc %d (%p)\n", i, 
-                                      victim->pid, victim);
-                               /* preempt_core technically needs an edible reference, though
-                                * currently we always return since the victim isn't current */
-                               proc_incref(victim, 1);
-                               /* no waiting or anything, just take it.  telling them 1 sec */
-                               proc_preempt_core(victim, i, 1000000);
-                               proc_decref(victim, 1);
-                               num_preempted++;
-                       }
-                       if (num_preempted == num_to_preempt)
-                               break;
-               }
-               assert(num_preempted == num_to_preempt);
-               printd("Trying to get the idlecores recently preempted.\n");
-               /* Then take the idlecores for ourself.  Cannot handle a concurrent
-                * core_request.  If this fails, things will be fucked. */
-               spin_on(num_idlecores < num_to_preempt);
-               spin_lock(&idle_lock);
-               for (int i = num_granted; i < amt_new; i++) {
-                       // grab the last one on the list
-                       corelist[i] = idlecoremap[num_idlecores-1];
-                       num_idlecores--;
-               }
-               assert(num_idlecores >= 0);
-               spin_unlock(&idle_lock);
-               num_granted += num_preempted;
-               assert(num_granted == amt_new);
-       }
-#endif /* __CONFIG_OSDI__ */
 
        // Now, actually give them out
        if (num_granted) {
@@ -181,16 +119,6 @@ ssize_t core_request(struct proc *p)
                                /* If we remove this, vcore0 will start where the _S left off */
                                vcpd->notif_pending = TRUE;
                                assert(vcpd->notif_enabled);
-#ifdef __CONFIG_EXPER_TRADPROC__
-                               /* the proc that represents vcore0 will start at the entry
-                                * point, as if it was a notification handler, so we'll mimic
-                                * what __startcore would have done for a vcore0 restart. */
-                               vcpd->notif_tf = *current_tf;
-                               proc_init_trapframe(&p->env_tf, 0, p->env_entry,
-                                                   vcpd->transition_stack);
-                               vcpd->notif_pending = FALSE;
-                               vcpd->notif_enabled = FALSE;
-#endif /* __CONFIG_EXPER_TRADPROC__ */
                                /* in the async case, we'll need to remotely stop and bundle
                                 * vcore0's TF.  this is already done for the sync case (local
                                 * syscall). */
@@ -249,15 +177,6 @@ error_t resource_req(struct proc *p, int type, size_t amt_wanted,
                // We have no sense of time yet, or of half-filling requests
                printk("[kernel] Async requests treated synchronously for now.\n");
 
-#ifdef __CONFIG_EXPER_TRADPROC__
-       /* this might be fucking with refcnts */
-       struct proc *tp;
-       if (!is_real_proc(p)) {
-               tp = p->true_proc;
-               assert(tp && !tp->true_proc);
-               return resource_req(tp, type, amt_wanted, amt_wanted_min, flags);
-       }
-#endif /* __CONFIG_EXPER_TRADPROC__ */
        /* set the desired resource amount in the process's resource list. */
        spin_lock(&p->proc_lock);
        size_t old_amount = p->resources[type].amt_wanted;
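
For context, the surviving resource_req() path simply records the desired amount under the proc lock and falls through to core_request(). A minimal sketch of a caller, assuming the RES_CORES resource type from the resource headers (the ask_for_cores() helper is hypothetical, purely for illustration):

	/* Sketch: ask for 'want' vcores, accepting as few as 'min'.
	 * flags = 0 is a synchronous request; per the printk above, async
	 * requests are treated synchronously for now anyway. */
	static error_t ask_for_cores(struct proc *p, size_t want, size_t min)
	{
		return resource_req(p, RES_CORES, want, min, 0);
	}
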
index 537233d..8ccb931 100644 (file)
@@ -62,59 +62,3 @@ void smp_idle(void)
        }
        assert(0);
 }
-
-#ifdef __CONFIG_EXPER_TRADPROC__
-/* For experiments with per-core schedulers (traditional).  This checks the
- * runqueue, and if there is something there, it runs in.  Note this does
- * nothing for whoever was running here.  Consider saving and restoring them,
- * resetting current, etc. */
-void local_schedule(void)
-{
-       struct per_cpu_info *my_info = &per_cpu_info[core_id()];
-       struct proc *next_to_run;
-
-       spin_lock_irqsave(&my_info->runqueue_lock);
-       next_to_run = TAILQ_FIRST(&my_info->runqueue);
-       if (next_to_run)
-               TAILQ_REMOVE(&my_info->runqueue, next_to_run, proc_link);
-       spin_unlock_irqsave(&my_info->runqueue_lock);
-       if (!next_to_run)
-               return;
-       assert(next_to_run->state == PROC_RUNNING_M); // FILTHY HACK
-       printd("Core %d trying to run proc %08p\n", core_id(), next_to_run);
-       void proc_run_hand(struct trapframe *tf, uint32_t src_id, void *p, void *a1,
-                          void *a2)
-       {
-               proc_run((struct proc*)p);
-       }
-       send_kernel_message(core_id(), proc_run_hand, (void*)next_to_run, 0, 0,
-                           KMSG_ROUTINE);
-       return;
-}
-
-void local_schedule_proc(uint32_t core, struct proc *p)
-{
-       assert(core); // for sanity don't put them on core0 or any management core
-       struct per_cpu_info *my_info = &per_cpu_info[core];
-       spin_lock_irqsave(&my_info->runqueue_lock);
-       TAILQ_INSERT_TAIL(&my_info->runqueue, p, proc_link);
-       printd("SCHED: inserting proc %p on core %d\n", p, core);
-       spin_unlock_irqsave(&my_info->runqueue_lock);
-}
-
-/* ghetto func to act like a load balancer.  for now, it just looks at the head
- * of every other cpu's queue. */
-void load_balance(void)
-{
-       struct per_cpu_info *other_info;
-       struct proc *dummy;
-
-       for (int i = 0; i < num_cpus; i++) {
-               other_info = &per_cpu_info[i];
-               spin_lock_irqsave(&other_info->runqueue_lock);
-               dummy = TAILQ_FIRST(&other_info->runqueue);
-               spin_unlock_irqsave(&other_info->runqueue_lock);
-       }
-}
-
-#endif /* __CONFIG_EXPER_TRADPROC__ */
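
The deleted local_schedule() leaned on a GCC nested function for its kernel-message handler. For reference, the same dispatch with an ordinary file-scope handler, sketched against the send_kernel_message() call shown above (run_on_this_core() is a hypothetical wrapper):

	/* Sketch: hand a proc to proc_run() via a routine kernel message,
	 * without the nested-function idiom the removed code used. */
	static void proc_run_handler(struct trapframe *tf, uint32_t src_id,
	                             void *a0, void *a1, void *a2)
	{
		proc_run((struct proc*)a0);
	}

	static void run_on_this_core(struct proc *p)
	{
		send_kernel_message(core_id(), proc_run_handler, (void*)p, 0, 0,
		                    KMSG_ROUTINE);
	}

A file-scope handler also sidesteps the on-stack trampoline a nested function needs, which matters if the message could run after the enclosing frame is gone.
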
index 90d875e..3d98291 100644 (file)
@@ -266,16 +266,12 @@ static error_t sys_proc_destroy(struct proc *p, pid_t pid, int exitcode)
                set_errno(current_tf, ESRCH);
                return -1;
        }
-#ifndef __CONFIG_EXPER_TRADPROC__
        if (!proc_controls(p, p_to_die)) {
                proc_decref(p_to_die, 1);
                set_errno(current_tf, EPERM);
                return -1;
        }
        if (p_to_die == p) {
-#else
-       if ((p_to_die == p) || (p_to_die == p->true_proc)) {
-#endif /* __CONFIG_EXPER_TRADPROC__ */
                // syscall code and pid2proc both have edible references, only need 1.
                p->exitcode = exitcode;
                proc_decref(p, 1);
@@ -511,10 +507,8 @@ static intreg_t sys_munmap(struct proc* p, void* addr, size_t len)
 static void* sys_brk(struct proc *p, void* addr) {
        ssize_t range;
 
-#ifdef __CONFIG_EXPER_TRADPROC__
-       printk("[kernel] don't use brk, unsupported.\n");
-       return (void*)-1;
-#endif /* __CONFIG_EXPER_TRADPROC__ */
+       // TODO: remove sys_brk
+       printk("[kernel] don't use brk, unsupported and will be removed soon.\n");
 
        spin_lock(&p->proc_lock);
 
@@ -1015,8 +1009,7 @@ intreg_t sys_gettimeofday(struct proc* p, int* buf)
        spin_lock(&gtod_lock);
        if(t0 == 0)
 
-#if (defined __CONFIG_APPSERVER__) && (!defined __CONFIG_OSDI__)
-       // For OSDI, do not get time from appserver because it would lead to inaccurate measurements.
+#if (defined __CONFIG_APPSERVER__)
        t0 = ufe(time,0,0,0,0);
 #else
        // Nanwan's birthday, bitches!!
@@ -1052,22 +1045,6 @@ intreg_t sys_tcsetattr(struct proc* p, int fd, int optional_actions, const void*
        return ret;
 }
 
-intreg_t sys_fillmeup(struct proc *p, uint8_t *bufs, 
-                      uint16_t num_bufs, int32_t *last_written)
-{
-#if defined(__CONFIG_OSDI__) && defined(__CONFIG_NETWORKING__)
-       extern struct fillmeup fillmeup_data;
-       fillmeup_data.proc = p;
-       fillmeup_data.bufs = bufs;
-       fillmeup_data.num_bufs = num_bufs;
-       fillmeup_data.last_written = last_written;
-       *last_written = -1;
-       return 0;
-#else
-       return -1;
-#endif
-}
-
 /************** Syscall Invocation **************/
 
 /* Executes the given syscall.
@@ -1127,9 +1104,6 @@ intreg_t syscall(struct proc *p, uintreg_t syscallno, uintreg_t a1,
                [SYS_eth_get_mac_addr] = (syscall_t)sys_eth_get_mac_addr,
                [SYS_eth_recv_check] = (syscall_t)sys_eth_recv_check,
        #endif
-       #ifdef __CONFIG_OSDI__
-               [SYS_fillmeup] = (syscall_t)sys_fillmeup,
-       #endif
                // Syscalls serviced by the appserver for now.
                [SYS_read] = (syscall_t)sys_read,
                [SYS_write] = (syscall_t)sys_write,
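
With the OSDI guard gone, SYS_fillmeup drops cleanly out of the dispatch table; entries are designated initializers, so wiring a syscall in or out is one line, optionally behind a config guard like the ones above. A hypothetical entry in the same style (SYS_frob, sys_frob, and __CONFIG_FROB__ are made up for illustration):

	/* Sketch: the designated-initializer dispatch idiom from the table
	 * above.  Unlisted slots default to 0, so a bounds-plus-NULL check
	 * at dispatch time catches unimplemented numbers. */
	const static syscall_t syscall_table[] = {
		[SYS_null] = (syscall_t)sys_null,
	#ifdef __CONFIG_FROB__
		[SYS_frob] = (syscall_t)sys_frob,
	#endif
	};
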
index 716f9a4..4109681 100644 (file)
@@ -9,12 +9,9 @@
 #include <ros/timer.h>
 #include <stdio.h>
 #include <schedule.h>
-<<<<<<< HEAD
-=======
 #include <multiboot.h>
 #include <pmap.h>
 #include <arch/perfmon.h>
->>>>>>> 3611594... adding support for perfctr in trad_proc timer handler
 
 /* timing_overhead
  * Any user space process that links to this file will get its own copy.  
@@ -88,33 +85,4 @@ void train_timing()
  * in here. */
 void timer_interrupt(struct trapframe *tf, void *data)
 {
-#ifdef __CONFIG_EXPER_TRADPROC__
-
-       #ifdef __sparc_v8__
-       # define num_misses read_perfctr(core_id(),22)
-       #else
-       # define num_misses (read_pmc(1))
-       #endif
-
-       // cause M misses and run for N usec
-       #define M 1000
-       #define N 10
-       unsigned int lfsr = 1+read_tsc()%7;
-
-       uint64_t t0 = read_tsc();
-       uint64_t misses0 = num_misses;
-       while(num_misses-misses0 < M)
-       {
-               int x;
-               x = *(volatile int*)KADDR((4*lfsr) % maxaddrpa);
-               lfsr = (lfsr >> 1) ^ (unsigned int)(0 - ((lfsr & 1u) & 0xd0000001u));
-       }
-       while((read_tsc()-t0)/(system_timing.tsc_freq/1000000) < N);
-
-       /* about every 10 ticks (100ms) run the load balancer.  Offset by coreid so
-        * it's not as horrible.  */
-       if (per_cpu_info[core_id()].ticks % 10 == core_id())
-               load_balance();
-       local_schedule();
-#endif /* __CONFIG_EXPER_TRADPROC__ */
 }
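
The removed perturbation loop drove its address stream with what was meant to be a 32-bit Galois LFSR, though note that its extra parentheses apply the tap mask before the negation, degenerating the update into a conditional bitwise complement (harmless here, since the loop only needed address churn). The textbook form, as a standalone sketch in plain C:

	#include <stdint.h>

	/* Sketch: textbook 32-bit Galois LFSR step.  Negate the low bit
	 * first, *then* mask with the taps (0xd0000001); for any nonzero
	 * seed the state never reaches zero. */
	static uint32_t lfsr_next(uint32_t lfsr)
	{
		return (lfsr >> 1) ^ ((0u - (lfsr & 1u)) & 0xd0000001u);
	}
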
index 65cf504..7a59eaa 100644 (file)
@@ -108,30 +108,20 @@ void __attribute__((noreturn)) vcore_entry()
 
        /* no one currently running, so let's get someone from the ready queue */
        struct pthread_tcb *new_thread = NULL;
-#ifdef __CONFIG_OSDI__
-       // Added so that we will return back to here if there is no new thread
-       // instead of at the top of this function.  Related to the fact that
-       // the kernel level scheduler can't yet handle scheduling manycore 
-       // processed yet when there are no more jobs left (i.e. sys_yield() will
-       // return instead of actually yielding...).
-       while(!new_thread) {
-#endif
-               mcs_lock_lock(&queue_lock);
-               new_thread = TAILQ_FIRST(&ready_queue);
-               if (new_thread) {
-                       TAILQ_REMOVE(&ready_queue, new_thread, next);
-                       TAILQ_INSERT_TAIL(&active_queue, new_thread, next);
-                       threads_active++;
-                       threads_ready--;
-               }
-               mcs_lock_unlock(&queue_lock);
-               if (!new_thread) {
-                       printd("[P] No threads, vcore %d is yielding\n", vcoreid);
-                       sys_yield(0);
-               }
-#ifdef __CONFIG_OSDI__
+       mcs_lock_lock(&queue_lock);
+       new_thread = TAILQ_FIRST(&ready_queue);
+       if (new_thread) {
+               TAILQ_REMOVE(&ready_queue, new_thread, next);
+               TAILQ_INSERT_TAIL(&active_queue, new_thread, next);
+               threads_active++;
+               threads_ready--;
+       }
+       mcs_lock_unlock(&queue_lock);
+       if (!new_thread) {
+               /* TODO: handle sys_yield() returning without yielding; one
+                * option is sketched below */
+               printd("[P] No threads, vcore %d is yielding\n", vcoreid);
+               sys_yield(0);
        }
-#endif
        /* Save a ptr to the pthread running in the transition context's TLS */
        current_thread = new_thread;
        printd("[P] Vcore %d is starting pthread %d\n", vcoreid, new_thread->id);