Process management via active messages
[akaros.git] / kern/arch/sparc/env.c.save.1
/* See COPYRIGHT for copyright information. */
#ifdef __DEPUTY__
#pragma noasync
#endif

#include <arch/trap.h>
#include <env.h>
#include <assert.h>
#include <arch/arch.h>
#include <pmap.h>

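// Save the env's FPU state (the FP registers and the FSR) into its
// ancillary_state, but only if the env had the FPU enabled (PSR_EF set).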
void
(IN_HANDLER env_push_ancillary_state)(env_t* e)
{
        static_assert(offsetof(ancillary_state_t,fpr) % 8 == 0);

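        // std stores an even/odd FP register pair (one doubleword) at byte
        // offset 4*n in the save area, hence the alignment check above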
        #define push_two_fp_regs(pdest,n) \
            __asm__ __volatile__ ("std  %%f" XSTR(n) ",[%0+4*" XSTR(n) "]" \
                              : : "r"(pdest) : "memory");

        // do I need to save FP regs?
        if(e->env_tf.psr & PSR_EF)
        {
                // temporarily turn on FP in the kernel
                write_psr(read_psr() | PSR_EF);

                e->env_ancillary_state.fsr = read_fsr();

                push_two_fp_regs(e->env_ancillary_state.fpr,0);
                push_two_fp_regs(e->env_ancillary_state.fpr,2);
                push_two_fp_regs(e->env_ancillary_state.fpr,4);
                push_two_fp_regs(e->env_ancillary_state.fpr,6);
                push_two_fp_regs(e->env_ancillary_state.fpr,8);
                push_two_fp_regs(e->env_ancillary_state.fpr,10);
                push_two_fp_regs(e->env_ancillary_state.fpr,12);
                push_two_fp_regs(e->env_ancillary_state.fpr,14);
                push_two_fp_regs(e->env_ancillary_state.fpr,16);
                push_two_fp_regs(e->env_ancillary_state.fpr,18);
                push_two_fp_regs(e->env_ancillary_state.fpr,20);
                push_two_fp_regs(e->env_ancillary_state.fpr,22);
                push_two_fp_regs(e->env_ancillary_state.fpr,24);
                push_two_fp_regs(e->env_ancillary_state.fpr,26);
                push_two_fp_regs(e->env_ancillary_state.fpr,28);
                push_two_fp_regs(e->env_ancillary_state.fpr,30);

                write_psr(read_psr() & ~PSR_EF);
        }
}

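// Restore the env's FPU state from its ancillary_state; the inverse of
// env_push_ancillary_state above.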
void
(IN_HANDLER env_pop_ancillary_state)(env_t* e)
{
        #define pop_two_fp_regs(psrc,n) \
            __asm__ __volatile__ ("ldd  [%0+4*" XSTR(n) "], %%f" XSTR(n) \
                              : : "r"(psrc) : "memory");

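        // was the env using the FPU?  if so, temporarily enable FP in the
        // kernel, then reload its FP registers and FSR from the saved state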
        if(e->env_tf.psr & PSR_EF)
        {
                write_psr(read_psr() | PSR_EF);

                pop_two_fp_regs(e->env_ancillary_state.fpr,0);
                pop_two_fp_regs(e->env_ancillary_state.fpr,2);
                pop_two_fp_regs(e->env_ancillary_state.fpr,4);
                pop_two_fp_regs(e->env_ancillary_state.fpr,6);
                pop_two_fp_regs(e->env_ancillary_state.fpr,8);
                pop_two_fp_regs(e->env_ancillary_state.fpr,10);
                pop_two_fp_regs(e->env_ancillary_state.fpr,12);
                pop_two_fp_regs(e->env_ancillary_state.fpr,14);
                pop_two_fp_regs(e->env_ancillary_state.fpr,16);
                pop_two_fp_regs(e->env_ancillary_state.fpr,18);
                pop_two_fp_regs(e->env_ancillary_state.fpr,20);
                pop_two_fp_regs(e->env_ancillary_state.fpr,22);
                pop_two_fp_regs(e->env_ancillary_state.fpr,24);
                pop_two_fp_regs(e->env_ancillary_state.fpr,26);
                pop_two_fp_regs(e->env_ancillary_state.fpr,28);
                pop_two_fp_regs(e->env_ancillary_state.fpr,30);

                write_fsr(e->env_ancillary_state.fsr);

                write_psr(read_psr() & ~PSR_EF);
        }
}

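// Set the env's program counter.  SPARC trapframes carry both PC and NPC
// (the next PC, due to delayed control transfer), so NPC is set to PC+4.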
void
env_set_program_counter(env_t* e, uintptr_t pc)
{
        e->env_tf.pc = pc;
        e->env_tf.npc = pc+4;
}

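// Initialize the env's trapframe: the stack pointer (%o6, gpr[14]) starts
// 64 bytes below USTACKTOP, leaving the register window save area the SPARC
// ABI requires; PSR_S is set but PS is 0, so the eventual return-from-trap
// lands in user mode; the window-invalid mask is cleared; and the trap base
// register points at the kernel's trap table.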
void
env_init_trapframe(env_t* e)
{
        extern char trap_table;

        e->env_tf.gpr[14] = USTACKTOP-64;
        e->env_tf.psr = PSR_S; // but PS = 0
        e->env_tf.wim = 0;
        e->env_tf.tbr = (uint32_t)&trap_table;
}

// Flush all mapped pages in the user portion of the address space
// TODO: only supports L3 user pages
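// Walks the three-level page table: for each present L3 PTE, decref the
// mapped physical page, then decref the L3 and L2 tables themselves and
// clear the parent entries.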
void
env_user_mem_free(env_t* e)
{
        pte_t *l1pt = e->env_pgdir, *l2pt, *l3pt;
        uint32_t l1x,l2x,l3x;
        physaddr_t l2ptpa,l3ptpa,page_pa;
        uint32_t l2_tables_per_page,l3_tables_per_page;

        l2_tables_per_page = PGSIZE/(sizeof(pte_t)*NL2ENTRIES);
        l3_tables_per_page = PGSIZE/(sizeof(pte_t)*NL3ENTRIES);

        static_assert(L2X(UTOP) == 0 && L3X(UTOP) == 0);
        for(l1x = 0; l1x < L1X(UTOP); l1x++)
        {
                if(!(l1pt[l1x] & PTE_PTD))
                        continue;

                l2ptpa = PTD_ADDR(l1pt[l1x]);
                l2pt = (pte_t*COUNT(NL2ENTRIES)) KADDR(l2ptpa);

                for(l2x = 0; l2x < NL2ENTRIES; l2x++)
                {
                        if(!(l2pt[l2x] & PTE_PTD))
                                continue;

                        l3ptpa = PTD_ADDR(l2pt[l2x]);
                        l3pt = (pte_t*COUNT(NL3ENTRIES)) KADDR(l3ptpa);

                        for(l3x = 0; l3x < NL3ENTRIES; l3x++)
                        {
                                if(l3pt[l3x] & PTE_PTE)
                                {
                                        page_pa = PTE_ADDR(l3pt[l3x]);
                                        l3pt[l3x] = 0;
                                        page_decref(pa2page(page_pa));
                                }
                        }

                        l2pt[l2x] = 0;

                        // free the L3 PT itself
                        page_decref(pa2page(l3ptpa));
                }

                l1pt[l1x] = 0;

                // free the L2 PT itself
                page_decref(pa2page(l2ptpa));
        }

        tlbflush();
}