1 /*
2  * AcessOS Microkernel Version
3  * mm_virt.c
4  * 
5  * Memory Map
6  * 0xE0 - Kernel Base
7  * 0xF0 - Kernel Stacks
8  * 0xFC - Fractal Mappings (Page Tables/Directories)
9  * 0xFE - Hardware Mappings / Temporary Mappings
10  * 0xFF - System Calls / Kernel's User Code
11  */
12 #define DEBUG   0
13 #define SANITY  1
14 #include <acess.h>
15 #include <mm_phys.h>
16 #include <proc.h>
17
18 #if USE_PAE
19 # define TAB    21
20 # define DIR    30
21 #else
22 # define TAB    22
23 #endif
24
25 #define KERNEL_STACKS           0xF0000000
26 #define KERNEL_STACK_SIZE       0x00008000
27 #define KERNEL_STACKS_END       0xFC000000
28 #define WORKER_STACKS           0x00100000      // Thread0 Only!
29 #define WORKER_STACK_SIZE       KERNEL_STACK_SIZE
30 #define WORKER_STACKS_END       0xB0000000
31 #define NUM_WORKER_STACKS       ((WORKER_STACKS_END-WORKER_STACKS)/WORKER_STACK_SIZE)
32
33 #define PAE_PAGE_TABLE_ADDR     0xFC000000      // 16 MiB
34 #define PAE_PAGE_DIR_ADDR       0xFCFC0000      // 16 KiB
35 #define PAE_PAGE_PDPT_ADDR      0xFCFC3F00      // 32 bytes
36 #define PAE_TMP_PDPT_ADDR       0xFCFC3F20      // 32 bytes
37 #define PAE_TMP_DIR_ADDR        0xFCFE0000      // 16 KiB
38 #define PAE_TMP_TABLE_ADDR      0xFD000000      // 16 MiB
39
40 #define PAGE_TABLE_ADDR 0xFC000000
41 #define PAGE_DIR_ADDR   0xFC3F0000
42 #define PAGE_CR3_ADDR   0xFC3F0FC0
43 #define TMP_CR3_ADDR    0xFC3F0FC4      // Part of core instead of temp
44 #define TMP_DIR_ADDR    0xFC3F1000      // Same
45 #define TMP_TABLE_ADDR  0xFC400000
46
47 #define HW_MAP_ADDR             0xFE000000
48 #define HW_MAP_MAX              0xFFEF0000
49 #define NUM_HW_PAGES    ((HW_MAP_MAX-HW_MAP_ADDR)/0x1000)
50 #define TEMP_MAP_ADDR   0xFFEF0000      // Allows 16 "temp" pages
51 #define NUM_TEMP_PAGES  16
52 #define LAST_BLOCK_ADDR 0xFFFF0000      // Free space for kernel-provided user code / *(-1) protection
53
54 #define PF_PRESENT      0x1
55 #define PF_WRITE        0x2
56 #define PF_USER         0x4
57 #define PF_COW          0x200
58 #define PF_PAGED        0x400
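// Note: PF_COW and PF_PAGED occupy PTE bits 9 and 10, which the CPU ignores
// ("available" bits), so they are free for kernel bookkeeping.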
59
60 #define INVLPG(addr)    __asm__ __volatile__ ("invlpg (%0)"::"r"(addr))
61
62 #if USE_PAE
63 typedef Uint64  tTabEnt;
64 #else
65 typedef Uint32  tTabEnt;
66 #endif
67
68 // === IMPORTS ===
69 extern void     _UsertextEnd, _UsertextBase;
70 extern Uint32   gaInitPageDir[1024];
71 extern Uint32   gaInitPageTable[1024];
72 extern void     Threads_SegFault(tVAddr Addr);
73 extern void     Error_Backtrace(Uint eip, Uint ebp);
74
75 // === PROTOTYPES ===
76 void    MM_PreinitVirtual();
77 void    MM_InstallVirtual();
78 void    MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs);
79 void    MM_DumpTables(tVAddr Start, tVAddr End);
80 tPAddr  MM_DuplicatePage(tVAddr VAddr);
81
82 // === GLOBALS ===
83 #define gaPageTable     ((tTabEnt*)PAGE_TABLE_ADDR)
84 #define gaPageDir       ((tTabEnt*)PAGE_DIR_ADDR)
85 #define gaTmpTable      ((tTabEnt*)TMP_TABLE_ADDR)
86 #define gaTmpDir        ((tTabEnt*)TMP_DIR_ADDR)
87 #define gpPageCR3       ((tTabEnt*)PAGE_CR3_ADDR)
88 #define gpTmpCR3        ((tTabEnt*)TMP_CR3_ADDR)
89
90 #define gaPAE_PageTable ((tTabEnt*)PAE_PAGE_TABLE_ADDR)
91 #define gaPAE_PageDir   ((tTabEnt*)PAE_PAGE_DIR_ADDR)
92 #define gaPAE_MainPDPT  ((tTabEnt*)PAE_PAGE_PDPT_ADDR)
93 #define gaPAE_TmpTable  ((tTabEnt*)PAE_TMP_TABLE_ADDR)
94 #define gaPAE_TmpDir    ((tTabEnt*)PAE_TMP_DIR_ADDR)
95 #define gaPAE_TmpPDPT   ((tTabEnt*)PAE_TMP_PDPT_ADDR)
96  int    gbUsePAE = 0;
97  int    gilTempMappings = 0;
98  int    gilTempFractal = 0;
99 Uint32  gWorkerStacks[(NUM_WORKER_STACKS+31)/32];
100  int    giLastUsedWorker = 0;
101
102 // === CODE ===
103 /**
104  * \fn void MM_PreinitVirtual()
105  * \brief Maps the fractal mappings
106  */
107 void MM_PreinitVirtual()
108 {
109         #if USE_PAE
110         gaInitPageDir[ ((PAGE_TABLE_ADDR >> TAB)-3*512+3)*2 ] = ((tTabEnt)&gaInitPageDir - KERNEL_BASE) | 3;
111         #else
112         gaInitPageDir[ PAGE_TABLE_ADDR >> 22 ] = ((tTabEnt)&gaInitPageDir - KERNEL_BASE) | 3;
113         #endif
114         INVLPG( PAGE_TABLE_ADDR );
115 }
116
117 /**
118  * \fn void MM_InstallVirtual()
119  * \brief Sets up the constant page mappings
120  */
121 void MM_InstallVirtual()
122 {
123          int    i;
124         
125         #if USE_PAE
126         // --- Pre-Allocate kernel tables
127         for( i = KERNEL_BASE >> TAB; i < 1024*4; i ++ )
128         {
129                 if( gaPAE_PageDir[ i ] )        continue;
130                 
131                 // Skip stack tables, they are process unique
132                 if( i > KERNEL_STACKS >> TAB && i < KERNEL_STACKS_END >> TAB) {
133                         gaPAE_PageDir[ i ] = 0;
134                         continue;
135                 }
136                 // Preallocate table
137                 gaPAE_PageDir[ i ] = MM_AllocPhys() | 3;
138                 INVLPG( &gaPAE_PageTable[i*512] );
139                 memset( &gaPAE_PageTable[i*512], 0, 0x1000 );
140         }
141         #else
142         // --- Pre-Allocate kernel tables
143         for( i = KERNEL_BASE>>22; i < 1024; i ++ )
144         {
145                 if( gaPageDir[ i ] )    continue;
146                 // Skip stack tables, they are process unique
147                 if( i > KERNEL_STACKS >> 22 && i < KERNEL_STACKS_END >> 22) {
148                         gaPageDir[ i ] = 0;
149                         continue;
150                 }
151                 // Preallocate table
152                 gaPageDir[ i ] = MM_AllocPhys() | 3;
153                 INVLPG( &gaPageTable[i*1024] );
154                 memset( &gaPageTable[i*1024], 0, 0x1000 );
155         }
156         #endif
157         
158         // Unset kernel on the User Text pages
159         for( i = ((tVAddr)&_UsertextEnd-(tVAddr)&_UsertextBase+0xFFF)/4096; i--; ) {
160                 Log("MM_SetFlags( 0x%08x, 0, MM_PFLAG_KERNEL)", (tVAddr)&_UsertextBase + i*4096);
161                 MM_SetFlags( (tVAddr)&_UsertextBase + i*4096, 0, MM_PFLAG_KERNEL );
162         }
163 }
164
165 /**
166  * \brief Removes the identity mapping used while bringing up the other CPUs (SMP)
167  */
168 void MM_FinishVirtualInit()
169 {
170         #if USE_PAE
171         gaInitPDPT[ 0 ] = 0;
172         #else
173         gaInitPageDir[ 0 ] = 0;
174         #endif
175 }
176
177 /**
178  * \fn void MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs)
179  * \brief Called on a page fault
180  */
181 void MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs)
182 {
183         //ENTER("xAddr bErrorCode", Addr, ErrorCode);
184         
185         // -- Check for COW --
186         if( gaPageDir  [Addr>>22] & PF_PRESENT
187          && gaPageTable[Addr>>12] & PF_PRESENT
188          && gaPageTable[Addr>>12] & PF_COW )
189         {
190                 tPAddr  paddr;
191                 if(MM_GetRefCount( gaPageTable[Addr>>12] & ~0xFFF ) == 1)
192                 {
193                         gaPageTable[Addr>>12] &= ~PF_COW;
194                         gaPageTable[Addr>>12] |= PF_PRESENT|PF_WRITE;
195                 }
196                 else
197                 {
198                         //Log("MM_PageFault: COW - MM_DuplicatePage(0x%x)", Addr);
199                         paddr = MM_DuplicatePage( Addr );
200                         MM_DerefPhys( gaPageTable[Addr>>12] & ~0xFFF );
201                         gaPageTable[Addr>>12] &= PF_USER;
202                         gaPageTable[Addr>>12] |= paddr|PF_PRESENT|PF_WRITE;
203                 }
204                 
205                 INVLPG( Addr & ~0xFFF );
206                 //LEAVE('-')
207                 return;
208         }
209         
210         // If the fault came from user mode, warn and tell the thread handler
211         if(ErrorCode & 4) {
212                 Warning("%s %s %s memory%s",
213                         (ErrorCode&4?"User":"Kernel"),
214                         (ErrorCode&2?"write to":"read from"),
215                         (ErrorCode&1?"bad/locked":"non-present"),
216                         (ErrorCode&16?" (Instruction Fetch)":"")
217                         );
218                 Warning("User Pagefault: Instruction at %04x:%08x accessed %p", Regs->cs, Regs->eip, Addr);
219                 __asm__ __volatile__ ("sti");   // Restart IRQs
220                 Threads_SegFault(Addr);
221                 return ;
222         }
223         
224         Debug_KernelPanic();
225         
226         // -- Check Error Code --
227         if(ErrorCode & 8)
228                 Warning("Reserved Bits Trashed!");
229         else
230         {
231                 Warning("%s %s %s memory%s",
232                         (ErrorCode&4?"User":"Kernel"),
233                         (ErrorCode&2?"write to":"read from"),
234                         (ErrorCode&1?"bad/locked":"non-present"),
235                         (ErrorCode&16?" (Instruction Fetch)":"")
236                         );
237         }
238         
239         Log("Code at %p accessed %p", Regs->eip, Addr);
240         // Print Stack Backtrace
241         Error_Backtrace(Regs->eip, Regs->ebp);
242         
243         Log("gaPageDir[0x%x] = 0x%x", Addr>>22, gaPageDir[Addr>>22]);
244         if( gaPageDir[Addr>>22] & PF_PRESENT )
245                 Log("gaPageTable[0x%x] = 0x%x", Addr>>12, gaPageTable[Addr>>12]);
246         
247         //MM_DumpTables(0, -1); 
248         
249         Panic("Page Fault at 0x%x (Accessed 0x%x)", Regs->eip, Addr);
250 }
251
252 /**
253  * \fn void MM_DumpTables(tVAddr Start, tVAddr End)
254  * \brief Dumps the layout of the page tables
255  */
256 void MM_DumpTables(tVAddr Start, tVAddr End)
257 {
258         tVAddr  rangeStart = 0;
259         tPAddr  expected = 0;
260         tVAddr  curPos;
261         Uint    page;
262         const tPAddr    MASK = ~0xF98;
263         
264         Start >>= 12;   End >>= 12;
265         
266         #if 0
267         Log("Directory Entries:");
268         for(page = Start >> 10;
269                 page < (End >> 10)+1;
270                 page ++)
271         {
272                 if(gaPageDir[page])
273                 {
274                         Log(" 0x%08x-0x%08x :: 0x%08x",
275                                 page<<22, ((page+1)<<22)-1,
276                                 gaPageDir[page]&~0xFFF
277                                 );
278                 }
279         }
280         #endif
281         
282         Log("Table Entries:");
283         for(page = Start, curPos = Start<<12;
284                 page < End;
285                 curPos += 0x1000, page++)
286         {
287                 if( !(gaPageDir[curPos>>22] & PF_PRESENT)
288                 ||  !(gaPageTable[page] & PF_PRESENT)
289                 ||  (gaPageTable[page] & MASK) != expected)
290                 {
291                         if(expected) {
292                                 Log(" 0x%08x-0x%08x => 0x%08x-0x%08x (%s%s%s%s)",
293                                         rangeStart, curPos - 1,
294                                         gaPageTable[rangeStart>>12] & ~0xFFF,
295                                         (expected & ~0xFFF) - 1,
296                                         (expected & PF_PAGED ? "p" : "-"),
297                                         (expected & PF_COW ? "C" : "-"),
298                                         (expected & PF_USER ? "U" : "-"),
299                                         (expected & PF_WRITE ? "W" : "-")
300                                         );
301                                 expected = 0;
302                         }
303                         if( !(gaPageDir[curPos>>22] & PF_PRESENT) )     continue;
304                         if( !(gaPageTable[curPos>>12] & PF_PRESENT) )   continue;
305                         
306                         expected = (gaPageTable[page] & MASK);
307                         rangeStart = curPos;
308                 }
309                 if(expected)    expected += 0x1000;
310         }
311         
312         if(expected) {
313                 Log("0x%08x-0x%08x => 0x%08x-0x%08x (%s%s%s%s)",
314                         rangeStart, curPos - 1,
315                         gaPageTable[rangeStart>>12] & ~0xFFF,
316                         (expected & ~0xFFF) - 1,
317                         (expected & PF_PAGED ? "p" : "-"),
318                         (expected & PF_COW ? "C" : "-"),
319                         (expected & PF_USER ? "U" : "-"),
320                         (expected & PF_WRITE ? "W" : "-")
321                         );
322                 expected = 0;
323         }
324 }
325
326 /**
327  * \fn tPAddr MM_Allocate(tVAddr VAddr)
328  */
329 tPAddr MM_Allocate(tVAddr VAddr)
330 {
331         tPAddr  paddr;
332         //ENTER("xVAddr", VAddr);
333         //__asm__ __volatile__ ("xchg %bx,%bx");
334         // Check if the directory is mapped
335         if( gaPageDir[ VAddr >> 22 ] == 0 )
336         {
337                 // Allocate directory
338                 paddr = MM_AllocPhys();
339                 //LOG("paddr = 0x%llx (new table)", paddr);
340                 if( paddr == 0 ) {
341                         Warning("MM_Allocate - Out of Memory (Called by %p)", __builtin_return_address(0));
342                         //LEAVE('i',0);
343                         return 0;
344                 }
345                 // Map
346                 gaPageDir[ VAddr >> 22 ] = paddr | 3;
347                 // Mark as user
348                 if(VAddr < MM_USER_MAX) gaPageDir[ VAddr >> 22 ] |= PF_USER;
349                 
350                 INVLPG( &gaPageDir[ VAddr >> 22 ] );
351                 //LOG("Clearing new table");
352                 memsetd( &gaPageTable[ (VAddr >> 12) & ~0x3FF ], 0, 1024 );
353         }
354         // Check if the page is already allocated
355         else if( gaPageTable[ VAddr >> 12 ] != 0 ) {
356                 Warning("MM_Allocate - Allocating to used address (%p)", VAddr);
357                 //LEAVE('X', gaPageTable[ VAddr >> 12 ] & ~0xFFF);
358                 return gaPageTable[ VAddr >> 12 ] & ~0xFFF;
359         }
360         
361         // Allocate
362         paddr = MM_AllocPhys();
363         //LOG("paddr = 0x%llx", paddr);
364         if( paddr == 0 ) {
365                 Warning("MM_Allocate - Out of Memory when allocating at %p (Called by %p)",
366                         VAddr, __builtin_return_address(0));
367                 //LEAVE('i',0);
368                 return 0;
369         }
370         // Map
371         gaPageTable[ VAddr >> 12 ] = paddr | 3;
372         // Mark as user
373         if(VAddr < MM_USER_MAX) gaPageTable[ VAddr >> 12 ] |= PF_USER;
374         // Invalidate Cache for address
375         INVLPG( VAddr & ~0xFFF );
376         
377         //LEAVE('X', paddr);
378         return paddr;
379 }
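
/*
 * Usage sketch (illustrative): backing a kernel buffer with MM_Allocate()
 * and clearing it. 0xE8000000 is an assumed-free kernel virtual address
 * chosen only for the example.
 *
 *	tVAddr	buf = 0xE8000000;
 *	if( MM_Allocate( buf ) == 0 )
 *		return ;	// Out of physical memory
 *	memset( (void*)buf, 0, 0x1000 );
 */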
380
381 /**
382  * \fn void MM_Deallocate(tVAddr VAddr)
383  */
384 void MM_Deallocate(tVAddr VAddr)
385 {
386         if( gaPageDir[ VAddr >> 22 ] == 0 ) {
387                 Warning("MM_Deallocate - Directory not mapped");
388                 return;
389         }
390         
391         if(gaPageTable[ VAddr >> 12 ] == 0) {
392                 Warning("MM_Deallocate - Page is not allocated");
393                 return;
394         }
395         
396         // Dereference page
397         MM_DerefPhys( gaPageTable[ VAddr >> 12 ] & ~0xFFF );
398         // Clear page
399         gaPageTable[ VAddr >> 12 ] = 0;
400 }
401
402 /**
403  * \fn tPAddr MM_GetPhysAddr(tVAddr Addr)
404  * \brief Gets the physical address mapped at a virtual address (0 if not accessible)
405  */
406 tPAddr MM_GetPhysAddr(tVAddr Addr)
407 {
408         if( !(gaPageDir[Addr >> 22] & 1) )
409                 return 0;
410         if( !(gaPageTable[Addr >> 12] & 1) )
411                 return 0;
412         return (gaPageTable[Addr >> 12] & ~0xFFF) | (Addr & 0xFFF);
413 }
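
/*
 * Usage sketch (illustrative): a driver checking that a buffer is mapped and
 * obtaining the frame to hand to hardware. 'Buffer' is a hypothetical pointer.
 *
 *	tPAddr	phys = MM_GetPhysAddr( (tVAddr)Buffer );
 *	if( phys == 0 )
 *		return 0;	// Not mapped
 */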
414
415
416 /**
417  * \fn int MM_IsUser(tVAddr VAddr)
418  * \brief Checks if a page is user accessible
419  */
420 int MM_IsUser(tVAddr VAddr)
421 {
422         if( !(gaPageDir[VAddr >> 22] & 1) )
423                 return 0;
424         if( !(gaPageTable[VAddr >> 12] & 1) )
425                 return 0;
426         if( !(gaPageTable[VAddr >> 12] & PF_USER) )
427                 return 0;
428         return 1;
429 }
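
/*
 * Usage sketch (illustrative): validating a user-supplied buffer in a syscall
 * handler. 'Ptr' and 'Length' are hypothetical arguments.
 *
 *	Uint	ofs;
 *	for( ofs = 0; ofs < Length; ofs += 0x1000 )
 *	{
 *		if( !MM_IsUser( (tVAddr)Ptr + ofs ) )
 *			return 0;	// Reject the request
 *	}
 */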
430
431 /**
432  * \fn void MM_SetCR3(tPAddr CR3)
433  * \brief Sets the current process space
434  */
435 void MM_SetCR3(tPAddr CR3)
436 {
437         __asm__ __volatile__ ("mov %0, %%cr3"::"r"(CR3));
438 }
439
440 /**
441  * \fn int MM_Map(tVAddr VAddr, tPAddr PAddr)
442  * \brief Map a physical page to a virtual one
443  */
444 int MM_Map(tVAddr VAddr, tPAddr PAddr)
445 {
446         //ENTER("xVAddr xPAddr", VAddr, PAddr);
447         // Sanity check
448         if( PAddr & 0xFFF || VAddr & 0xFFF ) {
449                 Warning("MM_Map - Physical or Virtual Addresses are not aligned");
450                 //LEAVE('i', 0);
451                 return 0;
452         }
453         
454         // Align addresses
455         PAddr &= ~0xFFF;        VAddr &= ~0xFFF;
456         
457         // Check if the directory is mapped
458         if( gaPageDir[ VAddr >> 22 ] == 0 )
459         {
460                 gaPageDir[ VAddr >> 22 ] = MM_AllocPhys() | 3;
461                 
462                 // Mark as user
463                 if(VAddr < MM_USER_MAX) gaPageDir[ VAddr >> 22 ] |= PF_USER;
464                 
465                 INVLPG( &gaPageTable[ (VAddr >> 12) & ~0x3FF ] );
466                 memsetd( &gaPageTable[ (VAddr >> 12) & ~0x3FF ], 0, 1024 );
467         }
468         // Check if the page is already allocated
469         else if( gaPageTable[ VAddr >> 12 ] != 0 ) {
470                 Warning("MM_Map - Allocating to used address");
471                 //LEAVE('i', 0);
472                 return 0;
473         }
474         
475         // Map
476         gaPageTable[ VAddr >> 12 ] = PAddr | 3;
477         // Mark as user
478         if(VAddr < MM_USER_MAX) gaPageTable[ VAddr >> 12 ] |= PF_USER;
479         
480         //LOG("gaPageTable[ 0x%x ] = (Uint)%p = 0x%x",
481         //      VAddr >> 12, &gaPageTable[ VAddr >> 12 ], gaPageTable[ VAddr >> 12 ]);
482         
483         // Reference
484         MM_RefPhys( PAddr );
485         
486         //LOG("INVLPG( 0x%x )", VAddr);
487         INVLPG( VAddr );
488         
489         //LEAVE('i', 1);
490         return 1;
491 }
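
/*
 * Usage sketch (illustrative): exposing the VGA text buffer (physical 0xB8000)
 * at an assumed-free kernel virtual address.
 *
 *	if( !MM_Map( 0xE0400000, 0x000B8000 ) )
 *		Warning("VGA frame is already mapped");
 */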
492
493 /**
494  * \fn tVAddr MM_ClearUser()
495  * \brief Clear user's address space
496  */
497 tVAddr MM_ClearUser()
498 {
499         Uint    i, j;
500         
501         // Copy Directories
502         for( i = 0; i < (MM_USER_MAX>>22); i ++ )
503         {
504                 // Check if directory is not allocated
505                 if( !(gaPageDir[i] & PF_PRESENT) ) {
506                         gaPageDir[i] = 0;
507                         continue;
508                 }
509                 
510                 
511                 for( j = 0; j < 1024; j ++ )
512                 {
513                         if( gaPageTable[i*1024+j] & 1 )
514                                 MM_DerefPhys( gaPageTable[i*1024+j] & ~0xFFF );
515                         gaPageTable[i*1024+j] = 0;
516                 }
517                 
518                 MM_DerefPhys( gaPageDir[i] & ~0xFFF );
519                 gaPageDir[i] = 0;
520                 INVLPG( &gaPageTable[i*1024] );
521         }
522         INVLPG( gaPageDir );
523         
524         return *gpPageCR3;
525 }
526
527 /**
528  * \fn tPAddr MM_Clone()
529  * \brief Clone the current address space
530  */
531 tPAddr MM_Clone()
532 {
533         Uint    i, j;
534         tVAddr  ret;
535         Uint    page = 0;
536         tVAddr  kStackBase = Proc_GetCurThread()->KernelStack - KERNEL_STACK_SIZE;
537         void    *tmp;
538         
539         LOCK( &gilTempFractal );
540         
541         // Create Directory Table
542         *gpTmpCR3 = MM_AllocPhys() | 3;
543         INVLPG( gaTmpDir );
544         //LOG("Allocated Directory (%x)", *gpTmpCR3);
545         memsetd( gaTmpDir, 0, 1024 );
546         
547         // Copy Tables
548         for( i = 0; i < 768; i ++)
549         {
550                 // Check if table is allocated
551                 if( !(gaPageDir[i] & PF_PRESENT) ) {
552                         gaTmpDir[i] = 0;
553                         page += 1024;
554                         continue;
555                 }
556                 
557                 // Allocate new table
558                 gaTmpDir[i] = MM_AllocPhys() | (gaPageDir[i] & 7);
559                 INVLPG( &gaTmpTable[page] );
560                 // Fill
561                 for( j = 0; j < 1024; j ++, page++ )
562                 {
563                         if( !(gaPageTable[page] & PF_PRESENT) ) {
564                                 gaTmpTable[page] = 0;
565                                 continue;
566                         }
567                         
568                         // Reference the old page
569                         MM_RefPhys( gaPageTable[page] & ~0xFFF );
570                         // Add to new table
571                         if(gaPageTable[page] & PF_WRITE) {
572                                 gaTmpTable[page] = (gaPageTable[page] & ~PF_WRITE) | PF_COW;
573                                 gaPageTable[page] = (gaPageTable[page] & ~PF_WRITE) | PF_COW;
574                                 INVLPG( page << 12 );
575                         }
576                         else
577                                 gaTmpTable[page] = gaPageTable[page];
578                 }
579         }
580         
581         // Map in kernel tables (and make fractal mapping)
582         for( i = 768; i < 1024; i ++ )
583         {
584                 // Fractal
585                 if( i == (PAGE_TABLE_ADDR >> 22) ) {
586                         gaTmpDir[ PAGE_TABLE_ADDR >> 22 ] = *gpTmpCR3;
587                         continue;
588                 }
589                 
590                 if( gaPageDir[i] == 0 ) {
591                         gaTmpDir[i] = 0;
592                         continue;
593                 }
594                 
595                 //LOG("gaPageDir[%x/4] = 0x%x", i*4, gaPageDir[i]);
596                 MM_RefPhys( gaPageDir[i] & ~0xFFF );
597                 gaTmpDir[i] = gaPageDir[i];
598         }
599         
600         // Allocate kernel stack
601         for(i = KERNEL_STACKS >> 22;
602                 i < KERNEL_STACKS_END >> 22;
603                 i ++ )
604         {
605                 // Check if directory is allocated
606                 if( (gaPageDir[i] & 1) == 0 ) {
607                         gaTmpDir[i] = 0;
608                         continue;
609                 }               
610                 
611                 // We don't care about other kernel stacks, just the current one
612                 if( i != kStackBase >> 22 ) {
613                         MM_DerefPhys( gaPageDir[i] & ~0xFFF );
614                         gaTmpDir[i] = 0;
615                         continue;
616                 }
617                 
618                 // Create a copy
619                 gaTmpDir[i] = MM_AllocPhys() | 3;
620                 INVLPG( &gaTmpTable[i*1024] );
621                 for( j = 0; j < 1024; j ++ )
622                 {
623                         // Is the page allocated? If not, skip
624                         if( !(gaPageTable[i*1024+j] & 1) ) {
625                                 gaTmpTable[i*1024+j] = 0;
626                                 continue;
627                         }
628                         
629                         // We don't care about other kernel stacks
630                         if( ((i*1024+j)*4096 & ~(KERNEL_STACK_SIZE-1)) != kStackBase ) {
631                                 gaTmpTable[i*1024+j] = 0;
632                                 continue;
633                         }
634                         
635                         // Allocate page
636                         gaTmpTable[i*1024+j] = MM_AllocPhys() | 3;
637                         
638                         MM_RefPhys( gaTmpTable[i*1024+j] & ~0xFFF );
639                         
640                         tmp = (void *) MM_MapTemp( gaTmpTable[i*1024+j] & ~0xFFF );
641                         memcpy( tmp, (void *)( (i*1024+j)*0x1000 ), 0x1000 );
642                         MM_FreeTemp( (Uint)tmp );
643                 }
644         }
645         
646         ret = *gpTmpCR3 & ~0xFFF;
647         RELEASE( &gilTempFractal );
648         
649         //LEAVE('x', ret);
650         return ret;
651 }
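
/*
 * Copy-on-write lifecycle (illustrative): MM_Clone() marks each writable user
 * page PF_COW and read-only in both address spaces. A later write faults into
 * MM_PageFault(), which either restores PF_WRITE (last reference) or calls
 * MM_DuplicatePage() to give the writer a private copy.
 *
 *	tPAddr	childCR3 = MM_Clone();	// New space, sharing pages COW
 *	// A write to any shared page in either space now takes the COW
 *	// branch at the top of MM_PageFault().
 */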
652
653 /**
654  * \fn tVAddr MM_NewKStack()
655  * \brief Create a new kernel stack
656  */
657 tVAddr MM_NewKStack()
658 {
659         tVAddr  base = KERNEL_STACKS;
660         Uint    i;
661         for(;base<KERNEL_STACKS_END;base+=KERNEL_STACK_SIZE)
662         {
663                 if(MM_GetPhysAddr(base) != 0)   continue;
664                 for(i=0;i<KERNEL_STACK_SIZE;i+=0x1000) {
665                         MM_Allocate(base+i);
666                 }
667                 return base+KERNEL_STACK_SIZE;
668         }
669         Warning("MM_NewKStack - No address space left\n");
670         return 0;
671 }
672
673 /**
674  * \fn tVAddr MM_NewWorkerStack()
675  * \brief Creates a new worker stack
676  */
677 tVAddr MM_NewWorkerStack()
678 {
679         Uint    esp, ebp;
680         Uint    oldstack;
681         Uint    base, addr;
682          int    i, j;
683         Uint    *tmpPage;
684         tPAddr  pages[WORKER_STACK_SIZE>>12];
685         
686         // Get the old ESP and EBP
687         __asm__ __volatile__ ("mov %%esp, %0": "=r"(esp));
688         __asm__ __volatile__ ("mov %%ebp, %0": "=r"(ebp));
689         
690         // Find a free worker stack address
691         for(base = giLastUsedWorker; base < NUM_WORKER_STACKS; base++)
692         {
693                 // Used block
694                 if( gWorkerStacks[base/32] == -1 ) {
695                         base += 31;     base &= ~31;
696                         base --;        // Counteracted by the base++
697                         continue;
698                 }
699                 // Used stack
700                 if( gWorkerStacks[base/32] & (1 << (base & 31)) ) {
701                         continue;
702                 }
703                 break;
704         }
705         if(base >= NUM_WORKER_STACKS) {
706                 Warning("Uh-oh! Out of worker stacks");
707                 return 0;
708         }
709         
710         // It's ours now!
711         gWorkerStacks[base/32] |= (1 << (base & 31));
712         // Make life easier for later calls
713         giLastUsedWorker = base;
714         // We have one
715         base = WORKER_STACKS + base * WORKER_STACK_SIZE;
716         //Log(" MM_NewWorkerStack: base = 0x%x", base);
717         
718         // Acquire the lock for the temp fractal mappings
719         LOCK(&gilTempFractal);
720         
721         // Set the temp fractals to TID0's address space
722         *gpTmpCR3 = ((Uint)gaInitPageDir - KERNEL_BASE) | 3;
723         //Log(" MM_NewWorkerStack: *gpTmpCR3 = 0x%x", *gpTmpCR3);
724         INVLPG( gaTmpDir );
725         
726         
727         // Check if the directory is mapped (we are assuming that the stacks
728         // will fit neatly in a directory)
729         //Log(" MM_NewWorkerStack: gaTmpDir[ 0x%x ] = 0x%x", base>>22, gaTmpDir[ base >> 22 ]);
730         if(gaTmpDir[ base >> 22 ] == 0) {
731                 gaTmpDir[ base >> 22 ] = MM_AllocPhys() | 3;
732                 INVLPG( &gaTmpTable[ (base>>12) & ~0x3FF ] );
733         }
734         
735         // Mapping Time!
736         for( addr = 0; addr < WORKER_STACK_SIZE; addr += 0x1000 )
737         {
738                 pages[ addr >> 12 ] = MM_AllocPhys();
739                 gaTmpTable[ (base + addr) >> 12 ] = pages[addr>>12] | 3;
740         }
741         *gpTmpCR3 = 0;
742         // Release the temp mapping lock
743         RELEASE(&gilTempFractal);
744         
745         // Copy the old stack
746         oldstack = (esp + KERNEL_STACK_SIZE-1) & ~(KERNEL_STACK_SIZE-1);
747         esp = oldstack - esp;   // ESP as an offset in the stack
748         
749         // Make `base` be the top of the stack
750         base += WORKER_STACK_SIZE;
751         
752         i = (WORKER_STACK_SIZE>>12) - 1;
753         // Copy the contents of the old stack to the new one, altering the addresses
754         // `addr` refers to bytes from the stack base (counting down in memory)
755         for(addr = 0; addr < esp; addr += 0x1000)
756         {
757                 Uint    *stack = (Uint*)( oldstack-(addr+0x1000) );
758                 tmpPage = (void*)MM_MapTemp( pages[i] );
759                 // Copy old stack
760                 for(j = 0; j < 1024; j++)
761                 {
762                         // Possible Stack address?
763                         if(oldstack-esp < stack[j] && stack[j] < oldstack)
764                                 tmpPage[j] = base - (oldstack - stack[j]);
765                         else    // Seems not, best leave it alone
766                                 tmpPage[j] = stack[j];
767                 }
768                 MM_FreeTemp((tVAddr)tmpPage);
769                 i --;
770         }
771         
772         //Log("MM_NewWorkerStack: RETURN 0x%x", base);
773         return base;
774 }
775
776 /**
777  * \fn void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask)
778  * \brief Sets the flags on a page
779  */
780 void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask)
781 {
782         tTabEnt *ent;
783         if( !(gaPageDir[VAddr >> 22] & 1) )     return ;
784         if( !(gaPageTable[VAddr >> 12] & 1) )   return ;
785         
786         ent = &gaPageTable[VAddr >> 12];
787         
788         // Read-Only
789         if( Mask & MM_PFLAG_RO )
790         {
791                 if( Flags & MM_PFLAG_RO ) {
792                         *ent &= ~PF_WRITE;
793                 }
794                 else {
795                         gaPageDir[VAddr >> 22] |= PF_WRITE;
796                         *ent |= PF_WRITE;
797                 }
798         }
799         
800         // Kernel
801         if( Mask & MM_PFLAG_KERNEL )
802         {
803                 if( Flags & MM_PFLAG_KERNEL ) {
804                         *ent &= ~PF_USER;
805                 }
806                 else {
807                         gaPageDir[VAddr >> 22] |= PF_USER;
808                         *ent |= PF_USER;
809                 }
810         }
811         
812         // Copy-On-Write
813         if( Mask & MM_PFLAG_COW )
814         {
815                 if( Flags & MM_PFLAG_COW ) {
816                         *ent &= ~PF_WRITE;
817                         *ent |= PF_COW;
818                 }
819                 else {
820                         *ent &= ~PF_COW;
821                         *ent |= PF_WRITE;
822                 }
823         }
824         
825         //Log("MM_SetFlags: *ent = 0x%08x, gaPageDir[%i] = 0x%08x",
826         //      *ent, VAddr >> 22, gaPageDir[VAddr >> 22]);
827 }
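
/*
 * Usage sketch (illustrative): Flags supplies the new values, Mask selects
 * which flags are changed. 'addr' is a hypothetical mapped address.
 *
 *	MM_SetFlags( addr, MM_PFLAG_RO, MM_PFLAG_RO );	// Clear PF_WRITE
 *	MM_SetFlags( addr, 0, MM_PFLAG_RO );		// Restore PF_WRITE
 */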
828
829 /**
830  * \fn tPAddr MM_DuplicatePage(tVAddr VAddr)
831  * \brief Duplicates a virtual page to a physical one
832  */
833 tPAddr MM_DuplicatePage(tVAddr VAddr)
834 {
835         tPAddr  ret;
836         Uint    temp;
837          int    wasRO = 0;
838         
839         //ENTER("xVAddr", VAddr);
840         
841         // Check if mapped
842         if( !(gaPageDir  [VAddr >> 22] & PF_PRESENT) )  return 0;
843         if( !(gaPageTable[VAddr >> 12] & PF_PRESENT) )  return 0;
844         
845         // Page Align
846         VAddr &= ~0xFFF;
847         
848         // Allocate new page
849         ret = MM_AllocPhys();
850         
851         // Write-lock the page (to keep the data consistent), saving its R/W state
852         wasRO = (gaPageTable[VAddr >> 12] & PF_WRITE ? 0 : 1);
853         gaPageTable[VAddr >> 12] &= ~PF_WRITE;
854         INVLPG( VAddr );
855         
856         // Copy Data
857         temp = MM_MapTemp(ret);
858         memcpy( (void*)temp, (void*)VAddr, 0x1000 );
859         MM_FreeTemp(temp);
860         
861         // Restore Writeable status
862         if(!wasRO)      gaPageTable[VAddr >> 12] |= PF_WRITE;
863         INVLPG(VAddr);
864         
865         //LEAVE('X', ret);
866         return ret;
867 }
868
869 /**
870  * \fn tVAddr MM_MapTemp(tPAddr PAddr)
871  * \brief Create a temporary memory mapping
872  * \todo Show Luigi Barone (C Lecturer) and see what he thinks
873  */
874 tVAddr MM_MapTemp(tPAddr PAddr)
875 {
876          int    i;
877         
878         //ENTER("XPAddr", PAddr);
879         
880         PAddr &= ~0xFFF;
881         
882         //LOG("gilTempMappings = %i", gilTempMappings);
883         
884         for(;;)
885         {
886                 LOCK( &gilTempMappings );
887                 
888                 for( i = 0; i < NUM_TEMP_PAGES; i ++ )
889                 {
890                         // Check if page used
891                         if(gaPageTable[ (TEMP_MAP_ADDR >> 12) + i ] & 1)        continue;
892                         // Mark as used
893                         gaPageTable[ (TEMP_MAP_ADDR >> 12) + i ] = PAddr | 3;
894                         INVLPG( TEMP_MAP_ADDR + (i << 12) );
895                         //LEAVE('p', TEMP_MAP_ADDR + (i << 12));
896                         RELEASE( &gilTempMappings );
897                         return TEMP_MAP_ADDR + (i << 12);
898                 }
899                 RELEASE( &gilTempMappings );
900                 Threads_Yield();
901         }
902 }
903
904 /**
905  * \fn void MM_FreeTemp(tVAddr VAddr)
906  * \brief Frees a temporary mapping
907  */
908 void MM_FreeTemp(tVAddr VAddr)
909 {
910          int    i = VAddr >> 12;
911         //ENTER("xVAddr", VAddr);
912         
913         if(i >= (TEMP_MAP_ADDR >> 12))
914                 gaPageTable[ i ] = 0;
915         
916         //LEAVE('-');
917 }
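
/*
 * Usage sketch (illustrative): copying an arbitrary physical frame into a
 * kernel buffer via a temporary mapping (the same pattern MM_DuplicatePage()
 * uses). 'frame' and 'dest' are hypothetical.
 *
 *	tVAddr	tmp = MM_MapTemp( frame );
 *	memcpy( dest, (void*)tmp, 0x1000 );
 *	MM_FreeTemp( tmp );
 */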
918
919 /**
920  * \fn tVAddr MM_MapHWPages(tPAddr PAddr, Uint Number)
921  * \brief Maps a contiguous run of hardware pages
922  */
923 tVAddr MM_MapHWPages(tPAddr PAddr, Uint Number)
924 {
925          int    i, j;
926         
927         PAddr &= ~0xFFF;
928         
929         // Scan List
930         for( i = 0; i < NUM_HW_PAGES; i ++ )
931         {               
932                 // Check if addr used
933                 if( gaPageTable[ (HW_MAP_ADDR >> 12) + i ] & 1 )
934                         continue;
935                 
936                 // Check possible region
937                 for( j = 0; j < Number && i + j < NUM_HW_PAGES; j ++ )
938                 {
939                         // If there is an allocated page in the region we are testing, break
940                         if( gaPageTable[ (HW_MAP_ADDR >> 12) + i + j ] & 1 )    break;
941                 }
942                 // Is it all free?
943                 if( j == Number )
944                 {
945                         // Allocate
946                         for( j = 0; j < Number; j++ ) {
947                                 MM_RefPhys( PAddr + (j<<12) );
948                                 gaPageTable[ (HW_MAP_ADDR >> 12) + i + j ] = (PAddr + (j<<12)) | 3;
949                         }
950                         return HW_MAP_ADDR + (i<<12);
951                 }
952         }
953         // If we don't find any, return NULL
954         return 0;
955 }
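
/*
 * Usage sketch (illustrative): mapping a device's MMIO registers for kernel
 * access. 'barPhys' is a hypothetical page-aligned physical base (e.g. read
 * from PCI configuration space).
 *
 *	tVAddr	regs = MM_MapHWPages( barPhys, 4 );	// 16 KiB of registers
 *	if( !regs )
 *		return 0;	// HW mapping space exhausted
 */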
956
957 /**
958  * \fn tVAddr MM_AllocDMA(int Pages, int MaxBits, tPAddr *PhysAddr)
959  * \brief Allocates DMA physical memory
960  * \param Pages Number of pages required
961  * \param MaxBits       Maximum number of bits the physical address can have
962  * \param PhysAddr      Pointer to the location to place the physical address allocated
963  * \return Virtual address allocated
964  */
965 tVAddr MM_AllocDMA(int Pages, int MaxBits, tPAddr *PhysAddr)
966 {
967         tPAddr  maxCheck = (1 << MaxBits);
968         tPAddr  phys;
969         tVAddr  ret;
970         
971         ENTER("iPages iMaxBits pPhysAddr", Pages, MaxBits, PhysAddr);
972         
973         // Sanity Check
974         if(MaxBits < 12 || !PhysAddr) {
975                 LEAVE('i', 0);
976                 return 0;
977         }
978         
979         // Bound
980         if(MaxBits >= PHYS_BITS)        maxCheck = -1;
981         
982         // Fast Allocate
983         if(Pages == 1 && MaxBits >= PHYS_BITS)
984         {
985                 phys = MM_AllocPhys();
986                 *PhysAddr = phys;
987                 ret = MM_MapHWPages(phys, 1);
988                 if(ret == 0) {
989                         MM_DerefPhys(phys);
990                         LEAVE('i', 0);
991                         return 0;
992                 }
993                 LEAVE('x', ret);
994                 return ret;
995         }
996         
997         // Slow Allocate
998         phys = MM_AllocPhysRange(Pages, MaxBits);
999         // - Was it allocated?
1000         if(phys == 0) {
1001                 LEAVE('i', 0);
1002                 return 0;
1003         }
1004         
1005         // Allocated successfully, now map
1006         ret = MM_MapHWPages(phys, Pages);
1007         if( ret == 0 ) {
1008                 // If it didn't map, free then return 0
1009                 for(;Pages--;phys+=0x1000)
1010                         MM_DerefPhys(phys);
1011                 LEAVE('i', 0);
1012                 return 0;
1013         }
1014         
1015         *PhysAddr = phys;
1016         LEAVE('x', ret);
1017         return ret;
1018 }
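
/*
 * Usage sketch (illustrative): a hypothetical ISA device driver allocating a
 * 2-page DMA buffer that must lie below 16 MiB (24 physical address bits).
 *
 *	tPAddr	phys;
 *	tVAddr	buf = MM_AllocDMA( 2, 24, &phys );
 *	if( !buf )
 *		return 0;	// Out of memory or address space
 *	// ... program the controller with 'phys', access the data via 'buf' ...
 *	MM_UnmapHWPages( buf, 2 );
 */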
1019
1020 /**
1021  * \fn void MM_UnmapHWPages(tVAddr VAddr, Uint Number)
1022  * \brief Unmaps a range of hardware pages
1023  */
1024 void MM_UnmapHWPages(tVAddr VAddr, Uint Number)
1025 {
1026          int    i, j;
1027         // Sanity Check
1028         if(VAddr < HW_MAP_ADDR || VAddr + Number*0x1000 > HW_MAP_MAX)   return;
1029         
1030         i = (VAddr - HW_MAP_ADDR) >> 12;        // Page offset into the HW mapping region
1031         
1032         LOCK( &gilTempMappings );       // Temp and HW share a directory, so they share a lock
1033         
1034         for( j = 0; j < Number; j++ )
1035         {
1036                 MM_DerefPhys( gaPageTable[ (HW_MAP_ADDR >> 12) + i + j ] & ~0xFFF );
1037                 gaPageTable[ (HW_MAP_ADDR >> 12) + i + j ] = 0;
1038         }
1039         
1040         RELEASE( &gilTempMappings );
1041 }
1042
1043 // --- EXPORTS ---
1044 EXPORT(MM_GetPhysAddr);
1045 EXPORT(MM_Map);
1046 //EXPORT(MM_Unmap);
1047 EXPORT(MM_MapHWPages);
1048 EXPORT(MM_AllocDMA);
1049 EXPORT(MM_UnmapHWPages);
