[tpg/acess2.git] / Kernel / arch / x86 / mm_virt.c
1 /*
2  * AcessOS Microkernel Version
3  * mm_virt.c
4  * 
5  * Memory Map
6  * 0xE0 - Kernel Base
7  * 0xF0 - Kernel Stacks
8  * 0xFD - Fractals
9  * 0xFE - Hardware Mappings
10  * 0xFF - System Calls / Kernel's User Code
11  */
12 #define DEBUG   0
13 #define SANITY  1
14 #include <acess.h>
15 #include <mm_virt.h>
16 #include <mm_phys.h>
17 #include <proc.h>
18
19 #if USE_PAE
20 # define TAB    21
21 # define DIR    30
22 #else
23 # define TAB    22
24 #endif
25
26 #define KERNEL_STACKS           0xF0000000
27 #define KERNEL_STACK_SIZE       0x00008000
28 #define KERNEL_STACKS_END       0xFC000000
29 #define WORKER_STACKS           0x00100000      // Thread0 Only!
30 #define WORKER_STACK_SIZE       KERNEL_STACK_SIZE
31 #define WORKER_STACKS_END       0xB0000000
32 #define NUM_WORKER_STACKS       ((WORKER_STACKS_END-WORKER_STACKS)/WORKER_STACK_SIZE)
33
34 #define PAE_PAGE_TABLE_ADDR     0xFC000000      // 16 MiB
35 #define PAE_PAGE_DIR_ADDR       0xFCFC0000      // 16 KiB
36 #define PAE_PAGE_PDPT_ADDR      0xFCFC3F00      // 32 bytes
37 #define PAE_TMP_PDPT_ADDR       0xFCFC3F20      // 32 bytes
38 #define PAE_TMP_DIR_ADDR        0xFCFE0000      // 16 KiB
39 #define PAE_TMP_TABLE_ADDR      0xFD000000      // 16 MiB
40
41 #define PAGE_TABLE_ADDR 0xFC000000
42 #define PAGE_DIR_ADDR   0xFC3F0000
43 #define PAGE_CR3_ADDR   0xFC3F0FC0
44 #define TMP_CR3_ADDR    0xFC3F0FC4      // Part of core instead of temp
45 #define TMP_DIR_ADDR    0xFC3F1000      // Same
46 #define TMP_TABLE_ADDR  0xFC400000
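// The addresses above implement the "fractal" (recursive) mapping: one page
// directory entry points back at the directory itself, so every page table
// appears as a flat array at PAGE_TABLE_ADDR and the directory itself at
// PAGE_DIR_ADDR.  The TMP_* window is a second such mapping, used to edit
// another address space's tables without switching to it (see MM_Clone).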
47
48 #define HW_MAP_ADDR             0xFE000000
49 #define HW_MAP_MAX              0xFFEF0000
50 #define NUM_HW_PAGES    ((HW_MAP_MAX-HW_MAP_ADDR)/0x1000)
51 #define TEMP_MAP_ADDR   0xFFEF0000      // Allows 16 "temp" pages
52 #define NUM_TEMP_PAGES  16
53 #define LAST_BLOCK_ADDR 0xFFFF0000      // Free space for kernel-provided user code / *(-1) protection
54
55 #define PF_PRESENT      0x1
56 #define PF_WRITE        0x2
57 #define PF_USER         0x4
58 #define PF_COW          0x200
59 #define PF_NOPAGE       0x400
60
61 #define INVLPG(addr)    __asm__ __volatile__ ("invlpg (%0)"::"r"(addr))
62
63 #if USE_PAE
64 typedef Uint64  tTabEnt;
65 #else
66 typedef Uint32  tTabEnt;
67 #endif
68
69 // === IMPORTS ===
70 extern void     _UsertextEnd, _UsertextBase;
71 extern Uint32   gaInitPageDir[1024];
72 extern Uint32   gaInitPageTable[1024];
73 extern void     Threads_SegFault(tVAddr Addr);
74 extern void     Error_Backtrace(Uint eip, Uint ebp);
75
76 // === PROTOTYPES ===
77 void    MM_PreinitVirtual(void);
78 void    MM_InstallVirtual(void);
79 void    MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs);
80 void    MM_DumpTables(tVAddr Start, tVAddr End);
81 tVAddr  MM_ClearUser(void);
82 tPAddr  MM_DuplicatePage(tVAddr VAddr);
83
84 // === GLOBALS ===
85 #define gaPageTable     ((tTabEnt*)PAGE_TABLE_ADDR)
86 #define gaPageDir       ((tTabEnt*)PAGE_DIR_ADDR)
87 #define gaTmpTable      ((tTabEnt*)TMP_TABLE_ADDR)
88 #define gaTmpDir        ((tTabEnt*)TMP_DIR_ADDR)
89 #define gpPageCR3       ((tTabEnt*)PAGE_CR3_ADDR)
90 #define gpTmpCR3        ((tTabEnt*)TMP_CR3_ADDR)
91
92 #define gaPAE_PageTable ((tTabEnt*)PAE_PAGE_TABLE_ADDR)
93 #define gaPAE_PageDir   ((tTabEnt*)PAE_PAGE_DIR_ADDR)
94 #define gaPAE_MainPDPT  ((tTabEnt*)PAE_PAGE_PDPT_ADDR)
95 #define gaPAE_TmpTable ((tTabEnt*)PAE_TMP_TABLE_ADDR)
96 #define gaPAE_TmpDir    ((tTabEnt*)PAE_TMP_DIR_ADDR)
97 #define gaPAE_TmpPDPT   ((tTabEnt*)PAE_TMP_PDPT_ADDR)
98  int    gbUsePAE = 0;
99 tMutex  glTempMappings;
100 tMutex  glTempFractal;
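// Worker-stack allocation state: one bit per stack slot, plus a search hint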
101 Uint32  gWorkerStacks[(NUM_WORKER_STACKS+31)/32];
102  int    giLastUsedWorker = 0;
103
104 // === CODE ===
105 /**
106  * \fn void MM_PreinitVirtual(void)
107  * \brief Maps the fractal mappings
108  */
109 void MM_PreinitVirtual(void)
110 {
111         #if USE_PAE
112         gaInitPageDir[ ((PAGE_TABLE_ADDR >> TAB)-3*512+3)*2 ] = ((tTabEnt)&gaInitPageDir - KERNEL_BASE) | 3;
113         #else
114         gaInitPageDir[ PAGE_TABLE_ADDR >> 22 ] = ((tTabEnt)&gaInitPageDir - KERNEL_BASE) | 3;
115         #endif
116         INVLPG( PAGE_TABLE_ADDR );
117 }
118
119 /**
120  * \fn void MM_InstallVirtual(void)
121  * \brief Sets up the constant page mappings
122  */
123 void MM_InstallVirtual(void)
124 {
125          int    i;
126         
127         #if USE_PAE
128         // --- Pre-Allocate kernel tables
129         for( i = KERNEL_BASE >> TAB; i < 1024*4; i ++ )
130         {
131                 if( gaPAE_PageDir[ i ] )        continue;
132                 
133                 // Skip stack tables, they are process unique
134                 if( i > KERNEL_STACKS >> TAB && i < KERNEL_STACKS_END >> TAB) {
135                         gaPAE_PageDir[ i ] = 0;
136                         continue;
137                 }
138                 // Preallocate table
139                 gaPAE_PageDir[ i ] = MM_AllocPhys() | 3;
140                 INVLPG( &gaPAE_PageTable[i*512] );
141                 memset( &gaPAE_PageTable[i*512], 0, 0x1000 );
142         }
143         #else
144         // --- Pre-Allocate kernel tables
145         for( i = KERNEL_BASE>>22; i < 1024; i ++ )
146         {
147                 if( gaPageDir[ i ] )    continue;
148                 // Skip stack tables, they are process unique
149                 if( i > KERNEL_STACKS >> 22 && i < KERNEL_STACKS_END >> 22) {
150                         gaPageDir[ i ] = 0;
151                         continue;
152                 }
153                 // Preallocate table
154                 gaPageDir[ i ] = MM_AllocPhys() | 3;
155                 INVLPG( &gaPageTable[i*1024] );
156                 memset( &gaPageTable[i*1024], 0, 0x1000 );
157         }
158         #endif
159         
160         // Unset kernel on the User Text pages
161         for( i = ((tVAddr)&_UsertextEnd-(tVAddr)&_UsertextBase+0xFFF)/4096; i--; ) {
162                 MM_SetFlags( (tVAddr)&_UsertextBase + i*4096, 0, MM_PFLAG_KERNEL );
163         }
164 }
165
166 /**
167  * \brief Cleans up the SMP required mappings
168  */
169 void MM_FinishVirtualInit(void)
170 {
171         #if USE_PAE
172         gaInitPDPT[ 0 ] = 0;
173         #else
174         gaInitPageDir[ 0 ] = 0;
175         #endif
176 }
177
178 /**
179  * \fn void MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs)
180  * \brief Called on a page fault
181  */
182 void MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs)
183 {
184         //ENTER("xAddr bErrorCode", Addr, ErrorCode);
185         
186         // -- Check for COW --
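        // (A write hit a copy-on-write page: if this is the only reference,
        //  simply restore write access; otherwise copy the contents into a
        //  fresh frame and drop one reference on the shared page.)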
187         if( gaPageDir  [Addr>>22] & PF_PRESENT
188          && gaPageTable[Addr>>12] & PF_PRESENT
189          && gaPageTable[Addr>>12] & PF_COW )
190         {
191                 tPAddr  paddr;
192                 if(MM_GetRefCount( gaPageTable[Addr>>12] & ~0xFFF ) == 1)
193                 {
194                         gaPageTable[Addr>>12] &= ~PF_COW;
195                         gaPageTable[Addr>>12] |= PF_PRESENT|PF_WRITE;
196                 }
197                 else
198                 {
199                         //Log("MM_PageFault: COW - MM_DuplicatePage(0x%x)", Addr);
200                         paddr = MM_DuplicatePage( Addr );
201                         MM_DerefPhys( gaPageTable[Addr>>12] & ~0xFFF );
202                         gaPageTable[Addr>>12] &= PF_USER;
203                         gaPageTable[Addr>>12] |= paddr|PF_PRESENT|PF_WRITE;
204                 }
205                 
206                 INVLPG( Addr & ~0xFFF );
207                 //LEAVE('-')
208                 return;
209         }
210         
211         // If it was a user, tell the thread handler
212         if(ErrorCode & 4) {
213                 Warning("%s %s %s memory%s",
214                         (ErrorCode&4?"User":"Kernel"),
215                         (ErrorCode&2?"write to":"read from"),
216                         (ErrorCode&1?"bad/locked":"non-present"),
217                         (ErrorCode&16?" (Instruction Fetch)":"")
218                         );
219                 Warning("User Pagefault: Instruction at %04x:%08x accessed %p", Regs->cs, Regs->eip, Addr);
220                 __asm__ __volatile__ ("sti");   // Restart IRQs
221                 #if 1
222                 Error_Backtrace(Regs->eip, Regs->ebp);
223                 #endif
224                 Threads_SegFault(Addr);
225                 return ;
226         }
227         
228         Debug_KernelPanic();
229         
230         // -- Check Error Code --
231         if(ErrorCode & 8)
232                 Warning("Reserved Bits Trashed!");
233         else
234         {
235                 Warning("%s %s %s memory%s",
236                         (ErrorCode&4?"User":"Kernel"),
237                         (ErrorCode&2?"write to":"read from"),
238                         (ErrorCode&1?"bad/locked":"non-present"),
239                         (ErrorCode&16?" (Instruction Fetch)":"")
240                         );
241         }
242         
243         Log("Code at %p accessed %p", Regs->eip, Addr);
244         // Print Stack Backtrace
245         Error_Backtrace(Regs->eip, Regs->ebp);
246         
247         Log("gaPageDir[0x%x] = 0x%x", Addr>>22, gaPageDir[Addr>>22]);
248         if( gaPageDir[Addr>>22] & PF_PRESENT )
249                 Log("gaPageTable[0x%x] = 0x%x", Addr>>12, gaPageTable[Addr>>12]);
250         
251         //MM_DumpTables(0, -1); 
252         
253         // Register Dump
254         Log("EAX %08x ECX %08x EDX %08x EBX %08x", Regs->eax, Regs->ecx, Regs->edx, Regs->ebx);
255         Log("ESP %08x EBP %08x ESI %08x EDI %08x", Regs->esp, Regs->ebp, Regs->esi, Regs->edi);
256         //Log("SS:ESP %04x:%08x", Regs->ss, Regs->esp);
257         Log("CS:EIP %04x:%08x", Regs->cs, Regs->eip);
258         Log("DS %04x ES %04x FS %04x GS %04x", Regs->ds, Regs->es, Regs->fs, Regs->gs);
259         {
260                 Uint    dr0, dr1;
261                 __ASM__ ("mov %%dr0, %0":"=r"(dr0):);
262                 __ASM__ ("mov %%dr1, %0":"=r"(dr1):);
263                 Log("DR0 %08x DR1 %08x", dr0, dr1);
264         }
265         
266         Panic("Page Fault at 0x%x (Accessed 0x%x)", Regs->eip, Addr);
267 }
268
269 /**
270  * \fn void MM_DumpTables(tVAddr Start, tVAddr End)
271  * \brief Dumps the layout of the page tables
272  */
273 void MM_DumpTables(tVAddr Start, tVAddr End)
274 {
275         tVAddr  rangeStart = 0;
276         tPAddr  expected = 0;
277         tVAddr  curPos;
278         Uint    page;
279         const tPAddr    MASK = ~0xF98;
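        // ~0xF98 masks out PWT/PCD/PAT, the global bit and the software-defined
        // bits (PF_COW, PF_NOPAGE) when deciding whether two entries form one run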
280         
281         Start >>= 12;   End >>= 12;
282         
283         #if 0
284         Log("Directory Entries:");
285         for(page = Start >> 10;
286                 page < (End >> 10)+1;
287                 page ++)
288         {
289                 if(gaPageDir[page])
290                 {
291                         Log(" 0x%08x-0x%08x :: 0x%08x",
292                                 page<<22, ((page+1)<<22)-1,
293                                 gaPageDir[page]&~0xFFF
294                                 );
295                 }
296         }
297         #endif
298         
299         Log("Table Entries:");
300         for(page = Start, curPos = Start<<12;
301                 page < End;
302                 curPos += 0x1000, page++)
303         {
304                 if( !(gaPageDir[curPos>>22] & PF_PRESENT)
305                 ||  !(gaPageTable[page] & PF_PRESENT)
306                 ||  (gaPageTable[page] & MASK) != expected)
307                 {
308                         if(expected) {
309                                 Log(" 0x%08x-0x%08x => 0x%08x-0x%08x (%s%s%s%s)",
310                                         rangeStart, curPos - 1,
311                                         gaPageTable[rangeStart>>12] & ~0xFFF,
312                                         (expected & ~0xFFF) - 1,
313                                         (expected & PF_NOPAGE ? "P" : "-"),
314                                         (expected & PF_COW ? "C" : "-"),
315                                         (expected & PF_USER ? "U" : "-"),
316                                         (expected & PF_WRITE ? "W" : "-")
317                                         );
318                                 expected = 0;
319                         }
320                         if( !(gaPageDir[curPos>>22] & PF_PRESENT) )     continue;
321                         if( !(gaPageTable[curPos>>12] & PF_PRESENT) )   continue;
322                         
323                         expected = (gaPageTable[page] & MASK);
324                         rangeStart = curPos;
325                 }
326                 if(expected)    expected += 0x1000;
327         }
328         
329         if(expected) {
330                 Log("0x%08x-0x%08x => 0x%08x-0x%08x (%s%s%s%s)",
331                         rangeStart, curPos - 1,
332                         gaPageTable[rangeStart>>12] & ~0xFFF,
333                         (expected & ~0xFFF) - 1,
334                         (expected & PF_NOPAGE ? "p" : "-"),
335                         (expected & PF_COW ? "C" : "-"),
336                         (expected & PF_USER ? "U" : "-"),
337                         (expected & PF_WRITE ? "W" : "-")
338                         );
339                 expected = 0;
340         }
341 }
342
343 /**
344  * \fn tPAddr MM_Allocate(tVAddr VAddr)
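 * \brief Allocates a physical page and maps it at VAddr
 * \return Physical address of the new page, or 0 on failure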
345  */
346 tPAddr MM_Allocate(tVAddr VAddr)
347 {
348         tPAddr  paddr;
349         //ENTER("xVAddr", VAddr);
350         //__asm__ __volatile__ ("xchg %bx,%bx");
351         // Check if the directory is mapped
352         if( gaPageDir[ VAddr >> 22 ] == 0 )
353         {
354                 // Allocate directory
355                 paddr = MM_AllocPhys();
356                 //LOG("paddr = 0x%llx (new table)", paddr);
357                 if( paddr == 0 ) {
358                         Warning("MM_Allocate - Out of Memory (Called by %p)", __builtin_return_address(0));
359                         //LEAVE('i',0);
360                         return 0;
361                 }
362                 // Map
363                 gaPageDir[ VAddr >> 22 ] = paddr | 3;
364                 // Mark as user
365                 if(VAddr < MM_USER_MAX) gaPageDir[ VAddr >> 22 ] |= PF_USER;
366                 
367                 INVLPG( &gaPageDir[ VAddr >> 22 ] );
368                 //LOG("Clearing new table");
369                 memsetd( &gaPageTable[ (VAddr >> 12) & ~0x3FF ], 0, 1024 );
370         }
371         // Check if the page is already allocated
372         else if( gaPageTable[ VAddr >> 12 ] != 0 ) {
373                 Warning("MM_Allocate - Allocating to used address (%p)", VAddr);
374                 //LEAVE('X', gaPageTable[ VAddr >> 12 ] & ~0xFFF);
375                 return gaPageTable[ VAddr >> 12 ] & ~0xFFF;
376         }
377         
378         // Allocate
379         paddr = MM_AllocPhys();
380         //LOG("paddr = 0x%llx", paddr);
381         if( paddr == 0 ) {
382                 Warning("MM_Allocate - Out of Memory when allocating at %p (Called by %p)",
383                         VAddr, __builtin_return_address(0));
384                 //LEAVE('i',0);
385                 return 0;
386         }
387         // Map
388         gaPageTable[ VAddr >> 12 ] = paddr | 3;
389         // Mark as user
390         if(VAddr < MM_USER_MAX) gaPageTable[ VAddr >> 12 ] |= PF_USER;
391         // Invalidate Cache for address
392         INVLPG( VAddr & ~0xFFF );
393         
394         //LEAVE('X', paddr);
395         return paddr;
396 }
397
398 /**
399  * \fn void MM_Deallocate(tVAddr VAddr)
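 * \brief Unmaps the page at VAddr and dereferences its physical frame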
400  */
401 void MM_Deallocate(tVAddr VAddr)
402 {
403         if( gaPageDir[ VAddr >> 22 ] == 0 ) {
404                 Warning("MM_Deallocate - Directory not mapped");
405                 return;
406         }
407         
408         if(gaPageTable[ VAddr >> 12 ] == 0) {
409                 Warning("MM_Deallocate - Page is not allocated");
410                 return;
411         }
412         
413         // Dereference page
414         MM_DerefPhys( gaPageTable[ VAddr >> 12 ] & ~0xFFF );
415         // Clear page
416         gaPageTable[ VAddr >> 12 ] = 0;
417 }
418
419 /**
420  * \fn tPAddr MM_GetPhysAddr(tVAddr Addr)
421  * \brief Returns the physical address mapped at Addr, or 0 if it is not accessible
422  */
423 tPAddr MM_GetPhysAddr(tVAddr Addr)
424 {
425         if( !(gaPageDir[Addr >> 22] & 1) )
426                 return 0;
427         if( !(gaPageTable[Addr >> 12] & 1) )
428                 return 0;
429         return (gaPageTable[Addr >> 12] & ~0xFFF) | (Addr & 0xFFF);
430 }
431
432 /**
433  * \fn void MM_SetCR3(Uint CR3)
434  * \brief Sets the current process space
435  */
436 void MM_SetCR3(Uint CR3)
437 {
438         __asm__ __volatile__ ("mov %0, %%cr3"::"r"(CR3));
439 }
440
441 /**
442  * \fn int MM_Map(tVAddr VAddr, tPAddr PAddr)
443  * \brief Map a physical page to a virtual one
444  */
445 int MM_Map(tVAddr VAddr, tPAddr PAddr)
446 {
447         //ENTER("xVAddr xPAddr", VAddr, PAddr);
448         // Sanity check
449         if( PAddr & 0xFFF || VAddr & 0xFFF ) {
450                 Warning("MM_Map - Physical or Virtual Addresses are not aligned");
451                 //LEAVE('i', 0);
452                 return 0;
453         }
454         
455         // Align addresses
456         PAddr &= ~0xFFF;        VAddr &= ~0xFFF;
457         
458         // Check if the directory is mapped
459         if( gaPageDir[ VAddr >> 22 ] == 0 )
460         {
461                 gaPageDir[ VAddr >> 22 ] = MM_AllocPhys() | 3;
462                 
463                 // Mark as user
464                 if(VAddr < MM_USER_MAX) gaPageDir[ VAddr >> 22 ] |= PF_USER;
465                 
466                 INVLPG( &gaPageTable[ (VAddr >> 12) & ~0x3FF ] );
467                 memsetd( &gaPageTable[ (VAddr >> 12) & ~0x3FF ], 0, 1024 );
468         }
469         // Check if the page is already allocated
470         else if( gaPageTable[ VAddr >> 12 ] != 0 ) {
471                 Warning("MM_Map - Allocating to used address");
472                 //LEAVE('i', 0);
473                 return 0;
474         }
475         
476         // Map
477         gaPageTable[ VAddr >> 12 ] = PAddr | 3;
478         // Mark as user
479         if(VAddr < MM_USER_MAX) gaPageTable[ VAddr >> 12 ] |= PF_USER;
480         
481         //LOG("gaPageTable[ 0x%x ] = (Uint)%p = 0x%x",
482         //      VAddr >> 12, &gaPageTable[ VAddr >> 12 ], gaPageTable[ VAddr >> 12 ]);
483         
484         // Reference
485         MM_RefPhys( PAddr );
486         
487         //LOG("INVLPG( 0x%x )", VAddr);
488         INVLPG( VAddr );
489         
490         //LEAVE('i', 1);
491         return 1;
492 }
493
494 /**
495  * \fn tVAddr MM_ClearUser()
496  * \brief Clear user's address space
497  */
498 tVAddr MM_ClearUser(void)
499 {
500         Uint    i, j;
501         
502         // Copy Directories
503         for( i = 0; i < (MM_USER_MAX>>22); i ++ )
504         {
505                 // Check if directory is not allocated
506                 if( !(gaPageDir[i] & PF_PRESENT) ) {
507                         gaPageDir[i] = 0;
508                         continue;
509                 }
510                 
511                 
512                 for( j = 0; j < 1024; j ++ )
513                 {
514                         if( gaPageTable[i*1024+j] & 1 )
515                                 MM_DerefPhys( gaPageTable[i*1024+j] & ~0xFFF );
516                         gaPageTable[i*1024+j] = 0;
517                 }
518                 
519                 MM_DerefPhys( gaPageDir[i] & ~0xFFF );
520                 gaPageDir[i] = 0;
521                 INVLPG( &gaPageTable[i*1024] );
522         }
523         INVLPG( gaPageDir );
524         
525         return *gpPageCR3;
526 }
527
528 /**
529  * \fn tPAddr MM_Clone(void)
530  * \brief Clone the current address space
531  */
532 tPAddr MM_Clone(void)
533 {
534         Uint    i, j;
535         tVAddr  ret;
536         Uint    page = 0;
537         tVAddr  kStackBase = Proc_GetCurThread()->KernelStack - KERNEL_STACK_SIZE;
538         void    *tmp;
539         
540         Mutex_Acquire( &glTempFractal );
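        // The temporary fractal (gpTmpCR3 / gaTmpDir / gaTmpTable) maps the new
        // address space's directory and tables into the current one, so the
        // clone can be built without switching CR3.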
541         
542         // Create Directory Table
543         *gpTmpCR3 = MM_AllocPhys() | 3;
544         INVLPG( gaTmpDir );
545         //LOG("Allocated Directory (%x)", *gpTmpCR3);
546         memsetd( gaTmpDir, 0, 1024 );
547         
548         if( Threads_GetPID() != 0 )
549         {       
550                 // Copy Tables
551                 for( i = 0; i < 768; i ++)
552                 {
553                         // Check if table is allocated
554                         if( !(gaPageDir[i] & PF_PRESENT) ) {
555                                 gaTmpDir[i] = 0;
556                                 page += 1024;
557                                 continue;
558                         }
559                         
560                         // Allocate new table
561                         gaTmpDir[i] = MM_AllocPhys() | (gaPageDir[i] & 7);
562                         INVLPG( &gaTmpTable[page] );
563                         // Fill
564                         for( j = 0; j < 1024; j ++, page++ )
565                         {
566                                 if( !(gaPageTable[page] & PF_PRESENT) ) {
567                                         gaTmpTable[page] = 0;
568                                         continue;
569                                 }
570                                 
571                                 // Reference the old page
572                                 MM_RefPhys( gaPageTable[page] & ~0xFFF );
573                                 // Add to new table
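                                // (Writable pages are downgraded to read-only + COW
                                //  in both parent and child; MM_PageFault splits
                                //  them again on the next write.)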
574                                 if(gaPageTable[page] & PF_WRITE) {
575                                         gaTmpTable[page] = (gaPageTable[page] & ~PF_WRITE) | PF_COW;
576                                         gaPageTable[page] = (gaPageTable[page] & ~PF_WRITE) | PF_COW;
577                                         INVLPG( page << 12 );
578                                 }
579                                 else
580                                         gaTmpTable[page] = gaPageTable[page];
581                         }
582                 }
583         }
584         
585         // Map in kernel tables (and make fractal mapping)
586         for( i = 768; i < 1024; i ++ )
587         {
588                 // Fractal
589                 if( i == (PAGE_TABLE_ADDR >> 22) ) {
590                         gaTmpDir[ PAGE_TABLE_ADDR >> 22 ] = *gpTmpCR3;
591                         continue;
592                 }
593                 
594                 if( gaPageDir[i] == 0 ) {
595                         gaTmpDir[i] = 0;
596                         continue;
597                 }
598                 
599                 //LOG("gaPageDir[%x/4] = 0x%x", i*4, gaPageDir[i]);
600                 MM_RefPhys( gaPageDir[i] & ~0xFFF );
601                 gaTmpDir[i] = gaPageDir[i];
602         }
603         
604         // Allocate kernel stack
605         for(i = KERNEL_STACKS >> 22;
606                 i < KERNEL_STACKS_END >> 22;
607                 i ++ )
608         {
609                 // Check if directory is allocated
610                 if( (gaPageDir[i] & 1) == 0 ) {
611                         gaTmpDir[i] = 0;
612                         continue;
613                 }               
614                 
615                 // We don't care about other kernel stacks, just the current one
616                 if( i != kStackBase >> 22 ) {
617                         MM_DerefPhys( gaPageDir[i] & ~0xFFF );
618                         gaTmpDir[i] = 0;
619                         continue;
620                 }
621                 
622                 // Create a copy
623                 gaTmpDir[i] = MM_AllocPhys() | 3;
624                 INVLPG( &gaTmpTable[i*1024] );
625                 for( j = 0; j < 1024; j ++ )
626                 {
627                         // Is the page allocated? If not, skip
628                         if( !(gaPageTable[i*1024+j] & 1) ) {
629                                 gaTmpTable[i*1024+j] = 0;
630                                 continue;
631                         }
632                         
633                         // We don't care about other kernel stacks
634                         if( ((i*1024+j)*4096 & ~(KERNEL_STACK_SIZE-1)) != kStackBase ) {
635                                 gaTmpTable[i*1024+j] = 0;
636                                 continue;
637                         }
638                         
639                         // Allocate page
640                         gaTmpTable[i*1024+j] = MM_AllocPhys() | 3;
641                         
642                         MM_RefPhys( gaTmpTable[i*1024+j] & ~0xFFF );
643                         
644                         tmp = (void *) MM_MapTemp( gaTmpTable[i*1024+j] & ~0xFFF );
645                         memcpy( tmp, (void *)( (i*1024+j)*0x1000 ), 0x1000 );
646                         MM_FreeTemp( (Uint)tmp );
647                 }
648         }
649         
650         ret = *gpTmpCR3 & ~0xFFF;
651         Mutex_Release( &glTempFractal );
652         
653         //LEAVE('x', ret);
654         return ret;
655 }
656
657 /**
658  * \fn tVAddr MM_NewKStack(void)
659  * \brief Create a new kernel stack
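 * \return Top of the new stack (stacks grow downwards), or 0 on failure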
660  */
661 tVAddr MM_NewKStack(void)
662 {
663         tVAddr  base;
664         Uint    i;
665         for(base = KERNEL_STACKS; base < KERNEL_STACKS_END; base += KERNEL_STACK_SIZE)
666         {
667                 // Check if space is free
668                 if(MM_GetPhysAddr(base) != 0)   continue;
669                 // Allocate
670                 //for(i = KERNEL_STACK_SIZE; i -= 0x1000 ; )
671                 for(i = 0; i < KERNEL_STACK_SIZE; i += 0x1000 )
672                 {
673                         if( MM_Allocate(base+i) == 0 )
674                         {
675                                 // On error, print a warning and return error
676                                 Warning("MM_NewKStack - Out of memory");
677                                 // - Clean up
678                                 //for( i += 0x1000 ; i < KERNEL_STACK_SIZE; i += 0x1000 )
679                                 //      MM_Deallocate(base+i);
680                                 return 0;
681                         }
682                 }
683                 // Success
684                 Log("MM_NewKStack - Allocated %p", base + KERNEL_STACK_SIZE);
685                 return base+KERNEL_STACK_SIZE;
686         }
687         // No stacks left
688         Warning("MM_NewKStack - No address space left");
689         return 0;
690 }
691
692 /**
693  * \fn tVAddr MM_NewWorkerStack()
694  * \brief Creates a new worker stack
695  */
696 tVAddr MM_NewWorkerStack()
697 {
698         Uint    esp, ebp;
699         Uint    oldstack;
700         Uint    base, addr;
701          int    i, j;
702         Uint    *tmpPage;
703         tPAddr  pages[WORKER_STACK_SIZE>>12];
704         
705         // Get the old ESP and EBP
706         __asm__ __volatile__ ("mov %%esp, %0": "=r"(esp));
707         __asm__ __volatile__ ("mov %%ebp, %0": "=r"(ebp));
708         
709         // TODO: Thread safety
710         // Find a free worker stack address
711         for(base = giLastUsedWorker; base < NUM_WORKER_STACKS; base++)
712         {
713                 // Used block
714                 if( gWorkerStacks[base/32] == -1 ) {
715                         base += 31;     base &= ~31;
716                         base --;        // Counteracted by the base++
717                         continue;
718                 }
719                 // Used stack
720                 if( gWorkerStacks[base/32] & (1 << (base & 31)) ) {
721                         continue;
722                 }
723                 break;
724         }
725         if(base >= NUM_WORKER_STACKS) {
726                 Warning("Uh-oh! Out of worker stacks");
727                 return 0;
728         }
729         
730         // It's ours now!
731         gWorkerStacks[base/32] |= (1 << (base & 31));
732         // Make life easier for later calls
733         giLastUsedWorker = base;
734         // We have one
735         base = WORKER_STACKS + base * WORKER_STACK_SIZE;
736         //Log(" MM_NewWorkerStack: base = 0x%x", base);
737         
738         // Acquire the lock for the temp fractal mappings
739         Mutex_Acquire(&glTempFractal);
740         
741         // Set the temp fractals to TID0's address space
742         *gpTmpCR3 = ((Uint)gaInitPageDir - KERNEL_BASE) | 3;
743         //Log(" MM_NewWorkerStack: *gpTmpCR3 = 0x%x", *gpTmpCR3);
744         INVLPG( gaTmpDir );
745         
746         
747         // Check if the directory is mapped (we are assuming that the stacks
748         // will fit neatly in a directory)
749         //Log(" MM_NewWorkerStack: gaTmpDir[ 0x%x ] = 0x%x", base>>22, gaTmpDir[ base >> 22 ]);
750         if(gaTmpDir[ base >> 22 ] == 0) {
751                 gaTmpDir[ base >> 22 ] = MM_AllocPhys() | 3;
752                 INVLPG( &gaTmpTable[ (base>>12) & ~0x3FF ] );
753         }
754         
755         // Mapping Time!
756         for( addr = 0; addr < WORKER_STACK_SIZE; addr += 0x1000 )
757         //for( addr = WORKER_STACK_SIZE; addr; addr -= 0x1000 )
758         {
759                 pages[ addr >> 12 ] = MM_AllocPhys();
760                 gaTmpTable[ (base + addr) >> 12 ] = pages[addr>>12] | 3;
761         }
762         *gpTmpCR3 = 0;
763         // Release the temp mapping lock
764         Mutex_Release(&glTempFractal);
765         
766         // Copy the old stack
767         oldstack = (esp + KERNEL_STACK_SIZE-1) & ~(KERNEL_STACK_SIZE-1);
768         esp = oldstack - esp;   // ESP as an offset in the stack
769         
770         // Make `base` be the top of the stack
771         base += WORKER_STACK_SIZE;
772         
773         i = (WORKER_STACK_SIZE>>12) - 1;
774         // Copy the contents of the old stack to the new one, altering the addresses
775         // `addr` refers to bytes below the stack base (counting down in memory)
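        // Heuristic pointer fix-up: any copied word whose value falls within the
        // old stack's in-use range is assumed to be a pointer into that stack and
        // is rebased onto the new stack; everything else is copied verbatim.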
776         for(addr = 0; addr < esp; addr += 0x1000)
777         {
778                 Uint    *stack = (Uint*)( oldstack-(addr+0x1000) );
779                 tmpPage = (void*)MM_MapTemp( pages[i] );
780                 // Copy old stack
781                 for(j = 0; j < 1024; j++)
782                 {
783                         // Possible Stack address?
784                         if(oldstack-esp < stack[j] && stack[j] < oldstack)
785                                 tmpPage[j] = base - (oldstack - stack[j]);
786                         else    // Seems not, best leave it alone
787                                 tmpPage[j] = stack[j];
788                 }
789                 MM_FreeTemp((tVAddr)tmpPage);
790                 i --;
791         }
792         
793         //Log("MM_NewWorkerStack: RETURN 0x%x", base);
794         return base;
795 }
796
797 /**
798  * \fn void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask)
799  * \brief Sets the flags on a page
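 * Mask selects which flags may change; e.g. MM_SetFlags(addr, 0, MM_PFLAG_KERNEL)
 * clears the kernel-only restriction (as done in MM_InstallVirtual).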
800  */
801 void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask)
802 {
803         tTabEnt *ent;
804         if( !(gaPageDir[VAddr >> 22] & 1) )     return ;
805         if( !(gaPageTable[VAddr >> 12] & 1) )   return ;
806         
807         ent = &gaPageTable[VAddr >> 12];
808         
809         // Read-Only
810         if( Mask & MM_PFLAG_RO )
811         {
812                 if( Flags & MM_PFLAG_RO ) {
813                         *ent &= ~PF_WRITE;
814                 }
815                 else {
816                         gaPageDir[VAddr >> 22] |= PF_WRITE;
817                         *ent |= PF_WRITE;
818                 }
819         }
820         
821         // Kernel
822         if( Mask & MM_PFLAG_KERNEL )
823         {
824                 if( Flags & MM_PFLAG_KERNEL ) {
825                         *ent &= ~PF_USER;
826                 }
827                 else {
828                         gaPageDir[VAddr >> 22] |= PF_USER;
829                         *ent |= PF_USER;
830                 }
831         }
832         
833         // Copy-On-Write
834         if( Mask & MM_PFLAG_COW )
835         {
836                 if( Flags & MM_PFLAG_COW ) {
837                         *ent &= ~PF_WRITE;
838                         *ent |= PF_COW;
839                 }
840                 else {
841                         *ent &= ~PF_COW;
842                         *ent |= PF_WRITE;
843                 }
844         }
845         
846         //Log("MM_SetFlags: *ent = 0x%08x, gaPageDir[%i] = 0x%08x",
847         //      *ent, VAddr >> 22, gaPageDir[VAddr >> 22]);
848 }
849
850 /**
851  * \brief Get the flags on a page
852  */
853 Uint MM_GetFlags(tVAddr VAddr)
854 {
855         tTabEnt *ent;
856         Uint    ret = 0;
857         
858         // Validity Check
859         if( !(gaPageDir[VAddr >> 22] & 1) )     return 0;
860         if( !(gaPageTable[VAddr >> 12] & 1) )   return 0;
861         
862         ent = &gaPageTable[VAddr >> 12];
863         
864         // Read-Only
865         if( !(*ent & PF_WRITE) )        ret |= MM_PFLAG_RO;
866         // Kernel
867         if( !(*ent & PF_USER) ) ret |= MM_PFLAG_KERNEL;
868         // Copy-On-Write
869         if( *ent & PF_COW )     ret |= MM_PFLAG_COW;
870         
871         return ret;
872 }
873
874 /**
875  * \fn tPAddr MM_DuplicatePage(tVAddr VAddr)
876  * \brief Copies the contents of a virtual page into a newly allocated physical frame
877  */
878 tPAddr MM_DuplicatePage(tVAddr VAddr)
879 {
880         tPAddr  ret;
881         Uint    temp;
882          int    wasRO = 0;
883         
884         //ENTER("xVAddr", VAddr);
885         
886         // Check if mapped
887         if( !(gaPageDir  [VAddr >> 22] & PF_PRESENT) )  return 0;
888         if( !(gaPageTable[VAddr >> 12] & PF_PRESENT) )  return 0;
889         
890         // Page Align
891         VAddr &= ~0xFFF;
892         
893         // Allocate new page
894         ret = MM_AllocPhys();
895         
896         // Write-lock the page (to keep the data consistent), saving its R/W state
897         wasRO = (gaPageTable[VAddr >> 12] & PF_WRITE ? 0 : 1);
898         gaPageTable[VAddr >> 12] &= ~PF_WRITE;
899         INVLPG( VAddr );
900         
901         // Copy Data
902         temp = MM_MapTemp(ret);
903         memcpy( (void*)temp, (void*)VAddr, 0x1000 );
904         MM_FreeTemp(temp);
905         
906         // Restore Writeable status
907         if(!wasRO)      gaPageTable[VAddr >> 12] |= PF_WRITE;
908         INVLPG(VAddr);
909         
910         //LEAVE('X', ret);
911         return ret;
912 }
913
914 /**
915  * \fn tVAddr MM_MapTemp(tPAddr PAddr)
916  * \brief Create a temporary memory mapping
917  * \todo Show Luigi Barone (C Lecturer) and see what he thinks
918  */
919 tVAddr MM_MapTemp(tPAddr PAddr)
920 {
921          int    i;
922         
923         //ENTER("XPAddr", PAddr);
924         
925         PAddr &= ~0xFFF;
926         
927         //LOG("glTempMappings = %i", glTempMappings);
928         
929         for(;;)
930         {
931                 Mutex_Acquire( &glTempMappings );
932                 
933                 for( i = 0; i < NUM_TEMP_PAGES; i ++ )
934                 {
935                         // Check if page used
936                         if(gaPageTable[ (TEMP_MAP_ADDR >> 12) + i ] & 1)        continue;
937                         // Mark as used
938                         gaPageTable[ (TEMP_MAP_ADDR >> 12) + i ] = PAddr | 3;
939                         INVLPG( TEMP_MAP_ADDR + (i << 12) );
940                         //LEAVE('p', TEMP_MAP_ADDR + (i << 12));
941                         Mutex_Release( &glTempMappings );
942                         return TEMP_MAP_ADDR + (i << 12);
943                 }
944                 Mutex_Release( &glTempMappings );
945                 Threads_Yield();        // TODO: Use a sleep queue here instead
946         }
947 }
948
949 /**
950  * \fn void MM_FreeTemp(tVAddr VAddr)
951  * \brief Frees a temporary mapping
952  */
953 void MM_FreeTemp(tVAddr VAddr)
954 {
955          int    i = VAddr >> 12;
956         //ENTER("xVAddr", VAddr);
957         
958         if(i >= (TEMP_MAP_ADDR >> 12))
959                 gaPageTable[ i ] = 0;
960         
961         //LEAVE('-');
962 }
963
964 /**
965  * \fn tVAddr MM_MapHWPages(tPAddr PAddr, Uint Number)
966  * \brief Maps a contiguous run of physical pages into the hardware mapping region
967  */
968 tVAddr MM_MapHWPages(tPAddr PAddr, Uint Number)
969 {
970          int    i, j;
971         
972         PAddr &= ~0xFFF;
973         
974         // Scan List
975         for( i = 0; i < NUM_HW_PAGES; i ++ )
976         {               
977                 // Check if addr used
978                 if( gaPageTable[ (HW_MAP_ADDR >> 12) + i ] & 1 )
979                         continue;
980                 
981                 // Check possible region
982                 for( j = 0; j < Number && i + j < NUM_HW_PAGES; j ++ )
983                 {
984                         // If there is an allocated page in the region we are testing, break
985                         if( gaPageTable[ (HW_MAP_ADDR >> 12) + i + j ] & 1 )    break;
986                 }
987                 // Is it all free?
988                 if( j == Number )
989                 {
990                         // Allocate
991                         for( j = 0; j < Number; j++ ) {
992                                 MM_RefPhys( PAddr + (j<<12) );
993                                 gaPageTable[ (HW_MAP_ADDR >> 12) + i + j ] = (PAddr + (j<<12)) | 3;
994                         }
995                         return HW_MAP_ADDR + (i<<12);
996                 }
997         }
998         // If we don't find any, return NULL
999         return 0;
1000 }
1001
1002 /**
1003  * \fn tVAddr MM_AllocDMA(int Pages, int MaxBits, tPAddr *PhysAddr)
1004  * \brief Allocates DMA physical memory
1005  * \param Pages Number of pages required
1006  * \param MaxBits       Maximum number of bits the physical address can have
1007  * \param PhysAddr      Pointer to the location to place the physical address allocated
1008  * \return Virtual address allocated, or 0 on failure
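 *
 * Illustrative sketch only (the calling driver and its error handling are
 * hypothetical):
 * \code
 * tPAddr phys;
 * tVAddr buf = MM_AllocDMA(2, 32, &phys);      // 2 pages, below 4 GiB
 * if( buf == 0 )       return 0;       // out of memory / address space
 * \endcode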
1009  */
1010 tVAddr MM_AllocDMA(int Pages, int MaxBits, tPAddr *PhysAddr)
1011 {
1012         tPAddr  maxCheck = (1 << MaxBits);
1013         tPAddr  phys;
1014         tVAddr  ret;
1015         
1016         ENTER("iPages iMaxBits pPhysAddr", Pages, MaxBits, PhysAddr);
1017         
1018         // Sanity Check
1019         if(MaxBits < 12 || !PhysAddr) {
1020                 LEAVE('i', 0);
1021                 return 0;
1022         }
1023         
1024         // Bound
1025         if(MaxBits >= PHYS_BITS)        maxCheck = -1;
1026         
1027         // Fast Allocate
1028         if(Pages == 1 && MaxBits >= PHYS_BITS)
1029         {
1030                 phys = MM_AllocPhys();
1031                 *PhysAddr = phys;
1032                 ret = MM_MapHWPages(phys, 1);
1033                 if(ret == 0) {
1034                         MM_DerefPhys(phys);
1035                         LEAVE('i', 0);
1036                         return 0;
1037                 }
1038                 LEAVE('x', ret);
1039                 return ret;
1040         }
1041         
1042         // Slow Allocate
1043         phys = MM_AllocPhysRange(Pages, MaxBits);
1044         // - Was it allocated?
1045         if(phys == 0) {
1046                 LEAVE('i', 0);
1047                 return 0;
1048         }
1049         
1050         // Allocated successfully, now map
1051         ret = MM_MapHWPages(phys, Pages);
1052         if( ret == 0 ) {
1053                 // If it didn't map, free then return 0
1054                 for(;Pages--;phys+=0x1000)
1055                         MM_DerefPhys(phys);
1056                 LEAVE('i', 0);
1057                 return 0;
1058         }
1059         
1060         *PhysAddr = phys;
1061         LEAVE('x', ret);
1062         return ret;
1063 }
1064
1065 /**
1066  * \fn void MM_UnmapHWPages(tVAddr VAddr, Uint Number)
1067  * \brief Unmap a hardware page
1068  * \brief Unmaps a range of hardware pages
1069 void MM_UnmapHWPages(tVAddr VAddr, Uint Number)
1070 {
1071          int    i, j;
1072         
1073         //Log_Debug("VirtMem", "MM_UnmapHWPages: (VAddr=0x%08x, Number=%i)", VAddr, Number);
1074         
1075         // Sanity Check
1076         if(VAddr < HW_MAP_ADDR || VAddr+Number*0x1000 > HW_MAP_MAX)     return;
1077         
1078         i = VAddr >> 12;
1079         
1080         Mutex_Acquire( &glTempMappings );       // Temp and HW share a directory, so they share a lock
1081         
1082         for( j = 0; j < Number; j++ )
1083         {
1084                 MM_DerefPhys( gaPageTable[ i + j ] & ~0xFFF );
1085                 gaPageTable[ i + j ] = 0;
1086         }
1087         
1088         Mutex_Release( &glTempMappings );
1089 }
1090
1091 // --- EXPORTS ---
1092 EXPORT(MM_GetPhysAddr);
1093 EXPORT(MM_Map);
1094 //EXPORT(MM_Unmap);
1095 EXPORT(MM_MapHWPages);
1096 EXPORT(MM_AllocDMA);
1097 EXPORT(MM_UnmapHWPages);
