/*
 * AcessOS Microkernel Version
 * mm_virt.c
 * 
 * Memory Map
 * 0xE0 - Kernel Base
 * 0xF0 - Kernel Stacks
 * 0xFD - Fractals
 * 0xFE - Unused
 * 0xFF - System Calls / Kernel's User Code
 */
#define DEBUG   0
#define SANITY  1
#include <acess.h>
#include <mm_virt.h>
#include <mm_phys.h>
#include <proc.h>

#if USE_PAE
# define TAB    21
# define DIR    30
#else
# define TAB    22
#endif

#define KERNEL_STACKS           0xF0000000
#define KERNEL_STACK_SIZE       0x00008000
#define KERNEL_STACKS_END       0xFC000000
#define WORKER_STACKS           0x00100000      // Thread0 Only!
#define WORKER_STACK_SIZE       KERNEL_STACK_SIZE
#define WORKER_STACKS_END       0xB0000000
#define NUM_WORKER_STACKS       ((WORKER_STACKS_END-WORKER_STACKS)/WORKER_STACK_SIZE)

#define PAE_PAGE_TABLE_ADDR     0xFC000000      // 16 MiB
#define PAE_PAGE_DIR_ADDR       0xFCFC0000      // 16 KiB
#define PAE_PAGE_PDPT_ADDR      0xFCFC3F00      // 32 bytes
#define PAE_TMP_PDPT_ADDR       0xFCFC3F20      // 32 bytes
#define PAE_TMP_DIR_ADDR        0xFCFE0000      // 16 KiB
#define PAE_TMP_TABLE_ADDR      0xFD000000      // 16 MiB

#define PAGE_TABLE_ADDR 0xFC000000
#define PAGE_DIR_ADDR   0xFC3F0000
#define PAGE_CR3_ADDR   0xFC3F0FC0
#define TMP_CR3_ADDR    0xFC3F0FC4      // Part of core instead of temp
#define TMP_DIR_ADDR    0xFC3F1000      // Same
#define TMP_TABLE_ADDR  0xFC400000

#define HW_MAP_ADDR             0xFE000000
#define HW_MAP_MAX              0xFFEF0000
#define NUM_HW_PAGES    ((HW_MAP_MAX-HW_MAP_ADDR)/0x1000)
#define TEMP_MAP_ADDR   0xFFEF0000      // Allows 16 "temp" pages
#define NUM_TEMP_PAGES  16
#define LAST_BLOCK_ADDR 0xFFFF0000      // Free space for kernel provided user code/ *(-1) protection

#define PF_PRESENT      0x1
#define PF_WRITE        0x2
#define PF_USER         0x4
#define PF_GLOBAL       0x80
#define PF_COW          0x200
#define PF_NOPAGE       0x400

#define INVLPG(addr)    __asm__ __volatile__ ("invlpg (%0)"::"r"(addr))

#if USE_PAE
typedef Uint64  tTabEnt;
#else
typedef Uint32  tTabEnt;
#endif

// === IMPORTS ===
extern void     _UsertextEnd, _UsertextBase;
extern Uint32   gaInitPageDir[1024];
extern Uint32   gaInitPageTable[1024];
extern void     Threads_SegFault(tVAddr Addr);
extern void     Error_Backtrace(Uint eip, Uint ebp);

// === PROTOTYPES ===
void    MM_PreinitVirtual(void);
void    MM_InstallVirtual(void);
void    MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs);
void    MM_DumpTables(tVAddr Start, tVAddr End);
tVAddr  MM_ClearUser(void);
tPAddr  MM_DuplicatePage(tVAddr VAddr);

// === GLOBALS ===
#define gaPageTable     ((tTabEnt*)PAGE_TABLE_ADDR)
#define gaPageDir       ((tTabEnt*)PAGE_DIR_ADDR)
#define gaTmpTable      ((tTabEnt*)TMP_TABLE_ADDR)
#define gaTmpDir        ((tTabEnt*)TMP_DIR_ADDR)
#define gpPageCR3       ((tTabEnt*)PAGE_CR3_ADDR)
#define gpTmpCR3        ((tTabEnt*)TMP_CR3_ADDR)

#define gaPAE_PageTable ((tTabEnt*)PAE_PAGE_TABLE_ADDR)
#define gaPAE_PageDir   ((tTabEnt*)PAE_PAGE_DIR_ADDR)
#define gaPAE_MainPDPT  ((tTabEnt*)PAE_PAGE_PDPT_ADDR)
#define gaPAE_TmpTable  ((tTabEnt*)PAE_TMP_TABLE_ADDR)
#define gaPAE_TmpDir    ((tTabEnt*)PAE_TMP_DIR_ADDR)
#define gaPAE_TmpPDPT   ((tTabEnt*)PAE_TMP_PDPT_ADDR)
 int    gbUsePAE = 0;
tMutex  glTempMappings;
tMutex  glTempFractal;
Uint32  gWorkerStacks[(NUM_WORKER_STACKS+31)/32];
 int    giLastUsedWorker = 0;
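
/*
 * Illustrative note, not part of the original source: because the page
 * directory is mapped into itself (the "fractal" mapping set up in
 * MM_PreinitVirtual), every page table of the current address space appears
 * as one flat array of entries, so gaPageTable[VAddr>>12] is the PTE for
 * VAddr and gaPageDir[VAddr>>22] is its PDE.  A minimal sketch of the
 * non-PAE address decomposition used throughout this file (the helper name
 * is hypothetical):
 */
#if 0
void MM_ExampleDecompose(tVAddr VAddr)
{
        Uint    dirIndex = VAddr >> 22;            // Top 10 bits - index into gaPageDir
        Uint    tblIndex = (VAddr >> 12) & 0x3FF;  // Next 10 bits - entry within that table
        Uint    offset   = VAddr & 0xFFF;          // Low 12 bits - byte offset in the page
        // gaPageTable[VAddr>>12] is exactly "table dirIndex, entry tblIndex"
        Log("%p -> PDE %i, PTE %i, offset 0x%x", VAddr, dirIndex, tblIndex, offset);
}
#endif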

// === CODE ===
/**
 * \fn void MM_PreinitVirtual(void)
 * \brief Maps the fractal mappings
 */
void MM_PreinitVirtual(void)
{
        #if USE_PAE
        gaInitPageDir[ ((PAGE_TABLE_ADDR >> TAB)-3*512+3)*2 ] = ((tTabEnt)&gaInitPageDir - KERNEL_BASE) | 3;
        #else
        gaInitPageDir[ PAGE_TABLE_ADDR >> 22 ] = ((tTabEnt)&gaInitPageDir - KERNEL_BASE) | 3;
        #endif
        INVLPG( PAGE_TABLE_ADDR );
}

/**
 * \fn void MM_InstallVirtual(void)
 * \brief Sets up the constant page mappings
 */
void MM_InstallVirtual(void)
{
         int    i;

        #if USE_PAE
        // --- Pre-Allocate kernel tables
        for( i = KERNEL_BASE >> TAB; i < 1024*4; i ++ )
        {
                if( gaPAE_PageDir[ i ] )        continue;

                // Skip stack tables, they are process unique
                if( i > KERNEL_STACKS >> TAB && i < KERNEL_STACKS_END >> TAB) {
                        gaPAE_PageDir[ i ] = 0;
                        continue;
                }
                // Preallocate table
                gaPAE_PageDir[ i ] = MM_AllocPhys() | 3;
                INVLPG( &gaPAE_PageTable[i*512] );
                memset( &gaPAE_PageTable[i*512], 0, 0x1000 );
        }
        #else
        // --- Pre-Allocate kernel tables
        for( i = KERNEL_BASE>>22; i < 1024; i ++ )
        {
                if( gaPageDir[ i ] )    continue;
                // Skip stack tables, they are process unique
                if( i > KERNEL_STACKS >> 22 && i < KERNEL_STACKS_END >> 22) {
                        gaPageDir[ i ] = 0;
                        continue;
                }
                // Preallocate table
                gaPageDir[ i ] = MM_AllocPhys() | 3;
                INVLPG( &gaPageTable[i*1024] );
                memset( &gaPageTable[i*1024], 0, 0x1000 );
        }
        #endif

        // Unset kernel on the User Text pages
        for( i = ((tVAddr)&_UsertextEnd-(tVAddr)&_UsertextBase+0xFFF)/4096; i--; ) {
                MM_SetFlags( (tVAddr)&_UsertextBase + i*4096, 0, MM_PFLAG_KERNEL );
        }
}

/**
 * \brief Cleans up the SMP required mappings
 */
void MM_FinishVirtualInit(void)
{
        #if USE_PAE
        gaInitPDPT[ 0 ] = 0;
        #else
        gaInitPageDir[ 0 ] = 0;
        #endif
}

/**
 * \fn void MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs)
 * \brief Called on a page fault
 */
void MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs)
{
        //ENTER("xAddr bErrorCode", Addr, ErrorCode);

        // -- Check for COW --
        if( gaPageDir  [Addr>>22] & PF_PRESENT  && gaPageTable[Addr>>12] & PF_PRESENT
         && gaPageTable[Addr>>12] & PF_COW )
        {
                tPAddr  paddr;
                if(MM_GetRefCount( gaPageTable[Addr>>12] & ~0xFFF ) == 1)
                {
                        gaPageTable[Addr>>12] &= ~PF_COW;
                        gaPageTable[Addr>>12] |= PF_PRESENT|PF_WRITE;
                }
                else
                {
                        //Log("MM_PageFault: COW - MM_DuplicatePage(0x%x)", Addr);
                        paddr = MM_DuplicatePage( Addr );
                        MM_DerefPhys( gaPageTable[Addr>>12] & ~0xFFF );
                        gaPageTable[Addr>>12] &= PF_USER;
                        gaPageTable[Addr>>12] |= paddr|PF_PRESENT|PF_WRITE;
                }

                INVLPG( Addr & ~0xFFF );
                return;
        }

        // If it was a user, tell the thread handler
        if(ErrorCode & 4) {
                Warning("%s %s %s memory%s",
                        (ErrorCode&4?"User":"Kernel"),
                        (ErrorCode&2?"write to":"read from"),
                        (ErrorCode&1?"bad/locked":"non-present"),
                        (ErrorCode&16?" (Instruction Fetch)":"")
                        );
                Warning("User Pagefault: Instruction at %04x:%08x accessed %p", Regs->cs, Regs->eip, Addr);
                __asm__ __volatile__ ("sti");   // Restart IRQs
                #if 1
                Error_Backtrace(Regs->eip, Regs->ebp);
                #endif
                Threads_SegFault(Addr);
                return ;
        }

        Debug_KernelPanic();

        // -- Check Error Code --
        if(ErrorCode & 8)
                Warning("Reserved Bits Trashed!");
        else
        {
                Warning("%s %s %s memory%s",
                        (ErrorCode&4?"User":"Kernel"),
                        (ErrorCode&2?"write to":"read from"),
                        (ErrorCode&1?"bad/locked":"non-present"),
                        (ErrorCode&16?" (Instruction Fetch)":"")
                        );
        }

        Log("Code at %p accessed %p", Regs->eip, Addr);
        // Print Stack Backtrace
        Error_Backtrace(Regs->eip, Regs->ebp);

        Log("gaPageDir[0x%x] = 0x%x", Addr>>22, gaPageDir[Addr>>22]);
        if( gaPageDir[Addr>>22] & PF_PRESENT )
                Log("gaPageTable[0x%x] = 0x%x", Addr>>12, gaPageTable[Addr>>12]);

        //MM_DumpTables(0, -1);

        // Register Dump
        Log("EAX %08x ECX %08x EDX %08x EBX %08x", Regs->eax, Regs->ecx, Regs->edx, Regs->ebx);
        Log("ESP %08x EBP %08x ESI %08x EDI %08x", Regs->esp, Regs->ebp, Regs->esi, Regs->edi);
        //Log("SS:ESP %04x:%08x", Regs->ss, Regs->esp);
        Log("CS:EIP %04x:%08x", Regs->cs, Regs->eip);
        Log("DS %04x ES %04x FS %04x GS %04x", Regs->ds, Regs->es, Regs->fs, Regs->gs);
        {
                Uint    dr0, dr1;
                __ASM__ ("mov %%dr0, %0":"=r"(dr0):);
                __ASM__ ("mov %%dr1, %0":"=r"(dr1):);
                Log("DR0 %08x DR1 %08x", dr0, dr1);
        }

        Panic("Page Fault at 0x%x (Accessed 0x%x)", Regs->eip, Addr);
}
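
/*
 * Illustrative sketch, not part of the original source: how a page reaches
 * the COW branch above.  MM_Clone() marks writable user pages PF_COW; the
 * same state can be set by hand with MM_SetFlags(), and the next write is
 * what lands in MM_PageFault().  The address below is hypothetical.
 */
#if 0
        MM_SetFlags( someUserPage, MM_PFLAG_COW, MM_PFLAG_COW );  // Clears PF_WRITE, sets PF_COW
        // The next write faults; MM_PageFault() either restores PF_WRITE
        // (refcount == 1) or copies the frame via MM_DuplicatePage()
        *(volatile Uint*)someUserPage = 0;
#endif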

/**
 * \fn void MM_DumpTables(tVAddr Start, tVAddr End)
 * \brief Dumps the layout of the page tables
 */
void MM_DumpTables(tVAddr Start, tVAddr End)
{
        tVAddr  rangeStart = 0;
        tPAddr  expected = 0;
        tVAddr  curPos;
        Uint    page;
        const tPAddr    MASK = ~0xF78;

        Start >>= 12;   End >>= 12;

        #if 0
        Log("Directory Entries:");
        for(page = Start >> 10;
                page < (End >> 10)+1;
                page ++)
        {
                if(gaPageDir[page])
                {
                        Log(" 0x%08x-0x%08x :: 0x%08x",
                                page<<22, ((page+1)<<22)-1,
                                gaPageDir[page]&~0xFFF
                                );
                }
        }
        #endif

        Log("Table Entries:");
        for(page = Start, curPos = Start<<12;
                page < End;
                curPos += 0x1000, page++)
        {
                if( !(gaPageDir[curPos>>22] & PF_PRESENT)
                ||  !(gaPageTable[page] & PF_PRESENT)
                ||  (gaPageTable[page] & MASK) != expected)
                {
                        if(expected) {
                                Log(" 0x%08x => 0x%08x - 0x%08x (%s%s%s%s%s)",
                                        rangeStart,
                                        gaPageTable[rangeStart>>12] & ~0xFFF,
                                        curPos - rangeStart,
                                        (expected & PF_NOPAGE ? "P" : "-"),
                                        (expected & PF_COW ? "C" : "-"),
                                        (expected & PF_GLOBAL ? "G" : "-"),
                                        (expected & PF_USER ? "U" : "-"),
                                        (expected & PF_WRITE ? "W" : "-")
                                        );
                                expected = 0;
                        }
                        if( !(gaPageDir[curPos>>22] & PF_PRESENT) )     continue;
                        if( !(gaPageTable[curPos>>12] & PF_PRESENT) )   continue;

                        expected = (gaPageTable[page] & MASK);
                        rangeStart = curPos;
                }
                if(expected)    expected += 0x1000;
        }

        if(expected) {
                Log(" 0x%08x => 0x%08x - 0x%08x (%s%s%s%s%s)",
                        rangeStart,
                        gaPageTable[rangeStart>>12] & ~0xFFF,
                        curPos - rangeStart,
                        (expected & PF_NOPAGE ? "P" : "-"),
                        (expected & PF_COW ? "C" : "-"),
                        (expected & PF_GLOBAL ? "G" : "-"),
                        (expected & PF_USER ? "U" : "-"),
                        (expected & PF_WRITE ? "W" : "-")
                        );
                expected = 0;
        }
}
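
/*
 * Illustrative usage, not part of the original source: statements as they
 * might appear in a debugging path, dumping the user half and then the
 * kernel half of the current address space.
 */
#if 0
        MM_DumpTables( 0, MM_USER_MAX );           // User mappings
        MM_DumpTables( KERNEL_BASE, (tVAddr)-1 );  // Kernel mappings
#endif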

/**
 * \fn tPAddr MM_Allocate(tVAddr VAddr)
 */
tPAddr MM_Allocate(tVAddr VAddr)
{
        tPAddr  paddr;
        //ENTER("xVAddr", VAddr);
        //__asm__ __volatile__ ("xchg %bx,%bx");
        // Check if the directory is mapped
        if( gaPageDir[ VAddr >> 22 ] == 0 )
        {
                // Allocate directory
                paddr = MM_AllocPhys();
                if( paddr == 0 ) {
                        Warning("MM_Allocate - Out of Memory (Called by %p)", __builtin_return_address(0));
                        //LEAVE('i',0);
                        return 0;
                }
                // Map and mark as user (if needed)
                gaPageDir[ VAddr >> 22 ] = paddr | 3;
                if(VAddr < MM_USER_MAX) gaPageDir[ VAddr >> 22 ] |= PF_USER;

                INVLPG( &gaPageDir[ VAddr >> 22 ] );
                memsetd( &gaPageTable[ (VAddr >> 12) & ~0x3FF ], 0, 1024 );
        }
        // Check if the page is already allocated
        else if( gaPageTable[ VAddr >> 12 ] != 0 ) {
                Warning("MM_Allocate - Allocating to used address (%p)", VAddr);
                //LEAVE('X', gaPageTable[ VAddr >> 12 ] & ~0xFFF);
                return gaPageTable[ VAddr >> 12 ] & ~0xFFF;
        }

        // Allocate
        paddr = MM_AllocPhys();
        //LOG("paddr = 0x%llx", paddr);
        if( paddr == 0 ) {
                Warning("MM_Allocate - Out of Memory when allocating at %p (Called by %p)",
                        VAddr, __builtin_return_address(0));
                //LEAVE('i',0);
                return 0;
        }
        // Map
        gaPageTable[ VAddr >> 12 ] = paddr | 3;
        // Mark as user
        if(VAddr < MM_USER_MAX) gaPageTable[ VAddr >> 12 ] |= PF_USER;
        // Invalidate Cache for address
        INVLPG( VAddr & ~0xFFF );

        //LEAVE('X', paddr);
        return paddr;
}
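
/*
 * Illustrative usage, not part of the original source: backing one virtual
 * page with a fresh frame, then releasing it.  The address is arbitrary and
 * chosen only for the sketch.
 */
#if 0
        tVAddr  addr = 0x10000000;      // hypothetical page-aligned address
        if( MM_Allocate(addr) == 0 )
                Warning("Example - out of memory");
        else {
                memset( (void*)addr, 0, 0x1000 );       // Page is now usable
                MM_Deallocate(addr);    // Drop the reference and clear the PTE
        }
#endif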

/**
 * \fn void MM_Deallocate(tVAddr VAddr)
 */
void MM_Deallocate(tVAddr VAddr)
{
        if( gaPageDir[ VAddr >> 22 ] == 0 ) {
                Warning("MM_Deallocate - Directory not mapped");
                return;
        }

        if(gaPageTable[ VAddr >> 12 ] == 0) {
                Warning("MM_Deallocate - Page is not allocated");
                return;
        }

        // Dereference page
        MM_DerefPhys( gaPageTable[ VAddr >> 12 ] & ~0xFFF );
        // Clear page
        gaPageTable[ VAddr >> 12 ] = 0;
}

/**
 * \fn tPAddr MM_GetPhysAddr(tVAddr Addr)
 * \brief Gets the physical address mapped at a virtual address (returns 0 if it is not accessible)
 */
tPAddr MM_GetPhysAddr(tVAddr Addr)
{
        if( !(gaPageDir[Addr >> 22] & 1) )
                return 0;
        if( !(gaPageTable[Addr >> 12] & 1) )
                return 0;
        return (gaPageTable[Addr >> 12] & ~0xFFF) | (Addr & 0xFFF);
}
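
/*
 * Illustrative usage, not part of the original source: MM_GetPhysAddr()
 * doubles as a cheap "is this mapped?" check, since it returns 0 whenever
 * the directory or table entry is not present.  `ptr` is hypothetical.
 */
#if 0
        if( MM_GetPhysAddr( (tVAddr)ptr ) == 0 )
                Warning("Example - %p is not mapped", ptr);
#endif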

/**
 * \fn void MM_SetCR3(Uint CR3)
 * \brief Sets the current process space
 */
void MM_SetCR3(Uint CR3)
{
        __asm__ __volatile__ ("mov %0, %%cr3"::"r"(CR3));
}

/**
 * \fn int MM_Map(tVAddr VAddr, tPAddr PAddr)
 * \brief Map a physical page to a virtual one
 */
int MM_Map(tVAddr VAddr, tPAddr PAddr)
{
        //ENTER("xVAddr xPAddr", VAddr, PAddr);
        // Sanity check
        if( PAddr & 0xFFF || VAddr & 0xFFF ) {
                Warning("MM_Map - Physical or Virtual Addresses are not aligned");
                //LEAVE('i', 0);
                return 0;
        }

        // Align addresses
        PAddr &= ~0xFFF;        VAddr &= ~0xFFF;

        // Check if the directory is mapped
        if( gaPageDir[ VAddr >> 22 ] == 0 )
        {
                gaPageDir[ VAddr >> 22 ] = MM_AllocPhys() | 3;

                // Mark as user
                if(VAddr < MM_USER_MAX) gaPageDir[ VAddr >> 22 ] |= PF_USER;

                INVLPG( &gaPageTable[ (VAddr >> 12) & ~0x3FF ] );
                memsetd( &gaPageTable[ (VAddr >> 12) & ~0x3FF ], 0, 1024 );
        }
        // Check if the page is already allocated
        else if( gaPageTable[ VAddr >> 12 ] != 0 ) {
                Warning("MM_Map - Allocating to used address");
                //LEAVE('i', 0);
                return 0;
        }

        // Map
        gaPageTable[ VAddr >> 12 ] = PAddr | 3;
        // Mark as user
        if(VAddr < MM_USER_MAX) gaPageTable[ VAddr >> 12 ] |= PF_USER;

        //LOG("gaPageTable[ 0x%x ] = (Uint)%p = 0x%x",
        //      VAddr >> 12, &gaPageTable[ VAddr >> 12 ], gaPageTable[ VAddr >> 12 ]);

        // Reference
        MM_RefPhys( PAddr );

        //LOG("INVLPG( 0x%x )", VAddr);
        INVLPG( VAddr );

        //LEAVE('i', 1);
        return 1;
}
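
/*
 * Illustrative usage, not part of the original source: placing a known
 * physical frame at a chosen virtual address.  Note that MM_Map() takes its
 * own reference on the frame via MM_RefPhys(); whether the caller should
 * then drop the reference it got from MM_AllocPhys() depends on the physical
 * allocator's conventions (mm_phys.c), not on this file.
 */
#if 0
        tPAddr  frame = MM_AllocPhys();
        if( frame && !MM_Map( 0x20000000, frame ) )     // 0x20000000 is an arbitrary example address
                MM_DerefPhys(frame);    // Mapping failed, give the frame back
#endif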

/**
 * \fn tVAddr MM_ClearUser()
 * \brief Clear user's address space
 */
tVAddr MM_ClearUser(void)
{
        Uint    i, j;

        for( i = 0; i < (MM_USER_MAX>>22); i ++ )
        {
                // Check if directory is not allocated
                if( !(gaPageDir[i] & PF_PRESENT) ) {
                        gaPageDir[i] = 0;
                        continue;
                }

                // Deallocate tables
                for( j = 0; j < 1024; j ++ )
                {
                        if( gaPageTable[i*1024+j] & 1 )
                                MM_DerefPhys( gaPageTable[i*1024+j] & ~0xFFF );
                        gaPageTable[i*1024+j] = 0;
                }

                // Deallocate directory
                MM_DerefPhys( gaPageDir[i] & ~0xFFF );
                gaPageDir[i] = 0;
                INVLPG( &gaPageTable[i*1024] );
        }
        INVLPG( gaPageDir );

        return *gpPageCR3;
}

/**
 * \fn tPAddr MM_Clone(void)
 * \brief Clone the current address space
 */
tPAddr MM_Clone(void)
{
        Uint    i, j;
        tVAddr  ret;
        Uint    page = 0;
        tVAddr  kStackBase = Proc_GetCurThread()->KernelStack - KERNEL_STACK_SIZE;
        void    *tmp;

        Mutex_Acquire( &glTempFractal );

        // Create Directory Table
        *gpTmpCR3 = MM_AllocPhys() | 3;
        INVLPG( gaTmpDir );
        //LOG("Allocated Directory (%x)", *gpTmpCR3);
        memsetd( gaTmpDir, 0, 1024 );

        if( Threads_GetPID() != 0 )
        {
                // Copy Tables
                for( i = 0; i < 768; i ++)
                {
                        // Check if table is allocated
                        if( !(gaPageDir[i] & PF_PRESENT) ) {
                                gaTmpDir[i] = 0;
                                page += 1024;
                                continue;
                        }

                        // Allocate new table
                        gaTmpDir[i] = MM_AllocPhys() | (gaPageDir[i] & 7);
                        INVLPG( &gaTmpTable[page] );
                        // Fill
                        for( j = 0; j < 1024; j ++, page++ )
                        {
                                if( !(gaPageTable[page] & PF_PRESENT) ) {
                                        gaTmpTable[page] = 0;
                                        continue;
                                }

                                // Reference old page
                                MM_RefPhys( gaPageTable[page] & ~0xFFF );
                                // Add to new table
                                if(gaPageTable[page] & PF_WRITE) {
                                        gaTmpTable[page] = (gaPageTable[page] & ~PF_WRITE) | PF_COW;
                                        gaPageTable[page] = (gaPageTable[page] & ~PF_WRITE) | PF_COW;
                                        INVLPG( page << 12 );
                                }
                                else
                                        gaTmpTable[page] = gaPageTable[page];
                        }
                }
        }

        // Map in kernel tables (and make fractal mapping)
        for( i = 768; i < 1024; i ++ )
        {
                // Fractal
                if( i == (PAGE_TABLE_ADDR >> 22) ) {
                        gaTmpDir[ PAGE_TABLE_ADDR >> 22 ] = *gpTmpCR3;
                        continue;
                }

                if( gaPageDir[i] == 0 ) {
                        gaTmpDir[i] = 0;
                        continue;
                }

                //LOG("gaPageDir[%x/4] = 0x%x", i*4, gaPageDir[i]);
                MM_RefPhys( gaPageDir[i] & ~0xFFF );
                gaTmpDir[i] = gaPageDir[i];
        }

        // Allocate kernel stack
        for(i = KERNEL_STACKS >> 22;
                i < KERNEL_STACKS_END >> 22;
                i ++ )
        {
                // Check if directory is allocated
                if( (gaPageDir[i] & 1) == 0 ) {
                        gaTmpDir[i] = 0;
                        continue;
                }

                // We don't care about other kernel stacks, just the current one
                if( i != kStackBase >> 22 ) {
                        MM_DerefPhys( gaPageDir[i] & ~0xFFF );
                        gaTmpDir[i] = 0;
                        continue;
                }

                // Create a copy
                gaTmpDir[i] = MM_AllocPhys() | 3;
                INVLPG( &gaTmpTable[i*1024] );
                for( j = 0; j < 1024; j ++ )
                {
                        // Is the page allocated? If not, skip
                        if( !(gaPageTable[i*1024+j] & 1) ) {
                                gaTmpTable[i*1024+j] = 0;
                                continue;
                        }

                        // We don't care about other kernel stacks
                        if( ((i*1024+j)*4096 & ~(KERNEL_STACK_SIZE-1)) != kStackBase ) {
                                gaTmpTable[i*1024+j] = 0;
                                continue;
                        }

                        // Allocate page
                        gaTmpTable[i*1024+j] = MM_AllocPhys() | 3;

                        MM_RefPhys( gaTmpTable[i*1024+j] & ~0xFFF );

                        tmp = (void *) MM_MapTemp( gaTmpTable[i*1024+j] & ~0xFFF );
                        memcpy( tmp, (void *)( (i*1024+j)*0x1000 ), 0x1000 );
                        MM_FreeTemp( (Uint)tmp );
                }
        }

        ret = *gpTmpCR3 & ~0xFFF;
        Mutex_Release( &glTempFractal );

        //LEAVE('x', ret);
        return ret;
}
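
/*
 * Illustrative sketch, not part of the original source: MM_Clone() returns a
 * physical page-directory address suitable for CR3.  Actual process creation
 * lives in proc.c, but at this level the switch would look roughly like:
 */
#if 0
        tPAddr  newCR3 = MM_Clone();
        if( newCR3 != 0 )
                MM_SetCR3( (Uint)newCR3 );      // Move this CPU onto the cloned address space
#endif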

/**
 * \fn tVAddr MM_NewKStack(void)
 * \brief Create a new kernel stack
 */
tVAddr MM_NewKStack(void)
{
        tVAddr  base;
        Uint    i;
        for(base = KERNEL_STACKS; base < KERNEL_STACKS_END; base += KERNEL_STACK_SIZE)
        {
                // Check if space is free
                if(MM_GetPhysAddr(base) != 0)   continue;
                // Allocate
                //for(i = KERNEL_STACK_SIZE; i -= 0x1000 ; )
                for(i = 0; i < KERNEL_STACK_SIZE; i += 0x1000 )
                {
                        if( MM_Allocate(base+i) == 0 )
                        {
                                // On error, print a warning and return error
                                Warning("MM_NewKStack - Out of memory");
                                // - Clean up
                                //for( i += 0x1000 ; i < KERNEL_STACK_SIZE; i += 0x1000 )
                                //      MM_Deallocate(base+i);
                                return 0;
                        }
                }
                // Success
                Log("MM_NewKStack - Allocated %p", base + KERNEL_STACK_SIZE);
                return base+KERNEL_STACK_SIZE;
        }
        // No stacks left
        Warning("MM_NewKStack - No address space left");
        return 0;
}
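
/*
 * Illustrative usage, not part of the original source: MM_NewKStack()
 * returns the *top* of the new stack (base + KERNEL_STACK_SIZE), i.e. the
 * value a new thread's initial ESP would be derived from.
 */
#if 0
        tVAddr  stackTop = MM_NewKStack();
        if( stackTop == 0 )
                Warning("Example - no kernel stack available");
        // The first usable word sits just below `stackTop`
#endif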

/**
 * \fn tVAddr MM_NewWorkerStack()
 * \brief Creates a new worker stack
 */
tVAddr MM_NewWorkerStack()
{
        Uint    esp, ebp;
        Uint    oldstack;
        Uint    base, addr;
         int    i, j;
        Uint    *tmpPage;
        tPAddr  pages[WORKER_STACK_SIZE>>12];

        // Get the old ESP and EBP
        __asm__ __volatile__ ("mov %%esp, %0": "=r"(esp));
        __asm__ __volatile__ ("mov %%ebp, %0": "=r"(ebp));

        // TODO: Thread safety
        // Find a free worker stack address
        for(base = giLastUsedWorker; base < NUM_WORKER_STACKS; base++)
        {
                // Used block
                if( gWorkerStacks[base/32] == -1 ) {
                        base += 31;     base &= ~31;
                        base --;        // Counteracted by the base++
                        continue;
                }
                // Used stack
                if( gWorkerStacks[base/32] & (1 << (base & 31)) ) {
                        continue;
                }
                break;
        }
        if(base >= NUM_WORKER_STACKS) {
                Warning("Uh-oh! Out of worker stacks");
                return 0;
        }

        // It's ours now!
        gWorkerStacks[base/32] |= (1 << (base & 31));
        // Make life easier for later calls
        giLastUsedWorker = base;
        // We have one
        base = WORKER_STACKS + base * WORKER_STACK_SIZE;
        //Log(" MM_NewWorkerStack: base = 0x%x", base);

        // Acquire the lock for the temp fractal mappings
        Mutex_Acquire(&glTempFractal);

        // Set the temp fractals to TID0's address space
        *gpTmpCR3 = ((Uint)gaInitPageDir - KERNEL_BASE) | 3;
        //Log(" MM_NewWorkerStack: *gpTmpCR3 = 0x%x", *gpTmpCR3);
        INVLPG( gaTmpDir );

        // Check if the directory is mapped (we are assuming that the stacks
        // will fit neatly in a directory)
        //Log(" MM_NewWorkerStack: gaTmpDir[ 0x%x ] = 0x%x", base>>22, gaTmpDir[ base >> 22 ]);
        if(gaTmpDir[ base >> 22 ] == 0) {
                gaTmpDir[ base >> 22 ] = MM_AllocPhys() | 3;
                INVLPG( &gaTmpTable[ (base>>12) & ~0x3FF ] );
        }

        // Mapping Time!
        for( addr = 0; addr < WORKER_STACK_SIZE; addr += 0x1000 )
        //for( addr = WORKER_STACK_SIZE; addr; addr -= 0x1000 )
        {
                pages[ addr >> 12 ] = MM_AllocPhys();
                gaTmpTable[ (base + addr) >> 12 ] = pages[addr>>12] | 3;
        }
        *gpTmpCR3 = 0;
        // Release the temp mapping lock
        Mutex_Release(&glTempFractal);

        // Copy the old stack
        oldstack = (esp + KERNEL_STACK_SIZE-1) & ~(KERNEL_STACK_SIZE-1);
        esp = oldstack - esp;   // ESP as an offset in the stack

        // Make `base` be the top of the stack
        base += WORKER_STACK_SIZE;

        i = (WORKER_STACK_SIZE>>12) - 1;
        // Copy the contents of the old stack to the new one, altering the addresses
        // `addr` is referring to bytes from the stack base (mem downwards)
        for(addr = 0; addr < esp; addr += 0x1000)
        {
                Uint    *stack = (Uint*)( oldstack-(addr+0x1000) );
                tmpPage = (void*)MM_MapTemp( pages[i] );
                // Copy old stack
                for(j = 0; j < 1024; j++)
                {
                        // Possible Stack address?
                        if(oldstack-esp < stack[j] && stack[j] < oldstack)
                                tmpPage[j] = base - (oldstack - stack[j]);
                        else    // Seems not, best leave it alone
                                tmpPage[j] = stack[j];
                }
                MM_FreeTemp((tVAddr)tmpPage);
                i --;
        }

        //Log("MM_NewWorkerStack: RETURN 0x%x", base);
        return base;
}

/**
 * \fn void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask)
 * \brief Sets the flags on a page
 */
void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask)
{
        tTabEnt *ent;
        if( !(gaPageDir[VAddr >> 22] & 1) )     return ;
        if( !(gaPageTable[VAddr >> 12] & 1) )   return ;

        ent = &gaPageTable[VAddr >> 12];

        // Read-Only
        if( Mask & MM_PFLAG_RO )
        {
                if( Flags & MM_PFLAG_RO ) {
                        *ent &= ~PF_WRITE;
                }
                else {
                        gaPageDir[VAddr >> 22] |= PF_WRITE;
                        *ent |= PF_WRITE;
                }
        }

        // Kernel
        if( Mask & MM_PFLAG_KERNEL )
        {
                if( Flags & MM_PFLAG_KERNEL ) {
                        *ent &= ~PF_USER;
                }
                else {
                        gaPageDir[VAddr >> 22] |= PF_USER;
                        *ent |= PF_USER;
                }
        }

        // Copy-On-Write
        if( Mask & MM_PFLAG_COW )
        {
                if( Flags & MM_PFLAG_COW ) {
                        *ent &= ~PF_WRITE;
                        *ent |= PF_COW;
                }
                else {
                        *ent &= ~PF_COW;
                        *ent |= PF_WRITE;
                }
        }

        //Log("MM_SetFlags: *ent = 0x%08x, gaPageDir[%i] = 0x%08x",
        //      *ent, VAddr >> 22, gaPageDir[VAddr >> 22]);
}
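
/*
 * Illustrative usage, not part of the original source: MM_SetFlags() only
 * touches the bits selected by Mask, so each flag can be changed on its own.
 * The address below is hypothetical.
 */
#if 0
        MM_SetFlags( 0x30000000, MM_PFLAG_RO, MM_PFLAG_RO );    // Make the page read-only
        MM_SetFlags( 0x30000000, 0, MM_PFLAG_RO );              // Make it writable again
        Log("Flags = 0x%x", MM_GetFlags(0x30000000));           // Query the result
#endif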

/**
 * \brief Get the flags on a page
 */
Uint MM_GetFlags(tVAddr VAddr)
{
        tTabEnt *ent;
        Uint    ret = 0;

        // Validity Check
        if( !(gaPageDir[VAddr >> 22] & 1) )     return 0;
        if( !(gaPageTable[VAddr >> 12] & 1) )   return 0;

        ent = &gaPageTable[VAddr >> 12];

        // Read-Only
        if( !(*ent & PF_WRITE) )        ret |= MM_PFLAG_RO;
        // Kernel
        if( !(*ent & PF_USER) ) ret |= MM_PFLAG_KERNEL;
        // Copy-On-Write
        if( *ent & PF_COW )     ret |= MM_PFLAG_COW;

        return ret;
}

/**
 * \fn tPAddr MM_DuplicatePage(tVAddr VAddr)
 * \brief Duplicates a virtual page to a physical one
 */
tPAddr MM_DuplicatePage(tVAddr VAddr)
{
        tPAddr  ret;
        Uint    temp;
         int    wasRO = 0;

        //ENTER("xVAddr", VAddr);

        // Check if mapped
        if( !(gaPageDir  [VAddr >> 22] & PF_PRESENT) )  return 0;
        if( !(gaPageTable[VAddr >> 12] & PF_PRESENT) )  return 0;

        // Page Align
        VAddr &= ~0xFFF;

        // Allocate new page
        ret = MM_AllocPhys();

        // Write-lock the page (to keep data consistent), saving its R/W state
        wasRO = (gaPageTable[VAddr >> 12] & PF_WRITE ? 0 : 1);
        gaPageTable[VAddr >> 12] &= ~PF_WRITE;
        INVLPG( VAddr );

        // Copy Data
        temp = MM_MapTemp(ret);
        memcpy( (void*)temp, (void*)VAddr, 0x1000 );
        MM_FreeTemp(temp);

        // Restore writable status
        if(!wasRO)      gaPageTable[VAddr >> 12] |= PF_WRITE;
        INVLPG(VAddr);

        //LEAVE('X', ret);
        return ret;
}

/**
 * \fn tVAddr MM_MapTemp(tPAddr PAddr)
 * \brief Create a temporary memory mapping
 * \todo Show Luigi Barone (C Lecturer) and see what he thinks
 */
tVAddr MM_MapTemp(tPAddr PAddr)
{
         int    i;

        //ENTER("XPAddr", PAddr);

        PAddr &= ~0xFFF;

        //LOG("glTempMappings = %i", glTempMappings);

        for(;;)
        {
                Mutex_Acquire( &glTempMappings );

                for( i = 0; i < NUM_TEMP_PAGES; i ++ )
                {
                        // Check if page used
                        if(gaPageTable[ (TEMP_MAP_ADDR >> 12) + i ] & 1)        continue;
                        // Mark as used
                        gaPageTable[ (TEMP_MAP_ADDR >> 12) + i ] = PAddr | 3;
                        INVLPG( TEMP_MAP_ADDR + (i << 12) );
                        //LEAVE('p', TEMP_MAP_ADDR + (i << 12));
                        Mutex_Release( &glTempMappings );
                        return TEMP_MAP_ADDR + (i << 12);
                }
                Mutex_Release( &glTempMappings );
                Threads_Yield();        // TODO: Use a sleep queue here instead
        }
}
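
/*
 * Illustrative sketch, not part of the original source: the usual
 * MM_MapTemp()/MM_FreeTemp() pairing, as used by MM_DuplicatePage() and
 * MM_Clone(), for touching a frame that has no permanent mapping.
 */
#if 0
        tPAddr  frame = MM_AllocPhys();
        tVAddr  tmp = MM_MapTemp(frame);        // Yields until a temp slot is free
        memset( (void*)tmp, 0, 0x1000 );        // The frame is now addressable through `tmp`
        MM_FreeTemp(tmp);                       // Release the slot; the frame itself stays allocated
#endif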

/**
 * \fn void MM_FreeTemp(tVAddr VAddr)
 * \brief Frees a temp mapping
 */
void MM_FreeTemp(tVAddr VAddr)
{
         int    i = VAddr >> 12;
        //ENTER("xVAddr", VAddr);

        if(i >= (TEMP_MAP_ADDR >> 12))
                gaPageTable[ i ] = 0;

        //LEAVE('-');
}

/**
 * \fn tVAddr MM_MapHWPages(tPAddr PAddr, Uint Number)
 * \brief Maps a contiguous run of physical pages into the hardware mapping area
 */
tVAddr MM_MapHWPages(tPAddr PAddr, Uint Number)
{
         int    i, j;

        PAddr &= ~0xFFF;

        // Scan List
        for( i = 0; i < NUM_HW_PAGES; i ++ )
        {
                // Check if addr used
                if( gaPageTable[ (HW_MAP_ADDR >> 12) + i ] & 1 )
                        continue;

                // Check possible region
                for( j = 0; j < Number && i + j < NUM_HW_PAGES; j ++ )
                {
                        // If there is an allocated page in the region we are testing, break
                        if( gaPageTable[ (HW_MAP_ADDR >> 12) + i + j ] & 1 )    break;
                }
                // Is it all free?
                if( j == Number )
                {
                        // Allocate
                        for( j = 0; j < Number; j++ ) {
                                MM_RefPhys( PAddr + (j<<12) );
                                gaPageTable[ (HW_MAP_ADDR >> 12) + i + j ] = (PAddr + (j<<12)) | 3;
                        }
                        return HW_MAP_ADDR + (i<<12);
                }
        }
        // If we don't find any, return NULL
        return 0;
}
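
/*
 * Illustrative usage, not part of the original source: mapping a
 * memory-mapped I/O window (the physical address is a placeholder, not a
 * real device) and unmapping it again.
 */
#if 0
        tVAddr  regs = MM_MapHWPages( 0xFEBF0000, 2 );  // hypothetical 8 KiB MMIO window
        if( regs ) {
                // ... access the device registers through `regs` ...
                MM_UnmapHWPages( regs, 2 );
        }
#endif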

/**
 * \fn tVAddr MM_AllocDMA(int Pages, int MaxBits, tPAddr *PhysAddr)
 * \brief Allocates DMA physical memory
 * \param Pages Number of pages required
 * \param MaxBits       Maximum number of bits the physical address can have
 * \param PhysAddr      Pointer to the location to place the physical address allocated
 * \return Virtual address allocated
 */
tVAddr MM_AllocDMA(int Pages, int MaxBits, tPAddr *PhysAddr)
{
        tPAddr  maxCheck = ((tPAddr)1 << MaxBits);
        tPAddr  phys;
        tVAddr  ret;

        ENTER("iPages iMaxBits pPhysAddr", Pages, MaxBits, PhysAddr);

        // Sanity Check
        if(MaxBits < 12 || !PhysAddr) {
                LEAVE('i', 0);
                return 0;
        }

        // Bound
        if(MaxBits >= PHYS_BITS)        maxCheck = -1;

        // Fast Allocate
        if(Pages == 1 && MaxBits >= PHYS_BITS)
        {
                phys = MM_AllocPhys();
                *PhysAddr = phys;
                ret = MM_MapHWPages(phys, 1);
                if(ret == 0) {
                        MM_DerefPhys(phys);
                        LEAVE('i', 0);
                        return 0;
                }
                LEAVE('x', ret);
                return ret;
        }

        // Slow Allocate
        phys = MM_AllocPhysRange(Pages, MaxBits);
        // - Was it allocated?
        if(phys == 0) {
                LEAVE('i', 0);
                return 0;
        }

        // Allocated successfully, now map
        ret = MM_MapHWPages(phys, Pages);
        if( ret == 0 ) {
                // If it didn't map, free then return 0
                for(;Pages--;phys+=0x1000)
                        MM_DerefPhys(phys);
                LEAVE('i', 0);
                return 0;
        }

        *PhysAddr = phys;
        LEAVE('x', ret);
        return ret;
}
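
/*
 * Illustrative usage, not part of the original source: a driver asking for a
 * physically contiguous buffer below 16 MiB (24-bit addressing, e.g. legacy
 * ISA DMA) and receiving both the virtual and physical addresses.
 */
#if 0
        tPAddr  physBuf;
        tVAddr  virtBuf = MM_AllocDMA( 4, 24, &physBuf );       // 4 pages, physical address < 2^24
        if( virtBuf == 0 )
                Warning("Example - DMA allocation failed");
        // The kernel writes through `virtBuf`; `physBuf` is what the device is programmed with
#endif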

/**
 * \fn void MM_UnmapHWPages(tVAddr VAddr, Uint Number)
 * \brief Unmap a hardware page
 */
void MM_UnmapHWPages(tVAddr VAddr, Uint Number)
{
         int    i, j;

        //Log_Debug("VirtMem", "MM_UnmapHWPages: (VAddr=0x%08x, Number=%i)", VAddr, Number);

        // Sanity Check
        if(VAddr < HW_MAP_ADDR || VAddr+Number*0x1000 > HW_MAP_MAX)     return;

        i = VAddr >> 12;

        Mutex_Acquire( &glTempMappings );       // Temp and HW share a directory, so they share a lock

        for( j = 0; j < Number; j++ )
        {
                MM_DerefPhys( gaPageTable[ i + j ] & ~0xFFF );
                gaPageTable[ i + j ] = 0;
        }

        Mutex_Release( &glTempMappings );
}

// --- EXPORTS ---
EXPORT(MM_GetPhysAddr);
EXPORT(MM_Map);
//EXPORT(MM_Unmap);
EXPORT(MM_MapHWPages);
EXPORT(MM_AllocDMA);
EXPORT(MM_UnmapHWPages);
