Kernel - Fixing very slow CheckMem function
[tpg/acess2.git] / Kernel / arch / x86 / mm_virt.c
1 /*
2  * AcessOS Microkernel Version
3  * mm_virt.c
4  * 
5  * Memory Map
6  * 0xC0 - Kernel Base
7  * 0xF0 - Kernel Stacks
8  * 0xFC - Fractal Mappings (page tables/directories)
9  * 0xFE - Hardware Mappings
10  * 0xFF - Temporary Mappings / System Calls / Kernel's User Code
11  */
12 #define DEBUG   0
13 #define SANITY  1
14 #include <acess.h>
15 #include <mm_virt.h>
16 #include <mm_phys.h>
17 #include <proc.h>
18 #include <hal_proc.h>
19
20 #define TAB     22
21
22 #define KERNEL_STACKS           0xF0000000
23 #define KERNEL_STACK_SIZE       0x00008000
24 #define KERNEL_STACKS_END       0xFC000000
25 #define WORKER_STACKS           0x00100000      // Thread0 Only!
26 #define WORKER_STACK_SIZE       KERNEL_STACK_SIZE
27 #define WORKER_STACKS_END       0xB0000000
28 #define NUM_WORKER_STACKS       ((WORKER_STACKS_END-WORKER_STACKS)/WORKER_STACK_SIZE)
29
30 #define PAE_PAGE_TABLE_ADDR     0xFC000000      // 16 MiB
31 #define PAE_PAGE_DIR_ADDR       0xFCFC0000      // 16 KiB
32 #define PAE_PAGE_PDPT_ADDR      0xFCFC3F00      // 32 bytes
33 #define PAE_TMP_PDPT_ADDR       0xFCFC3F20      // 32 bytes
34 #define PAE_TMP_DIR_ADDR        0xFCFE0000      // 16 KiB
35 #define PAE_TMP_TABLE_ADDR      0xFD000000      // 16 MiB
36
37 #define PAGE_TABLE_ADDR 0xFC000000
38 #define PAGE_DIR_ADDR   0xFC3F0000
39 #define PAGE_CR3_ADDR   0xFC3F0FC0
40 #define TMP_CR3_ADDR    0xFC3F0FC4      // Part of core instead of temp
41 #define TMP_DIR_ADDR    0xFC3F1000      // Same
42 #define TMP_TABLE_ADDR  0xFC400000
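// NOTE: These constants are all derived from the fractal (self-referencing)
// directory entries - the main one set up in MM_PreinitVirtual(), the temporary
// one set by writing *gpTmpCR3. Assuming the standard non-PAE 10/10/12 address
// split, the arithmetic works out as:
//   PAGE_DIR_ADDR = PAGE_TABLE_ADDR + (PAGE_TABLE_ADDR >> 22) * 0x1000 = 0xFC3F0000
//   PAGE_CR3_ADDR = PAGE_DIR_ADDR   + (PAGE_TABLE_ADDR >> 22) * 4      = 0xFC3F0FC0
//   TMP_CR3_ADDR  = PAGE_DIR_ADDR   + (TMP_TABLE_ADDR  >> 22) * 4      = 0xFC3F0FC4
//   TMP_DIR_ADDR  = PAGE_TABLE_ADDR + (TMP_TABLE_ADDR  >> 22) * 0x1000 = 0xFC3F1000
// i.e. writing a second page directory's physical address into directory slot
// 0x3F1 (via TMP_CR3_ADDR) exposes that address space's page tables at
// TMP_TABLE_ADDR and its directory at TMP_DIR_ADDR.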
43
44 #define HW_MAP_ADDR             0xFE000000
45 #define HW_MAP_MAX              0xFFEF0000
46 #define NUM_HW_PAGES    ((HW_MAP_MAX-HW_MAP_ADDR)/0x1000)
47 #define TEMP_MAP_ADDR   0xFFEF0000      // Allows 16 "temp" pages
48 #define NUM_TEMP_PAGES  16
49 #define LAST_BLOCK_ADDR 0xFFFF0000      // Free space for kernel-provided user code / *(-1) protection
50
51 #define PF_PRESENT      0x1
52 #define PF_WRITE        0x2
53 #define PF_USER         0x4
54 #define PF_GLOBAL       0x80
55 #define PF_COW          0x200
56 #define PF_NOPAGE       0x400
57
58 #define INVLPG(addr)    __asm__ __volatile__ ("invlpg (%0)"::"r"(addr))
59
60 typedef Uint32  tTabEnt;
61
62 // === IMPORTS ===
63 extern char     _UsertextEnd[], _UsertextBase[];
64 extern Uint32   gaInitPageDir[1024];
65 extern Uint32   gaInitPageTable[1024];
66 extern void     Threads_SegFault(tVAddr Addr);
67 extern void     Error_Backtrace(Uint eip, Uint ebp);
68
69 // === PROTOTYPES ===
70 void    MM_PreinitVirtual(void);
71 void    MM_InstallVirtual(void);
72 void    MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs);
73 //void  MM_DumpTables(tVAddr Start, tVAddr End);
74 //void  MM_ClearUser(void);
75 tPAddr  MM_DuplicatePage(tVAddr VAddr);
76
77 // === GLOBALS ===
78 #define gaPageTable     ((tTabEnt*)PAGE_TABLE_ADDR)
79 #define gaPageDir       ((tTabEnt*)PAGE_DIR_ADDR)
80 #define gaTmpTable      ((tTabEnt*)TMP_TABLE_ADDR)
81 #define gaTmpDir        ((tTabEnt*)TMP_DIR_ADDR)
82 #define gpPageCR3       ((tTabEnt*)PAGE_CR3_ADDR)
83 #define gpTmpCR3        ((tTabEnt*)TMP_CR3_ADDR)
84
85 #define gaPAE_PageTable ((tTabEnt*)PAE_PAGE_TABLE_ADDR)
86 #define gaPAE_PageDir   ((tTabEnt*)PAE_PAGE_DIR_ADDR)
87 #define gaPAE_MainPDPT  ((tTabEnt*)PAE_PAGE_PDPT_ADDR)
88 #define gaPAE_TmpTable ((tTabEnt*)PAE_TMP_TABLE_ADDR)
89 #define gaPAE_TmpDir    ((tTabEnt*)PAE_TMP_DIR_ADDR)
90 #define gaPAE_TmpPDPT   ((tTabEnt*)PAE_TMP_PDPT_ADDR)
91  int    gbUsePAE = 0;
92 tMutex  glTempMappings;
93 tMutex  glTempFractal;
94 Uint32  gWorkerStacks[(NUM_WORKER_STACKS+31)/32];
95  int    giLastUsedWorker = 0;
96 struct sPageInfo {
97         void    *Node;
98         tVAddr  Base;
99         Uint64  Offset;
100          int    Length;
101          int    Flags;
102 }       *gaMappedRegions;       // sizeof = 24 bytes
103
104 // === CODE ===
105 /**
106  * \fn void MM_PreinitVirtual(void)
107  * \brief Sets up the fractal (self-referencing) page mappings
108  */
109 void MM_PreinitVirtual(void)
110 {
111         gaInitPageDir[ PAGE_TABLE_ADDR >> 22 ] = ((tTabEnt)&gaInitPageDir - KERNEL_BASE) | 3;
112         INVLPG( PAGE_TABLE_ADDR );
113 }
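
/*
 * The entry written above points page-directory slot 0x3F0 (PAGE_TABLE_ADDR >> 22)
 * back at the page directory itself, so the MMU interprets the directory as a page
 * table and every PDE/PTE becomes addressable through the gaPageDir/gaPageTable
 * windows. A minimal sketch of the lookup idiom used throughout this file
 * (see MM_GetPhysAddr() and MM_IsValidBuffer() for the real thing):
 *
 *      // Is virtual address V currently mapped?
 *      int bMapped = (gaPageDir[V >> 22] & PF_PRESENT)
 *                 && (gaPageTable[V >> 12] & PF_PRESENT);
 */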
114
115 /**
116  * \fn void MM_InstallVirtual(void)
117  * \brief Sets up the constant page mappings
118  */
119 void MM_InstallVirtual(void)
120 {
121          int    i;
122         
123         // --- Pre-Allocate kernel tables
124         for( i = KERNEL_BASE>>22; i < 1024; i ++ )
125         {
126                 if( gaPageDir[ i ] )    continue;
127                 // Skip stack tables, they are process unique
128                 if( i > KERNEL_STACKS >> 22 && i < KERNEL_STACKS_END >> 22) {
129                         gaPageDir[ i ] = 0;
130                         continue;
131                 }
132                 // Preallocate table
133                 gaPageDir[ i ] = MM_AllocPhys() | 3;
134                 INVLPG( &gaPageTable[i*1024] );
135                 memset( &gaPageTable[i*1024], 0, 0x1000 );
136         }
137         
138         // Unset kernel on the User Text pages
139         for( i = ((tVAddr)&_UsertextEnd-(tVAddr)&_UsertextBase+0xFFF)/4096; i--; ) {
140                 MM_SetFlags( (tVAddr)&_UsertextBase + i*4096, 0, MM_PFLAG_KERNEL );
141         }
142 }
143
144 /**
145  * \brief Cleans up the mappings required for SMP startup
146  */
147 void MM_FinishVirtualInit(void)
148 {
149         gaInitPageDir[ 0 ] = 0;
150 }
151
152 /**
153  * \fn void MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs)
154  * \brief Called on a page fault
155  */
156 void MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs)
157 {
158         //ENTER("xAddr bErrorCode", Addr, ErrorCode);
159         
160         // -- Check for COW --
161         if( gaPageDir  [Addr>>22] & PF_PRESENT  && gaPageTable[Addr>>12] & PF_PRESENT
162          && gaPageTable[Addr>>12] & PF_COW )
163         {
164                 tPAddr  paddr;
165                 if(MM_GetRefCount( gaPageTable[Addr>>12] & ~0xFFF ) == 1)
166                 {
167                         gaPageTable[Addr>>12] &= ~PF_COW;
168                         gaPageTable[Addr>>12] |= PF_PRESENT|PF_WRITE;
169                 }
170                 else
171                 {
172                         //Log("MM_PageFault: COW - MM_DuplicatePage(0x%x)", Addr);
173                         paddr = MM_DuplicatePage( Addr );
174                         MM_DerefPhys( gaPageTable[Addr>>12] & ~0xFFF );
175                         gaPageTable[Addr>>12] &= PF_USER;
176                         gaPageTable[Addr>>12] |= paddr|PF_PRESENT|PF_WRITE;
177                 }
178                 
179 //              Log_Debug("MMVirt", "COW for %p (%P)", Addr, gaPageTable[Addr>>12]);
180                 
181                 INVLPG( Addr & ~0xFFF );
182                 return;
183         }
184         
185         __asm__ __volatile__ ("pushf; andw $0xFEFF, 0(%esp); popf");
186         Proc_GetCurThread()->bInstrTrace = 0;
187
188         // If it was a user, tell the thread handler
189         if(ErrorCode & 4) {
190                 Log_Warning("MMVirt", "User %s %s memory%s",
191                         (ErrorCode&2?"write to":"read from"),
192                         (ErrorCode&1?"bad/locked":"non-present"),
193                         (ErrorCode&16?" (Instruction Fetch)":"")
194                         );
195                 Log_Warning("MMVirt", "Instruction %04x:%08x accessed %p", Regs->cs, Regs->eip, Addr);
196                 __asm__ __volatile__ ("sti");   // Restart IRQs
197                 #if 1
198                 Error_Backtrace(Regs->eip, Regs->ebp);
199                 #endif
200                 Threads_SegFault(Addr);
201                 return ;
202         }
203         
204         Debug_KernelPanic();
205         
206         // -- Check Error Code --
207         if(ErrorCode & 8)
208                 Warning("Reserved Bits Trashed!");
209         else
210         {
211                 Warning("Kernel %s %s memory%s",
212                         (ErrorCode&2?"write to":"read from"),
213                         (ErrorCode&1?"bad/locked":"non-present"),
214                         (ErrorCode&16?" (Instruction Fetch)":"")
215                         );
216         }
217         
218         Log("Code at %p accessed %p", Regs->eip, Addr);
219         // Print Stack Backtrace
220         Error_Backtrace(Regs->eip, Regs->ebp);
221         
222         Log("gaPageDir[0x%x] = 0x%x", Addr>>22, gaPageDir[Addr>>22]);
223         if( gaPageDir[Addr>>22] & PF_PRESENT )
224                 Log("gaPageTable[0x%x] = 0x%x", Addr>>12, gaPageTable[Addr>>12]);
225         
226         //MM_DumpTables(0, -1); 
227         
228         // Register Dump
229         Log("EAX %08x ECX %08x EDX %08x EBX %08x", Regs->eax, Regs->ecx, Regs->edx, Regs->ebx);
230         Log("ESP %08x EBP %08x ESI %08x EDI %08x", Regs->esp, Regs->ebp, Regs->esi, Regs->edi);
231         //Log("SS:ESP %04x:%08x", Regs->ss, Regs->esp);
232         Log("CS:EIP %04x:%08x", Regs->cs, Regs->eip);
233         Log("DS %04x ES %04x FS %04x GS %04x", Regs->ds, Regs->es, Regs->fs, Regs->gs);
234         {
235                 Uint    dr0, dr1;
236                 __ASM__ ("mov %%dr0, %0":"=r"(dr0):);
237                 __ASM__ ("mov %%dr1, %0":"=r"(dr1):);
238                 Log("DR0 %08x DR1 %08x", dr0, dr1);
239         }
240         
241         Panic("Page Fault at 0x%x (Accessed 0x%x)", Regs->eip, Addr);
242 }
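
/*
 * Summary of the copy-on-write path above: if the faulting frame has a reference
 * count of one, this process is its only user, so PF_COW is simply traded back for
 * PF_WRITE. Otherwise MM_DuplicatePage() copies the frame into a fresh page, the
 * old frame is dereferenced, and the PTE is rewritten to the private copy (keeping
 * only its PF_USER bit) before the stale TLB entry is flushed with INVLPG.
 */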
243
244 /**
245  * \fn void MM_DumpTables(tVAddr Start, tVAddr End)
246  * \brief Dumps the layout of the page tables
247  */
248 void MM_DumpTables(tVAddr Start, tVAddr End)
249 {
250         tVAddr  rangeStart = 0;
251         tPAddr  expected = 0;
252         void    *expected_node = NULL, *tmpnode = NULL;
253         tVAddr  curPos;
254         Uint    page;
255         const tPAddr    MASK = ~0xF78;
256         
257         Start >>= 12;   End >>= 12;
258         
259         #if 0
260         Log("Directory Entries:");
261         for(page = Start >> 10;
262                 page < (End >> 10)+1;
263                 page ++)
264         {
265                 if(gaPageDir[page])
266                 {
267                         Log(" 0x%08x-0x%08x :: 0x%08x",
268                                 page<<22, ((page+1)<<22)-1,
269                                 gaPageDir[page]&~0xFFF
270                                 );
271                 }
272         }
273         #endif
274         
275         Log("Table Entries:");
276         for(page = Start, curPos = Start<<12;
277                 page < End;
278                 curPos += 0x1000, page++)
279         {
280                 if( !(gaPageDir[curPos>>22] & PF_PRESENT)
281                 ||  !(gaPageTable[page] & PF_PRESENT)
282                 ||  (gaPageTable[page] & MASK) != expected
283                 ||  (tmpnode=NULL,MM_GetPageNode(expected, &tmpnode), tmpnode != expected_node))
284                 {
285                         if(expected) {
286                                 tPAddr  orig = gaPageTable[rangeStart>>12];
287                                 Log(" 0x%08x => 0x%08x - 0x%08x (%s%s%s%s%s) %p",
288                                         rangeStart,
289                                         orig & ~0xFFF,
290                                         curPos - rangeStart,
291                                         (orig & PF_NOPAGE ? "P" : "-"),
292                                         (orig & PF_COW ? "C" : "-"),
293                                         (orig & PF_GLOBAL ? "G" : "-"),
294                                         (orig & PF_USER ? "U" : "-"),
295                                         (orig & PF_WRITE ? "W" : "-"),
296                                         expected_node
297                                         );
298                                 expected = 0;
299                         }
300                         if( !(gaPageDir[curPos>>22] & PF_PRESENT) )     continue;
301                         if( !(gaPageTable[curPos>>12] & PF_PRESENT) )   continue;
302                         
303                         expected = (gaPageTable[page] & MASK);
304                         MM_GetPageNode(expected, &expected_node);
305                         rangeStart = curPos;
306                 }
307                 if(expected)    expected += 0x1000;
308         }
309         
310         if(expected) {
311                 tPAddr  orig = gaPageTable[rangeStart>>12];
312                 Log("0x%08x => 0x%08x - 0x%08x (%s%s%s%s%s) %p",
313                         rangeStart,
314                         orig & ~0xFFF,
315                         curPos - rangeStart,
316                         (orig & PF_NOPAGE ? "p" : "-"),
317                         (orig & PF_COW ? "C" : "-"),
318                         (orig & PF_GLOBAL ? "G" : "-"),
319                         (orig & PF_USER ? "U" : "-"),
320                         (orig & PF_WRITE ? "W" : "-"),
321                         expected_node
322                         );
323                 expected = 0;
324         }
325 }
326
327 /**
328  * \fn tPAddr MM_Allocate(tVAddr VAddr)
329  */
330 tPAddr MM_Allocate(tVAddr VAddr)
331 {
332         tPAddr  paddr;
333         //ENTER("xVAddr", VAddr);
334         //__asm__ __volatile__ ("xchg %bx,%bx");
335         // Check if the directory is mapped
336         if( gaPageDir[ VAddr >> 22 ] == 0 )
337         {
338                 // Allocate directory
339                 paddr = MM_AllocPhys();
340                 if( paddr == 0 ) {
341                         Warning("MM_Allocate - Out of Memory (Called by %p)", __builtin_return_address(0));
342                         //LEAVE('i',0);
343                         return 0;
344                 }
345                 // Map and mark as user (if needed)
346                 gaPageDir[ VAddr >> 22 ] = paddr | 3;
347                 if(VAddr < MM_USER_MAX) gaPageDir[ VAddr >> 22 ] |= PF_USER;
348                 
349                 INVLPG( &gaPageDir[ VAddr >> 22 ] );
350                 memsetd( &gaPageTable[ (VAddr >> 12) & ~0x3FF ], 0, 1024 );
351         }
352         // Check if the page is already allocated
353         else if( gaPageTable[ VAddr >> 12 ] != 0 ) {
354                 Warning("MM_Allocate - Allocating to used address (%p)", VAddr);
355                 //LEAVE('X', gaPageTable[ VAddr >> 12 ] & ~0xFFF);
356                 return gaPageTable[ VAddr >> 12 ] & ~0xFFF;
357         }
358         
359         // Allocate
360         paddr = MM_AllocPhys();
361         //LOG("paddr = 0x%llx", paddr);
362         if( paddr == 0 ) {
363                 Warning("MM_Allocate - Out of Memory when allocating at %p (Called by %p)",
364                         VAddr, __builtin_return_address(0));
365                 //LEAVE('i',0);
366                 return 0;
367         }
368         // Map
369         gaPageTable[ VAddr >> 12 ] = paddr | 3;
370         // Mark as user
371         if(VAddr < MM_USER_MAX) gaPageTable[ VAddr >> 12 ] |= PF_USER;
372         // Invalidate Cache for address
373         INVLPG( VAddr & ~0xFFF );
374         
375         //LEAVE('X', paddr);
376         return paddr;
377 }
378
379 /**
380  * \fn void MM_Deallocate(tVAddr VAddr)
381  */
382 void MM_Deallocate(tVAddr VAddr)
383 {
384         if( gaPageDir[ VAddr >> 22 ] == 0 ) {
385                 Warning("MM_Deallocate - Directory not mapped");
386                 return;
387         }
388         
389         if(gaPageTable[ VAddr >> 12 ] == 0) {
390                 Warning("MM_Deallocate - Page is not allocated");
391                 return;
392         }
393         
394         // Dereference page
395         MM_DerefPhys( gaPageTable[ VAddr >> 12 ] & ~0xFFF );
396         // Clear page
397         gaPageTable[ VAddr >> 12 ] = 0;
398 }
399
400 /**
401  * \fn tPAddr MM_GetPhysAddr(tVAddr Addr)
402  * \brief Checks if the passed address is accessible
403  */
404 tPAddr MM_GetPhysAddr(tVAddr Addr)
405 {
406         if( !(gaPageDir[Addr >> 22] & 1) )
407                 return 0;
408         if( !(gaPageTable[Addr >> 12] & 1) )
409                 return 0;
410         return (gaPageTable[Addr >> 12] & ~0xFFF) | (Addr & 0xFFF);
411 }
412
413 /**
414  * \fn void MM_SetCR3(Uint CR3)
415  * \brief Sets the current process space
416  */
417 void MM_SetCR3(Uint CR3)
418 {
419         __asm__ __volatile__ ("mov %0, %%cr3"::"r"(CR3));
420 }
421
422 /**
423  * \fn int MM_Map(tVAddr VAddr, tPAddr PAddr)
424  * \brief Map a physical page to a virtual one
425  */
426 int MM_Map(tVAddr VAddr, tPAddr PAddr)
427 {
428         //ENTER("xVAddr xPAddr", VAddr, PAddr);
429         // Sanity check
430         if( PAddr & 0xFFF || VAddr & 0xFFF ) {
431                 Warning("MM_Map - Physical or Virtual Addresses are not aligned");
432                 //LEAVE('i', 0);
433                 return 0;
434         }
435         
436         // Align addresses
437         PAddr &= ~0xFFF;        VAddr &= ~0xFFF;
438         
439         // Check if the directory is mapped
440         if( gaPageDir[ VAddr >> 22 ] == 0 )
441         {
442                 tPAddr  tmp = MM_AllocPhys();
443                 if( tmp == 0 )
444                         return 0;
445                 gaPageDir[ VAddr >> 22 ] = tmp | 3;
446                 
447                 // Mark as user
448                 if(VAddr < MM_USER_MAX) gaPageDir[ VAddr >> 22 ] |= PF_USER;
449                 
450                 INVLPG( &gaPageTable[ (VAddr >> 12) & ~0x3FF ] );
451                 memsetd( &gaPageTable[ (VAddr >> 12) & ~0x3FF ], 0, 1024 );
452         }
453         // Check if the page is already allocated
454         else if( gaPageTable[ VAddr >> 12 ] != 0 ) {
455                 Warning("MM_Map - Allocating to used address");
456                 //LEAVE('i', 0);
457                 return 0;
458         }
459         
460         // Map
461         gaPageTable[ VAddr >> 12 ] = PAddr | 3;
462         // Mark as user
463         if(VAddr < MM_USER_MAX) gaPageTable[ VAddr >> 12 ] |= PF_USER;
464         
465         //LOG("gaPageTable[ 0x%x ] = (Uint)%p = 0x%x",
466         //      VAddr >> 12, &gaPageTable[ VAddr >> 12 ], gaPageTable[ VAddr >> 12 ]);
467         
468         // Reference
469         MM_RefPhys( PAddr );
470         
471         //LOG("INVLPG( 0x%x )", VAddr);
472         INVLPG( VAddr );
473         
474         //LEAVE('i', 1);
475         return 1;
476 }
477
478 /**
479  * \brief Clear user's address space
480  */
481 void MM_ClearUser(void)
482 {
483         Uint    i, j;
484         
485         for( i = 0; i < (MM_USER_MAX>>22); i ++ )
486         {
487                 // Check if directory is not allocated
488                 if( !(gaPageDir[i] & PF_PRESENT) ) {
489                         gaPageDir[i] = 0;
490                         continue;
491                 }
492                 
493                 // Deallocate tables
494                 for( j = 0; j < 1024; j ++ )
495                 {
496                         if( gaPageTable[i*1024+j] & 1 )
497                                 MM_DerefPhys( gaPageTable[i*1024+j] & ~0xFFF );
498                         gaPageTable[i*1024+j] = 0;
499                 }
500                 
501                 // Deallocate directory
502                 MM_DerefPhys( gaPageDir[i] & ~0xFFF );
503                 gaPageDir[i] = 0;
504                 INVLPG( &gaPageTable[i*1024] );
505         }
506         INVLPG( gaPageDir );
507 }
508
509 /**
510  * \fn tPAddr MM_Clone(void)
511  * \brief Clone the current address space
512  */
513 tPAddr MM_Clone(void)
514 {
515         Uint    i, j;
516         tVAddr  ret;
517         Uint    page = 0;
518         tVAddr  kStackBase = Proc_GetCurThread()->KernelStack - KERNEL_STACK_SIZE;
519         void    *tmp;
520         
521         Mutex_Acquire( &glTempFractal );
522         
523         // Create Directory Table
524         *gpTmpCR3 = MM_AllocPhys() | 3;
525         if( *gpTmpCR3 == 3 ) {
526                 *gpTmpCR3 = 0;
527                 return 0;
528         }
529         INVLPG( gaTmpDir );
530         //LOG("Allocated Directory (%x)", *gpTmpCR3);
531         memsetd( gaTmpDir, 0, 1024 );
532         
533         if( Threads_GetPID() != 0 )
534         {       
535                 // Copy Tables
536                 for( i = 0; i < 768; i ++)
537                 {
538                         // Check if table is allocated
539                         if( !(gaPageDir[i] & PF_PRESENT) ) {
540                                 gaTmpDir[i] = 0;
541                                 page += 1024;
542                                 continue;
543                         }
544                         
545                         // Allocate new table
546                         gaTmpDir[i] = MM_AllocPhys() | (gaPageDir[i] & 7);
547                         INVLPG( &gaTmpTable[page] );
548                         // Fill
549                         for( j = 0; j < 1024; j ++, page++ )
550                         {
551                                 if( !(gaPageTable[page] & PF_PRESENT) ) {
552                                         gaTmpTable[page] = 0;
553                                         continue;
554                                 }
555                                 
556                                 // Reference old page
557                                 MM_RefPhys( gaPageTable[page] & ~0xFFF );
558                                 // Add to new table
559                                 if(gaPageTable[page] & PF_WRITE) {
560                                         gaTmpTable[page] = (gaPageTable[page] & ~PF_WRITE) | PF_COW;
561                                         gaPageTable[page] = (gaPageTable[page] & ~PF_WRITE) | PF_COW;
562                                         INVLPG( page << 12 );
563                                 }
564                                 else
565                                         gaTmpTable[page] = gaPageTable[page];
566                         }
567                 }
568         }
569         
570         // Map in kernel tables (and make fractal mapping)
571         for( i = 768; i < 1024; i ++ )
572         {
573                 // Fractal
574                 if( i == (PAGE_TABLE_ADDR >> 22) ) {
575                         gaTmpDir[ PAGE_TABLE_ADDR >> 22 ] = *gpTmpCR3;
576                         continue;
577                 }
578                 
579                 if( gaPageDir[i] == 0 ) {
580                         gaTmpDir[i] = 0;
581                         continue;
582                 }
583                 
584                 //LOG("gaPageDir[%x/4] = 0x%x", i*4, gaPageDir[i]);
585                 MM_RefPhys( gaPageDir[i] & ~0xFFF );
586                 gaTmpDir[i] = gaPageDir[i];
587         }
588         
589         // Allocate kernel stack
590         for(i = KERNEL_STACKS >> 22;
591                 i < KERNEL_STACKS_END >> 22;
592                 i ++ )
593         {
594                 // Check if directory is allocated
595                 if( (gaPageDir[i] & 1) == 0 ) {
596                         gaTmpDir[i] = 0;
597                         continue;
598                 }               
599                 
600                 // We don't care about other kernel stacks, just the current one
601                 if( i != kStackBase >> 22 ) {
602                         MM_DerefPhys( gaPageDir[i] & ~0xFFF );
603                         gaTmpDir[i] = 0;
604                         continue;
605                 }
606                 
607                 // Create a copy
608                 gaTmpDir[i] = MM_AllocPhys() | 3;
609                 INVLPG( &gaTmpTable[i*1024] );
610                 for( j = 0; j < 1024; j ++ )
611                 {
612                         // Is the page allocated? If not, skip
613                         if( !(gaPageTable[i*1024+j] & 1) ) {
614                                 gaTmpTable[i*1024+j] = 0;
615                                 continue;
616                         }
617                         
618                         // We don't care about other kernel stacks
619                         if( ((i*1024+j)*4096 & ~(KERNEL_STACK_SIZE-1)) != kStackBase ) {
620                                 gaTmpTable[i*1024+j] = 0;
621                                 continue;
622                         }
623                         
624                         // Allocate page
625                         gaTmpTable[i*1024+j] = MM_AllocPhys() | 3;
626                         
627                         MM_RefPhys( gaTmpTable[i*1024+j] & ~0xFFF );
628                         
629                         tmp = (void *) MM_MapTemp( gaTmpTable[i*1024+j] & ~0xFFF );
630                         memcpy( tmp, (void *)( (i*1024+j)*0x1000 ), 0x1000 );
631                         MM_FreeTemp( (Uint)tmp );
632                 }
633         }
634         
635         ret = *gpTmpCR3 & ~0xFFF;
636         Mutex_Release( &glTempFractal );
637         
638         //LEAVE('x', ret);
639         return ret;
640 }
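
/*
 * MM_Clone() pairs with the COW handler in MM_PageFault(): every writable user page
 * is downgraded to read-only + PF_COW in *both* the parent and the new address
 * space, so whichever process writes first takes the fault and receives a private
 * copy. Kernel directory slots (768 and up) are shared by reference, and only the
 * current thread's kernel stack is deep-copied, via the temporary mappings.
 */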
641
642 /**
643  * \fn tVAddr MM_NewKStack(void)
644  * \brief Create a new kernel stack
645  */
646 tVAddr MM_NewKStack(void)
647 {
648         tVAddr  base;
649         Uint    i;
650         for(base = KERNEL_STACKS; base < KERNEL_STACKS_END; base += KERNEL_STACK_SIZE)
651         {
652                 // Check if space is free
653                 if(MM_GetPhysAddr(base) != 0)   continue;
654                 // Allocate
655                 //for(i = KERNEL_STACK_SIZE; i -= 0x1000 ; )
656                 for(i = 0; i < KERNEL_STACK_SIZE; i += 0x1000 )
657                 {
658                         if( MM_Allocate(base+i) == 0 )
659                         {
660                                 // On error, print a warning and return error
661                                 Warning("MM_NewKStack - Out of memory");
662                                 // - Clean up
663                                 //for( i += 0x1000 ; i < KERNEL_STACK_SIZE; i += 0x1000 )
664                                 //      MM_Deallocate(base+i);
665                                 return 0;
666                         }
667                 }
668                 // Success
669                 Log("MM_NewKStack - Allocated %p", base + KERNEL_STACK_SIZE);
670                 return base+KERNEL_STACK_SIZE;
671         }
672         // No stacks left
673         Warning("MM_NewKStack - No address space left");
674         return 0;
675 }
676
677 /**
678  * \fn tVAddr MM_NewWorkerStack(Uint *StackContents, size_t ContentsSize)
679  * \brief Creates a new worker stack
680  */
681 tVAddr MM_NewWorkerStack(Uint *StackContents, size_t ContentsSize)
682 {
683         Uint    base, addr;
684         tVAddr  tmpPage;
685         tPAddr  page;
686         
687         // TODO: Thread safety
688         // Find a free worker stack address
689         for(base = giLastUsedWorker; base < NUM_WORKER_STACKS; base++)
690         {
691                 // Fully-used block of 32 stacks
692                 if( gWorkerStacks[base/32] == -1 ) {
693                         // Jump to the last stack index in this block
694                         base |= 31;     // Counteracted by the base++
695                         continue;
696                 }
697                 // Used stack
698                 if( gWorkerStacks[base/32] & (1 << (base & 31)) ) {
699                         continue;
700                 }
701                 break;
702         }
703         if(base >= NUM_WORKER_STACKS) {
704                 Warning("Uh-oh! Out of worker stacks");
705                 return 0;
706         }
707         
708         // It's ours now!
709         gWorkerStacks[base/32] |= (1 << (base & 31));
710         // Make life easier for later calls
711         giLastUsedWorker = base;
712         // We have one
713         base = WORKER_STACKS + base * WORKER_STACK_SIZE;
714         //Log(" MM_NewWorkerStack: base = 0x%x", base);
715         
716         // Acquire the lock for the temp fractal mappings
717         Mutex_Acquire(&glTempFractal);
718         
719         // Set the temp fractals to TID0's address space
720         *gpTmpCR3 = ((Uint)gaInitPageDir - KERNEL_BASE) | 3;
721         //Log(" MM_NewWorkerStack: *gpTmpCR3 = 0x%x", *gpTmpCR3);
722         INVLPG( gaTmpDir );
723         
724         
725         // Check if the directory is mapped (we are assuming that the stacks
726         // will fit neatly in a directory)
727         //Log(" MM_NewWorkerStack: gaTmpDir[ 0x%x ] = 0x%x", base>>22, gaTmpDir[ base >> 22 ]);
728         if(gaTmpDir[ base >> 22 ] == 0) {
729                 gaTmpDir[ base >> 22 ] = MM_AllocPhys() | 3;
730                 INVLPG( &gaTmpTable[ (base>>12) & ~0x3FF ] );
731         }
732         
733         // Mapping Time!
734         for( addr = 0; addr < WORKER_STACK_SIZE; addr += 0x1000 )
735         {
736                 page = MM_AllocPhys();
737                 gaTmpTable[ (base + addr) >> 12 ] = page | 3;
738         }
739         *gpTmpCR3 = 0;
740         // Release the temp mapping lock
741         Mutex_Release(&glTempFractal);
742
743         // NOTE: Max of 1 page
744         // `page` is the last page allocated by the previous loop
745         tmpPage = MM_MapTemp( page );
746         memcpy( (void*)( tmpPage + (0x1000 - ContentsSize) ), StackContents, ContentsSize);
747         MM_FreeTemp(tmpPage);   
748         
749         //Log("MM_NewWorkerStack: RETURN 0x%x", base);
750         return base + WORKER_STACK_SIZE;
751 }
752
753 /**
754  * \fn void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask)
755  * \brief Sets the flags on a page
756  */
757 void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask)
758 {
759         tTabEnt *ent;
760         if( !(gaPageDir[VAddr >> 22] & 1) )     return ;
761         if( !(gaPageTable[VAddr >> 12] & 1) )   return ;
762         
763         ent = &gaPageTable[VAddr >> 12];
764         
765         // Read-Only
766         if( Mask & MM_PFLAG_RO )
767         {
768                 if( Flags & MM_PFLAG_RO ) {
769                         *ent &= ~PF_WRITE;
770                 }
771                 else {
772                         gaPageDir[VAddr >> 22] |= PF_WRITE;
773                         *ent |= PF_WRITE;
774                 }
775         }
776         
777         // Kernel
778         if( Mask & MM_PFLAG_KERNEL )
779         {
780                 if( Flags & MM_PFLAG_KERNEL ) {
781                         *ent &= ~PF_USER;
782                 }
783                 else {
784                         gaPageDir[VAddr >> 22] |= PF_USER;
785                         *ent |= PF_USER;
786                 }
787         }
788         
789         // Copy-On-Write
790         if( Mask & MM_PFLAG_COW )
791         {
792                 if( Flags & MM_PFLAG_COW ) {
793                         *ent &= ~PF_WRITE;
794                         *ent |= PF_COW;
795                 }
796                 else {
797                         *ent &= ~PF_COW;
798                         *ent |= PF_WRITE;
799                 }
800         }
801         
802         //Log("MM_SetFlags: *ent = 0x%08x, gaPageDir[%i] = 0x%08x",
803         //      *ent, VAddr >> 22, gaPageDir[VAddr >> 22]);
804 }
805
806 /**
807  * \brief Get the flags on a page
808  */
809 Uint MM_GetFlags(tVAddr VAddr)
810 {
811         tTabEnt *ent;
812         Uint    ret = 0;
813         
814         // Validity Check
815         if( !(gaPageDir[VAddr >> 22] & 1) )     return 0;
816         if( !(gaPageTable[VAddr >> 12] & 1) )   return 0;
817         
818         ent = &gaPageTable[VAddr >> 12];
819         
820         // Read-Only
821         if( !(*ent & PF_WRITE) )        ret |= MM_PFLAG_RO;
822         // Kernel
823         if( !(*ent & PF_USER) ) ret |= MM_PFLAG_KERNEL;
824         // Copy-On-Write
825         if( *ent & PF_COW )     ret |= MM_PFLAG_COW;
826         
827         return ret;
828 }
829
830 /**
831  * \brief Check if the provided buffer is valid
832  * \return Boolean valid
833  */
834 int MM_IsValidBuffer(tVAddr Addr, size_t Size)
835 {
836          int    bIsUser;
837          int    dir, tab;
838
839         Size += Addr & (PAGE_SIZE-1);
840         Addr &= ~(PAGE_SIZE-1);
841
842         dir = Addr >> 22;
843         tab = Addr >> 12;
844         
845 //      Debug("Addr = %p, Size = 0x%x, dir = %i, tab = %i", Addr, Size, dir, tab);
846
847         if( !(gaPageDir[dir] & 1) )     return 0;
848         if( !(gaPageTable[tab] & 1) )   return 0;
849         
850         bIsUser = !!(gaPageTable[tab] & PF_USER);
851
852         // The first page was verified above; walk the remaining pages
853         while( Size > PAGE_SIZE )
854         {
855                 tab ++;
856                 Size -= PAGE_SIZE;
857                 // Crossed into the next page table?
858                 if( (tab & 1023) == 0 )
859                 {
860                         dir ++;
861                         if( !(gaPageDir[dir] & 1) )     return 0;
862                 }
863                 if( !(gaPageTable[tab] & 1) )   return 0;
864                 if( bIsUser && !(gaPageTable[tab] & PF_USER) )  return 0;
865         }
866         return 1;
867 }
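
/*
 * This appears to be the fast replacement referred to by the commit message
 * ("Fixing very slow CheckMem function"): it walks the directory and table entries
 * directly through the fractal mapping - one PTE check per 4 KiB page and one PDE
 * check per 4 MiB boundary - instead of translating every address individually.
 * A hypothetical caller (names below are illustrative, not from this source):
 *
 *      if( !MM_IsValidBuffer( (tVAddr)user_ptr, user_len ) )
 *              return -1;      // reject the user-supplied buffer
 */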
868
869 /**
870  * \fn tPAddr MM_DuplicatePage(tVAddr VAddr)
871  * \brief Copies the contents of a virtual page into a newly allocated physical page
872  */
873 tPAddr MM_DuplicatePage(tVAddr VAddr)
874 {
875         tPAddr  ret;
876         Uint    temp;
877          int    wasRO = 0;
878         
879         //ENTER("xVAddr", VAddr);
880         
881         // Check if mapped
882         if( !(gaPageDir  [VAddr >> 22] & PF_PRESENT) )  return 0;
883         if( !(gaPageTable[VAddr >> 12] & PF_PRESENT) )  return 0;
884         
885         // Page Align
886         VAddr &= ~0xFFF;
887         
888         // Allocate new page
889         ret = MM_AllocPhys();
890         if( !ret ) {
891                 return 0;
892         }
893         
894         // Write-lock the page (to keep data consistent), saving its R/W state
895         wasRO = (gaPageTable[VAddr >> 12] & PF_WRITE ? 0 : 1);
896         gaPageTable[VAddr >> 12] &= ~PF_WRITE;
897         INVLPG( VAddr );
898         
899         // Copy Data
900         temp = MM_MapTemp(ret);
901         memcpy( (void*)temp, (void*)VAddr, 0x1000 );
902         MM_FreeTemp(temp);
903         
904         // Restore Writeable status
905         if(!wasRO)      gaPageTable[VAddr >> 12] |= PF_WRITE;
906         INVLPG(VAddr);
907         
908         //LEAVE('X', ret);
909         return ret;
910 }
911
912 /**
913  * \fn tVAddr MM_MapTemp(tPAddr PAddr)
914  * \brief Create a temporary memory mapping
915  * \todo Show Luigi Barone (C Lecturer) and see what he thinks
916  */
917 tVAddr MM_MapTemp(tPAddr PAddr)
918 {
919          int    i;
920         
921         //ENTER("XPAddr", PAddr);
922         
923         PAddr &= ~0xFFF;
924         
925         //LOG("glTempMappings = %i", glTempMappings);
926         
927         for(;;)
928         {
929                 Mutex_Acquire( &glTempMappings );
930                 
931                 for( i = 0; i < NUM_TEMP_PAGES; i ++ )
932                 {
933                         // Check if page used
934                         if(gaPageTable[ (TEMP_MAP_ADDR >> 12) + i ] & 1)        continue;
935                         // Mark as used
936                         gaPageTable[ (TEMP_MAP_ADDR >> 12) + i ] = PAddr | 3;
937                         INVLPG( TEMP_MAP_ADDR + (i << 12) );
938                         //LEAVE('p', TEMP_MAP_ADDR + (i << 12));
939                         Mutex_Release( &glTempMappings );
940                         return TEMP_MAP_ADDR + (i << 12);
941                 }
942                 Mutex_Release( &glTempMappings );
943                 Threads_Yield();        // TODO: Use a sleep queue here instead
944         }
945 }
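
/*
 * Temporary mappings are a small fixed pool: NUM_TEMP_PAGES (16) slots starting at
 * TEMP_MAP_ADDR, guarded by glTempMappings. When every slot is busy the caller
 * yields and retries (the TODO above notes a sleep queue would be better).
 * MM_DuplicatePage() and MM_NewWorkerStack() show the usual pattern
 * (placeholder variables):
 *
 *      tVAddr tmp = MM_MapTemp(some_paddr);
 *      memcpy( (void*)tmp, src, 0x1000 );
 *      MM_FreeTemp(tmp);
 */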
946
947 /**
948  * \fn void MM_FreeTemp(tVAddr VAddr)
949  * \brief Frees a temporary mapping
950  */
951 void MM_FreeTemp(tVAddr VAddr)
952 {
953          int    i = VAddr >> 12;
954         //ENTER("xVAddr", VAddr);
955         
956         if(i >= (TEMP_MAP_ADDR >> 12))
957                 gaPageTable[ i ] = 0;
958         
959         //LEAVE('-');
960 }
961
962 /**
963  * \fn tVAddr MM_MapHWPages(tPAddr PAddr, Uint Number)
964  * \brief Maps a contiguous run of hardware pages
965  */
966 tVAddr MM_MapHWPages(tPAddr PAddr, Uint Number)
967 {
968          int    i, j;
969         
970         PAddr &= ~0xFFF;
971         
972         // Scan List
973         for( i = 0; i < NUM_HW_PAGES; i ++ )
974         {               
975                 // Check if addr used
976                 if( gaPageTable[ (HW_MAP_ADDR >> 12) + i ] & 1 )
977                         continue;
978                 
979                 // Check possible region
980                 for( j = 0; j < Number && i + j < NUM_HW_PAGES; j ++ )
981                 {
982                         // If there is an allocated page in the region we are testing, break
983                         if( gaPageTable[ (HW_MAP_ADDR >> 12) + i + j ] & 1 )    break;
984                 }
985                 // Is it all free?
986                 if( j == Number )
987                 {
988                         // Allocate
989                         for( j = 0; j < Number; j++ ) {
990                                 MM_RefPhys( PAddr + (j<<12) );
991                                 gaPageTable[ (HW_MAP_ADDR >> 12) + i + j ] = (PAddr + (j<<12)) | 3;
992                         }
993                         return HW_MAP_ADDR + (i<<12);
994                 }
995         }
996         // If we don't find any, return NULL
997         return 0;
998 }
999
1000 /**
1001  * \fn tVAddr MM_AllocDMA(int Pages, int MaxBits, tPAddr *PhysAddr)
1002  * \brief Allocates DMA physical memory
1003  * \param Pages Number of pages required
1004  * \param MaxBits       Maximum number of bits the physical address can have
1005  * \param PhysAddr      Pointer to the location to place the physical address allocated
1006  * \return Virtual address allocated
1007  */
1008 tVAddr MM_AllocDMA(int Pages, int MaxBits, tPAddr *PhysAddr)
1009 {
1010         tPAddr  maxCheck = (1 << MaxBits);
1011         tPAddr  phys;
1012         tVAddr  ret;
1013         
1014         ENTER("iPages iMaxBits pPhysAddr", Pages, MaxBits, PhysAddr);
1015         
1016         // Sanity Check
1017         if(MaxBits < 12 || !PhysAddr) {
1018                 LEAVE('i', 0);
1019                 return 0;
1020         }
1021         
1022         // Bound
1023         if(MaxBits >= PHYS_BITS)        maxCheck = -1;
1024         
1025         // Fast Allocate
1026         if(Pages == 1 && MaxBits >= PHYS_BITS)
1027         {
1028                 phys = MM_AllocPhys();
1029                 if( !phys ) {
1030                         *PhysAddr = 0;
1031                         LEAVE_RET('i', 0);
1032                 }
1033                 *PhysAddr = phys;
1034                 ret = MM_MapHWPages(phys, 1);
1035                 if(ret == 0) {
1036                         MM_DerefPhys(phys);
1037                         LEAVE('i', 0);
1038                         return 0;
1039                 }
1040                 LEAVE('x', ret);
1041                 return ret;
1042         }
1043         
1044         // Slow Allocate
1045         phys = MM_AllocPhysRange(Pages, MaxBits);
1046         // - Was it allocated?
1047         if(phys == 0) {
1048                 LEAVE('i', 0);
1049                 return 0;
1050         }
1051         
1052         // Allocated successfully, now map
1053         ret = MM_MapHWPages(phys, Pages);
1054         if( ret == 0 ) {
1055                 // If it didn't map, free then return 0
1056                 for(;Pages--;phys+=0x1000)
1057                         MM_DerefPhys(phys);
1058                 LEAVE('i', 0);
1059                 return 0;
1060         }
1061         
1062         *PhysAddr = phys;
1063         LEAVE('x', ret);
1064         return ret;
1065 }
1066
1067 /**
1068  * \fn void MM_UnmapHWPages(tVAddr VAddr, Uint Number)
1069  * \brief Unmaps a range of hardware pages
1070  */
1071 void MM_UnmapHWPages(tVAddr VAddr, Uint Number)
1072 {
1073          int    i, j;
1074         
1075         //Log_Debug("VirtMem", "MM_UnmapHWPages: (VAddr=0x%08x, Number=%i)", VAddr, Number);
1076         
1077         // Sanity Check
1078         if(VAddr < HW_MAP_ADDR || VAddr+Number*0x1000 > HW_MAP_MAX)     return;
1079         
1080         i = VAddr >> 12;
1081         
1082         Mutex_Acquire( &glTempMappings );       // Temp and HW share a directory, so they share a lock
1083         
1084         for( j = 0; j < Number; j++ )
1085         {
1086                 MM_DerefPhys( gaPageTable[ i + j ] & ~0xFFF );
1087                 gaPageTable[ i + j ] = 0;
1088         }
1089         
1090         Mutex_Release( &glTempMappings );
1091 }
1092
