Kernel/arch/x86/mm_virt.c
1 /*
2  * AcessOS Microkernel Version
3  * mm_virt.c
4  * 
5  * Memory Map
6  * 0xE0 - Kernel Base
7  * 0xF0 - Kernel Stacks
8  * 0xFD - Fractals
9  * 0xFE - Unused
10  * 0xFF - System Calls / Kernel's User Code
11  */
12 #define DEBUG   0
13 #define SANITY  1
14 #include <acess.h>
15 #include <mm_virt.h>
16 #include <mm_phys.h>
17 #include <proc.h>
18 #include <hal_proc.h>
19
20 #define TAB     22
21
22 #define WORKER_STACKS           0x00100000      // Thread0 Only!
23 #define WORKER_STACK_SIZE       MM_KERNEL_STACK_SIZE
24 #define WORKER_STACKS_END       0xB0000000
25 #define NUM_WORKER_STACKS       ((WORKER_STACKS_END-WORKER_STACKS)/WORKER_STACK_SIZE)
26
27 #define PAE_PAGE_TABLE_ADDR     0xFC000000      // 16 MiB
28 #define PAE_PAGE_DIR_ADDR       0xFCFC0000      // 16 KiB
29 #define PAE_PAGE_PDPT_ADDR      0xFCFC3F00      // 32 bytes
30 #define PAE_TMP_PDPT_ADDR       0xFCFC3F20      // 32 bytes
31 #define PAE_TMP_DIR_ADDR        0xFCFE0000      // 16 KiB
32 #define PAE_TMP_TABLE_ADDR      0xFD000000      // 16 MiB
33
34 #define PAGE_TABLE_ADDR 0xFC000000
35 #define PAGE_DIR_ADDR   0xFC3F0000
36 #define PAGE_CR3_ADDR   0xFC3F0FC0
37 #define TMP_CR3_ADDR    0xFC3F0FC4      // Part of core instead of temp
38 #define TMP_DIR_ADDR    0xFC3F1000      // Same
39 #define TMP_TABLE_ADDR  0xFC400000
40
41 #define HW_MAP_ADDR             0xFE000000
42 #define HW_MAP_MAX              0xFFEF0000
43 #define NUM_HW_PAGES    ((HW_MAP_MAX-HW_MAP_ADDR)/0x1000)
44 #define TEMP_MAP_ADDR   0xFFEF0000      // Allows 16 "temp" pages
45 #define NUM_TEMP_PAGES  16
46 #define LAST_BLOCK_ADDR 0xFFFF0000      // Free space for kernel-provided user code / *(-1) protection
47
48 #define PF_PRESENT      0x1
49 #define PF_WRITE        0x2
50 #define PF_USER         0x4
51 #define PF_GLOBAL       0x80
52 #define PF_COW          0x200
53 #define PF_NOPAGE       0x400
54
55 #define INVLPG(addr)    __asm__ __volatile__ ("invlpg (%0)"::"r"(addr))
56
57 typedef Uint32  tTabEnt;
58
59 // === IMPORTS ===
60 extern char     _UsertextEnd[], _UsertextBase[];
61 extern Uint32   gaInitPageDir[1024];
62 extern Uint32   gaInitPageTable[1024];
63 extern void     Threads_SegFault(tVAddr Addr);
64 extern void     Error_Backtrace(Uint eip, Uint ebp);
65
66 // === PROTOTYPES ===
67 void    MM_PreinitVirtual(void);
68 void    MM_InstallVirtual(void);
69 void    MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs);
70 //void  MM_DumpTables(tVAddr Start, tVAddr End);
71 //void  MM_ClearUser(void);
72 tPAddr  MM_DuplicatePage(tVAddr VAddr);
73
74 // === GLOBALS ===
75 #define gaPageTable     ((tTabEnt*)PAGE_TABLE_ADDR)
76 #define gaPageDir       ((tTabEnt*)PAGE_DIR_ADDR)
77 #define gaTmpTable      ((tTabEnt*)TMP_TABLE_ADDR)
78 #define gaTmpDir        ((tTabEnt*)TMP_DIR_ADDR)
79 #define gpPageCR3       ((tTabEnt*)PAGE_CR3_ADDR)
80 #define gpTmpCR3        ((tTabEnt*)TMP_CR3_ADDR)
81
82 #define gaPAE_PageTable ((tTabEnt*)PAE_PAGE_TABLE_ADDR)
83 #define gaPAE_PageDir   ((tTabEnt*)PAE_PAGE_DIR_ADDR)
84 #define gaPAE_MainPDPT  ((tTabEnt*)PAE_PAGE_PDPT_ADDR)
85 #define gaPAE_TmpTable ((tTabEnt*)PAE_TMP_TABLE_ADDR)
86 #define gaPAE_TmpDir    ((tTabEnt*)PAE_TMP_DIR_ADDR)
87 #define gaPAE_TmpPDPT   ((tTabEnt*)PAE_TMP_PDPT_ADDR)
88  int    gbUsePAE = 0;
89 tMutex  glTempMappings;
90 tMutex  glTempFractal;
91 Uint32  gWorkerStacks[(NUM_WORKER_STACKS+31)/32];
92  int    giLastUsedWorker = 0;
93 struct sPageInfo {
94         void    *Node;
95         tVAddr  Base;
96         Uint64  Offset;
97          int    Length;
98          int    Flags;
99 }       *gaMappedRegions;       // sizeof = 24 bytes
100
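/*
 * Editor's note: the macros above rely on the fractal (self-referential) page
 * directory entry set up in MM_PreinitVirtual(), which exposes every page
 * directory/table entry as ordinary memory. The sketch below is illustrative
 * only and is not part of the original file; the helper name is hypothetical,
 * but the index arithmetic matches what the rest of this file does.
 */
#if 0   /* illustrative example - not compiled */
static void Example_DumpEntriesFor(tVAddr VAddr)
{
        // One directory entry per 4 MiB region, one table entry per 4 KiB page
        tTabEnt pde = gaPageDir[ VAddr >> 22 ];
        if( pde & PF_PRESENT ) {
                tTabEnt pte = gaPageTable[ VAddr >> 12 ];
                Log("PDE = 0x%08x, PTE = 0x%08x for %p", pde, pte, VAddr);
        }
        else {
                Log("No page table mapped for %p", VAddr);
        }
}
#endif
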
101 // === CODE ===
102 /**
103  * \fn void MM_PreinitVirtual(void)
104  * \brief Maps the fractal mappings
105  */
106 void MM_PreinitVirtual(void)
107 {
108         gaInitPageDir[ PAGE_TABLE_ADDR >> 22 ] = ((tTabEnt)&gaInitPageDir - KERNEL_BASE) | 3;
109         INVLPG( PAGE_TABLE_ADDR );
110 }
111
112 /**
113  * \fn void MM_InstallVirtual(void)
114  * \brief Sets up the constant page mappings
115  */
116 void MM_InstallVirtual(void)
117 {
118          int    i;
119         
120         // --- Pre-Allocate kernel tables
121         for( i = KERNEL_BASE>>22; i < 1024; i ++ )
122         {
123                 if( gaPageDir[ i ] )    continue;
124                 // Skip stack tables, they are process unique
125                 if( i > MM_KERNEL_STACKS >> 22 && i < MM_KERNEL_STACKS_END >> 22) {
126                         gaPageDir[ i ] = 0;
127                         continue;
128                 }
129                 // Preallocate table
130                 gaPageDir[ i ] = MM_AllocPhys() | 3;
131                 INVLPG( &gaPageTable[i*1024] );
132                 memset( &gaPageTable[i*1024], 0, 0x1000 );
133         }
134         
135         // Unset kernel on the User Text pages
136         for( i = ((tVAddr)&_UsertextEnd-(tVAddr)&_UsertextBase+0xFFF)/4096; i--; ) {
137                 MM_SetFlags( (tVAddr)&_UsertextBase + i*4096, 0, MM_PFLAG_KERNEL );
138         }
139 }
140
141 /**
142  * \brief Cleans up the mappings needed only for SMP startup
143  */
144 void MM_FinishVirtualInit(void)
145 {
146         gaInitPageDir[ 0 ] = 0;
147 }
148
149 /**
150  * \fn void MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs)
151  * \brief Called on a page fault
152  */
153 void MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs)
154 {
155         //ENTER("xAddr bErrorCode", Addr, ErrorCode);
156         
157         // -- Check for COW --
158         if( gaPageDir  [Addr>>22] & PF_PRESENT  && gaPageTable[Addr>>12] & PF_PRESENT
159          && gaPageTable[Addr>>12] & PF_COW )
160         {
161                 tPAddr  paddr;
162                 if(MM_GetRefCount( gaPageTable[Addr>>12] & ~0xFFF ) == 1)
163                 {
164                         gaPageTable[Addr>>12] &= ~PF_COW;
165                         gaPageTable[Addr>>12] |= PF_PRESENT|PF_WRITE;
166                 }
167                 else
168                 {
169                         //Log("MM_PageFault: COW - MM_DuplicatePage(0x%x)", Addr);
170                         paddr = MM_DuplicatePage( Addr );
171                         MM_DerefPhys( gaPageTable[Addr>>12] & ~0xFFF );
172                         gaPageTable[Addr>>12] &= PF_USER;
173                         gaPageTable[Addr>>12] |= paddr|PF_PRESENT|PF_WRITE;
174                 }
175                 
176 //              Log_Debug("MMVirt", "COW for %p (%P)", Addr, gaPageTable[Addr>>12]);
177                 
178                 INVLPG( Addr & ~0xFFF );
179                 return;
180         }
181         
182         __asm__ __volatile__ ("pushf; andw $0xFEFF, 0(%esp); popf");
183         Proc_GetCurThread()->bInstrTrace = 0;
184
185         // If it was a user, tell the thread handler
186         if(ErrorCode & 4) {
187                 Log_Warning("MMVirt", "User %s %s memory%s",
188                         (ErrorCode&2?"write to":"read from"),
189                         (ErrorCode&1?"bad/locked":"non-present"),
190                         (ErrorCode&16?" (Instruction Fetch)":"")
191                         );
192                 Log_Warning("MMVirt", "Instruction %04x:%08x accessed %p", Regs->cs, Regs->eip, Addr);
193                 __asm__ __volatile__ ("sti");   // Restart IRQs
194                 #if 1
195                 Error_Backtrace(Regs->eip, Regs->ebp);
196                 #endif
197                 Threads_SegFault(Addr);
198                 return ;
199         }
200         
201         Debug_KernelPanic();
202         
203         // -- Check Error Code --
204         if(ErrorCode & 8)
205                 Warning("Reserved Bits Trashed!");
206         else
207         {
208                 Warning("Kernel %s %s memory%s",
209                         (ErrorCode&2?"write to":"read from"),
210                         (ErrorCode&1?"bad/locked":"non-present"),
211                         (ErrorCode&16?" (Instruction Fetch)":"")
212                         );
213         }
214         
215         Log("CPU %i - Code at %p accessed %p", GetCPUNum(), Regs->eip, Addr);
216         // Print Stack Backtrace
217         Error_Backtrace(Regs->eip, Regs->ebp);
218
219         #if 0   
220         Log("gaPageDir[0x%x] = 0x%x", Addr>>22, gaPageDir[Addr>>22]);
221         if( gaPageDir[Addr>>22] & PF_PRESENT )
222                 Log("gaPageTable[0x%x] = 0x%x", Addr>>12, gaPageTable[Addr>>12]);
223         #endif
224         //MM_DumpTables(0, -1); 
225         
226         // Register Dump
227         Log("EAX %08x ECX %08x EDX %08x EBX %08x", Regs->eax, Regs->ecx, Regs->edx, Regs->ebx);
228         Log("ESP %08x EBP %08x ESI %08x EDI %08x", Regs->esp, Regs->ebp, Regs->esi, Regs->edi);
229         //Log("SS:ESP %04x:%08x", Regs->ss, Regs->esp);
230         Log("CS:EIP %04x:%08x", Regs->cs, Regs->eip);
231         Log("DS %04x ES %04x FS %04x GS %04x", Regs->ds, Regs->es, Regs->fs, Regs->gs);
232         {
233                 Uint    dr0, dr1;
234                 __ASM__ ("mov %%dr0, %0":"=r"(dr0):);
235                 __ASM__ ("mov %%dr1, %0":"=r"(dr1):);
236                 Log("DR0 %08x DR1 %08x", dr0, dr1);
237         }
238         
239         Panic("Page Fault at 0x%x (Accessed 0x%x)", Regs->eip, Addr);
240 }
241
242 /**
243  * \fn void MM_DumpTables(tVAddr Start, tVAddr End)
244  * \brief Dumps the layout of the page tables
245  */
246 void MM_DumpTables(tVAddr Start, tVAddr End)
247 {
248         tVAddr  rangeStart = 0;
249         tPAddr  expected = 0;
250         void    *expected_node = NULL, *tmpnode = NULL;
251         tVAddr  curPos;
252         Uint    page;
253         const tPAddr    MASK = ~0xF78;
254         
255         Start >>= 12;   End >>= 12;
256         
257         #if 0
258         Log("Directory Entries:");
259         for(page = Start >> 10;
260                 page < (End >> 10)+1;
261                 page ++)
262         {
263                 if(gaPageDir[page])
264                 {
265                         Log(" 0x%08x-0x%08x :: 0x%08x",
266                                 page<<22, ((page+1)<<22)-1,
267                                 gaPageDir[page]&~0xFFF
268                                 );
269                 }
270         }
271         #endif
272         
273         Log("Table Entries:");
274         for(page = Start, curPos = Start<<12;
275                 page < End;
276                 curPos += 0x1000, page++)
277         {
278                 if( !(gaPageDir[curPos>>22] & PF_PRESENT)
279                 ||  !(gaPageTable[page] & PF_PRESENT)
280                 ||  (gaPageTable[page] & MASK) != expected
281                 ||  (tmpnode=NULL,MM_GetPageNode(expected, &tmpnode), tmpnode != expected_node))
282                 {
283                         if(expected) {
284                                 tPAddr  orig = gaPageTable[rangeStart>>12];
285                                 Log(" 0x%08x => 0x%08x - 0x%08x (%s%s%s%s%s) %p",
286                                         rangeStart,
287                                         orig & ~0xFFF,
288                                         curPos - rangeStart,
289                                         (orig & PF_NOPAGE ? "P" : "-"),
290                                         (orig & PF_COW ? "C" : "-"),
291                                         (orig & PF_GLOBAL ? "G" : "-"),
292                                         (orig & PF_USER ? "U" : "-"),
293                                         (orig & PF_WRITE ? "W" : "-"),
294                                         expected_node
295                                         );
296                                 expected = 0;
297                         }
298                         if( !(gaPageDir[curPos>>22] & PF_PRESENT) )     continue;
299                         if( !(gaPageTable[curPos>>12] & PF_PRESENT) )   continue;
300                         
301                         expected = (gaPageTable[page] & MASK);
302                         MM_GetPageNode(expected, &expected_node);
303                         rangeStart = curPos;
304                 }
305                 if(expected)    expected += 0x1000;
306         }
307         
308         if(expected) {
309                 tPAddr  orig = gaPageTable[rangeStart>>12];
310                 Log("0x%08x => 0x%08x - 0x%08x (%s%s%s%s%s) %p",
311                         rangeStart,
312                         orig & ~0xFFF,
313                         curPos - rangeStart,
314                                         (orig & PF_NOPAGE ? "P" : "-"),
315                         (orig & PF_COW ? "C" : "-"),
316                         (orig & PF_GLOBAL ? "G" : "-"),
317                         (orig & PF_USER ? "U" : "-"),
318                         (orig & PF_WRITE ? "W" : "-"),
319                         expected_node
320                         );
321                 expected = 0;
322         }
323 }
324
325 /**
326  * \fn tPAddr MM_Allocate(tVAddr VAddr)
327  */
328 tPAddr MM_Allocate(tVAddr VAddr)
329 {
330         tPAddr  paddr;
331         //ENTER("xVAddr", VAddr);
332         //__asm__ __volatile__ ("xchg %bx,%bx");
333         // Check if the directory is mapped
334         if( gaPageDir[ VAddr >> 22 ] == 0 )
335         {
336                 // Allocate directory
337                 paddr = MM_AllocPhys();
338                 if( paddr == 0 ) {
339                         Warning("MM_Allocate - Out of Memory (Called by %p)", __builtin_return_address(0));
340                         //LEAVE('i',0);
341                         return 0;
342                 }
343                 // Map and mark as user (if needed)
344                 gaPageDir[ VAddr >> 22 ] = paddr | 3;
345                 if(VAddr < MM_USER_MAX) gaPageDir[ VAddr >> 22 ] |= PF_USER;
346                 
347                 INVLPG( &gaPageDir[ VAddr >> 22 ] );
348                 memsetd( &gaPageTable[ (VAddr >> 12) & ~0x3FF ], 0, 1024 );
349         }
350         // Check if the page is already allocated
351         else if( gaPageTable[ VAddr >> 12 ] != 0 ) {
352                 Warning("MM_Allocate - Allocating to used address (%p)", VAddr);
353                 //LEAVE('X', gaPageTable[ VAddr >> 12 ] & ~0xFFF);
354                 return gaPageTable[ VAddr >> 12 ] & ~0xFFF;
355         }
356         
357         // Allocate
358         paddr = MM_AllocPhys();
359         //LOG("paddr = 0x%llx", paddr);
360         if( paddr == 0 ) {
361                 Warning("MM_Allocate - Out of Memory when allocating at %p (Called by %p)",
362                         VAddr, __builtin_return_address(0));
363                 //LEAVE('i',0);
364                 return 0;
365         }
366         // Map
367         gaPageTable[ VAddr >> 12 ] = paddr | 3;
368         // Mark as user
369         if(VAddr < MM_USER_MAX) gaPageTable[ VAddr >> 12 ] |= PF_USER;
370         // Invalidate Cache for address
371         INVLPG( VAddr & ~0xFFF );
372         
373         //LEAVE('X', paddr);
374         return paddr;
375 }
376
377 /**
378  * \fn void MM_Deallocate(tVAddr VAddr)
379  */
380 void MM_Deallocate(tVAddr VAddr)
381 {
382         if( gaPageDir[ VAddr >> 22 ] == 0 ) {
383                 Warning("MM_Deallocate - Directory not mapped");
384                 return;
385         }
386         
387         if(gaPageTable[ VAddr >> 12 ] == 0) {
388                 Warning("MM_Deallocate - Page is not allocated");
389                 return;
390         }
391         
392         // Dereference page
393         MM_DerefPhys( gaPageTable[ VAddr >> 12 ] & ~0xFFF );
394         // Clear page
395         gaPageTable[ VAddr >> 12 ] = 0;
396 }
397
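/*
 * Editor's note: an illustrative sketch (not part of the original source) of
 * how MM_Allocate()/MM_Deallocate() above are typically used together - back a
 * virtual range with anonymous pages and roll back on failure. The helper
 * name and error-handling style are assumptions.
 */
#if 0   /* illustrative example - not compiled */
static int Example_AllocRange(tVAddr Base, Uint NumPages)
{
        Uint    i;
        for( i = 0; i < NumPages; i ++ )
        {
                if( MM_Allocate( Base + (i << 12) ) == 0 )
                {
                        // Out of memory - undo the pages already allocated
                        while( i -- )
                                MM_Deallocate( Base + (i << 12) );
                        return 0;
                }
        }
        return 1;
}
#endif
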
398 /**
399  * \fn tPAddr MM_GetPhysAddr(tVAddr Addr)
400  * \brief Gets the physical address mapped at a virtual address (0 if not mapped)
401  */
402 tPAddr MM_GetPhysAddr(tVAddr Addr)
403 {
404         if( !(gaPageDir[Addr >> 22] & 1) )
405                 return 0;
406         if( !(gaPageTable[Addr >> 12] & 1) )
407                 return 0;
408         return (gaPageTable[Addr >> 12] & ~0xFFF) | (Addr & 0xFFF);
409 }
410
411 /**
412  * \fn void MM_SetCR3(Uint CR3)
413  * \brief Sets the current process space
414  */
415 void MM_SetCR3(Uint CR3)
416 {
417         __asm__ __volatile__ ("mov %0, %%cr3"::"r"(CR3));
418 }
419
420 /**
421  * \fn int MM_Map(tVAddr VAddr, tPAddr PAddr)
422  * \brief Map a physical page to a virtual one
423  */
424 int MM_Map(tVAddr VAddr, tPAddr PAddr)
425 {
426         //ENTER("xVAddr xPAddr", VAddr, PAddr);
427         // Sanity check
428         if( PAddr & 0xFFF || VAddr & 0xFFF ) {
429                 Warning("MM_Map - Physical or Virtual Addresses are not aligned");
430                 //LEAVE('i', 0);
431                 return 0;
432         }
433         
434         // Align addresses
435         PAddr &= ~0xFFF;        VAddr &= ~0xFFF;
436         
437         // Check if the directory is mapped
438         if( gaPageDir[ VAddr >> 22 ] == 0 )
439         {
440                 tPAddr  tmp = MM_AllocPhys();
441                 if( tmp == 0 )
442                         return 0;
443                 gaPageDir[ VAddr >> 22 ] = tmp | 3;
444                 
445                 // Mark as user
446                 if(VAddr < MM_USER_MAX) gaPageDir[ VAddr >> 22 ] |= PF_USER;
447                 
448                 INVLPG( &gaPageTable[ (VAddr >> 12) & ~0x3FF ] );
449                 memsetd( &gaPageTable[ (VAddr >> 12) & ~0x3FF ], 0, 1024 );
450         }
451         // Check if the page is already allocated
452         else if( gaPageTable[ VAddr >> 12 ] != 0 ) {
453                 Warning("MM_Map - Allocating to used address");
454                 //LEAVE('i', 0);
455                 return 0;
456         }
457         
458         // Map
459         gaPageTable[ VAddr >> 12 ] = PAddr | 3;
460         // Mark as user
461         if(VAddr < MM_USER_MAX) gaPageTable[ VAddr >> 12 ] |= PF_USER;
462         
463         //LOG("gaPageTable[ 0x%x ] = (Uint)%p = 0x%x",
464         //      VAddr >> 12, &gaPageTable[ VAddr >> 12 ], gaPageTable[ VAddr >> 12 ]);
465         
466         // Reference
467         MM_RefPhys( PAddr );
468         
469         //LOG("INVLPG( 0x%x )", VAddr);
470         INVLPG( VAddr );
471         
472         //LEAVE('i', 1);
473         return 1;
474 }
475
476 /**
477  * \brief Clear user's address space
478  */
479 void MM_ClearUser(void)
480 {
481         Uint    i, j;
482         
483         for( i = 0; i < (MM_USER_MAX>>22); i ++ )
484         {
485                 // Check if directory is not allocated
486                 if( !(gaPageDir[i] & PF_PRESENT) ) {
487                         gaPageDir[i] = 0;
488                         continue;
489                 }
490                 
491                 // Deallocate tables
492                 for( j = 0; j < 1024; j ++ )
493                 {
494                         if( gaPageTable[i*1024+j] & 1 )
495                                 MM_DerefPhys( gaPageTable[i*1024+j] & ~0xFFF );
496                         gaPageTable[i*1024+j] = 0;
497                 }
498                 
499                 // Deallocate directory
500                 MM_DerefPhys( gaPageDir[i] & ~0xFFF );
501                 gaPageDir[i] = 0;
502                 INVLPG( &gaPageTable[i*1024] );
503         }
504         INVLPG( gaPageDir );
505 }
506
507 /**
508  * \fn tPAddr MM_Clone(void)
509  * \brief Clone the current address space
510  */
511 tPAddr MM_Clone(void)
512 {
513         Uint    i, j;
514         tVAddr  ret;
515         Uint    page = 0;
516         tVAddr  kStackBase = Proc_GetCurThread()->KernelStack - MM_KERNEL_STACK_SIZE;
517         void    *tmp;
518         
519         Mutex_Acquire( &glTempFractal );
520         
521         // Create Directory Table
522         *gpTmpCR3 = MM_AllocPhys() | 3;
523         if( *gpTmpCR3 == 3 ) {
524                 *gpTmpCR3 = 0;
                Mutex_Release( &glTempFractal );        // Don't return with the fractal lock held
525                 return 0;
526         }
527         INVLPG( gaTmpDir );
528         //LOG("Allocated Directory (%x)", *gpTmpCR3);
529         memsetd( gaTmpDir, 0, 1024 );
530         
531         if( Threads_GetPID() != 0 )
532         {       
533                 // Copy Tables
534                 for( i = 0; i < 768; i ++)
535                 {
536                         // Check if table is allocated
537                         if( !(gaPageDir[i] & PF_PRESENT) ) {
538                                 gaTmpDir[i] = 0;
539                                 page += 1024;
540                                 continue;
541                         }
542                         
543                         // Allocate new table
544                         gaTmpDir[i] = MM_AllocPhys() | (gaPageDir[i] & 7);
545                         INVLPG( &gaTmpTable[page] );
546                         // Fill
547                         for( j = 0; j < 1024; j ++, page++ )
548                         {
549                                 if( !(gaPageTable[page] & PF_PRESENT) ) {
550                                         gaTmpTable[page] = 0;
551                                         continue;
552                                 }
553                                 
554                                 // Reference the old page
555                                 MM_RefPhys( gaPageTable[page] & ~0xFFF );
556                                 // Add to new table
557                                 if(gaPageTable[page] & PF_WRITE) {
558                                         gaTmpTable[page] = (gaPageTable[page] & ~PF_WRITE) | PF_COW;
559                                         gaPageTable[page] = (gaPageTable[page] & ~PF_WRITE) | PF_COW;
560                                         INVLPG( page << 12 );
561                                 }
562                                 else
563                                         gaTmpTable[page] = gaPageTable[page];
564                         }
565                 }
566         }
567         
568         // Map in kernel tables (and make fractal mapping)
569         for( i = 768; i < 1024; i ++ )
570         {
571                 // Fractal
572                 if( i == (PAGE_TABLE_ADDR >> 22) ) {
573                         gaTmpDir[ PAGE_TABLE_ADDR >> 22 ] = *gpTmpCR3;
574                         continue;
575                 }
576                 
577                 if( gaPageDir[i] == 0 ) {
578                         gaTmpDir[i] = 0;
579                         continue;
580                 }
581                 
582                 //LOG("gaPageDir[%x/4] = 0x%x", i*4, gaPageDir[i]);
583                 MM_RefPhys( gaPageDir[i] & ~0xFFF );
584                 gaTmpDir[i] = gaPageDir[i];
585         }
586         
587         // Allocate kernel stack
588         for(i = MM_KERNEL_STACKS >> 22; i < MM_KERNEL_STACKS_END >> 22; i ++ )
589         {
590                 // Check if directory is allocated
591                 if( (gaPageDir[i] & 1) == 0 ) {
592                         gaTmpDir[i] = 0;
593                         continue;
594                 }               
595                 
596                 // We don't care about other kernel stacks, just the current one
597                 if( i != kStackBase >> 22 ) {
598                         MM_DerefPhys( gaPageDir[i] & ~0xFFF );
599                         gaTmpDir[i] = 0;
600                         continue;
601                 }
602                 
603                 // Create a copy
604                 gaTmpDir[i] = MM_AllocPhys() | 3;
605                 INVLPG( &gaTmpTable[i*1024] );
606                 for( j = 0; j < 1024; j ++ )
607                 {
608                         // Is the page allocated? If not, skip
609                         if( !(gaPageTable[i*1024+j] & 1) ) {
610                                 gaTmpTable[i*1024+j] = 0;
611                                 continue;
612                         }
613                         
614                         // We don't care about other kernel stacks
615                         if( ((i*1024+j)*4096 & ~(MM_KERNEL_STACK_SIZE-1)) != kStackBase ) {
616                                 gaTmpTable[i*1024+j] = 0;
617                                 continue;
618                         }
619                         
620                         // Allocate page
621                         gaTmpTable[i*1024+j] = MM_AllocPhys() | 3;
622                         
623                         MM_RefPhys( gaTmpTable[i*1024+j] & ~0xFFF );
624                         
625                         tmp = (void *) MM_MapTemp( gaTmpTable[i*1024+j] & ~0xFFF );
626                         memcpy( tmp, (void *)( (i*1024+j)*0x1000 ), 0x1000 );
627                         MM_FreeTemp( (Uint)tmp );
628                 }
629         }
630         
631         ret = *gpTmpCR3 & ~0xFFF;
632         Mutex_Release( &glTempFractal );
633         
634         //LEAVE('x', ret);
635         return ret;
636 }
637
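/*
 * Editor's note: an illustrative sketch (not part of the original source) of
 * how a fork-style caller might use MM_Clone(). All Proc/scheduler
 * bookkeeping is omitted and the helper name is hypothetical; the returned
 * value is simply the CR3 of the copied address space.
 */
#if 0   /* illustrative example - not compiled */
static tPAddr Example_ForkAddressSpace(void)
{
        tPAddr  childCR3 = MM_Clone();
        if( childCR3 == 0 )
                Warning("Example_ForkAddressSpace - MM_Clone failed");
        // The caller would record childCR3 in the new process and later
        // switch to it (e.g. via MM_SetCR3) when scheduling that process
        return childCR3;
}
#endif
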
638 /**
639  * \fn tVAddr MM_NewKStack(void)
640  * \brief Create a new kernel stack
641  */
642 tVAddr MM_NewKStack(void)
643 {
644         tVAddr  base;
645         Uint    i;
646         for(base = MM_KERNEL_STACKS; base < MM_KERNEL_STACKS_END; base += MM_KERNEL_STACK_SIZE)
647         {
648                 // Check if space is free
649                 if(MM_GetPhysAddr(base) != 0)   continue;
650                 // Allocate
651                 //for(i = MM_KERNEL_STACK_SIZE; i -= 0x1000 ; )
652                 for(i = 0; i < MM_KERNEL_STACK_SIZE; i += 0x1000 )
653                 {
654                         if( MM_Allocate(base+i) == 0 )
655                         {
656                                 // On error, print a warning and return error
657                                 Warning("MM_NewKStack - Out of memory");
658                                 // - Clean up
659                                 //for( i += 0x1000 ; i < MM_KERNEL_STACK_SIZE; i += 0x1000 )
660                                 //      MM_Deallocate(base+i);
661                                 return 0;
662                         }
663                 }
664                 // Success
665 //              Log("MM_NewKStack - Allocated %p", base + MM_KERNEL_STACK_SIZE);
666                 return base+MM_KERNEL_STACK_SIZE;
667         }
668         // No stacks left
669         Log_Warning("MMVirt", "MM_NewKStack - No address space left");
670         return 0;
671 }
672
673 /**
674  * \fn tVAddr MM_NewWorkerStack(Uint *StackContents, size_t ContentsSize)
675  * \brief Creates a new worker stack
676  */
677 tVAddr MM_NewWorkerStack(Uint *StackContents, size_t ContentsSize)
678 {
679         Uint    base, addr;
680         tVAddr  tmpPage;
681         tPAddr  page;
682         
683         // TODO: Thread safety
684         // Find a free worker stack address
685         for(base = giLastUsedWorker; base < NUM_WORKER_STACKS; base++)
686         {
687                 // Used block
688                 if( gWorkerStacks[base/32] == -1 ) {
689                         base += 31;     base &= ~31;
690                         base --;        // Counteracted by the base++
691                         continue;
692                 }
693                 // Used stack
694                 if( gWorkerStacks[base/32] & (1U << (base & 31)) ) {
695                         continue;
696                 }
697                 break;
698         }
699         if(base >= NUM_WORKER_STACKS) {
700                 Warning("Uh-oh! Out of worker stacks");
701                 return 0;
702         }
703         
704         // It's ours now!
705         gWorkerStacks[base/32] |= (1U << (base & 31));
706         // Make life easier for later calls
707         giLastUsedWorker = base;
708         // We have one
709         base = WORKER_STACKS + base * WORKER_STACK_SIZE;
710         //Log(" MM_NewWorkerStack: base = 0x%x", base);
711         
712         // Acquire the lock for the temp fractal mappings
713         Mutex_Acquire(&glTempFractal);
714         
715         // Set the temp fractals to TID0's address space
716         *gpTmpCR3 = ((Uint)gaInitPageDir - KERNEL_BASE) | 3;
717         //Log(" MM_NewWorkerStack: *gpTmpCR3 = 0x%x", *gpTmpCR3);
718         INVLPG( gaTmpDir );
719         
720         
721         // Check if the directory is mapped (we are assuming that the stacks
722         // will fit neatly in a directory)
723         //Log(" MM_NewWorkerStack: gaTmpDir[ 0x%x ] = 0x%x", base>>22, gaTmpDir[ base >> 22 ]);
724         if(gaTmpDir[ base >> 22 ] == 0) {
725                 gaTmpDir[ base >> 22 ] = MM_AllocPhys() | 3;
726                 INVLPG( &gaTmpTable[ (base>>12) & ~0x3FF ] );
                memsetd( &gaTmpTable[ (base>>12) & ~0x3FF ], 0, 1024 ); // Clear the fresh table before use
727         }
728         
729         // Mapping Time!
730         for( addr = 0; addr < WORKER_STACK_SIZE; addr += 0x1000 )
731         {
732                 page = MM_AllocPhys();
733                 gaTmpTable[ (base + addr) >> 12 ] = page | 3;
734         }
735         *gpTmpCR3 = 0;
736         // Release the temp mapping lock
737         Mutex_Release(&glTempFractal);
738
739         // NOTE: The initial stack contents must fit within one page
740         // `page` is the last page allocated by the loop above (the top of the stack)
741         tmpPage = MM_MapTemp( page );
742         memcpy( (void*)( tmpPage + (0x1000 - ContentsSize) ), StackContents, ContentsSize);
743         MM_FreeTemp(tmpPage);   
744         
745         //Log("MM_NewWorkerStack: RETURN 0x%x", base);
746         return base + WORKER_STACK_SIZE;
747 }
748
749 /**
750  * \fn void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask)
751  * \brief Sets the flags on a page
752  */
753 void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask)
754 {
755         tTabEnt *ent;
756         if( !(gaPageDir[VAddr >> 22] & 1) )     return ;
757         if( !(gaPageTable[VAddr >> 12] & 1) )   return ;
758         
759         ent = &gaPageTable[VAddr >> 12];
760         
761         // Read-Only
762         if( Mask & MM_PFLAG_RO )
763         {
764                 if( Flags & MM_PFLAG_RO ) {
765                         *ent &= ~PF_WRITE;
766                 }
767                 else {
768                         gaPageDir[VAddr >> 22] |= PF_WRITE;
769                         *ent |= PF_WRITE;
770                 }
771         }
772         
773         // Kernel
774         if( Mask & MM_PFLAG_KERNEL )
775         {
776                 if( Flags & MM_PFLAG_KERNEL ) {
777                         *ent &= ~PF_USER;
778                 }
779                 else {
780                         gaPageDir[VAddr >> 22] |= PF_USER;
781                         *ent |= PF_USER;
782                 }
783         }
784         
785         // Copy-On-Write
786         if( Mask & MM_PFLAG_COW )
787         {
788                 if( Flags & MM_PFLAG_COW ) {
789                         *ent &= ~PF_WRITE;
790                         *ent |= PF_COW;
791                 }
792                 else {
793                         *ent &= ~PF_COW;
794                         *ent |= PF_WRITE;
795                 }
796         }
797         
798         //Log("MM_SetFlags: *ent = 0x%08x, gaPageDir[%i] = 0x%08x",
799         //      *ent, VAddr >> 22, gaPageDir[VAddr >> 22]);
800 }
801
802 /**
803  * \brief Get the flags on a page
804  */
805 Uint MM_GetFlags(tVAddr VAddr)
806 {
807         tTabEnt *ent;
808         Uint    ret = 0;
809         
810         // Validity Check
811         if( !(gaPageDir[VAddr >> 22] & 1) )     return 0;
812         if( !(gaPageTable[VAddr >> 12] & 1) )   return 0;
813         
814         ent = &gaPageTable[VAddr >> 12];
815         
816         // Read-Only
817         if( !(*ent & PF_WRITE) )        ret |= MM_PFLAG_RO;
818         // Kernel
819         if( !(*ent & PF_USER) ) ret |= MM_PFLAG_KERNEL;
820         // Copy-On-Write
821         if( *ent & PF_COW )     ret |= MM_PFLAG_COW;
822         
823         return ret;
824 }
825
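/*
 * Editor's note: an illustrative sketch (not part of the original source)
 * showing MM_SetFlags()/MM_GetFlags() above used to mark a page copy-on-write
 * and to confirm the result. The wrapper function is hypothetical.
 */
#if 0   /* illustrative example - not compiled */
static void Example_MakePageCOW(tVAddr VAddr)
{
        // Request COW; MM_SetFlags clears PF_WRITE and sets PF_COW internally
        MM_SetFlags( VAddr, MM_PFLAG_COW, MM_PFLAG_COW );
        INVLPG( VAddr & ~0xFFF );

        if( MM_GetFlags( VAddr ) & MM_PFLAG_COW )
                Log("%p is now copy-on-write", VAddr);
}
#endif
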
826 /**
827  * \brief Check if the provided buffer is valid
828  * \return Boolean valid
829  */
830 int MM_IsValidBuffer(tVAddr Addr, size_t Size)
831 {
832          int    bIsUser;
833          int    dir, tab;
834
835         Size += Addr & (PAGE_SIZE-1);
836         Addr &= ~(PAGE_SIZE-1);
837
838         dir = Addr >> 22;
839         tab = Addr >> 12;
840         
841 //      Debug("Addr = %p, Size = 0x%x, dir = %i, tab = %i", Addr, Size, dir, tab);
842
843         if( !(gaPageDir[dir] & 1) )     return 0;
844         if( !(gaPageTable[tab] & 1) )   return 0;
845         
846         bIsUser = !!(gaPageTable[tab] & PF_USER);
847
848         while( Size >= PAGE_SIZE )
849         {
850                 if( (tab & 1023) == 0 )
851                 {
852                         dir ++;
853                         if( !(gaPageDir[dir] & 1) )     return 0;
854                 }
855                 
856                 if( !(gaPageTable[tab] & 1) )   return 0;
857                 if( bIsUser && !(gaPageTable[tab] & PF_USER) )  return 0;
858
859                 tab ++;
860                 Size -= PAGE_SIZE;
861         }
862         return 1;
863 }
864
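/*
 * Editor's note: an illustrative sketch (not part of the original source) of
 * the intended use of MM_IsValidBuffer() above - validating a user-supplied
 * pointer before the kernel reads from it. The wrapper is hypothetical.
 */
#if 0   /* illustrative example - not compiled */
static int Example_CopyFromUser(void *Dest, tVAddr UserSrc, size_t Length)
{
        if( !MM_IsValidBuffer( UserSrc, Length ) )
                return -1;      // Bad pointer - caller should fail the syscall
        memcpy( Dest, (const void*)UserSrc, Length );
        return 0;
}
#endif
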
865 /**
866  * \fn tPAddr MM_DuplicatePage(tVAddr VAddr)
867  * \brief Duplicates a virtual page to a physical one
868  */
869 tPAddr MM_DuplicatePage(tVAddr VAddr)
870 {
871         tPAddr  ret;
872         Uint    temp;
873          int    wasRO = 0;
874         
875         //ENTER("xVAddr", VAddr);
876         
877         // Check if mapped
878         if( !(gaPageDir  [VAddr >> 22] & PF_PRESENT) )  return 0;
879         if( !(gaPageTable[VAddr >> 12] & PF_PRESENT) )  return 0;
880         
881         // Page Align
882         VAddr &= ~0xFFF;
883         
884         // Allocate new page
885         ret = MM_AllocPhys();
886         if( !ret ) {
887                 return 0;
888         }
889         
890         // Write-lock the page (to keep data consistent), saving its R/W state
891         wasRO = (gaPageTable[VAddr >> 12] & PF_WRITE ? 0 : 1);
892         gaPageTable[VAddr >> 12] &= ~PF_WRITE;
893         INVLPG( VAddr );
894         
895         // Copy Data
896         temp = MM_MapTemp(ret);
897         memcpy( (void*)temp, (void*)VAddr, 0x1000 );
898         MM_FreeTemp(temp);
899         
900         // Restore Writeable status
901         if(!wasRO)      gaPageTable[VAddr >> 12] |= PF_WRITE;
902         INVLPG(VAddr);
903         
904         //LEAVE('X', ret);
905         return ret;
906 }
907
908 /**
909  * \fn tVAddr MM_MapTemp(tPAddr PAddr)
910  * \brief Create a temporary memory mapping
911  * \todo Show Luigi Barone (C Lecturer) and see what he thinks
912  */
913 tVAddr MM_MapTemp(tPAddr PAddr)
914 {
915          int    i;
916         
917         //ENTER("XPAddr", PAddr);
918         
919         PAddr &= ~0xFFF;
920         
921         //LOG("glTempMappings = %i", glTempMappings);
922         
923         for(;;)
924         {
925                 Mutex_Acquire( &glTempMappings );
926                 
927                 for( i = 0; i < NUM_TEMP_PAGES; i ++ )
928                 {
929                         // Check if page used
930                         if(gaPageTable[ (TEMP_MAP_ADDR >> 12) + i ] & 1)        continue;
931                         // Mark as used
932                         gaPageTable[ (TEMP_MAP_ADDR >> 12) + i ] = PAddr | 3;
933                         INVLPG( TEMP_MAP_ADDR + (i << 12) );
934                         //LEAVE('p', TEMP_MAP_ADDR + (i << 12));
935                         Mutex_Release( &glTempMappings );
936                         return TEMP_MAP_ADDR + (i << 12);
937                 }
938                 Mutex_Release( &glTempMappings );
939                 Threads_Yield();        // TODO: Use a sleep queue here instead
940         }
941 }
942
943 /**
944  * \fn void MM_FreeTemp(tVAddr VAddr)
945  * \brief Frees a temporary mapping
946  */
947 void MM_FreeTemp(tVAddr VAddr)
948 {
949          int    i = VAddr >> 12;
950         //ENTER("xVAddr", VAddr);
951         
952         if(i >= (TEMP_MAP_ADDR >> 12))
953                 gaPageTable[ i ] = 0;
954         
955         //LEAVE('-');
956 }
957
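/*
 * Editor's note: an illustrative sketch (not part of the original source) of
 * the usual temp-mapping pattern used throughout this file - map a physical
 * frame into one of the NUM_TEMP_PAGES slots, touch it, then release the slot
 * promptly so other threads are not starved. The helper is hypothetical.
 */
#if 0   /* illustrative example - not compiled */
static void Example_ZeroPhysPage(tPAddr Frame)
{
        tVAddr  tmp = MM_MapTemp( Frame );      // May block until a slot frees up
        memset( (void*)tmp, 0, 0x1000 );        // Whole 4 KiB page
        MM_FreeTemp( tmp );
}
#endif
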
958 /**
959  * \fn tVAddr MM_MapHWPages(tPAddr PAddr, Uint Number)
960  * \brief Maps a contiguous run of physical pages into the hardware mapping area
961  */
962 tVAddr MM_MapHWPages(tPAddr PAddr, Uint Number)
963 {
964          int    i, j;
965         
966         PAddr &= ~0xFFF;
967         
968         // Scan List
969         for( i = 0; i < NUM_HW_PAGES; i ++ )
970         {               
971                 // Check if addr used
972                 if( gaPageTable[ (HW_MAP_ADDR >> 12) + i ] & 1 )
973                         continue;
974                 
975                 // Check possible region
976                 for( j = 0; j < Number && i + j < NUM_HW_PAGES; j ++ )
977                 {
978                         // If there is an allocated page in the region we are testing, break
979                         if( gaPageTable[ (HW_MAP_ADDR >> 12) + i + j ] & 1 )    break;
980                 }
981                 // Is it all free?
982                 if( j == Number )
983                 {
984                         // Allocate
985                         for( j = 0; j < Number; j++ ) {
986                                 MM_RefPhys( PAddr + (j<<12) );
987                                 gaPageTable[ (HW_MAP_ADDR >> 12) + i + j ] = (PAddr + (j<<12)) | 3;
988                         }
989                         return HW_MAP_ADDR + (i<<12);
990                 }
991         }
992         // If we don't find any, return NULL
993         return 0;
994 }
995
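/*
 * Editor's note: an illustrative sketch (not part of the original source) of
 * using MM_MapHWPages() above to access a memory-mapped device. The physical
 * base address and size are placeholders, not a real device.
 */
#if 0   /* illustrative example - not compiled */
static void Example_AccessMMIO(void)
{
        const tPAddr    mmio_phys = 0xFEBF0000; // hypothetical device BAR
        const Uint      num_pages = 4;
        tVAddr  regs = MM_MapHWPages( mmio_phys, num_pages );
        if( regs == 0 ) return;

        // ... read/write device registers through `regs` here ...

        MM_UnmapHWPages( regs, num_pages );
}
#endif
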
996 /**
997  * \fn tVAddr MM_AllocDMA(int Pages, int MaxBits, tPAddr *PhysAddr)
998  * \brief Allocates DMA physical memory
999  * \param Pages Number of pages required
1000  * \param MaxBits       Maximum number of bits the physical address can have
1001  * \param PhysAddr      Pointer to the location to place the physical address allocated
1002  * \return Virtual address allocated
1003  */
1004 tVAddr MM_AllocDMA(int Pages, int MaxBits, tPAddr *PhysAddr)
1005 {
1006         tPAddr  maxCheck = (tPAddr)1 << MaxBits;
1007         tPAddr  phys;
1008         tVAddr  ret;
1009         
1010         ENTER("iPages iMaxBits pPhysAddr", Pages, MaxBits, PhysAddr);
1011         
1012         // Sanity Check
1013         if(MaxBits < 12 || !PhysAddr) {
1014                 LEAVE('i', 0);
1015                 return 0;
1016         }
1017         
1018         // Bound
1019         if(MaxBits >= PHYS_BITS)        maxCheck = -1;
1020         
1021         // Fast Allocate
1022         if(Pages == 1 && MaxBits >= PHYS_BITS)
1023         {
1024                 phys = MM_AllocPhys();
1025                 if( !phys ) {
1026                         *PhysAddr = 0;
1027                         LEAVE_RET('i', 0);
1028                 }
1029                 *PhysAddr = phys;
1030                 ret = MM_MapHWPages(phys, 1);
1031                 if(ret == 0) {
1032                         MM_DerefPhys(phys);
1033                         LEAVE('i', 0);
1034                         return 0;
1035                 }
1036                 LEAVE('x', ret);
1037                 return ret;
1038         }
1039         
1040         // Slow Allocate
1041         phys = MM_AllocPhysRange(Pages, MaxBits);
1042         // - Was it allocated?
1043         if(phys == 0) {
1044                 LEAVE('i', 0);
1045                 return 0;
1046         }
1047         
1048         // Allocated successfully, now map
1049         ret = MM_MapHWPages(phys, Pages);
1050         if( ret == 0 ) {
1051                 // If it didn't map, free then return 0
1052                 for(;Pages--;phys+=0x1000)
1053                         MM_DerefPhys(phys);
1054                 LEAVE('i', 0);
1055                 return 0;
1056         }
1057         
1058         *PhysAddr = phys;
1059         LEAVE('x', ret);
1060         return ret;
1061 }
1062
1063 /**
1064  * \fn void MM_UnmapHWPages(tVAddr VAddr, Uint Number)
1065  * \brief Unmaps a range of hardware pages
1066  */
1067 void MM_UnmapHWPages(tVAddr VAddr, Uint Number)
1068 {
1069          int    i, j;
1070         
1071         //Log_Debug("VirtMem", "MM_UnmapHWPages: (VAddr=0x%08x, Number=%i)", VAddr, Number);
1072         
1073         // Sanity Check
1074         if(VAddr < HW_MAP_ADDR || VAddr+Number*0x1000 > HW_MAP_MAX)     return;
1075         
1076         i = VAddr >> 12;
1077         
1078         Mutex_Acquire( &glTempMappings );       // Temp and HW share a directory, so they share a lock
1079         
1080         for( j = 0; j < Number; j++ )
1081         {
1082                 MM_DerefPhys( gaPageTable[ i + j ] & ~0xFFF );
1083                 gaPageTable[ i + j ] = 0;
1084         }
1085         
1086         Mutex_Release( &glTempMappings );
1087 }
1088
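/*
 * Editor's note: an illustrative sketch (not part of the original source) of a
 * driver-style DMA buffer allocation with MM_AllocDMA() - physically
 * contiguous, 32-bit addressable, and released with MM_UnmapHWPages().
 * The sizes are placeholders.
 */
#if 0   /* illustrative example - not compiled */
static void Example_DMABuffer(void)
{
        tPAddr  phys;
        tVAddr  buf = MM_AllocDMA( 4, 32, &phys );      // 4 pages, <= 32 physical address bits
        if( buf == 0 )  return;

        // ... program the device with `phys`, access the buffer via `buf` ...

        MM_UnmapHWPages( buf, 4 );
}
#endif
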
