Kernel/x86 - Removed PAE support
1 /*
2  * AcessOS Microkernel Version
3  * mm_virt.c
4  * 
5  * Memory Map
6  * 0xE0 - Kernel Base
7  * 0xF0 - Kernel Stacks
8  * 0xFD - Fractals
9  * 0xFE - Unused
10  * 0xFF - System Calls / Kernel's User Code
11  */
12 #define DEBUG   0
13 #define SANITY  1
14 #include <acess.h>
15 #include <mm_virt.h>
16 #include <mm_phys.h>
17 #include <proc.h>
18
19 #define TAB     22
20
21 #define KERNEL_STACKS           0xF0000000
22 #define KERNEL_STACK_SIZE       0x00008000
23 #define KERNEL_STACKS_END       0xFC000000
24 #define WORKER_STACKS           0x00100000      // Thread0 Only!
25 #define WORKER_STACK_SIZE       KERNEL_STACK_SIZE
26 #define WORKER_STACKS_END       0xB0000000
27 #define NUM_WORKER_STACKS       ((WORKER_STACKS_END-WORKER_STACKS)/WORKER_STACK_SIZE)
28
29 #define PAE_PAGE_TABLE_ADDR     0xFC000000      // 16 MiB
30 #define PAE_PAGE_DIR_ADDR       0xFCFC0000      // 16 KiB
31 #define PAE_PAGE_PDPT_ADDR      0xFCFC3F00      // 32 bytes
32 #define PAE_TMP_PDPT_ADDR       0xFCFC3F20      // 32 bytes
33 #define PAE_TMP_DIR_ADDR        0xFCFE0000      // 16 KiB
34 #define PAE_TMP_TABLE_ADDR      0xFD000000      // 16 MiB
35
36 #define PAGE_TABLE_ADDR 0xFC000000
37 #define PAGE_DIR_ADDR   0xFC3F0000
38 #define PAGE_CR3_ADDR   0xFC3F0FC0
39 #define TMP_CR3_ADDR    0xFC3F0FC4      // Part of core instead of temp
40 #define TMP_DIR_ADDR    0xFC3F1000      // Same
41 #define TMP_TABLE_ADDR  0xFC400000
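/*
 * Fractal (self-referencing) mapping arithmetic:
 * Directory slot 0x3F0 (= PAGE_TABLE_ADDR >> 22) is made to point back at the
 * page directory itself (see MM_PreinitVirtual and MM_Clone). As a result all
 * page tables appear in the 4 MiB window at PAGE_TABLE_ADDR, the directory
 * itself appears at PAGE_DIR_ADDR (0xFC000000 + 0x3F0*0x1000), and
 * PAGE_CR3_ADDR (0xFC3F0000 + 0x3F0*4) is the self-referencing entry, which
 * therefore holds the physical address of the current directory.
 * Slot 0x3F1 gives the same three views of a second ("temp") address space:
 * TMP_TABLE_ADDR, TMP_DIR_ADDR and TMP_CR3_ADDR.
 */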
42
43 #define HW_MAP_ADDR             0xFE000000
44 #define HW_MAP_MAX              0xFFEF0000
45 #define NUM_HW_PAGES    ((HW_MAP_MAX-HW_MAP_ADDR)/0x1000)
46 #define TEMP_MAP_ADDR   0xFFEF0000      // Allows 16 "temp" pages
47 #define NUM_TEMP_PAGES  16
48 #define LAST_BLOCK_ADDR 0xFFFF0000      // Free space for kernel-provided user code / *(-1) protection
49
50 #define PF_PRESENT      0x1
51 #define PF_WRITE        0x2
52 #define PF_USER         0x4
53 #define PF_GLOBAL       0x80
54 #define PF_COW          0x200
55 #define PF_NOPAGE       0x400
56
57 #define INVLPG(addr)    __asm__ __volatile__ ("invlpg (%0)"::"r"(addr))
58
59 typedef Uint32  tTabEnt;
60
61 // === IMPORTS ===
62 extern void     _UsertextEnd, _UsertextBase;
63 extern Uint32   gaInitPageDir[1024];
64 extern Uint32   gaInitPageTable[1024];
65 extern void     Threads_SegFault(tVAddr Addr);
66 extern void     Error_Backtrace(Uint eip, Uint ebp);
67
68 // === PROTOTYPES ===
69 void    MM_PreinitVirtual(void);
70 void    MM_InstallVirtual(void);
71 void    MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs);
72 void    MM_DumpTables(tVAddr Start, tVAddr End);
73 tVAddr  MM_ClearUser(void);
74 tPAddr  MM_DuplicatePage(tVAddr VAddr);
75
76 // === GLOBALS ===
77 #define gaPageTable     ((tTabEnt*)PAGE_TABLE_ADDR)
78 #define gaPageDir       ((tTabEnt*)PAGE_DIR_ADDR)
79 #define gaTmpTable      ((tTabEnt*)TMP_TABLE_ADDR)
80 #define gaTmpDir        ((tTabEnt*)TMP_DIR_ADDR)
81 #define gpPageCR3       ((tTabEnt*)PAGE_CR3_ADDR)
82 #define gpTmpCR3        ((tTabEnt*)TMP_CR3_ADDR)
83
84 #define gaPAE_PageTable ((tTabEnt*)PAE_PAGE_TABLE_ADDR)
85 #define gaPAE_PageDir   ((tTabEnt*)PAE_PAGE_DIR_ADDR)
86 #define gaPAE_MainPDPT  ((tTabEnt*)PAE_PAGE_PDPT_ADDR)
87 #define gaPAE_TmpTable  ((tTabEnt*)PAE_TMP_TABLE_ADDR)
88 #define gaPAE_TmpDir    ((tTabEnt*)PAE_TMP_DIR_ADDR)
89 #define gaPAE_TmpPDPT   ((tTabEnt*)PAE_TMP_PDPT_ADDR)
90  int    gbUsePAE = 0;
91 tMutex  glTempMappings;
92 tMutex  glTempFractal;
93 Uint32  gWorkerStacks[(NUM_WORKER_STACKS+31)/32];
94  int    giLastUsedWorker = 0;
95 struct sPageInfo {
96         void    *Node;
97         tVAddr  Base;
98         Uint64  Offset;
99          int    Length;
100          int    Flags;
101 }       *gaMappedRegions;       // sizeof = 24 bytes
102
103 // === CODE ===
104 /**
105  * \fn void MM_PreinitVirtual(void)
106  * \brief Sets up the fractal (self-referencing) page directory mapping
107  */
108 void MM_PreinitVirtual(void)
109 {
110         gaInitPageDir[ PAGE_TABLE_ADDR >> 22 ] = ((tTabEnt)&gaInitPageDir - KERNEL_BASE) | 3;
111         INVLPG( PAGE_TABLE_ADDR );
112 }
113
114 /**
115  * \fn void MM_InstallVirtual(void)
116  * \brief Sets up the constant page mappings
117  */
118 void MM_InstallVirtual(void)
119 {
120          int    i;
121         
122         // --- Pre-Allocate kernel tables
123         for( i = KERNEL_BASE>>22; i < 1024; i ++ )
124         {
125                 if( gaPageDir[ i ] )    continue;
126                 // Skip stack tables, they are process unique
127                 if( i > KERNEL_STACKS >> 22 && i < KERNEL_STACKS_END >> 22) {
128                         gaPageDir[ i ] = 0;
129                         continue;
130                 }
131                 // Preallocate table
132                 gaPageDir[ i ] = MM_AllocPhys() | 3;
133                 INVLPG( &gaPageTable[i*1024] );
134                 memset( &gaPageTable[i*1024], 0, 0x1000 );
135         }
136         
137         // Unset kernel on the User Text pages
138         for( i = ((tVAddr)&_UsertextEnd-(tVAddr)&_UsertextBase+0xFFF)/4096; i--; ) {
139                 MM_SetFlags( (tVAddr)&_UsertextBase + i*4096, 0, MM_PFLAG_KERNEL );
140         }
141 }
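/*
 * Note: pre-allocating every kernel page table above means later address
 * spaces (see MM_Clone) only need to copy the directory entries for the
 * kernel region, so kernel mappings created afterwards are visible in every
 * process. The kernel-stack region is the exception and stays per-process.
 */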
142
143 /**
144  * \brief Cleans up the mappings required for SMP startup
145  */
146 void MM_FinishVirtualInit(void)
147 {
148         gaInitPageDir[ 0 ] = 0;
149 }
150
151 /**
152  * \fn void MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs)
153  * \brief Called on a page fault
154  */
155 void MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs)
156 {
157         //ENTER("xAddr bErrorCode", Addr, ErrorCode);
158         
159         // -- Check for COW --
160         if( gaPageDir  [Addr>>22] & PF_PRESENT  && gaPageTable[Addr>>12] & PF_PRESENT
161          && gaPageTable[Addr>>12] & PF_COW )
162         {
163                 tPAddr  paddr;
164                 if(MM_GetRefCount( gaPageTable[Addr>>12] & ~0xFFF ) == 1)
165                 {
166                         gaPageTable[Addr>>12] &= ~PF_COW;
167                         gaPageTable[Addr>>12] |= PF_PRESENT|PF_WRITE;
168                 }
169                 else
170                 {
171                         //Log("MM_PageFault: COW - MM_DuplicatePage(0x%x)", Addr);
172                         paddr = MM_DuplicatePage( Addr );
173                         MM_DerefPhys( gaPageTable[Addr>>12] & ~0xFFF );
174                         gaPageTable[Addr>>12] &= PF_USER;
175                         gaPageTable[Addr>>12] |= paddr|PF_PRESENT|PF_WRITE;
176                 }
177                 
178                 INVLPG( Addr & ~0xFFF );
179                 return;
180         }
181         
182         // If it was a user, tell the thread handler
183         if(ErrorCode & 4) {
184                 Warning("%s %s %s memory%s",
185                         (ErrorCode&4?"User":"Kernel"),
186                         (ErrorCode&2?"write to":"read from"),
187                         (ErrorCode&1?"bad/locked":"non-present"),
188                         (ErrorCode&16?" (Instruction Fetch)":"")
189                         );
190                 Warning("User Pagefault: Instruction at %04x:%08x accessed %p", Regs->cs, Regs->eip, Addr);
191                 __asm__ __volatile__ ("sti");   // Restart IRQs
192                 #if 1
193                 Error_Backtrace(Regs->eip, Regs->ebp);
194                 #endif
195                 Threads_SegFault(Addr);
196                 return ;
197         }
198         
199         Debug_KernelPanic();
200         
201         // -- Check Error Code --
202         if(ErrorCode & 8)
203                 Warning("Reserved Bits Trashed!");
204         else
205         {
206                 Warning("%s %s %s memory%s",
207                         (ErrorCode&4?"User":"Kernel"),
208                         (ErrorCode&2?"write to":"read from"),
209                         (ErrorCode&1?"bad/locked":"non-present"),
210                         (ErrorCode&16?" (Instruction Fetch)":"")
211                         );
212         }
213         
214         Log("Code at %p accessed %p", Regs->eip, Addr);
215         // Print Stack Backtrace
216         Error_Backtrace(Regs->eip, Regs->ebp);
217         
218         Log("gaPageDir[0x%x] = 0x%x", Addr>>22, gaPageDir[Addr>>22]);
219         if( gaPageDir[Addr>>22] & PF_PRESENT )
220                 Log("gaPageTable[0x%x] = 0x%x", Addr>>12, gaPageTable[Addr>>12]);
221         
222         //MM_DumpTables(0, -1); 
223         
224         // Register Dump
225         Log("EAX %08x ECX %08x EDX %08x EBX %08x", Regs->eax, Regs->ecx, Regs->edx, Regs->ebx);
226         Log("ESP %08x EBP %08x ESI %08x EDI %08x", Regs->esp, Regs->ebp, Regs->esi, Regs->edi);
227         //Log("SS:ESP %04x:%08x", Regs->ss, Regs->esp);
228         Log("CS:EIP %04x:%08x", Regs->cs, Regs->eip);
229         Log("DS %04x ES %04x FS %04x GS %04x", Regs->ds, Regs->es, Regs->fs, Regs->gs);
230         {
231                 Uint    dr0, dr1;
232                 __ASM__ ("mov %%dr0, %0":"=r"(dr0):);
233                 __ASM__ ("mov %%dr1, %0":"=r"(dr1):);
234                 Log("DR0 %08x DR1 %08x", dr0, dr1);
235         }
236         
237         Panic("Page Fault at 0x%x (Accessed 0x%x)", Regs->eip, Addr);
238 }
239
240 /**
241  * \fn void MM_DumpTables(tVAddr Start, tVAddr End)
242  * \brief Dumps the layout of the page tables
243  */
244 void MM_DumpTables(tVAddr Start, tVAddr End)
245 {
246         tVAddr  rangeStart = 0;
247         tPAddr  expected = 0;
248         tVAddr  curPos;
249         Uint    page;
250         const tPAddr    MASK = ~0xF78;
251         
252         Start >>= 12;   End >>= 12;
253         
254         #if 0
255         Log("Directory Entries:");
256         for(page = Start >> 10;
257                 page < (End >> 10)+1;
258                 page ++)
259         {
260                 if(gaPageDir[page])
261                 {
262                         Log(" 0x%08x-0x%08x :: 0x%08x",
263                                 page<<22, ((page+1)<<22)-1,
264                                 gaPageDir[page]&~0xFFF
265                                 );
266                 }
267         }
268         #endif
269         
270         Log("Table Entries:");
271         for(page = Start, curPos = Start<<12;
272                 page < End;
273                 curPos += 0x1000, page++)
274         {
275                 if( !(gaPageDir[curPos>>22] & PF_PRESENT)
276                 ||  !(gaPageTable[page] & PF_PRESENT)
277                 ||  (gaPageTable[page] & MASK) != expected)
278                 {
279                         if(expected) {
280                                 Log(" 0x%08x => 0x%08x - 0x%08x (%s%s%s%s%s)",
281                                         rangeStart,
282                                         gaPageTable[rangeStart>>12] & ~0xFFF,
283                                         curPos - rangeStart,
284                                         (expected & PF_NOPAGE ? "P" : "-"),
285                                         (expected & PF_COW ? "C" : "-"),
286                                         (expected & PF_GLOBAL ? "G" : "-"),
287                                         (expected & PF_USER ? "U" : "-"),
288                                         (expected & PF_WRITE ? "W" : "-")
290                                         );
291                                 expected = 0;
292                         }
293                         if( !(gaPageDir[curPos>>22] & PF_PRESENT) )     continue;
294                         if( !(gaPageTable[curPos>>12] & PF_PRESENT) )   continue;
295                         
296                         expected = (gaPageTable[page] & MASK);
297                         rangeStart = curPos;
298                 }
299                 if(expected)    expected += 0x1000;
300         }
301         
302         if(expected) {
303                 Log(" 0x%08x => 0x%08x - 0x%08x (%s%s%s%s%s)",
304                         rangeStart,
305                         gaPageTable[rangeStart>>12] & ~0xFFF,
306                         curPos - rangeStart,
307                         (expected & PF_NOPAGE ? "P" : "-"),
308                         (expected & PF_COW ? "C" : "-"),
309                         (expected & PF_GLOBAL ? "G" : "-"),
310                         (expected & PF_USER ? "U" : "-"), (expected & PF_WRITE ? "W" : "-")
311                         );
312                 expected = 0;
313         }
314 }
315
316 /**
317  * \fn tPAddr MM_Allocate(tVAddr VAddr)
318  */
319 tPAddr MM_Allocate(tVAddr VAddr)
320 {
321         tPAddr  paddr;
322         //ENTER("xVAddr", VAddr);
323         //__asm__ __volatile__ ("xchg %bx,%bx");
324         // Check if the directory is mapped
325         if( gaPageDir[ VAddr >> 22 ] == 0 )
326         {
327                 // Allocate directory
328                 paddr = MM_AllocPhys();
329                 if( paddr == 0 ) {
330                         Warning("MM_Allocate - Out of Memory (Called by %p)", __builtin_return_address(0));
331                         //LEAVE('i',0);
332                         return 0;
333                 }
334                 // Map and mark as user (if needed)
335                 gaPageDir[ VAddr >> 22 ] = paddr | 3;
336                 if(VAddr < MM_USER_MAX) gaPageDir[ VAddr >> 22 ] |= PF_USER;
337                 
338                 INVLPG( &gaPageDir[ VAddr >> 22 ] );
339                 memsetd( &gaPageTable[ (VAddr >> 12) & ~0x3FF ], 0, 1024 );
340         }
341         // Check if the page is already allocated
342         else if( gaPageTable[ VAddr >> 12 ] != 0 ) {
343                 Warning("MM_Allocate - Allocating to used address (%p)", VAddr);
344                 //LEAVE('X', gaPageTable[ VAddr >> 12 ] & ~0xFFF);
345                 return gaPageTable[ VAddr >> 12 ] & ~0xFFF;
346         }
347         
348         // Allocate
349         paddr = MM_AllocPhys();
350         //LOG("paddr = 0x%llx", paddr);
351         if( paddr == 0 ) {
352                 Warning("MM_Allocate - Out of Memory when allocating at %p (Called by %p)",
353                         VAddr, __builtin_return_address(0));
354                 //LEAVE('i',0);
355                 return 0;
356         }
357         // Map
358         gaPageTable[ VAddr >> 12 ] = paddr | 3;
359         // Mark as user
360         if(VAddr < MM_USER_MAX) gaPageTable[ VAddr >> 12 ] |= PF_USER;
361         // Invalidate Cache for address
362         INVLPG( VAddr & ~0xFFF );
363         
364         //LEAVE('X', paddr);
365         return paddr;
366 }
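/*
 * Example usage (illustrative sketch only; the caller and address are
 * hypothetical, not part of this file - the chosen page is assumed to be
 * page-aligned and currently unmapped):
 *
 *      tVAddr buf = SOME_FREE_KERNEL_ADDRESS;
 *      if( MM_Allocate( buf ) == 0 ) {
 *              Warning("Out of memory");
 *              return ;
 *      }
 *      memset( (void*)buf, 0, 0x1000 );        // page is now backed and writable
 */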
367
368 /**
369  * \fn void MM_Deallocate(tVAddr VAddr)
370  */
371 void MM_Deallocate(tVAddr VAddr)
372 {
373         if( gaPageDir[ VAddr >> 22 ] == 0 ) {
374                 Warning("MM_Deallocate - Directory not mapped");
375                 return;
376         }
377         
378         if(gaPageTable[ VAddr >> 12 ] == 0) {
379                 Warning("MM_Deallocate - Page is not allocated");
380                 return;
381         }
382         
383         // Dereference page
384         MM_DerefPhys( gaPageTable[ VAddr >> 12 ] & ~0xFFF );
385         // Clear page
386         gaPageTable[ VAddr >> 12 ] = 0;
387 }
388
389 /**
390  * \fn tPAddr MM_GetPhysAddr(tVAddr Addr)
391  * \brief Checks if the passed address is accesable
392  */
393 tPAddr MM_GetPhysAddr(tVAddr Addr)
394 {
395         if( !(gaPageDir[Addr >> 22] & 1) )
396                 return 0;
397         if( !(gaPageTable[Addr >> 12] & 1) )
398                 return 0;
399         return (gaPageTable[Addr >> 12] & ~0xFFF) | (Addr & 0xFFF);
400 }
401
402 /**
403  * \fn void MM_SetCR3(Uint CR3)
404  * \brief Sets the current process space
405  */
406 void MM_SetCR3(Uint CR3)
407 {
408         __asm__ __volatile__ ("mov %0, %%cr3"::"r"(CR3));
409 }
410
411 /**
412  * \fn int MM_Map(tVAddr VAddr, tPAddr PAddr)
413  * \brief Map a physical page to a virtual one
414  */
415 int MM_Map(tVAddr VAddr, tPAddr PAddr)
416 {
417         //ENTER("xVAddr xPAddr", VAddr, PAddr);
418         // Sanity check
419         if( PAddr & 0xFFF || VAddr & 0xFFF ) {
420                 Warning("MM_Map - Physical or Virtual Addresses are not aligned");
421                 //LEAVE('i', 0);
422                 return 0;
423         }
424         
425         // Align addresses
426         PAddr &= ~0xFFF;        VAddr &= ~0xFFF;
427         
428         // Check if the directory is mapped
429         if( gaPageDir[ VAddr >> 22 ] == 0 )
430         {
431                 gaPageDir[ VAddr >> 22 ] = MM_AllocPhys() | 3;
432                 
433                 // Mark as user
434                 if(VAddr < MM_USER_MAX) gaPageDir[ VAddr >> 22 ] |= PF_USER;
435                 
436                 INVLPG( &gaPageTable[ (VAddr >> 12) & ~0x3FF ] );
437                 memsetd( &gaPageTable[ (VAddr >> 12) & ~0x3FF ], 0, 1024 );
438         }
439         // Check if the page is already allocated
440         else if( gaPageTable[ VAddr >> 12 ] != 0 ) {
441                 Warning("MM_Map - Allocating to used address");
442                 //LEAVE('i', 0);
443                 return 0;
444         }
445         
446         // Map
447         gaPageTable[ VAddr >> 12 ] = PAddr | 3;
448         // Mark as user
449         if(VAddr < MM_USER_MAX) gaPageTable[ VAddr >> 12 ] |= PF_USER;
450         
451         //LOG("gaPageTable[ 0x%x ] = (Uint)%p = 0x%x",
452         //      VAddr >> 12, &gaPageTable[ VAddr >> 12 ], gaPageTable[ VAddr >> 12 ]);
453         
454         // Reference
455         MM_RefPhys( PAddr );
456         
457         //LOG("INVLPG( 0x%x )", VAddr);
458         INVLPG( VAddr );
459         
460         //LEAVE('i', 1);
461         return 1;
462 }
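/*
 * Example usage (illustrative sketch; the virtual address is hypothetical and
 * assumed unused - MM_MapHWPages below is usually more convenient for device
 * memory since it picks the virtual address itself):
 *
 *      // Map the VGA text buffer (physical 0xB8000) at a chosen virtual page
 *      if( !MM_Map( SOME_FREE_KERNEL_ADDRESS, 0x000B8000 ) )
 *              Warning("MM_Map failed");
 */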
463
464 /**
465  * \fn tVAddr MM_ClearUser()
466  * \brief Clear user's address space
467  */
468 tVAddr MM_ClearUser(void)
469 {
470         Uint    i, j;
471         
472         for( i = 0; i < (MM_USER_MAX>>22); i ++ )
473         {
474                 // Check if directory is not allocated
475                 if( !(gaPageDir[i] & PF_PRESENT) ) {
476                         gaPageDir[i] = 0;
477                         continue;
478                 }
479                 
480                 // Deallocate tables
481                 for( j = 0; j < 1024; j ++ )
482                 {
483                         if( gaPageTable[i*1024+j] & 1 )
484                                 MM_DerefPhys( gaPageTable[i*1024+j] & ~0xFFF );
485                         gaPageTable[i*1024+j] = 0;
486                 }
487                 
488                 // Deallocate directory
489                 MM_DerefPhys( gaPageDir[i] & ~0xFFF );
490                 gaPageDir[i] = 0;
491                 INVLPG( &gaPageTable[i*1024] );
492         }
493         INVLPG( gaPageDir );
494         
495         return *gpPageCR3;
496 }
497
498 /**
499  * \fn tPAddr MM_Clone(void)
500  * \brief Clone the current address space
501  */
502 tPAddr MM_Clone(void)
503 {
504         Uint    i, j;
505         tVAddr  ret;
506         Uint    page = 0;
507         tVAddr  kStackBase = Proc_GetCurThread()->KernelStack - KERNEL_STACK_SIZE;
508         void    *tmp;
509         
510         Mutex_Acquire( &glTempFractal );
511         
512         // Create Directory Table
513         *gpTmpCR3 = MM_AllocPhys() | 3;
514         INVLPG( gaTmpDir );
515         //LOG("Allocated Directory (%x)", *gpTmpCR3);
516         memsetd( gaTmpDir, 0, 1024 );
517         
518         if( Threads_GetPID() != 0 )
519         {       
520                 // Copy Tables
521                 for( i = 0; i < 768; i ++)
522                 {
523                         // Check if table is allocated
524                         if( !(gaPageDir[i] & PF_PRESENT) ) {
525                                 gaTmpDir[i] = 0;
526                                 page += 1024;
527                                 continue;
528                         }
529                         
530                         // Allocate new table
531                         gaTmpDir[i] = MM_AllocPhys() | (gaPageDir[i] & 7);
532                         INVLPG( &gaTmpTable[page] );
533                         // Fill
534                         for( j = 0; j < 1024; j ++, page++ )
535                         {
536                                 if( !(gaPageTable[page] & PF_PRESENT) ) {
537                                         gaTmpTable[page] = 0;
538                                         continue;
539                                 }
540                                 
541                                 // Reference the old page
542                                 MM_RefPhys( gaPageTable[page] & ~0xFFF );
543                                 // Add to new table
544                                 if(gaPageTable[page] & PF_WRITE) {
545                                         gaTmpTable[page] = (gaPageTable[page] & ~PF_WRITE) | PF_COW;
546                                         gaPageTable[page] = (gaPageTable[page] & ~PF_WRITE) | PF_COW;
547                                         INVLPG( page << 12 );
548                                 }
549                                 else
550                                         gaTmpTable[page] = gaPageTable[page];
551                         }
552                 }
553         }
554         
555         // Map in kernel tables (and make fractal mapping)
556         for( i = 768; i < 1024; i ++ )
557         {
558                 // Fractal
559                 if( i == (PAGE_TABLE_ADDR >> 22) ) {
560                         gaTmpDir[ PAGE_TABLE_ADDR >> 22 ] = *gpTmpCR3;
561                         continue;
562                 }
563                 
564                 if( gaPageDir[i] == 0 ) {
565                         gaTmpDir[i] = 0;
566                         continue;
567                 }
568                 
569                 //LOG("gaPageDir[%x/4] = 0x%x", i*4, gaPageDir[i]);
570                 MM_RefPhys( gaPageDir[i] & ~0xFFF );
571                 gaTmpDir[i] = gaPageDir[i];
572         }
573         
574         // Allocate kernel stack
575         for(i = KERNEL_STACKS >> 22;
576                 i < KERNEL_STACKS_END >> 22;
577                 i ++ )
578         {
579                 // Check if directory is allocated
580                 if( (gaPageDir[i] & 1) == 0 ) {
581                         gaTmpDir[i] = 0;
582                         continue;
583                 }               
584                 
585                 // We don't care about other kernel stacks, just the current one
586                 if( i != kStackBase >> 22 ) {
587                         MM_DerefPhys( gaPageDir[i] & ~0xFFF );
588                         gaTmpDir[i] = 0;
589                         continue;
590                 }
591                 
592                 // Create a copy
593                 gaTmpDir[i] = MM_AllocPhys() | 3;
594                 INVLPG( &gaTmpTable[i*1024] );
595                 for( j = 0; j < 1024; j ++ )
596                 {
597                         // Is the page allocated? If not, skip
598                         if( !(gaPageTable[i*1024+j] & 1) ) {
599                                 gaTmpTable[i*1024+j] = 0;
600                                 continue;
601                         }
602                         
603                         // We don't care about other kernel stacks
604                         if( ((i*1024+j)*4096 & ~(KERNEL_STACK_SIZE-1)) != kStackBase ) {
605                                 gaTmpTable[i*1024+j] = 0;
606                                 continue;
607                         }
608                         
609                         // Allocate page
610                         gaTmpTable[i*1024+j] = MM_AllocPhys() | 3;
611                         
612                         MM_RefPhys( gaTmpTable[i*1024+j] & ~0xFFF );
613                         
614                         tmp = (void *) MM_MapTemp( gaTmpTable[i*1024+j] & ~0xFFF );
615                         memcpy( tmp, (void *)( (i*1024+j)*0x1000 ), 0x1000 );
616                         MM_FreeTemp( (Uint)tmp );
617                 }
618         }
619         
620         ret = *gpTmpCR3 & ~0xFFF;
621         Mutex_Release( &glTempFractal );
622         
623         //LEAVE('x', ret);
624         return ret;
625 }
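/*
 * Copy-on-write lifecycle: MM_Clone downgrades every writable user page to
 * read-only and tags it PF_COW in both the old and the new table. The first
 * write from either address space then faults into MM_PageFault above, which
 * either reclaims the page outright (reference count of 1) or copies it with
 * MM_DuplicatePage before restoring write access.
 */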
626
627 /**
628  * \fn tVAddr MM_NewKStack(void)
629  * \brief Create a new kernel stack
630  */
631 tVAddr MM_NewKStack(void)
632 {
633         tVAddr  base;
634         Uint    i;
635         for(base = KERNEL_STACKS; base < KERNEL_STACKS_END; base += KERNEL_STACK_SIZE)
636         {
637                 // Check if space is free
638                 if(MM_GetPhysAddr(base) != 0)   continue;
639                 // Allocate
640                 //for(i = KERNEL_STACK_SIZE; i -= 0x1000 ; )
641                 for(i = 0; i < KERNEL_STACK_SIZE; i += 0x1000 )
642                 {
643                         if( MM_Allocate(base+i) == 0 )
644                         {
645                                 // On error, print a warning and return error
646                                 Warning("MM_NewKStack - Out of memory");
647                                 // - Clean up
648                                 //for( i += 0x1000 ; i < KERNEL_STACK_SIZE; i += 0x1000 )
649                                 //      MM_Deallocate(base+i);
650                                 return 0;
651                         }
652                 }
653                 // Success
654                 Log("MM_NewKStack - Allocated %p", base + KERNEL_STACK_SIZE);
655                 return base+KERNEL_STACK_SIZE;
656         }
657         // No stacks left
658         Warning("MM_NewKStack - No address space left");
659         return 0;
660 }
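/*
 * The value returned above is the *top* of the new stack (base +
 * KERNEL_STACK_SIZE); x86 stacks grow downwards, so this is presumably what
 * callers use as the initial ESP. The exact use is outside this file.
 */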
661
662 /**
663  * \fn tVAddr MM_NewWorkerStack()
664  * \brief Creates a new worker stack
665  */
666 tVAddr MM_NewWorkerStack()
667 {
668         Uint    esp, ebp;
669         Uint    oldstack;
670         Uint    base, addr;
671          int    i, j;
672         Uint    *tmpPage;
673         tPAddr  pages[WORKER_STACK_SIZE>>12];
674         
675         // Get the old ESP and EBP
676         __asm__ __volatile__ ("mov %%esp, %0": "=r"(esp));
677         __asm__ __volatile__ ("mov %%ebp, %0": "=r"(ebp));
678         
679         // TODO: Thread safety
680         // Find a free worker stack address
681         for(base = giLastUsedWorker; base < NUM_WORKER_STACKS; base++)
682         {
683                 // Used block
684                 if( gWorkerStacks[base/32] == -1 ) {
685                         base += 31;     base &= ~31;
686                         base --;        // Counteracted by the base++
687                         continue;
688                 }
689                 // Used stack
690                 if( gWorkerStacks[base/32] & (1 << (base & 31)) ) {
691                         continue;
692                 }
693                 break;
694         }
695         if(base >= NUM_WORKER_STACKS) {
696                 Warning("Uh-oh! Out of worker stacks");
697                 return 0;
698         }
699         
700         // It's ours now!
701         gWorkerStacks[base/32] |= (1 << (base & 31));
702         // Make life easier for later calls
703         giLastUsedWorker = base;
704         // We have one
705         base = WORKER_STACKS + base * WORKER_STACK_SIZE;
706         //Log(" MM_NewWorkerStack: base = 0x%x", base);
707         
708         // Acquire the lock for the temp fractal mappings
709         Mutex_Acquire(&glTempFractal);
710         
711         // Set the temp fractals to TID0's address space
712         *gpTmpCR3 = ((Uint)gaInitPageDir - KERNEL_BASE) | 3;
713         //Log(" MM_NewWorkerStack: *gpTmpCR3 = 0x%x", *gpTmpCR3);
714         INVLPG( gaTmpDir );
715         
716         
717         // Check if the directory is mapped (we are assuming that the stacks
718         // will fit neatly in a directory)
719         //Log(" MM_NewWorkerStack: gaTmpDir[ 0x%x ] = 0x%x", base>>22, gaTmpDir[ base >> 22 ]);
720         if(gaTmpDir[ base >> 22 ] == 0) {
721                 gaTmpDir[ base >> 22 ] = MM_AllocPhys() | 3;
722                 INVLPG( &gaTmpTable[ (base>>12) & ~0x3FF ] );
723         }
724         
725         // Mapping Time!
726         for( addr = 0; addr < WORKER_STACK_SIZE; addr += 0x1000 )
727         //for( addr = WORKER_STACK_SIZE; addr; addr -= 0x1000 )
728         {
729                 pages[ addr >> 12 ] = MM_AllocPhys();
730                 gaTmpTable[ (base + addr) >> 12 ] = pages[addr>>12] | 3;
731         }
732         *gpTmpCR3 = 0;
733         // Release the temp mapping lock
734         Mutex_Release(&glTempFractal);
735         
736         // Copy the old stack
737         oldstack = (esp + KERNEL_STACK_SIZE-1) & ~(KERNEL_STACK_SIZE-1);
738         esp = oldstack - esp;   // ESP as an offset in the stack
739         
740         // Make `base` be the top of the stack
741         base += WORKER_STACK_SIZE;
742         
743         i = (WORKER_STACK_SIZE>>12) - 1;
744         // Copy the contents of the old stack to the new one, altering the addresses
745         // `addr` is the offset in bytes from the stack base, counting downwards in memory
746         for(addr = 0; addr < esp; addr += 0x1000)
747         {
748                 Uint    *stack = (Uint*)( oldstack-(addr+0x1000) );
749                 tmpPage = (void*)MM_MapTemp( pages[i] );
750                 // Copy old stack
751                 for(j = 0; j < 1024; j++)
752                 {
753                         // Possible Stack address?
754                         if(oldstack-esp < stack[j] && stack[j] < oldstack)
755                                 tmpPage[j] = base - (oldstack - stack[j]);
756                         else    // Seems not, best leave it alone
757                                 tmpPage[j] = stack[j];
758                 }
759                 MM_FreeTemp((tVAddr)tmpPage);
760                 i --;
761         }
762         
763         //Log("MM_NewWorkerStack: RETURN 0x%x", base);
764         return base;
765 }
766
767 /**
768  * \fn void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask)
769  * \brief Sets the flags on a page
770  */
771 void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask)
772 {
773         tTabEnt *ent;
774         if( !(gaPageDir[VAddr >> 22] & 1) )     return ;
775         if( !(gaPageTable[VAddr >> 12] & 1) )   return ;
776         
777         ent = &gaPageTable[VAddr >> 12];
778         
779         // Read-Only
780         if( Mask & MM_PFLAG_RO )
781         {
782                 if( Flags & MM_PFLAG_RO ) {
783                         *ent &= ~PF_WRITE;
784                 }
785                 else {
786                         gaPageDir[VAddr >> 22] |= PF_WRITE;
787                         *ent |= PF_WRITE;
788                 }
789         }
790         
791         // Kernel
792         if( Mask & MM_PFLAG_KERNEL )
793         {
794                 if( Flags & MM_PFLAG_KERNEL ) {
795                         *ent &= ~PF_USER;
796                 }
797                 else {
798                         gaPageDir[VAddr >> 22] |= PF_USER;
799                         *ent |= PF_USER;
800                 }
801         }
802         
803         // Copy-On-Write
804         if( Mask & MM_PFLAG_COW )
805         {
806                 if( Flags & MM_PFLAG_COW ) {
807                         *ent &= ~PF_WRITE;
808                         *ent |= PF_COW;
809                 }
810                 else {
811                         *ent &= ~PF_COW;
812                         *ent |= PF_WRITE;
813                 }
814         }
815         
816         //Log("MM_SetFlags: *ent = 0x%08x, gaPageDir[%i] = 0x%08x",
817         //      *ent, VAddr >> 22, gaPageDir[VAddr >> 22]);
818 }
819
820 /**
821  * \brief Get the flags on a page
822  */
823 Uint MM_GetFlags(tVAddr VAddr)
824 {
825         tTabEnt *ent;
826         Uint    ret = 0;
827         
828         // Validity Check
829         if( !(gaPageDir[VAddr >> 22] & 1) )     return 0;
830         if( !(gaPageTable[VAddr >> 12] & 1) )   return 0;
831         
832         ent = &gaPageTable[VAddr >> 12];
833         
834         // Read-Only
835         if( !(*ent & PF_WRITE) )        ret |= MM_PFLAG_RO;
836         // Kernel
837         if( !(*ent & PF_USER) ) ret |= MM_PFLAG_KERNEL;
838         // Copy-On-Write
839         if( *ent & PF_COW )     ret |= MM_PFLAG_COW;
840         
841         return ret;
842 }
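/*
 * Example usage (illustrative sketch; `addr` is a hypothetical, already
 * mapped page):
 *
 *      // Make the page copy-on-write: PF_COW is set and PF_WRITE cleared
 *      MM_SetFlags( addr, MM_PFLAG_COW, MM_PFLAG_COW );
 *
 *      if( MM_GetFlags( addr ) & MM_PFLAG_RO )
 *              Log("Page is now read-only");
 */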
843
844 /**
845  * \fn tPAddr MM_DuplicatePage(tVAddr VAddr)
846  * \brief Duplicates a virtual page to a physical one
847  */
848 tPAddr MM_DuplicatePage(tVAddr VAddr)
849 {
850         tPAddr  ret;
851         Uint    temp;
852          int    wasRO = 0;
853         
854         //ENTER("xVAddr", VAddr);
855         
856         // Check if mapped
857         if( !(gaPageDir  [VAddr >> 22] & PF_PRESENT) )  return 0;
858         if( !(gaPageTable[VAddr >> 12] & PF_PRESENT) )  return 0;
859         
860         // Page Align
861         VAddr &= ~0xFFF;
862         
863         // Allocate new page
864         ret = MM_AllocPhys();
865         
866         // Write-lock the page (to keep the data consistent), saving its R/W state
867         wasRO = (gaPageTable[VAddr >> 12] & PF_WRITE ? 0 : 1);
868         gaPageTable[VAddr >> 12] &= ~PF_WRITE;
869         INVLPG( VAddr );
870         
871         // Copy Data
872         temp = MM_MapTemp(ret);
873         memcpy( (void*)temp, (void*)VAddr, 0x1000 );
874         MM_FreeTemp(temp);
875         
876         // Restore Writeable status
877         if(!wasRO)      gaPageTable[VAddr >> 12] |= PF_WRITE;
878         INVLPG(VAddr);
879         
880         //LEAVE('X', ret);
881         return ret;
882 }
883
884 /**
885  * \fn tVAddr MM_MapTemp(tPAddr PAddr)
886  * \brief Create a temporary memory mapping
887  * \todo Show Luigi Barone (C Lecturer) and see what he thinks
888  */
889 tVAddr MM_MapTemp(tPAddr PAddr)
890 {
891          int    i;
892         
893         //ENTER("XPAddr", PAddr);
894         
895         PAddr &= ~0xFFF;
896         
897         //LOG("glTempMappings = %i", glTempMappings);
898         
899         for(;;)
900         {
901                 Mutex_Acquire( &glTempMappings );
902                 
903                 for( i = 0; i < NUM_TEMP_PAGES; i ++ )
904                 {
905                         // Check if page used
906                         if(gaPageTable[ (TEMP_MAP_ADDR >> 12) + i ] & 1)        continue;
907                         // Mark as used
908                         gaPageTable[ (TEMP_MAP_ADDR >> 12) + i ] = PAddr | 3;
909                         INVLPG( TEMP_MAP_ADDR + (i << 12) );
910                         //LEAVE('p', TEMP_MAP_ADDR + (i << 12));
911                         Mutex_Release( &glTempMappings );
912                         return TEMP_MAP_ADDR + (i << 12);
913                 }
914                 Mutex_Release( &glTempMappings );
915                 Threads_Yield();        // TODO: Use a sleep queue here instead
916         }
917 }
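/*
 * Typical pattern (as used by MM_DuplicatePage and MM_Clone): map a physical
 * page into a temporary slot, touch it, then release the slot. `paddr` and
 * `src` here are hypothetical.
 *
 *      tVAddr tmp = MM_MapTemp( paddr );
 *      memcpy( (void*)tmp, src, 0x1000 );
 *      MM_FreeTemp( tmp );
 */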
918
919 /**
920  * \fn void MM_FreeTemp(tVAddr VAddr)
921  * \brief Frees a temporary mapping
922  */
923 void MM_FreeTemp(tVAddr VAddr)
924 {
925          int    i = VAddr >> 12;
926         //ENTER("xVAddr", VAddr);
927         
928         if(i >= (TEMP_MAP_ADDR >> 12))
929                 gaPageTable[ i ] = 0;
930         
931         //LEAVE('-');
932 }
933
934 /**
935  * \fn tVAddr MM_MapHWPages(tPAddr PAddr, Uint Number)
936  * \brief Maps a contiguous range of hardware pages
937  */
938 tVAddr MM_MapHWPages(tPAddr PAddr, Uint Number)
939 {
940          int    i, j;
941         
942         PAddr &= ~0xFFF;
943         
944         // Scan List
945         for( i = 0; i < NUM_HW_PAGES; i ++ )
946         {               
947                 // Check if addr used
948                 if( gaPageTable[ (HW_MAP_ADDR >> 12) + i ] & 1 )
949                         continue;
950                 
951                 // Check possible region
952                 for( j = 0; j < Number && i + j < NUM_HW_PAGES; j ++ )
953                 {
954                         // If there is an allocated page in the region we are testing, break
955                         if( gaPageTable[ (HW_MAP_ADDR >> 12) + i + j ] & 1 )    break;
956                 }
957                 // Is it all free?
958                 if( j == Number )
959                 {
960                         // Allocate
961                         for( j = 0; j < Number; j++ ) {
962                                 MM_RefPhys( PAddr + (j<<12) );
963                                 gaPageTable[ (HW_MAP_ADDR >> 12) + i + j ] = (PAddr + (j<<12)) | 3;
964                         }
965                         return HW_MAP_ADDR + (i<<12);
966                 }
967         }
968         // If we don't find any, return NULL
969         return 0;
970 }
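/*
 * Example usage (illustrative sketch; `bar_phys` stands for a device's
 * page-aligned MMIO base, e.g. read from a PCI BAR elsewhere):
 *
 *      tVAddr regs = MM_MapHWPages( bar_phys, 2 );     // map two pages of registers
 *      if( !regs )
 *              return ;        // address space exhausted
 */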
971
972 /**
973  * \fn tVAddr MM_AllocDMA(int Pages, int MaxBits, tPAddr *PhysAddr)
974  * \brief Allocates DMA physical memory
975  * \param Pages Number of pages required
976  * \param MaxBits       Maximum number of bits the physical address can have
977  * \param PhysAddr      Pointer to the location to place the physical address allocated
978  * \return Virtual address allocated
979  */
980 tVAddr MM_AllocDMA(int Pages, int MaxBits, tPAddr *PhysAddr)
981 {
982         tPAddr  maxCheck = (1 << MaxBits);
983         tPAddr  phys;
984         tVAddr  ret;
985         
986         ENTER("iPages iMaxBits pPhysAddr", Pages, MaxBits, PhysAddr);
987         
988         // Sanity Check
989         if(MaxBits < 12 || !PhysAddr) {
990                 LEAVE('i', 0);
991                 return 0;
992         }
993         
994         // Bound
995         if(MaxBits >= PHYS_BITS)        maxCheck = -1;
996         
997         // Fast Allocate
998         if(Pages == 1 && MaxBits >= PHYS_BITS)
999         {
1000                 phys = MM_AllocPhys();
1001                 *PhysAddr = phys;
1002                 ret = MM_MapHWPages(phys, 1);
1003                 if(ret == 0) {
1004                         MM_DerefPhys(phys);
1005                         LEAVE('i', 0);
1006                         return 0;
1007                 }
1008                 LEAVE('x', ret);
1009                 return ret;
1010         }
1011         
1012         // Slow Allocate
1013         phys = MM_AllocPhysRange(Pages, MaxBits);
1014         // - Was it allocated?
1015         if(phys == 0) {
1016                 LEAVE('i', 0);
1017                 return 0;
1018         }
1019         
1020         // Allocated successfully, now map
1021         ret = MM_MapHWPages(phys, Pages);
1022         if( ret == 0 ) {
1023                 // If it didn't map, free then return 0
1024                 for(;Pages--;phys+=0x1000)
1025                         MM_DerefPhys(phys);
1026                 LEAVE('i', 0);
1027                 return 0;
1028         }
1029         
1030         *PhysAddr = phys;
1031         LEAVE('x', ret);
1032         return ret;
1033 }
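/*
 * Example usage (illustrative sketch): allocate one physically contiguous
 * page below 16 MiB (MaxBits = 24), e.g. for an ISA DMA buffer.
 *
 *      tPAddr  phys;
 *      tVAddr  buf = MM_AllocDMA( 1, 24, &phys );
 *      if( !buf )
 *              return ;        // no suitable memory
 */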
1034
1035 /**
1036  * \fn void MM_UnmapHWPages(tVAddr VAddr, Uint Number)
1037  * \brief Unmaps a range of hardware pages
1038  */
1039 void MM_UnmapHWPages(tVAddr VAddr, Uint Number)
1040 {
1041          int    i, j;
1042         
1043         //Log_Debug("VirtMem", "MM_UnmapHWPages: (VAddr=0x%08x, Number=%i)", VAddr, Number);
1044         
1045         // Sanity Check
1046         if(VAddr < HW_MAP_ADDR || VAddr+Number*0x1000 > HW_MAP_MAX)     return;
1047         
1048         i = VAddr >> 12;
1049         
1050         Mutex_Acquire( &glTempMappings );       // Temp and HW share a directory, so they share a lock
1051         
1052         for( j = 0; j < Number; j++ )
1053         {
1054                 MM_DerefPhys( gaPageTable[ i + j ] & ~0xFFF );
1055                 gaPageTable[ i + j ] = 0;
1056         }
1057         
1058         Mutex_Release( &glTempMappings );
1059 }
1060
