Kernel/x86 - Fix PMem bitmap overflow
tpg/acess2.git: KernelLand/Kernel/arch/x86/mm_virt.c
1 /*
2  * AcessOS Microkernel Version
3  * mm_virt.c
4  * 
5  * Memory Map
6  * 0xE0 - Kernel Base
7  * 0xF0 - Kernel Stacks
8  * 0xFD - Fractals
9  * 0xFE - Unused
10  * 0xFF - System Calls / Kernel's User Code
11  */
12 #define DEBUG   0
13 #define SANITY  1
14 #include <acess.h>
15 #include <mm_virt.h>
16 #include <mm_phys.h>
17 #include <proc.h>
18 #include <hal_proc.h>
19 #include <arch_int.h>
20 #include <semaphore.h>
21
22 #define TAB     22
23
24 #define WORKER_STACKS           0x00100000      // Thread0 Only!
25 #define WORKER_STACK_SIZE       MM_KERNEL_STACK_SIZE
26 #define WORKER_STACKS_END       0xB0000000
27 #define NUM_WORKER_STACKS       ((WORKER_STACKS_END-WORKER_STACKS)/WORKER_STACK_SIZE)
28
29 #define PAE_PAGE_TABLE_ADDR     0xFC000000      // 16 MiB
30 #define PAE_PAGE_DIR_ADDR       0xFCFC0000      // 16 KiB
31 #define PAE_PAGE_PDPT_ADDR      0xFCFC3F00      // 32 bytes
32 #define PAE_TMP_PDPT_ADDR       0xFCFC3F20      // 32 bytes
33 #define PAE_TMP_DIR_ADDR        0xFCFE0000      // 16 KiB
34 #define PAE_TMP_TABLE_ADDR      0xFD000000      // 16 MiB
35
36 #define PAGE_TABLE_ADDR 0xFC000000
37 #define PAGE_DIR_ADDR   0xFC3F0000
38 #define PAGE_CR3_ADDR   0xFC3F0FC0
39 #define TMP_CR3_ADDR    0xFC3F0FC4      // Part of core instead of temp
40 #define TMP_DIR_ADDR    0xFC3F1000      // Same
41 #define TMP_TABLE_ADDR  0xFC400000
42
43 #define HW_MAP_ADDR             0xFE000000
44 #define HW_MAP_MAX              0xFFEF0000
45 #define NUM_HW_PAGES    ((HW_MAP_MAX-HW_MAP_ADDR)/0x1000)
46 #define TEMP_MAP_ADDR   0xFFEF0000      // Allows 16 "temp" pages
47 #define NUM_TEMP_PAGES  16
48 #define LAST_BLOCK_ADDR 0xFFFF0000      // Free space for kernel provided user code/ *(-1) protection
49
50 #define PF_PRESENT      0x1
51 #define PF_WRITE        0x2
52 #define PF_USER         0x4
53 #define PF_GLOBAL       0x80
54 #define PF_COW          0x200
55 #define PF_NOPAGE       0x400
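// Note: PF_COW (bit 9) and PF_NOPAGE (bit 10) sit in PTE bits that the MMU
// ignores ("available to software" on x86), so they can be kept in page table
// entries without affecting hardware address translation.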
56
57 #define INVLPG(addr)    __asm__ __volatile__ ("invlpg (%0)"::"r"(addr))
58
59 #define GET_TEMP_MAPPING(cr3) do { \
60         __ASM__("cli"); \
61         __AtomicTestSetLoop( (Uint *)gpTmpCR3, cr3 | 3 ); \
62 } while(0)
63 #define REL_TEMP_MAPPING() do { \
64         *gpTmpCR3 = 0; \
65         __ASM__("sti"); \
66 } while(0)
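/*
 * Usage pattern (as in MM_ClearSpace, MM_Clone and MM_NewWorkerStack below):
 * the temp-mapping macros bracket any walk of another address space through
 * the gaTmpDir/gaTmpTable fractal, e.g.
 *
 *   GET_TEMP_MAPPING( otherCR3 );  // claim the temp slot (interrupts off)
 *   INVLPG( gaTmpDir );            // flush stale TLB entries for the window
 *   // ... read/modify gaTmpDir[] and gaTmpTable[] ...
 *   REL_TEMP_MAPPING();            // release the slot (interrupts back on)
 */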
67
68 typedef Uint32  tTabEnt;
69
70 // === IMPORTS ===
71 extern char     _UsertextEnd[], _UsertextBase[];
72 extern Uint32   gaInitPageDir[1024];
73 extern Uint32   gaInitPageTable[1024];
74 extern void     Threads_SegFault(tVAddr Addr);
75 extern void     Error_Backtrace(Uint eip, Uint ebp);
76
77 // === PROTOTYPES ===
78 void    MM_PreinitVirtual(void);
79 void    MM_InstallVirtual(void);
80 void    MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs);
81 //void  MM_DumpTables(tVAddr Start, tVAddr End);
82 //void  MM_ClearUser(void);
83 tPAddr  MM_DuplicatePage(tVAddr VAddr);
84
85 // === GLOBALS ===
86 #define gaPageTable     ((tTabEnt*)PAGE_TABLE_ADDR)
87 #define gaPageDir       ((tTabEnt*)PAGE_DIR_ADDR)
88 #define gaTmpTable      ((tTabEnt*)TMP_TABLE_ADDR)
89 #define gaTmpDir        ((tTabEnt*)TMP_DIR_ADDR)
90 #define gpPageCR3       ((tTabEnt*)PAGE_CR3_ADDR)
91 #define gpTmpCR3        ((tTabEnt*)TMP_CR3_ADDR)
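// These aliases rely on the fractal (recursive) mapping set up in
// MM_PreinitVirtual(): the page directory is installed as one of its own
// entries, so every page table appears as one flat array at PAGE_TABLE_ADDR
// (gaPageTable[vaddr >> 12] is the PTE for vaddr) and the directory itself
// shows up at PAGE_DIR_ADDR.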
92
93 #define gaPAE_PageTable ((tTabEnt*)PAE_PAGE_TABLE_ADDR)
94 #define gaPAE_PageDir   ((tTabEnt*)PAE_PAGE_DIR_ADDR)
95 #define gaPAE_MainPDPT  ((tTabEnt*)PAE_PAGE_PDPT_ADDR)
96 #define gaPAE_TmpTable  ((tTabEnt*)PAE_TMP_TABLE_ADDR)
97 #define gaPAE_TmpDir    ((tTabEnt*)PAE_TMP_DIR_ADDR)
98 #define gaPAE_TmpPDPT   ((tTabEnt*)PAE_TMP_PDPT_ADDR)
99  int    gbUsePAE = 0;
100 tMutex  glTempMappings;
101 tSemaphore      gTempMappingsSem;
102 tMutex  glTempFractal;
103 Uint32  gWorkerStacks[(NUM_WORKER_STACKS+31)/32];
104  int    giLastUsedWorker = 0;
105 struct sPageInfo {
106         void    *Node;
107         tVAddr  Base;
108         Uint64  Offset;
109          int    Length;
110          int    Flags;
111 }       *gaMappedRegions;       // sizeof = 24 bytes
112
113 // === CODE ===
114 /**
115  * \fn void MM_PreinitVirtual(void)
116  * \brief Maps the fractal mappings
117  */
118 void MM_PreinitVirtual(void)
119 {
120         gaInitPageDir[ PAGE_TABLE_ADDR >> 22 ] = ((tTabEnt)&gaInitPageDir - KERNEL_BASE) | 3;
121         INVLPG( PAGE_TABLE_ADDR );
122         
123         Semaphore_Init(&gTempMappingsSem, NUM_TEMP_PAGES, NUM_TEMP_PAGES, "MMVirt", "Temp Mappings");
124 }
125
126 /**
127  * \fn void MM_InstallVirtual(void)
128  * \brief Sets up the constant page mappings
129  */
130 void MM_InstallVirtual(void)
131 {
132         // --- Pre-Allocate kernel tables
133         for( int i = KERNEL_BASE>>22; i < 1024; i ++ )
134         {
135                 if( gaPageDir[ i ] )    continue;
136                 // Skip stack tables, they are process unique
137                 if( i > MM_KERNEL_STACKS >> 22 && i < MM_KERNEL_STACKS_END >> 22) {
138                         gaPageDir[ i ] = 0;
139                         continue;
140                 }
141                 // Preallocate table
142                 gaPageDir[ i ] = MM_AllocPhys() | 3;
143                 INVLPG( &gaPageTable[i*1024] );
144                 memset( &gaPageTable[i*1024], 0, 0x1000 );
145         }
146         
147         // Unset kernel on the User Text pages
148         for( int i = ((tVAddr)&_UsertextEnd-(tVAddr)&_UsertextBase+0xFFF)/4096; i--; ) {
149                 MM_SetFlags( (tVAddr)&_UsertextBase + i*4096, 0, MM_PFLAG_KERNEL );
150         }
151         
152         *gpTmpCR3 = 0;
153 }
154
155 /**
156  * \brief Cleans up the mappings required for SMP startup
157  */
158 void MM_FinishVirtualInit(void)
159 {
160         gaInitPageDir[ 0 ] = 0;
161 }
162
163 /**
164  * \fn void MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs)
165  * \brief Called on a page fault
166  */
167 void MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs)
168 {
169         //ENTER("xAddr bErrorCode", Addr, ErrorCode);
170         
171         // -- Check for COW --
172         if( gaPageDir  [Addr>>22] & PF_PRESENT  && gaPageTable[Addr>>12] & PF_PRESENT
173          && gaPageTable[Addr>>12] & PF_COW )
174         {
175                 tPAddr  paddr;
176                 __asm__ __volatile__ ("sti");
177                 if(MM_GetRefCount( gaPageTable[Addr>>12] & ~0xFFF ) == 1)
178                 {
179                         gaPageTable[Addr>>12] &= ~PF_COW;
180                         gaPageTable[Addr>>12] |= PF_PRESENT|PF_WRITE;
181                 }
182                 else
183                 {
184                         //Log("MM_PageFault: COW - MM_DuplicatePage(0x%x)", Addr);
185                         paddr = MM_DuplicatePage( Addr );
186                         MM_DerefPhys( gaPageTable[Addr>>12] & ~0xFFF );
187                         gaPageTable[Addr>>12] &= PF_USER;
188                         gaPageTable[Addr>>12] |= paddr|PF_PRESENT|PF_WRITE;
189                 }
190                 
191 //              Log_Debug("MMVirt", "COW for %p (%P)", Addr, gaPageTable[Addr>>12]);
192                 
193                 INVLPG( Addr & ~0xFFF );
194                 return;
195         }
196
197         // Disable instruction tracing  
198         __ASM__("pushf; andw $0xFEFF, 0(%esp); popf");
199         Proc_GetCurThread()->bInstrTrace = 0;
200
201         // If it was a user, tell the thread handler
202         if(ErrorCode & 4) {
203                 __asm__ __volatile__ ("sti");
204                 Log_Warning("MMVirt", "User %s %s memory%s",
205                         (ErrorCode&2?"write to":"read from"),
206                         (ErrorCode&1?"bad/locked":"non-present"),
207                         (ErrorCode&16?" (Instruction Fetch)":"")
208                         );
209                 Log_Warning("MMVirt", "Instruction %04x:%08x accessed %p", Regs->cs, Regs->eip, Addr);
210                 __ASM__("sti"); // Restart IRQs
211                 #if 1
212                 Error_Backtrace(Regs->eip, Regs->ebp);
213                 #endif
214                 Threads_SegFault(Addr);
215                 return ;
216         }
217         
218         Debug_KernelPanic();
219         
220         // -- Check Error Code --
221         if(ErrorCode & 8)
222                 Warning("Reserved Bits Trashed!");
223         else
224         {
225                 Warning("Kernel %s %s memory%s",
226                         (ErrorCode&2?"write to":"read from"),
227                         (ErrorCode&1?"bad/locked":"non-present"),
228                         (ErrorCode&16?" (Instruction Fetch)":"")
229                         );
230         }
231         
232         Log("CPU %i - Code at %p accessed %p", GetCPUNum(), Regs->eip, Addr);
233         // Print Stack Backtrace
234         Error_Backtrace(Regs->eip, Regs->ebp);
235
236         #if 0   
237         Log("gaPageDir[0x%x] = 0x%x", Addr>>22, gaPageDir[Addr>>22]);
238         if( gaPageDir[Addr>>22] & PF_PRESENT )
239                 Log("gaPageTable[0x%x] = 0x%x", Addr>>12, gaPageTable[Addr>>12]);
240         #endif
241         //MM_DumpTables(0, -1); 
242         
243         // Register Dump
244         Log("EAX %08x ECX %08x EDX %08x EBX %08x", Regs->eax, Regs->ecx, Regs->edx, Regs->ebx);
245         Log("ESP %08x EBP %08x ESI %08x EDI %08x", Regs->esp, Regs->ebp, Regs->esi, Regs->edi);
246         //Log("SS:ESP %04x:%08x", Regs->ss, Regs->esp);
247         Log("CS:EIP %04x:%08x", Regs->cs, Regs->eip);
248         Log("DS %04x ES %04x FS %04x GS %04x", Regs->ds, Regs->es, Regs->fs, Regs->gs);
249         {
250                 Uint    dr0, dr1;
251                 __ASM__ ("mov %%dr0, %0":"=r"(dr0):);
252                 __ASM__ ("mov %%dr1, %0":"=r"(dr1):);
253                 Log("DR0 %08x DR1 %08x", dr0, dr1);
254         }
255         
256         Panic("Page Fault at 0x%x (Accessed 0x%x)", Regs->eip, Addr);
257 }
258
259 /**
260  * \fn void MM_DumpTables(tVAddr Start, tVAddr End)
261  * \brief Dumps the layout of the page tables
262  */
263 void MM_DumpTables(tVAddr Start, tVAddr End)
264 {
265         tVAddr  rangeStart = 0;
266         tPAddr  expected = 0;
267         void    *expected_node = NULL, *tmpnode = NULL;
268         tVAddr  curPos;
269         Uint    page;
270         const tPAddr    MASK = ~0xF78;
271         
272         Start >>= 12;   End >>= 12;
273         
274         #if 0
275         Log("Directory Entries:");
276         for(page = Start >> 10;
277                 page < (End >> 10)+1;
278                 page ++)
279         {
280                 if(gaPageDir[page])
281                 {
282                         Log(" 0x%08x-0x%08x :: 0x%08x",
283                                 page<<22, ((page+1)<<22)-1,
284                                 gaPageDir[page]&~0xFFF
285                                 );
286                 }
287         }
288         #endif
289         
290         Log("Table Entries:");
291         for(page = Start, curPos = Start<<12;
292                 page < End;
293                 curPos += 0x1000, page++)
294         {
295                 if( !(gaPageDir[curPos>>22] & PF_PRESENT)
296                 ||  !(gaPageTable[page] & PF_PRESENT)
297                 ||  (gaPageTable[page] & MASK) != expected
298                 ||  (tmpnode=NULL,MM_GetPageNode(expected, &tmpnode), tmpnode != expected_node))
299                 {
300                         if(expected) {
301                                 tPAddr  orig = gaPageTable[rangeStart>>12];
302                                 Log(" 0x%08x => 0x%08x - 0x%08x (%s%s%s%s%s) %p",
303                                         rangeStart,
304                                         orig & ~0xFFF,
305                                         curPos - rangeStart,
306                                         (orig & PF_NOPAGE ? "P" : "-"),
307                                         (orig & PF_COW ? "C" : "-"),
308                                         (orig & PF_GLOBAL ? "G" : "-"),
309                                         (orig & PF_USER ? "U" : "-"),
310                                         (orig & PF_WRITE ? "W" : "-"),
311                                         expected_node
312                                         );
313                                 expected = 0;
314                         }
315                         if( !(gaPageDir[curPos>>22] & PF_PRESENT) )     continue;
316                         if( !(gaPageTable[curPos>>12] & PF_PRESENT) )   continue;
317                         
318                         expected = (gaPageTable[page] & MASK);
319                         MM_GetPageNode(expected, &expected_node);
320                         rangeStart = curPos;
321                 }
322                 if(expected)    expected += 0x1000;
323         }
324         
325         if(expected) {
326                 tPAddr  orig = gaPageTable[rangeStart>>12];
327                 Log("0x%08x => 0x%08x - 0x%08x (%s%s%s%s%s) %p",
328                         rangeStart,
329                         orig & ~0xFFF,
330                         curPos - rangeStart,
331                         (orig & PF_NOPAGE ? "p" : "-"),
332                         (orig & PF_COW ? "C" : "-"),
333                         (orig & PF_GLOBAL ? "G" : "-"),
334                         (orig & PF_USER ? "U" : "-"),
335                         (orig & PF_WRITE ? "W" : "-"),
336                         expected_node
337                         );
338                 expected = 0;
339         }
340 }
341
342 /**
343  * \fn tPAddr MM_Allocate(tVAddr VAddr)
344  */
345 tPAddr MM_Allocate(tVAddr VAddr)
346 {
347         tPAddr  paddr;
348         //ENTER("xVAddr", VAddr);
349         //__ASM__("xchg %bx,%bx");
350         // Check if the directory is mapped
351         if( gaPageDir[ VAddr >> 22 ] == 0 )
352         {
353                 // Allocate directory
354                 paddr = MM_AllocPhys();
355                 if( paddr == 0 ) {
356                         Warning("MM_Allocate - Out of Memory (Called by %p)", __builtin_return_address(0));
357                         //LEAVE('i',0);
358                         return 0;
359                 }
360                 // Map and mark as user (if needed)
361                 gaPageDir[ VAddr >> 22 ] = paddr | 3;
362                 if(VAddr < MM_USER_MAX) gaPageDir[ VAddr >> 22 ] |= PF_USER;
363                 
364                 INVLPG( &gaPageDir[ VAddr >> 22 ] );
365                 memsetd( &gaPageTable[ (VAddr >> 12) & ~0x3FF ], 0, 1024 );
366         }
367         // Check if the page is already allocated
368         else if( gaPageTable[ VAddr >> 12 ] != 0 ) {
369                 Warning("MM_Allocate - Allocating to used address (%p)", VAddr);
370                 //LEAVE('X', gaPageTable[ VAddr >> 12 ] & ~0xFFF);
371                 return gaPageTable[ VAddr >> 12 ] & ~0xFFF;
372         }
373         
374         // Allocate
375         paddr = MM_AllocPhys();
376         //LOG("paddr = 0x%llx", paddr);
377         if( paddr == 0 ) {
378                 Warning("MM_Allocate - Out of Memory when allocating at %p (Called by %p)",
379                         VAddr, __builtin_return_address(0));
380                 //LEAVE('i',0);
381                 return 0;
382         }
383         // Map
384         gaPageTable[ VAddr >> 12 ] = paddr | 3;
385         // Mark as user
386         if(VAddr < MM_USER_MAX) gaPageTable[ VAddr >> 12 ] |= PF_USER;
387         // Invalidate Cache for address
388         INVLPG( VAddr & ~0xFFF );
389         
390         //LEAVE('X', paddr);
391         return paddr;
392 }
393
394 /**
395  * \fn void MM_Deallocate(tVAddr VAddr)
396  */
397 void MM_Deallocate(tVAddr VAddr)
398 {
399         if( gaPageDir[ VAddr >> 22 ] == 0 ) {
400                 Warning("MM_Deallocate - Directory not mapped");
401                 return;
402         }
403         
404         if(gaPageTable[ VAddr >> 12 ] == 0) {
405                 Warning("MM_Deallocate - Page is not allocated");
406                 return;
407         }
408         
409         // Dereference page
410         MM_DerefPhys( gaPageTable[ VAddr >> 12 ] & ~0xFFF );
411         // Clear page
412         gaPageTable[ VAddr >> 12 ] = 0;
413 }
414
415 /**
416  * \fn tPAddr MM_GetPhysAddr(volatile const void *Addr)
417  * \brief Gets the physical address behind a virtual address (0 if not mapped)
418  */
419 tPAddr MM_GetPhysAddr(volatile const void *Addr)
420 {
421         tVAddr  addr = (tVAddr)Addr;
422         if( !(gaPageDir[addr >> 22] & 1) )
423                 return 0;
424         if( !(gaPageTable[addr >> 12] & 1) )
425                 return 0;
426         return (gaPageTable[addr >> 12] & ~0xFFF) | (addr & 0xFFF);
427 }
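/* Example (illustrative): a zero return means "not mapped", so
 * MM_GetPhysAddr() doubles as a cheap mapping test, as in MM_NewKStack()
 * below:
 *   if( MM_GetPhysAddr( (void*)base ) != 0 )
 *           continue;       // address already in use
 */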
428
429 /**
430  * \fn void MM_SetCR3(Uint CR3)
431  * \brief Sets the current process space
432  */
433 void MM_SetCR3(Uint CR3)
434 {
435         __ASM__("mov %0, %%cr3"::"r"(CR3));
436 }
437
438 /**
439  * \fn int MM_Map(tVAddr VAddr, tPAddr PAddr)
440  * \brief Map a physical page to a virtual one
441  */
442 int MM_Map(tVAddr VAddr, tPAddr PAddr)
443 {
444         //ENTER("xVAddr xPAddr", VAddr, PAddr);
445         // Sanity check
446         if( PAddr & 0xFFF || VAddr & 0xFFF ) {
447                 Log_Warning("MM_Virt", "MM_Map - Physical or Virtual Addresses are not aligned (0x%P and %p)",
448                         PAddr, VAddr);
449                 //LEAVE('i', 0);
450                 return 0;
451         }
452         
453         // Align addresses
454         PAddr &= ~0xFFF;        VAddr &= ~0xFFF;
455         
456         // Check if the directory is mapped
457         if( gaPageDir[ VAddr >> 22 ] == 0 )
458         {
459                 tPAddr  tmp = MM_AllocPhys();
460                 if( tmp == 0 )
461                         return 0;
462                 gaPageDir[ VAddr >> 22 ] = tmp | 3;
463                 
464                 // Mark as user
465                 if(VAddr < MM_USER_MAX) gaPageDir[ VAddr >> 22 ] |= PF_USER;
466                 
467                 INVLPG( &gaPageTable[ (VAddr >> 12) & ~0x3FF ] );
468                 memsetd( &gaPageTable[ (VAddr >> 12) & ~0x3FF ], 0, 1024 );
469         }
470         // Check if the page is already allocated
471         else if( gaPageTable[ VAddr >> 12 ] != 0 ) {
472                 Warning("MM_Map - Allocating to used address");
473                 //LEAVE('i', 0);
474                 return 0;
475         }
476         
477         // Map
478         gaPageTable[ VAddr >> 12 ] = PAddr | 3;
479         // Mark as user
480         if(VAddr < MM_USER_MAX) gaPageTable[ VAddr >> 12 ] |= PF_USER;
481         
482         //LOG("gaPageTable[ 0x%x ] = (Uint)%p = 0x%x",
483         //      VAddr >> 12, &gaPageTable[ VAddr >> 12 ], gaPageTable[ VAddr >> 12 ]);
484         
485         // Reference
486         MM_RefPhys( PAddr );
487         
488         //LOG("INVLPG( 0x%x )", VAddr);
489         INVLPG( VAddr );
490         
491         //LEAVE('i', 1);
492         return 1;
493 }
494
495 /**
496  * \brief Clear user's address space
497  */
498 void MM_ClearUser(void)
499 {
500         Uint    i, j;
501         
502         for( i = 0; i < (MM_USER_MAX>>22); i ++ )
503         {
504                 // Check if directory is not allocated
505                 if( !(gaPageDir[i] & PF_PRESENT) ) {
506                         gaPageDir[i] = 0;
507                         continue;
508                 }
509                 
510                 // Deallocate tables
511                 for( j = 0; j < 1024; j ++ )
512                 {
513                         if( gaPageTable[i*1024+j] & 1 )
514                                 MM_DerefPhys( gaPageTable[i*1024+j] & ~0xFFF );
515                         gaPageTable[i*1024+j] = 0;
516                 }
517                 
518                 // Deallocate directory
519                 MM_DerefPhys( gaPageDir[i] & ~0xFFF );
520                 gaPageDir[i] = 0;
521                 INVLPG( &gaPageTable[i*1024] );
522         }
523         INVLPG( gaPageDir );
524 }
525
526 /**
527  * \brief Deallocate an address space
528  */
529 void MM_ClearSpace(Uint32 CR3)
530 {
531          int    i, j;
532         
533         if(CR3 == (*gpPageCR3 & ~0xFFF)) {
534                 Log_Error("MMVirt", "Can't clear current address space");
535                 return ;
536         }
537
538         if( MM_GetRefCount(CR3) > 1 ) {
539                 MM_DerefPhys(CR3);
540                 Log_Log("MMVirt", "CR3 %P is still referenced, not cleaning (but dereferenced)", CR3);
541                 return ;
542         }
543
544         Log_Debug("MMVirt", "Clearing out address space 0x%x from 0x%x", CR3, *gpPageCR3);
545         
546         GET_TEMP_MAPPING(CR3);
547         INVLPG( gaTmpDir );
548
549         for( i = 0; i < 1024; i ++ )
550         {
551                 Uint32  *table = &gaTmpTable[i*1024];
552                 if( !(gaTmpDir[i] & PF_PRESENT) )
553                         continue ;
554
555                 INVLPG( table );        
556
557                 if( i < 768 || (i > MM_KERNEL_STACKS >> 22 && i < MM_KERNEL_STACKS_END >> 22) )
558                 {
559                         for( j = 0; j < 1024; j ++ )
560                         {
561                                 if( !(table[j] & 1) )
562                                         continue;
563                                 MM_DerefPhys( table[j] & ~0xFFF );
564                         }
565                 }
566
567                 if( i != (PAGE_TABLE_ADDR >> 22) )
568                 {               
569                         MM_DerefPhys( gaTmpDir[i] & ~0xFFF );
570                 }
571         }
572
573
574         MM_DerefPhys( CR3 );
575
576         REL_TEMP_MAPPING();
577 }
578
579 /**
580  * \fn tPAddr MM_Clone(int bNoUserCopy)
581  * \brief Clone the current address space
582  */
583 tPAddr MM_Clone(int bNoUserCopy)
584 {
585         Uint    i, j;
586         tPAddr  ret;
587         Uint    page = 0;
588         tVAddr  kStackBase = Proc_GetCurThread()->KernelStack - MM_KERNEL_STACK_SIZE;
589         void    *tmp;
590         
591         // Create Directory Table
592         ret = MM_AllocPhys();
593         if( ret == 0 ) {
594                 return 0;
595         }
596         
597         // Map
598         GET_TEMP_MAPPING( ret );
599         INVLPG( gaTmpDir );
600         memsetd( gaTmpDir, 0, 1024 );
601         
602         if( Threads_GetPID() != 0 && !bNoUserCopy )
603         {       
604                 // Copy Tables
605                 for( i = 0; i < 768; i ++)
606                 {
607                         // Check if table is allocated
608                         if( !(gaPageDir[i] & PF_PRESENT) ) {
609                                 gaTmpDir[i] = 0;
610                                 page += 1024;
611                                 continue;
612                         }
613                         
614                         // Allocate new table
615                         gaTmpDir[i] = MM_AllocPhys() | (gaPageDir[i] & 7);
616                         INVLPG( &gaTmpTable[page] );
617                         // Fill
618                         for( j = 0; j < 1024; j ++, page++ )
619                         {
620                                 if( !(gaPageTable[page] & PF_PRESENT) ) {
621                                         gaTmpTable[page] = 0;
622                                         continue;
623                                 }
624                                 
625                                 // Reference old page
626                                 MM_RefPhys( gaPageTable[page] & ~0xFFF );
627                                 // Add to new table
628                                 if(gaPageTable[page] & PF_WRITE) {
629                                         gaTmpTable[page] = (gaPageTable[page] & ~PF_WRITE) | PF_COW;
630                                         gaPageTable[page] = (gaPageTable[page] & ~PF_WRITE) | PF_COW;
631                                         INVLPG( page << 12 );
632                                 }
633                                 else
634                                         gaTmpTable[page] = gaPageTable[page];
635                         }
636                 }
637         }
638         
639         // Map in kernel tables (and make fractal mapping)
640         for( i = 768; i < 1024; i ++ )
641         {
642                 // Fractal
643                 if( i == (PAGE_TABLE_ADDR >> 22) ) {
644                         gaTmpDir[ PAGE_TABLE_ADDR >> 22 ] = *gpTmpCR3;
645                         continue;
646                 }
647                 if( i == (TMP_TABLE_ADDR >> 22) ) {
648                         gaTmpDir[ TMP_TABLE_ADDR >> 22 ] = 0;
649                         continue ;
650                 }
651                 
652                 if( gaPageDir[i] == 0 ) {
653                         gaTmpDir[i] = 0;
654                         continue;
655                 }
656                 
657                 //LOG("gaPageDir[%x/4] = 0x%x", i*4, gaPageDir[i]);
658                 MM_RefPhys( gaPageDir[i] & ~0xFFF );
659                 gaTmpDir[i] = gaPageDir[i];
660         }
661         
662         // Allocate kernel stack
663         for(i = MM_KERNEL_STACKS >> 22; i < MM_KERNEL_STACKS_END >> 22; i ++ )
664         {
665                 // Check if directory is allocated
666                 if( (gaPageDir[i] & 1) == 0 ) {
667                         gaTmpDir[i] = 0;
668                         continue;
669                 }               
670                 
671                 // We don't care about other kernel stacks, just the current one
672                 if( i != kStackBase >> 22 ) {
673                         MM_DerefPhys( gaPageDir[i] & ~0xFFF );
674                         gaTmpDir[i] = 0;
675                         continue;
676                 }
677                 
678                 // Create a copy
679                 gaTmpDir[i] = MM_AllocPhys() | 3;
680                 INVLPG( &gaTmpTable[i*1024] );
681                 for( j = 0; j < 1024; j ++ )
682                 {
683                         // Is the page allocated? If not, skip
684                         if( !(gaPageTable[i*1024+j] & 1) ) {
685                                 gaTmpTable[i*1024+j] = 0;
686                                 continue;
687                         }
688                         
689                         // We don't care about other kernel stacks
690                         if( ((i*1024+j)*4096 & ~(MM_KERNEL_STACK_SIZE-1)) != kStackBase ) {
691                                 gaTmpTable[i*1024+j] = 0;
692                                 continue;
693                         }
694                         
695                         // Allocate page
696                         gaTmpTable[i*1024+j] = MM_AllocPhys() | 3;
697                         
698                         MM_RefPhys( gaTmpTable[i*1024+j] & ~0xFFF );
699                         
700                         tmp = MM_MapTemp( gaTmpTable[i*1024+j] & ~0xFFF );
701                         memcpy( tmp, (void *)( (i*1024+j)*0x1000 ), 0x1000 );
702                         MM_FreeTemp( tmp );
703                 }
704         }
705         
706         REL_TEMP_MAPPING();
707         
708         //LEAVE('x', ret);
709         return ret;
710 }
711
712 /**
713  * \fn tVAddr MM_NewKStack(void)
714  * \brief Create a new kernel stack
715  */
716 tVAddr MM_NewKStack(void)
717 {
718         tVAddr  base;
719         Uint    i;
720         for(base = MM_KERNEL_STACKS; base < MM_KERNEL_STACKS_END; base += MM_KERNEL_STACK_SIZE)
721         {
722                 // Check if space is free
723                 if(MM_GetPhysAddr( (void*) base) != 0)
724                         continue;
725                 // Allocate
726                 //for(i = MM_KERNEL_STACK_SIZE; i -= 0x1000 ; )
727                 for(i = 0; i < MM_KERNEL_STACK_SIZE; i += 0x1000 )
728                 {
729                         if( MM_Allocate(base+i) == 0 )
730                         {
731                                 // On error, print a warning and return error
732                                 Warning("MM_NewKStack - Out of memory");
733                                 // - Clean up
734                                 //for( i += 0x1000 ; i < MM_KERNEL_STACK_SIZE; i += 0x1000 )
735                                 //      MM_Deallocate(base+i);
736                                 return 0;
737                         }
738                 }
739                 // Success
740 //              Log("MM_NewKStack - Allocated %p", base + MM_KERNEL_STACK_SIZE);
741                 return base+MM_KERNEL_STACK_SIZE;
742         }
743         // No stacks left
744         Log_Warning("MMVirt", "MM_NewKStack - No address space left");
745         return 0;
746 }
747
748 /**
749  * \fn tVAddr MM_NewWorkerStack(Uint *StackContents, size_t ContentsSize)
750  * \brief Creates a new worker stack
751  */
752 tVAddr MM_NewWorkerStack(Uint *StackContents, size_t ContentsSize)
753 {
754         Uint    base;
755         tPAddr  page;
756         
757         LOG("(StackContents=%p,ContentsSize=%i)", StackContents, ContentsSize);
758         // TODO: Thread safety
759         // Find a free worker stack address
760         for(base = giLastUsedWorker; base < NUM_WORKER_STACKS; base++)
761         {
762                 // Whole block of 32 stacks used - skip it
763                 if( gWorkerStacks[base/32] == -1 ) {
764                         base |= 31;     // Jump to the last slot of this block...
765                         // ...the loop's base++ then lands on the first slot of the next one
766                         continue;
767                 }
768                 // Used stack
769                 if( gWorkerStacks[base/32] & (1 << (base & 31)) ) {
770                         continue;
771                 }
772                 break;
773         }
774         if(base >= NUM_WORKER_STACKS) {
775                 Log_Error("MMVirt", "Uh-oh! Out of worker stacks");
776                 return 0;
777         }
778         LOG("base=0x%x", base);
779         
780         // It's ours now!
781         gWorkerStacks[base/32] |= (1 << (base & 31));
782         // Make life easier for later calls
783         giLastUsedWorker = base;
784         // We have one
785         base = WORKER_STACKS + base * WORKER_STACK_SIZE;
786         //Log(" MM_NewWorkerStack: base = 0x%x", base);
787         LOG("base=%p (top)", base);
788         
789         // Set the temp fractals to TID0's address space
790         GET_TEMP_MAPPING( ((Uint)gaInitPageDir - KERNEL_BASE) );
791         INVLPG( gaTmpDir );
792         
793         // Check if the directory is mapped (we are assuming that the stacks
794         // will fit neatly in a directory)
795         LOG("gaTmpDir[ 0x%x ] = 0x%x", base>>22, gaTmpDir[ base >> 22 ]);
796         if(gaTmpDir[ base >> 22 ] == 0) {
797                 gaTmpDir[ base >> 22 ] = MM_AllocPhys() | 3;
798                 INVLPG( &gaTmpTable[ (base>>12) & ~0x3FF ] );
799         }
800         
801         // Mapping Time!
802         for( Uint addr = 0; addr < WORKER_STACK_SIZE; addr += 0x1000 )
803         {
804                 page = MM_AllocPhys();
805                 gaTmpTable[ (base + addr) >> 12 ] = page | 3;
806         }
807         LOG("mapped");
808
809         // Release temporary fractal
810         REL_TEMP_MAPPING();
811
812         // NOTE: Max of 1 page
813         // `page` is the last allocated page from the previous for loop
814         LOG("Mapping first page");
815         char    *tmpPage = MM_MapTemp( page );
816         LOG("tmpPage=%p", tmpPage);
817         memcpy( tmpPage + (0x1000 - ContentsSize), StackContents, ContentsSize);
818         MM_FreeTemp( tmpPage );
819         
820         //Log("MM_NewWorkerStack: RETURN 0x%x", base);
821         LOG("return %p", base+WORKER_STACK_SIZE);
822         return base + WORKER_STACK_SIZE;
823 }
824
825 /**
826  * \fn void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask)
827  * \brief Sets the flags on a page
828  */
829 void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask)
830 {
831         tTabEnt *ent;
832         if( !(gaPageDir[VAddr >> 22] & 1) )     return ;
833         if( !(gaPageTable[VAddr >> 12] & 1) )   return ;
834         
835         ent = &gaPageTable[VAddr >> 12];
836         
837         // Read-Only
838         if( Mask & MM_PFLAG_RO )
839         {
840                 if( Flags & MM_PFLAG_RO ) {
841                         *ent &= ~PF_WRITE;
842                 }
843                 else {
844                         gaPageDir[VAddr >> 22] |= PF_WRITE;
845                         *ent |= PF_WRITE;
846                 }
847         }
848         
849         // Kernel
850         if( Mask & MM_PFLAG_KERNEL )
851         {
852                 if( Flags & MM_PFLAG_KERNEL ) {
853                         *ent &= ~PF_USER;
854                 }
855                 else {
856                         gaPageDir[VAddr >> 22] |= PF_USER;
857                         *ent |= PF_USER;
858                 }
859         }
860         
861         // Copy-On-Write
862         if( Mask & MM_PFLAG_COW )
863         {
864                 if( Flags & MM_PFLAG_COW ) {
865                         *ent &= ~PF_WRITE;
866                         *ent |= PF_COW;
867                 }
868                 else {
869                         *ent &= ~PF_COW;
870                         *ent |= PF_WRITE;
871                 }
872         }
873         
874         //Log("MM_SetFlags: *ent = 0x%08x, gaPageDir[%i] = 0x%08x",
875         //      *ent, VAddr >> 22, gaPageDir[VAddr >> 22]);
876 }
877
878 /**
879  * \brief Get the flags on a page
880  */
881 Uint MM_GetFlags(tVAddr VAddr)
882 {
883         tTabEnt *ent;
884         Uint    ret = 0;
885         
886         // Validity Check
887         if( !(gaPageDir[VAddr >> 22] & 1) )     return 0;
888         if( !(gaPageTable[VAddr >> 12] & 1) )   return 0;
889         
890         ent = &gaPageTable[VAddr >> 12];
891         
892         // Read-Only
893         if( !(*ent & PF_WRITE) )        ret |= MM_PFLAG_RO;
894         // Kernel
895         if( !(*ent & PF_USER) ) ret |= MM_PFLAG_KERNEL;
896         // Copy-On-Write
897         if( *ent & PF_COW )     ret |= MM_PFLAG_COW;
898         
899         return ret;
900 }
901
902 /**
903  * \brief Check if the provided buffer is valid
904  * \return Boolean valid
905  */
906 int MM_IsValidBuffer(tVAddr Addr, size_t Size)
907 {
908          int    bIsUser;
909          int    dir, tab;
910
911         Size += Addr & (PAGE_SIZE-1);
912         Addr &= ~(PAGE_SIZE-1);
913
914         dir = Addr >> 22;
915         tab = Addr >> 12;
916         
917 //      Debug("Addr = %p, Size = 0x%x, dir = %i, tab = %i", Addr, Size, dir, tab);
918
919         if( !(gaPageDir[dir] & 1) )     return 0;
920         if( !(gaPageTable[tab] & 1) )   return 0;
921         
922         bIsUser = !!(gaPageTable[tab] & PF_USER);
923
924         // Walk the remaining pages of the buffer (the first page was checked above)
925         while( Size > PAGE_SIZE )
926         {
927                 Size -= PAGE_SIZE;
928                 tab ++;
929                 if( (tab & 1023) == 0 )
930                 {
931                         dir ++;
932                         if( !(gaPageDir[dir] & 1) )     return 0;
933                 }
934                 
935                 if( !(gaPageTable[tab] & 1) )   return 0;
936                 if( bIsUser && !(gaPageTable[tab] & PF_USER) )  return 0;
937         }
938         return 1;
939 }
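/* Illustrative caller (hypothetical, not part of this file): a syscall
 * handler can vet a user-supplied buffer before touching it:
 *   if( !MM_IsValidBuffer( (tVAddr)user_ptr, user_len ) )
 *           return -1;      // reject the request instead of faulting in-kernel
 */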
940
941 /**
942  * \fn tPAddr MM_DuplicatePage(tVAddr VAddr)
943  * \brief Duplicates a virtual page to a physical one
944  */
945 tPAddr MM_DuplicatePage(tVAddr VAddr)
946 {
947         tPAddr  ret;
948         void    *temp;
949          int    wasRO = 0;
950         
951         //ENTER("xVAddr", VAddr);
952         
953         // Check if mapped
954         if( !(gaPageDir  [VAddr >> 22] & PF_PRESENT) )  return 0;
955         if( !(gaPageTable[VAddr >> 12] & PF_PRESENT) )  return 0;
956         
957         // Page Align
958         VAddr &= ~0xFFF;
959         
960         // Allocate new page
961         ret = MM_AllocPhys();
962         if( !ret ) {
963                 return 0;
964         }
965         
966         // Write-lock the page (to keep data consistent), saving its R/W state
967         wasRO = (gaPageTable[VAddr >> 12] & PF_WRITE ? 0 : 1);
968         gaPageTable[VAddr >> 12] &= ~PF_WRITE;
969         INVLPG( VAddr );
970         
971         // Copy Data
972         temp = MM_MapTemp(ret);
973         memcpy( temp, (void*)VAddr, 0x1000 );
974         MM_FreeTemp(temp);
975         
976         // Restore Writeable status
977         if(!wasRO)      gaPageTable[VAddr >> 12] |= PF_WRITE;
978         INVLPG(VAddr);
979         
980         //LEAVE('X', ret);
981         return ret;
982 }
983
984 /**
985  * \fn void *MM_MapTemp(tPAddr PAddr)
986  * \brief Create a temporary memory mapping
987  * \todo Show Luigi Barone (C Lecturer) and see what he thinks
988  */
989 void *MM_MapTemp(tPAddr PAddr)
990 {
991         ENTER("PPAddr", PAddr);
992         
993         PAddr &= ~0xFFF;
994         
995         if( Semaphore_Wait(&gTempMappingsSem, 1) != 1 )
996                 return NULL;
997         LOG("Semaphore good");
998         Mutex_Acquire( &glTempMappings );
999         for( int i = 0; i < NUM_TEMP_PAGES; i ++ )
1000         {
1001                 Uint32  *pte = &gaPageTable[ (TEMP_MAP_ADDR >> 12) + i ];
1002                 LOG("%i: %x", i, *pte);
1003                 // Check if page used
1004                 if(*pte & 1)    continue;
1005                 // Mark as used
1006                 *pte = PAddr | 3;
1007                 INVLPG( TEMP_MAP_ADDR + (i << 12) );
1008                 LEAVE('p', TEMP_MAP_ADDR + (i << 12));
1009                 Mutex_Release( &glTempMappings );
1010                 return (void*)( TEMP_MAP_ADDR + (i << 12) );
1011         }
1012         Mutex_Release( &glTempMappings );
1013         Log_KernelPanic("MMVirt", "Semaphore suplied a mapping, but none are avaliable");
1014         return NULL;
1015 }
1016
1017 /**
1018  * \fn void MM_FreeTemp(void *VAddr)
1019  * \brief Frees a temporary mapping
1020  */
1021 void MM_FreeTemp(void *VAddr)
1022 {
1023          int    i = (tVAddr)VAddr >> 12;
1024         //ENTER("xVAddr", VAddr);
1025         
1026         if(i >= (TEMP_MAP_ADDR >> 12)) {
1027                 gaPageTable[ i ] = 0;
1028                 Semaphore_Signal(&gTempMappingsSem, 1);
1029         }
1030         
1031         //LEAVE('-');
1032 }
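/* Typical pairing (as used by MM_DuplicatePage and MM_NewWorkerStack above):
 *   void *tmp = MM_MapTemp( paddr );   // may block on gTempMappingsSem
 *   memcpy( tmp, src, 0x1000 );
 *   MM_FreeTemp( tmp );                // frees the slot and signals the semaphore
 */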
1033
1034 /**
1035  * \fn void *MM_MapHWPages(tPAddr PAddr, Uint Number)
1036  * \brief Maps a contiguous run of physical pages into the hardware mapping region
1037  */
1038 void *MM_MapHWPages(tPAddr PAddr, Uint Number)
1039 {
1040          int    j;
1041         
1042         PAddr &= ~0xFFF;
1043
1044         if( PAddr < 1024*1024 && (1024*1024-PAddr) >= Number * PAGE_SIZE )
1045         {
1046                 return (void*)(KERNEL_BASE + PAddr);
1047         }
1048
1049         // Scan List
1050         for( int i = 0; i < NUM_HW_PAGES; i ++ )
1051         {               
1052                 // Check if addr used
1053                 if( gaPageTable[ (HW_MAP_ADDR >> 12) + i ] & 1 )
1054                         continue;
1055                 
1056                 // Check possible region
1057                 for( j = 0; j < Number && i + j < NUM_HW_PAGES; j ++ )
1058                 {
1059                         // If there is an allocated page in the region we are testing, break
1060                         if( gaPageTable[ (HW_MAP_ADDR >> 12) + i + j ] & 1 )    break;
1061                 }
1062                 // Is it all free?
1063                 if( j == Number )
1064                 {
1065                         // Allocate
1066                         for( j = 0; j < Number; j++ ) {
1067                                 MM_RefPhys( PAddr + (j<<12) );
1068                                 gaPageTable[ (HW_MAP_ADDR >> 12) + i + j ] = (PAddr + (j<<12)) | 3;
1069                         }
1070                         return (void*)(HW_MAP_ADDR + (i<<12));
1071                 }
1072         }
1073         // If we don't find any, return NULL
1074         return 0;
1075 }
1076
1077 /**
1078  * \fn void *MM_AllocDMA(int Pages, int MaxBits, tPAddr *PhysAddr)
1079  * \brief Allocates DMA physical memory
1080  * \param Pages Number of pages required
1081  * \param MaxBits       Maximum number of bits the physical address can have
1082  * \param PhysAddr      Pointer to the location to place the physical address allocated
1083  * \return Virtual address allocated
1084  */
1085 void *MM_AllocDMA(int Pages, int MaxBits, tPAddr *PhysAddr)
1086 {
1087         tPAddr  phys;
1088         void    *ret;
1089         
1090         ENTER("iPages iMaxBits pPhysAddr", Pages, MaxBits, PhysAddr);
1091         
1092         if(MaxBits == -1)
1093                 MaxBits = PHYS_BITS;
1094         
1095         // Sanity Check
1096         if(MaxBits < 12) {
1097                 LEAVE('i', 0);
1098                 return 0;
1099         }
1100         
1101         // Fast Allocate
1102         if(Pages == 1 && MaxBits >= PHYS_BITS)
1103         {
1104                 phys = MM_AllocPhys();
1105                 if( PhysAddr )
1106                         *PhysAddr = phys;
1107                 if( !phys ) {
1108                         LEAVE_RET('i', 0);
1109                 }
1110                 ret = MM_MapHWPages(phys, 1);
1111                 if(ret == 0) {
1112                         MM_DerefPhys(phys);
1113                         LEAVE('i', 0);
1114                         return 0;
1115                 }
1116                 LEAVE('x', ret);
1117                 return (void*)ret;
1118         }
1119         
1120         // Slow Allocate
1121         phys = MM_AllocPhysRange(Pages, MaxBits);
1122         // - Was it allocated?
1123         if(phys == 0) {
1124                 LEAVE('i', 0);
1125                 return 0;
1126         }
1127         
1128         // Allocated successfully, now map
1129         ret = MM_MapHWPages(phys, Pages);
1130         if( ret == 0 ) {
1131                 // If it didn't map, free then return 0
1132                 for(;Pages--;phys+=0x1000)
1133                         MM_DerefPhys(phys);
1134                 LEAVE('i', 0);
1135                 return 0;
1136         }
1137         
1138         if( PhysAddr )
1139                 *PhysAddr = phys;
1140         LEAVE('x', ret);
1141         return (void*)ret;
1142 }
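/* Illustrative call (names hypothetical): a driver wanting a physically
 * contiguous buffer below 16 MiB (ISA-style DMA) might do:
 *   tPAddr phys;
 *   void *buf = MM_AllocDMA( 2, 24, &phys );  // 2 pages, phys fits in 24 bits
 *   if( !buf )
 *           return 0;       // out of memory / address space
 */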
1143
1144 /**
1145  * \fn void MM_UnmapHWPages(tVAddr VAddr, Uint Number)
1146  * \brief Unmap a range of hardware pages
1147  */
1148 void MM_UnmapHWPages(tVAddr VAddr, Uint Number)
1149 {
1150          int    i, j;
1151         
1152         //Log_Debug("VirtMem", "MM_UnmapHWPages: (VAddr=0x%08x, Number=%i)", VAddr, Number);
1153
1154         // The low identity mapping (KERNEL_BASE + <1MiB, handed out directly by MM_MapHWPages) needs no unmapping
1155         if( KERNEL_BASE <= VAddr && VAddr < KERNEL_BASE + 1024*1024 )
1156                 return ;        
1157
1158         // Sanity Check
1159         if(VAddr < HW_MAP_ADDR || VAddr+Number*0x1000 > HW_MAP_MAX)     return;
1160         
1161         i = VAddr >> 12;
1162         
1163         Mutex_Acquire( &glTempMappings );       // Temp and HW share a directory, so they share a lock
1164         
1165         for( j = 0; j < Number; j++ )
1166         {
1167                 MM_DerefPhys( gaPageTable[ i + j ] & ~0xFFF );
1168                 gaPageTable[ i + j ] = 0;
1169                 INVLPG( (tVAddr)(i+j) << 12 );
1170         }
1171         
1172         Mutex_Release( &glTempMappings );
1173 }
1174
