1 /*
2  * AcessOS Microkernel Version
3  * mm_virt.c
4  * 
5  * Memory Map
6  * 0xE0 - Kernel Base
7  * 0xF0 - Kernel Stacks
8  * 0xFD - Fractals
9  * 0xFE - Unused
10  * 0xFF - System Calls / Kernel's User Code
11  */
12 #define DEBUG   0
13 #define SANITY  1
14 #include <acess.h>
15 #include <mm_virt.h>
16 #include <mm_phys.h>
17 #include <proc.h>
18 #include <hal_proc.h>
19 #include <arch_int.h>
20
21 #define TAB     22
22
23 #define WORKER_STACKS           0x00100000      // Thread0 Only!
24 #define WORKER_STACK_SIZE       MM_KERNEL_STACK_SIZE
25 #define WORKER_STACKS_END       0xB0000000
26 #define NUM_WORKER_STACKS       ((WORKER_STACKS_END-WORKER_STACKS)/WORKER_STACK_SIZE)
27
28 #define PAE_PAGE_TABLE_ADDR     0xFC000000      // 16 MiB
29 #define PAE_PAGE_DIR_ADDR       0xFCFC0000      // 16 KiB
30 #define PAE_PAGE_PDPT_ADDR      0xFCFC3F00      // 32 bytes
31 #define PAE_TMP_PDPT_ADDR       0xFCFC3F20      // 32 bytes
32 #define PAE_TMP_DIR_ADDR        0xFCFE0000      // 16 KiB
33 #define PAE_TMP_TABLE_ADDR      0xFD000000      // 16 MiB
34
35 #define PAGE_TABLE_ADDR 0xFC000000
36 #define PAGE_DIR_ADDR   0xFC3F0000
37 #define PAGE_CR3_ADDR   0xFC3F0FC0
38 #define TMP_CR3_ADDR    0xFC3F0FC4      // Part of core instead of temp
39 #define TMP_DIR_ADDR    0xFC3F1000      // Same
40 #define TMP_TABLE_ADDR  0xFC400000
41
42 #define HW_MAP_ADDR             0xFE000000
43 #define HW_MAP_MAX              0xFFEF0000
44 #define NUM_HW_PAGES    ((HW_MAP_MAX-HW_MAP_ADDR)/0x1000)
45 #define TEMP_MAP_ADDR   0xFFEF0000      // Allows 16 "temp" pages
46 #define NUM_TEMP_PAGES  16
47 #define LAST_BLOCK_ADDR 0xFFFF0000      // Free space for kernel-provided user code and *(-1) protection
48
49 #define PF_PRESENT      0x1
50 #define PF_WRITE        0x2
51 #define PF_USER         0x4
52 #define PF_GLOBAL       0x80
53 #define PF_COW          0x200
54 #define PF_NOPAGE       0x400
55
56 #define INVLPG(addr)    __asm__ __volatile__ ("invlpg (%0)"::"r"(addr))
57
58 #define GET_TEMP_MAPPING(cr3) do { \
59         __ASM__("cli"); \
60         __AtomicTestSetLoop( (Uint *)gpTmpCR3, cr3 | 3 ); \
61 } while(0)
62 #define REL_TEMP_MAPPING() do { \
63         *gpTmpCR3 = 0; \
64         __ASM__("sti"); \
65 } while(0)
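// GET_TEMP_MAPPING atomically claims the spare "temporary" fractal slot by
// storing `cr3 | 3` into gpTmpCR3 with interrupts disabled; this makes the
// target address space's page directory visible at TMP_DIR_ADDR and its page
// tables at TMP_TABLE_ADDR. REL_TEMP_MAPPING clears the slot and re-enables
// interrupts.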
66
67 typedef Uint32  tTabEnt;
68
69 // === IMPORTS ===
70 extern char     _UsertextEnd[], _UsertextBase[];
71 extern Uint32   gaInitPageDir[1024];
72 extern Uint32   gaInitPageTable[1024];
73 extern void     Threads_SegFault(tVAddr Addr);
74 extern void     Error_Backtrace(Uint eip, Uint ebp);
75
76 // === PROTOTYPES ===
77 void    MM_PreinitVirtual(void);
78 void    MM_InstallVirtual(void);
79 void    MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs);
80 //void  MM_DumpTables(tVAddr Start, tVAddr End);
81 //void  MM_ClearUser(void);
82 tPAddr  MM_DuplicatePage(tVAddr VAddr);
83
84 // === GLOBALS ===
85 #define gaPageTable     ((tTabEnt*)PAGE_TABLE_ADDR)
86 #define gaPageDir       ((tTabEnt*)PAGE_DIR_ADDR)
87 #define gaTmpTable      ((tTabEnt*)TMP_TABLE_ADDR)
88 #define gaTmpDir        ((tTabEnt*)TMP_DIR_ADDR)
89 #define gpPageCR3       ((tTabEnt*)PAGE_CR3_ADDR)
90 #define gpTmpCR3        ((tTabEnt*)TMP_CR3_ADDR)
91
92 #define gaPAE_PageTable ((tTabEnt*)PAE_PAGE_TABLE_ADDR)
93 #define gaPAE_PageDir   ((tTabEnt*)PAE_PAGE_DIR_ADDR)
94 #define gaPAE_MainPDPT  ((tTabEnt*)PAE_PAGE_PDPT_ADDR)
95 #define gaPAE_TmpTable ((tTabEnt*)PAE_TMP_TABLE_ADDR)
96 #define gaPAE_TmpDir    ((tTabEnt*)PAE_TMP_DIR_ADDR)
97 #define gaPAE_TmpPDPT   ((tTabEnt*)PAE_TMP_PDPT_ADDR)
98  int    gbUsePAE = 0;
99 tMutex  glTempMappings;
100 tMutex  glTempFractal;
101 Uint32  gWorkerStacks[(NUM_WORKER_STACKS+31)/32];
102  int    giLastUsedWorker = 0;
103 struct sPageInfo {
104         void    *Node;
105         tVAddr  Base;
106         Uint64  Offset;
107          int    Length;
108          int    Flags;
109 }       *gaMappedRegions;       // sizeof = 24 bytes
110
111 // === CODE ===
112 /**
113  * \fn void MM_PreinitVirtual(void)
114  * \brief Maps the fractal mappings
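 *
 * Points a page directory entry back at the page directory itself, so the
 * paging structures become addressable through virtual memory: every page
 * table appears in the window at PAGE_TABLE_ADDR and the directory itself
 * appears at PAGE_DIR_ADDR (the "fractal" mapping used throughout this file).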
115  */
116 void MM_PreinitVirtual(void)
117 {
118         gaInitPageDir[ PAGE_TABLE_ADDR >> 22 ] = ((tTabEnt)&gaInitPageDir - KERNEL_BASE) | 3;
119         INVLPG( PAGE_TABLE_ADDR );
120 }
121
122 /**
123  * \fn void MM_InstallVirtual(void)
124  * \brief Sets up the constant page mappings
125  */
126 void MM_InstallVirtual(void)
127 {
128          int    i;
129         
130         // --- Pre-Allocate kernel tables
131         for( i = KERNEL_BASE>>22; i < 1024; i ++ )
132         {
133                 if( gaPageDir[ i ] )    continue;
134                 // Skip stack tables, they are process unique
135                 if( i > MM_KERNEL_STACKS >> 22 && i < MM_KERNEL_STACKS_END >> 22) {
136                         gaPageDir[ i ] = 0;
137                         continue;
138                 }
139                 // Preallocate table
140                 gaPageDir[ i ] = MM_AllocPhys() | 3;
141                 INVLPG( &gaPageTable[i*1024] );
142                 memset( &gaPageTable[i*1024], 0, 0x1000 );
143         }
144         
145         // Unset kernel on the User Text pages
146         for( i = ((tVAddr)&_UsertextEnd-(tVAddr)&_UsertextBase+0xFFF)/4096; i--; ) {
147                 MM_SetFlags( (tVAddr)&_UsertextBase + i*4096, 0, MM_PFLAG_KERNEL );
148         }
149         
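        // Mark the temporary-mapping CR3 slot as free so GET_TEMP_MAPPING can claim it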
150         *gpTmpCR3 = 0;
151 }
152
153 /**
154  * \brief Cleans up the boot-time mappings required for SMP startup
155  */
156 void MM_FinishVirtualInit(void)
157 {
158         gaInitPageDir[ 0 ] = 0;
159 }
160
161 /**
162  * \fn void MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs)
163  * \brief Called on a page fault
164  */
165 void MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs)
166 {
167         //ENTER("xAddr bErrorCode", Addr, ErrorCode);
168         
169         // -- Check for COW --
170         if( gaPageDir  [Addr>>22] & PF_PRESENT  && gaPageTable[Addr>>12] & PF_PRESENT
171          && gaPageTable[Addr>>12] & PF_COW )
172         {
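                // Copy-on-write write fault: if we hold the only reference to the
                // frame, just restore write access; otherwise copy the frame and
                // point this mapping at the writable copy.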
173                 tPAddr  paddr;
174                 __asm__ __volatile__ ("sti");
175                 if(MM_GetRefCount( gaPageTable[Addr>>12] & ~0xFFF ) == 1)
176                 {
177                         gaPageTable[Addr>>12] &= ~PF_COW;
178                         gaPageTable[Addr>>12] |= PF_PRESENT|PF_WRITE;
179                 }
180                 else
181                 {
182                         //Log("MM_PageFault: COW - MM_DuplicatePage(0x%x)", Addr);
183                         paddr = MM_DuplicatePage( Addr );
184                         MM_DerefPhys( gaPageTable[Addr>>12] & ~0xFFF );
185                         gaPageTable[Addr>>12] &= PF_USER;
186                         gaPageTable[Addr>>12] |= paddr|PF_PRESENT|PF_WRITE;
187                 }
188                 
189 //              Log_Debug("MMVirt", "COW for %p (%P)", Addr, gaPageTable[Addr>>12]);
190                 
191                 INVLPG( Addr & ~0xFFF );
192                 return;
193         }
194
195         // Disable instruction tracing  
196         __ASM__("pushf; andw $0xFEFF, 0(%esp); popf");
197         Proc_GetCurThread()->bInstrTrace = 0;
198
199         // If it was a user, tell the thread handler
200         if(ErrorCode & 4) {
201                 __asm__ __volatile__ ("sti");
202                 Log_Warning("MMVirt", "User %s %s memory%s",
203                         (ErrorCode&2?"write to":"read from"),
204                         (ErrorCode&1?"bad/locked":"non-present"),
205                         (ErrorCode&16?" (Instruction Fetch)":"")
206                         );
207                 Log_Warning("MMVirt", "Instruction %04x:%08x accessed %p", Regs->cs, Regs->eip, Addr);
208                 __ASM__("sti"); // Restart IRQs
209                 #if 1
210                 Error_Backtrace(Regs->eip, Regs->ebp);
211                 #endif
212                 Threads_SegFault(Addr);
213                 return ;
214         }
215         
216         Debug_KernelPanic();
217         
218         // -- Check Error Code --
219         if(ErrorCode & 8)
220                 Warning("Reserved Bits Trashed!");
221         else
222         {
223                 Warning("Kernel %s %s memory%s",
224                         (ErrorCode&2?"write to":"read from"),
225                         (ErrorCode&1?"bad/locked":"non-present"),
226                         (ErrorCode&16?" (Instruction Fetch)":"")
227                         );
228         }
229         
230         Log("CPU %i - Code at %p accessed %p", GetCPUNum(), Regs->eip, Addr);
231         // Print Stack Backtrace
232         Error_Backtrace(Regs->eip, Regs->ebp);
233
234         #if 0   
235         Log("gaPageDir[0x%x] = 0x%x", Addr>>22, gaPageDir[Addr>>22]);
236         if( gaPageDir[Addr>>22] & PF_PRESENT )
237                 Log("gaPageTable[0x%x] = 0x%x", Addr>>12, gaPageTable[Addr>>12]);
238         #endif
239         //MM_DumpTables(0, -1); 
240         
241         // Register Dump
242         Log("EAX %08x ECX %08x EDX %08x EBX %08x", Regs->eax, Regs->ecx, Regs->edx, Regs->ebx);
243         Log("ESP %08x EBP %08x ESI %08x EDI %08x", Regs->esp, Regs->ebp, Regs->esi, Regs->edi);
244         //Log("SS:ESP %04x:%08x", Regs->ss, Regs->esp);
245         Log("CS:EIP %04x:%08x", Regs->cs, Regs->eip);
246         Log("DS %04x ES %04x FS %04x GS %04x", Regs->ds, Regs->es, Regs->fs, Regs->gs);
247         {
248                 Uint    dr0, dr1;
249                 __ASM__ ("mov %%dr0, %0":"=r"(dr0):);
250                 __ASM__ ("mov %%dr1, %0":"=r"(dr1):);
251                 Log("DR0 %08x DR1 %08x", dr0, dr1);
252         }
253         
254         Panic("Page Fault at 0x%x (Accessed 0x%x)", Regs->eip, Addr);
255 }
256
257 /**
258  * \fn void MM_DumpTables(tVAddr Start, tVAddr End)
259  * \brief Dumps the layout of the page tables
260  */
261 void MM_DumpTables(tVAddr Start, tVAddr End)
262 {
263         tVAddr  rangeStart = 0;
264         tPAddr  expected = 0;
265         void    *expected_node = NULL, *tmpnode = NULL;
266         tVAddr  curPos;
267         Uint    page;
268         const tPAddr    MASK = ~0xF78;
269         
270         Start >>= 12;   End >>= 12;
271         
272         #if 0
273         Log("Directory Entries:");
274         for(page = Start >> 10;
275                 page < (End >> 10)+1;
276                 page ++)
277         {
278                 if(gaPageDir[page])
279                 {
280                         Log(" 0x%08x-0x%08x :: 0x%08x",
281                                 page<<22, ((page+1)<<22)-1,
282                                 gaPageDir[page]&~0xFFF
283                                 );
284                 }
285         }
286         #endif
287         
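        // Contiguous runs are collapsed into one line of the form
        //   start => phys - length (flags) node
        // where the flag letters mean NOPAGE, COW, GLOBAL, USER and WRITE.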
288         Log("Table Entries:");
289         for(page = Start, curPos = Start<<12;
290                 page < End;
291                 curPos += 0x1000, page++)
292         {
293                 if( !(gaPageDir[curPos>>22] & PF_PRESENT)
294                 ||  !(gaPageTable[page] & PF_PRESENT)
295                 ||  (gaPageTable[page] & MASK) != expected
296                 ||  (tmpnode=NULL,MM_GetPageNode(expected, &tmpnode), tmpnode != expected_node))
297                 {
298                         if(expected) {
299                                 tPAddr  orig = gaPageTable[rangeStart>>12];
300                                 Log(" 0x%08x => 0x%08x - 0x%08x (%s%s%s%s%s) %p",
301                                         rangeStart,
302                                         orig & ~0xFFF,
303                                         curPos - rangeStart,
304                                         (orig & PF_NOPAGE ? "P" : "-"),
305                                         (orig & PF_COW ? "C" : "-"),
306                                         (orig & PF_GLOBAL ? "G" : "-"),
307                                         (orig & PF_USER ? "U" : "-"),
308                                         (orig & PF_WRITE ? "W" : "-"),
309                                         expected_node
310                                         );
311                                 expected = 0;
312                         }
313                         if( !(gaPageDir[curPos>>22] & PF_PRESENT) )     continue;
314                         if( !(gaPageTable[curPos>>12] & PF_PRESENT) )   continue;
315                         
316                         expected = (gaPageTable[page] & MASK);
317                         MM_GetPageNode(expected, &expected_node);
318                         rangeStart = curPos;
319                 }
320                 if(expected)    expected += 0x1000;
321         }
322         
323         if(expected) {
324                 tPAddr  orig = gaPageTable[rangeStart>>12];
325                 Log("0x%08x => 0x%08x - 0x%08x (%s%s%s%s%s) %p",
326                         rangeStart,
327                         orig & ~0xFFF,
328                         curPos - rangeStart,
329                         (orig & PF_NOPAGE ? "p" : "-"),
330                         (orig & PF_COW ? "C" : "-"),
331                         (orig & PF_GLOBAL ? "G" : "-"),
332                         (orig & PF_USER ? "U" : "-"),
333                         (orig & PF_WRITE ? "W" : "-"),
334                         expected_node
335                         );
336                 expected = 0;
337         }
338 }
339
340 /**
341  * \fn tPAddr MM_Allocate(tVAddr VAddr)
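 * \brief Allocate a physical page and map it at VAddr
 * \return Physical address of the frame now mapped at VAddr (the existing
 *         frame if the address was already allocated), or 0 on failure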
342  */
343 tPAddr MM_Allocate(tVAddr VAddr)
344 {
345         tPAddr  paddr;
346         //ENTER("xVAddr", VAddr);
347         //__ASM__("xchg %bx,%bx");
348         // Check if the directory is mapped
349         if( gaPageDir[ VAddr >> 22 ] == 0 )
350         {
351                 // Allocate a page table for this region
352                 paddr = MM_AllocPhys();
353                 if( paddr == 0 ) {
354                         Warning("MM_Allocate - Out of Memory (Called by %p)", __builtin_return_address(0));
355                         //LEAVE('i',0);
356                         return 0;
357                 }
358                 // Map and mark as user (if needed)
359                 gaPageDir[ VAddr >> 22 ] = paddr | 3;
360                 if(VAddr < MM_USER_MAX) gaPageDir[ VAddr >> 22 ] |= PF_USER;
361                 
362                 INVLPG( &gaPageDir[ VAddr >> 22 ] );
363                 memsetd( &gaPageTable[ (VAddr >> 12) & ~0x3FF ], 0, 1024 );
364         }
365         // Check if the page is already allocated
366         else if( gaPageTable[ VAddr >> 12 ] != 0 ) {
367                 Warning("MM_Allocate - Allocating to used address (%p)", VAddr);
368                 //LEAVE('X', gaPageTable[ VAddr >> 12 ] & ~0xFFF);
369                 return gaPageTable[ VAddr >> 12 ] & ~0xFFF;
370         }
371         
372         // Allocate
373         paddr = MM_AllocPhys();
374         //LOG("paddr = 0x%llx", paddr);
375         if( paddr == 0 ) {
376                 Warning("MM_Allocate - Out of Memory when allocating at %p (Called by %p)",
377                         VAddr, __builtin_return_address(0));
378                 //LEAVE('i',0);
379                 return 0;
380         }
381         // Map
382         gaPageTable[ VAddr >> 12 ] = paddr | 3;
383         // Mark as user
384         if(VAddr < MM_USER_MAX) gaPageTable[ VAddr >> 12 ] |= PF_USER;
385         // Invalidate Cache for address
386         INVLPG( VAddr & ~0xFFF );
387         
388         //LEAVE('X', paddr);
389         return paddr;
390 }
391
392 /**
393  * \fn void MM_Deallocate(tVAddr VAddr)
394  */
395 void MM_Deallocate(tVAddr VAddr)
396 {
397         if( gaPageDir[ VAddr >> 22 ] == 0 ) {
398                 Warning("MM_Deallocate - Directory not mapped");
399                 return;
400         }
401         
402         if(gaPageTable[ VAddr >> 12 ] == 0) {
403                 Warning("MM_Deallocate - Page is not allocated");
404                 return;
405         }
406         
407         // Dereference page
408         MM_DerefPhys( gaPageTable[ VAddr >> 12 ] & ~0xFFF );
409         // Clear page
410         gaPageTable[ VAddr >> 12 ] = 0;
411 }
412
413 /**
414  * \fn tPAddr MM_GetPhysAddr(const void *Addr)
415  * \brief Checks if the passed address is accessible and returns its physical address
416  */
417 tPAddr MM_GetPhysAddr(const void *Addr)
418 {
419         tVAddr  addr = (tVAddr)Addr;
420         if( !(gaPageDir[addr >> 22] & 1) )
421                 return 0;
422         if( !(gaPageTable[addr >> 12] & 1) )
423                 return 0;
424         return (gaPageTable[addr >> 12] & ~0xFFF) | (addr & 0xFFF);
425 }
426
427 /**
428  * \fn void MM_SetCR3(Uint CR3)
429  * \brief Sets the current process space
430  */
431 void MM_SetCR3(Uint CR3)
432 {
433         __ASM__("mov %0, %%cr3"::"r"(CR3));
434 }
435
436 /**
437  * \fn int MM_Map(tVAddr VAddr, tPAddr PAddr)
438  * \brief Map a physical page to a virtual one
439  */
440 int MM_Map(tVAddr VAddr, tPAddr PAddr)
441 {
442         //ENTER("xVAddr xPAddr", VAddr, PAddr);
443         // Sanity check
444         if( PAddr & 0xFFF || VAddr & 0xFFF ) {
445                 Log_Warning("MM_Virt", "MM_Map - Physical or Virtual Addresses are not aligned (0x%P and %p)",
446                         PAddr, VAddr);
447                 //LEAVE('i', 0);
448                 return 0;
449         }
450         
451         // Align addresses
452         PAddr &= ~0xFFF;        VAddr &= ~0xFFF;
453         
454         // Check if the directory is mapped
455         if( gaPageDir[ VAddr >> 22 ] == 0 )
456         {
457                 tPAddr  tmp = MM_AllocPhys();
458                 if( tmp == 0 )
459                         return 0;
460                 gaPageDir[ VAddr >> 22 ] = tmp | 3;
461                 
462                 // Mark as user
463                 if(VAddr < MM_USER_MAX) gaPageDir[ VAddr >> 22 ] |= PF_USER;
464                 
465                 INVLPG( &gaPageTable[ (VAddr >> 12) & ~0x3FF ] );
466                 memsetd( &gaPageTable[ (VAddr >> 12) & ~0x3FF ], 0, 1024 );
467         }
468         // Check if the page is already allocated
469         else if( gaPageTable[ VAddr >> 12 ] != 0 ) {
470                 Warning("MM_Map - Allocating to used address");
471                 //LEAVE('i', 0);
472                 return 0;
473         }
474         
475         // Map
476         gaPageTable[ VAddr >> 12 ] = PAddr | 3;
477         // Mark as user
478         if(VAddr < MM_USER_MAX) gaPageTable[ VAddr >> 12 ] |= PF_USER;
479         
480         //LOG("gaPageTable[ 0x%x ] = (Uint)%p = 0x%x",
481         //      VAddr >> 12, &gaPageTable[ VAddr >> 12 ], gaPageTable[ VAddr >> 12 ]);
482         
483         // Reference
484         MM_RefPhys( PAddr );
485         
486         //LOG("INVLPG( 0x%x )", VAddr);
487         INVLPG( VAddr );
488         
489         //LEAVE('i', 1);
490         return 1;
491 }
492
493 /**
494  * \brief Clear user's address space
495  */
496 void MM_ClearUser(void)
497 {
498         Uint    i, j;
499         
500         for( i = 0; i < (MM_USER_MAX>>22); i ++ )
501         {
502                 // Check if directory is not allocated
503                 if( !(gaPageDir[i] & PF_PRESENT) ) {
504                         gaPageDir[i] = 0;
505                         continue;
506                 }
507                 
508                 // Deallocate tables
509                 for( j = 0; j < 1024; j ++ )
510                 {
511                         if( gaPageTable[i*1024+j] & 1 )
512                                 MM_DerefPhys( gaPageTable[i*1024+j] & ~0xFFF );
513                         gaPageTable[i*1024+j] = 0;
514                 }
515                 
516                 // Deallocate directory
517                 MM_DerefPhys( gaPageDir[i] & ~0xFFF );
518                 gaPageDir[i] = 0;
519                 INVLPG( &gaPageTable[i*1024] );
520         }
521         INVLPG( gaPageDir );
522 }
523
524 /**
525  * \brief Deallocate an address space
526  */
527 void MM_ClearSpace(Uint32 CR3)
528 {
529          int    i, j;
530         
531         if(CR3 == (*gpPageCR3 & ~0xFFF)) {
532                 Log_Error("MMVirt", "Can't clear current address space");
533                 return ;
534         }
535
536         if( MM_GetRefCount(CR3) > 1 ) {
537                 MM_DerefPhys(CR3);
538                 Log_Log("MMVirt", "CR3 %P is still referenced, not cleaning (but dereferenced)", CR3);
539                 return ;
540         }
541
542         Log_Debug("MMVirt", "Clearing out address space 0x%x from 0x%x", CR3, *gpPageCR3);
543         
544         GET_TEMP_MAPPING(CR3);
545         INVLPG( gaTmpDir );
546
547         for( i = 0; i < 1024; i ++ )
548         {
549                 Uint32  *table = &gaTmpTable[i*1024];
550                 if( !(gaTmpDir[i] & PF_PRESENT) )
551                         continue ;
552
553                 INVLPG( table );        
554
555                 if( i < 768 || (i > MM_KERNEL_STACKS >> 22 && i < MM_KERNEL_STACKS_END >> 22) )
556                 {
557                         for( j = 0; j < 1024; j ++ )
558                         {
559                                 if( !(table[j] & 1) )
560                                         continue;
561                                 MM_DerefPhys( table[j] & ~0xFFF );
562                         }
563                 }
564
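                // Don't dereference the fractal entry - it points at the directory
                // itself, which is dereferenced once below via CR3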
565                 if( i != (PAGE_TABLE_ADDR >> 22) )
566                 {               
567                         MM_DerefPhys( gaTmpDir[i] & ~0xFFF );
568                 }
569         }
570
571
572         MM_DerefPhys( CR3 );
573
574         REL_TEMP_MAPPING();
575 }
576
577 /**
578  * \fn tPAddr MM_Clone(int bNoUserCopy)
579  * \brief Clone the current address space
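 *
 * User mappings are shared copy-on-write with the new space (both sides are
 * marked read-only and PF_COW), kernel tables are shared by reference, and
 * only the current thread's kernel stack is deep-copied.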
580  */
581 tPAddr MM_Clone(int bNoUserCopy)
582 {
583         Uint    i, j;
584         tPAddr  ret;
585         Uint    page = 0;
586         tVAddr  kStackBase = Proc_GetCurThread()->KernelStack - MM_KERNEL_STACK_SIZE;
587         void    *tmp;
588         
589         // Create Directory Table
590         ret = MM_AllocPhys();
591         if( ret == 0 ) {
592                 return 0;
593         }
594         
595         // Map
596         GET_TEMP_MAPPING( ret );
597         INVLPG( gaTmpDir );
598         memsetd( gaTmpDir, 0, 1024 );
599         
600         if( Threads_GetPID() != 0 && !bNoUserCopy )
601         {       
602                 // Copy Tables
603                 for( i = 0; i < 768; i ++)
604                 {
605                         // Check if table is allocated
606                         if( !(gaPageDir[i] & PF_PRESENT) ) {
607                                 gaTmpDir[i] = 0;
608                                 page += 1024;
609                                 continue;
610                         }
611                         
612                         // Allocate new table
613                         gaTmpDir[i] = MM_AllocPhys() | (gaPageDir[i] & 7);
614                         INVLPG( &gaTmpTable[page] );
615                         // Fill
616                         for( j = 0; j < 1024; j ++, page++ )
617                         {
618                                 if( !(gaPageTable[page] & PF_PRESENT) ) {
619                                         gaTmpTable[page] = 0;
620                                         continue;
621                                 }
622                                 
623                                 // Reference the old page
624                                 MM_RefPhys( gaPageTable[page] & ~0xFFF );
625                                 // Add to new table
626                                 if(gaPageTable[page] & PF_WRITE) {
627                                         gaTmpTable[page] = (gaPageTable[page] & ~PF_WRITE) | PF_COW;
628                                         gaPageTable[page] = (gaPageTable[page] & ~PF_WRITE) | PF_COW;
629                                         INVLPG( page << 12 );
630                                 }
631                                 else
632                                         gaTmpTable[page] = gaPageTable[page];
633                         }
634                 }
635         }
636         
637         // Map in kernel tables (and make fractal mapping)
638         for( i = 768; i < 1024; i ++ )
639         {
640                 // Fractal
641                 if( i == (PAGE_TABLE_ADDR >> 22) ) {
642                         gaTmpDir[ PAGE_TABLE_ADDR >> 22 ] = *gpTmpCR3;
643                         continue;
644                 }
645                 if( i == (TMP_TABLE_ADDR >> 22) ) {
646                         gaTmpDir[ TMP_TABLE_ADDR >> 22 ] = 0;
647                         continue ;
648                 }
649                 
650                 if( gaPageDir[i] == 0 ) {
651                         gaTmpDir[i] = 0;
652                         continue;
653                 }
654                 
655                 //LOG("gaPageDir[%x/4] = 0x%x", i*4, gaPageDir[i]);
656                 MM_RefPhys( gaPageDir[i] & ~0xFFF );
657                 gaTmpDir[i] = gaPageDir[i];
658         }
659         
660         // Allocate kernel stack
661         for(i = MM_KERNEL_STACKS >> 22; i < MM_KERNEL_STACKS_END >> 22; i ++ )
662         {
663                 // Check if directory is allocated
664                 if( (gaPageDir[i] & 1) == 0 ) {
665                         gaTmpDir[i] = 0;
666                         continue;
667                 }               
668                 
669                 // We don't care about other kernel stacks, just the current one
670                 if( i != kStackBase >> 22 ) {
671                         MM_DerefPhys( gaPageDir[i] & ~0xFFF );
672                         gaTmpDir[i] = 0;
673                         continue;
674                 }
675                 
676                 // Create a copy
677                 gaTmpDir[i] = MM_AllocPhys() | 3;
678                 INVLPG( &gaTmpTable[i*1024] );
679                 for( j = 0; j < 1024; j ++ )
680                 {
681                         // Is the page allocated? If not, skip
682                         if( !(gaPageTable[i*1024+j] & 1) ) {
683                                 gaTmpTable[i*1024+j] = 0;
684                                 continue;
685                         }
686                         
687                         // We don't care about other kernel stacks
688                         if( ((i*1024+j)*4096 & ~(MM_KERNEL_STACK_SIZE-1)) != kStackBase ) {
689                                 gaTmpTable[i*1024+j] = 0;
690                                 continue;
691                         }
692                         
693                         // Allocate page
694                         gaTmpTable[i*1024+j] = MM_AllocPhys() | 3;
695                         
696                         MM_RefPhys( gaTmpTable[i*1024+j] & ~0xFFF );
697                         
698                         tmp = MM_MapTemp( gaTmpTable[i*1024+j] & ~0xFFF );
699                         memcpy( tmp, (void *)( (i*1024+j)*0x1000 ), 0x1000 );
700                         MM_FreeTemp( tmp );
701                 }
702         }
703         
704         REL_TEMP_MAPPING();
705         
706         //LEAVE('x', ret);
707         return ret;
708 }
709
710 /**
711  * \fn tVAddr MM_NewKStack(void)
712  * \brief Create a new kernel stack
713  */
714 tVAddr MM_NewKStack(void)
715 {
716         tVAddr  base;
717         Uint    i;
718         for(base = MM_KERNEL_STACKS; base < MM_KERNEL_STACKS_END; base += MM_KERNEL_STACK_SIZE)
719         {
720                 // Check if space is free
721                 if(MM_GetPhysAddr( (void*) base) != 0)
722                         continue;
723                 // Allocate
724                 //for(i = MM_KERNEL_STACK_SIZE; i -= 0x1000 ; )
725                 for(i = 0; i < MM_KERNEL_STACK_SIZE; i += 0x1000 )
726                 {
727                         if( MM_Allocate(base+i) == 0 )
728                         {
729                                 // On error, print a warning and return error
730                                 Warning("MM_NewKStack - Out of memory");
731                                 // - Clean up
732                                 //for( i += 0x1000 ; i < MM_KERNEL_STACK_SIZE; i += 0x1000 )
733                                 //      MM_Deallocate(base+i);
734                                 return 0;
735                         }
736                 }
737                 // Success
738 //              Log("MM_NewKStack - Allocated %p", base + MM_KERNEL_STACK_SIZE);
739                 return base+MM_KERNEL_STACK_SIZE;
740         }
741         // No stacks left
742         Log_Warning("MMVirt", "MM_NewKStack - No address space left");
743         return 0;
744 }
745
746 /**
747  * \fn tVAddr MM_NewWorkerStack(Uint *StackContents, size_t ContentsSize)
748  * \brief Creates a new worker stack
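 *
 * Worker stacks live in Thread0's address space between WORKER_STACKS and
 * WORKER_STACKS_END; a bitmap (gWorkerStacks) tracks which slots are in use,
 * and the new stack is populated through the temporary fractal mapping.
 * StackContents (at most one page) is copied to the top of the new stack.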
749  */
750 tVAddr MM_NewWorkerStack(Uint *StackContents, size_t ContentsSize)
751 {
752         Uint    base, addr;
753         tVAddr  tmpPage;
754         tPAddr  page;
755         
756         // TODO: Thread safety
757         // Find a free worker stack address
758         for(base = giLastUsedWorker; base < NUM_WORKER_STACKS; base++)
759         {
760                 // Used block
761                 if( gWorkerStacks[base/32] == -1 ) {
762                         base |= 31;     // Skip to the last slot in this block
763                                         // (the loop's base++ moves to the next block)
764                         continue;
765                 }
766                 // Used stack
767                 if( gWorkerStacks[base/32] & (1U << (base & 31)) ) {
768                         continue;
769                 }
770                 break;
771         }
772         if(base >= NUM_WORKER_STACKS) {
773                 Warning("Uh-oh! Out of worker stacks");
774                 return 0;
775         }
776         
777         // It's ours now!
778         gWorkerStacks[base/32] |= (1U << (base & 31));
779         // Make life easier for later calls
780         giLastUsedWorker = base;
781         // We have one
782         base = WORKER_STACKS + base * WORKER_STACK_SIZE;
783         //Log(" MM_NewWorkerStack: base = 0x%x", base);
784         
785         // Set the temp fractals to TID0's address space
786         GET_TEMP_MAPPING( ((Uint)gaInitPageDir - KERNEL_BASE) );
787         INVLPG( gaTmpDir );
788         
789         // Check if the directory is mapped (we are assuming that the stacks
790         // will fit neatly in a directory)
791         //Log(" MM_NewWorkerStack: gaTmpDir[ 0x%x ] = 0x%x", base>>22, gaTmpDir[ base >> 22 ]);
792         if(gaTmpDir[ base >> 22 ] == 0) {
793                 gaTmpDir[ base >> 22 ] = MM_AllocPhys() | 3;
794                 INVLPG( &gaTmpTable[ (base>>12) & ~0x3FF ] );
795         }
796         
797         // Mapping Time!
798         for( addr = 0; addr < WORKER_STACK_SIZE; addr += 0x1000 )
799         {
800                 page = MM_AllocPhys();
801                 gaTmpTable[ (base + addr) >> 12 ] = page | 3;
802         }
803
804         // Release temporary fractal
805         REL_TEMP_MAPPING();
806
807         // NOTE: The stack contents must fit within one page
808         // `page` is the last allocated page from the previous for loop
809         tmpPage = (tVAddr)MM_MapTemp( page );
810         memcpy( (void*)( tmpPage + (0x1000 - ContentsSize) ), StackContents, ContentsSize);
811         MM_FreeTemp( (void*)tmpPage );
812         
813         //Log("MM_NewWorkerStack: RETURN 0x%x", base);
814         return base + WORKER_STACK_SIZE;
815 }
816
817 /**
818  * \fn void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask)
819  * \brief Sets the flags on a page
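 *
 * Only the flags selected by Mask are changed. Note that the TLB entry for
 * VAddr is not invalidated here; callers that need the change to take effect
 * immediately on this CPU may need to INVLPG the address themselves.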
820  */
821 void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask)
822 {
823         tTabEnt *ent;
824         if( !(gaPageDir[VAddr >> 22] & 1) )     return ;
825         if( !(gaPageTable[VAddr >> 12] & 1) )   return ;
826         
827         ent = &gaPageTable[VAddr >> 12];
828         
829         // Read-Only
830         if( Mask & MM_PFLAG_RO )
831         {
832                 if( Flags & MM_PFLAG_RO ) {
833                         *ent &= ~PF_WRITE;
834                 }
835                 else {
836                         gaPageDir[VAddr >> 22] |= PF_WRITE;
837                         *ent |= PF_WRITE;
838                 }
839         }
840         
841         // Kernel
842         if( Mask & MM_PFLAG_KERNEL )
843         {
844                 if( Flags & MM_PFLAG_KERNEL ) {
845                         *ent &= ~PF_USER;
846                 }
847                 else {
848                         gaPageDir[VAddr >> 22] |= PF_USER;
849                         *ent |= PF_USER;
850                 }
851         }
852         
853         // Copy-On-Write
854         if( Mask & MM_PFLAG_COW )
855         {
856                 if( Flags & MM_PFLAG_COW ) {
857                         *ent &= ~PF_WRITE;
858                         *ent |= PF_COW;
859                 }
860                 else {
861                         *ent &= ~PF_COW;
862                         *ent |= PF_WRITE;
863                 }
864         }
865         
866         //Log("MM_SetFlags: *ent = 0x%08x, gaPageDir[%i] = 0x%08x",
867         //      *ent, VAddr >> 22, gaPageDir[VAddr >> 22]);
868 }
869
870 /**
871  * \brief Get the flags on a page
872  */
873 Uint MM_GetFlags(tVAddr VAddr)
874 {
875         tTabEnt *ent;
876         Uint    ret = 0;
877         
878         // Validity Check
879         if( !(gaPageDir[VAddr >> 22] & 1) )     return 0;
880         if( !(gaPageTable[VAddr >> 12] & 1) )   return 0;
881         
882         ent = &gaPageTable[VAddr >> 12];
883         
884         // Read-Only
885         if( !(*ent & PF_WRITE) )        ret |= MM_PFLAG_RO;
886         // Kernel
887         if( !(*ent & PF_USER) ) ret |= MM_PFLAG_KERNEL;
888         // Copy-On-Write
889         if( *ent & PF_COW )     ret |= MM_PFLAG_COW;
890         
891         return ret;
892 }
893
894 /**
895  * \brief Check if the provided buffer is valid
896  * \return Boolean valid
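 *
 * If the first page is a user page, every subsequent page of the buffer must
 * also be user-accessible, so a user-supplied buffer cannot spill into
 * kernel-only mappings.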
897  */
898 int MM_IsValidBuffer(tVAddr Addr, size_t Size)
899 {
900          int    bIsUser;
901          int    dir, tab;
902
903         Size += Addr & (PAGE_SIZE-1);
904         Addr &= ~(PAGE_SIZE-1);
905
906         dir = Addr >> 22;
907         tab = Addr >> 12;
908         
909 //      Debug("Addr = %p, Size = 0x%x, dir = %i, tab = %i", Addr, Size, dir, tab);
910
911         if( !(gaPageDir[dir] & 1) )     return 0;
912         if( !(gaPageTable[tab] & 1) )   return 0;
913         
914         bIsUser = !!(gaPageTable[tab] & PF_USER);
915
916         while( Size >= PAGE_SIZE )
917         {
918                 if( (tab & 1023) == 0 )
919                 {
920                         dir ++;
921                         if( !(gaPageDir[dir] & 1) )     return 0;
922                 }
923                 
924                 if( !(gaPageTable[tab] & 1) )   return 0;
925                 if( bIsUser && !(gaPageTable[tab] & PF_USER) )  return 0;
926
927                 tab ++;
928                 Size -= PAGE_SIZE;
929         }
930         return 1;
931 }
932
933 /**
934  * \fn tPAddr MM_DuplicatePage(tVAddr VAddr)
935  * \brief Duplicates the contents of a virtual page into a newly allocated physical page
936  */
937 tPAddr MM_DuplicatePage(tVAddr VAddr)
938 {
939         tPAddr  ret;
940         void    *temp;
941          int    wasRO = 0;
942         
943         //ENTER("xVAddr", VAddr);
944         
945         // Check if mapped
946         if( !(gaPageDir  [VAddr >> 22] & PF_PRESENT) )  return 0;
947         if( !(gaPageTable[VAddr >> 12] & PF_PRESENT) )  return 0;
948         
949         // Page Align
950         VAddr &= ~0xFFF;
951         
952         // Allocate new page
953         ret = MM_AllocPhys();
954         if( !ret ) {
955                 return 0;
956         }
957         
958         // Write-lock the page (to keep data consistent), saving its R/W state
959         wasRO = (gaPageTable[VAddr >> 12] & PF_WRITE ? 0 : 1);
960         gaPageTable[VAddr >> 12] &= ~PF_WRITE;
961         INVLPG( VAddr );
962         
963         // Copy Data
964         temp = MM_MapTemp(ret);
965         memcpy( temp, (void*)VAddr, 0x1000 );
966         MM_FreeTemp(temp);
967         
968         // Restore Writeable status
969         if(!wasRO)      gaPageTable[VAddr >> 12] |= PF_WRITE;
970         INVLPG(VAddr);
971         
972         //LEAVE('X', ret);
973         return ret;
974 }
975
976 /**
977  * \fn void *MM_MapTemp(tPAddr PAddr)
978  * \brief Create a temporary memory mapping
979  * \todo Show Luigi Barone (C Lecturer) and see what he thinks
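 *
 * Maps PAddr into one of NUM_TEMP_PAGES fixed slots at TEMP_MAP_ADDR, yielding
 * until a slot becomes free. Typical (hypothetical) usage:
 *     void *buf = MM_MapTemp(frame);
 *     memcpy(buf, src, 0x1000);
 *     MM_FreeTemp(buf);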
980  */
981 void * MM_MapTemp(tPAddr PAddr)
982 {
983          int    i;
984         
985         //ENTER("XPAddr", PAddr);
986         
987         PAddr &= ~0xFFF;
988         
989         //LOG("glTempMappings = %i", glTempMappings);
990         
991         for(;;)
992         {
993                 Mutex_Acquire( &glTempMappings );
994                 
995                 for( i = 0; i < NUM_TEMP_PAGES; i ++ )
996                 {
997                         // Check if page used
998                         if(gaPageTable[ (TEMP_MAP_ADDR >> 12) + i ] & 1)        continue;
999                         // Mark as used
1000                         gaPageTable[ (TEMP_MAP_ADDR >> 12) + i ] = PAddr | 3;
1001                         INVLPG( TEMP_MAP_ADDR + (i << 12) );
1002                         //LEAVE('p', TEMP_MAP_ADDR + (i << 12));
1003                         Mutex_Release( &glTempMappings );
1004                         return (void*)( TEMP_MAP_ADDR + (i << 12) );
1005                 }
1006                 Mutex_Release( &glTempMappings );
1007                 Threads_Yield();        // TODO: Use a sleep queue here instead
1008         }
1009 }
1010
1011 /**
1012  * \fn void MM_FreeTemp(void *VAddr)
1013  * \brief Frees a temporary mapping
1014  */
1015 void MM_FreeTemp(void *VAddr)
1016 {
1017          int    i = (tVAddr)VAddr >> 12;
1018         //ENTER("xVAddr", VAddr);
1019         
1020         if(i >= (TEMP_MAP_ADDR >> 12))
1021                 gaPageTable[ i ] = 0;
1022         
1023         //LEAVE('-');
1024 }
1025
1026 /**
1027  * \fn tVAddr MM_MapHWPages(tPAddr PAddr, Uint Number)
1028  * \brief Maps a contiguous run of physical pages into the hardware mapping area
1029  */
1030 tVAddr MM_MapHWPages(tPAddr PAddr, Uint Number)
1031 {
1032          int    i, j;
1033         
1034         PAddr &= ~0xFFF;
1035         
1036         // Scan List
1037         for( i = 0; i < NUM_HW_PAGES; i ++ )
1038         {               
1039                 // Check if addr used
1040                 if( gaPageTable[ (HW_MAP_ADDR >> 12) + i ] & 1 )
1041                         continue;
1042                 
1043                 // Check possible region
1044                 for( j = 0; j < Number && i + j < NUM_HW_PAGES; j ++ )
1045                 {
1046                         // If there is an allocated page in the region we are testing, break
1047                         if( gaPageTable[ (HW_MAP_ADDR >> 12) + i + j ] & 1 )    break;
1048                 }
1049                 // Is it all free?
1050                 if( j == Number )
1051                 {
1052                         // Allocate
1053                         for( j = 0; j < Number; j++ ) {
1054                                 MM_RefPhys( PAddr + (j<<12) );
1055                                 gaPageTable[ (HW_MAP_ADDR >> 12) + i + j ] = (PAddr + (j<<12)) | 3;
1056                         }
1057                         return HW_MAP_ADDR + (i<<12);
1058                 }
1059         }
1060         // If we don't find any, return NULL
1061         return 0;
1062 }
1063
1064 /**
1065  * \fn tVAddr MM_AllocDMA(int Pages, int MaxBits, tPAddr *PhysAddr)
1066  * \brief Allocates DMA physical memory
1067  * \param Pages Number of pages required
1068  * \param MaxBits       Maximum number of bits the physical address can have
1069  * \param PhysAddr      Pointer to the location to place the physical address allocated
1070  * \return Virtual address allocated, or 0 on failure
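 *
 * Example (hypothetical caller - e.g. a driver needing one page below 4 GiB):
 *     tPAddr phys;
 *     tVAddr buf = MM_AllocDMA(1, 32, &phys);
 *     if( !buf )  return ;    // out of memory or address space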
1071  */
1072 tVAddr MM_AllocDMA(int Pages, int MaxBits, tPAddr *PhysAddr)
1073 {
1074         tPAddr  phys;
1075         tVAddr  ret;
1076         
1077         ENTER("iPages iMaxBits pPhysAddr", Pages, MaxBits, PhysAddr);
1078         
1079         if(MaxBits == -1)
1080                 MaxBits = PHYS_BITS;
1081         
1082         // Sanity Check
1083         if(MaxBits < 12) {
1084                 LEAVE('i', 0);
1085                 return 0;
1086         }
1087         
1088         // Fast Allocate
1089         if(Pages == 1 && MaxBits >= PHYS_BITS)
1090         {
1091                 phys = MM_AllocPhys();
1092                 if( PhysAddr )
1093                         *PhysAddr = phys;
1094                 if( !phys ) {
1095                         LEAVE_RET('i', 0);
1096                 }
1097                 ret = MM_MapHWPages(phys, 1);
1098                 if(ret == 0) {
1099                         MM_DerefPhys(phys);
1100                         LEAVE('i', 0);
1101                         return 0;
1102                 }
1103                 LEAVE('x', ret);
1104                 return ret;
1105         }
1106         
1107         // Slow Allocate
1108         phys = MM_AllocPhysRange(Pages, MaxBits);
1109         // - Was it allocated?
1110         if(phys == 0) {
1111                 LEAVE('i', 0);
1112                 return 0;
1113         }
1114         
1115         // Allocated successfully, now map
1116         ret = MM_MapHWPages(phys, Pages);
1117         if( ret == 0 ) {
1118                 // If it didn't map, free then return 0
1119                 for(;Pages--;phys+=0x1000)
1120                         MM_DerefPhys(phys);
1121                 LEAVE('i', 0);
1122                 return 0;
1123         }
1124         
1125         if( PhysAddr )
1126                 *PhysAddr = phys;
1127         LEAVE('x', ret);
1128         return ret;
1129 }
1130
1131 /**
1132  * \fn void MM_UnmapHWPages(tVAddr VAddr, Uint Number)
1133  * \brief Unmap a hardware page
1134  */
1135 void MM_UnmapHWPages(tVAddr VAddr, Uint Number)
1136 {
1137          int    i, j;
1138         
1139         //Log_Debug("VirtMem", "MM_UnmapHWPages: (VAddr=0x%08x, Number=%i)", VAddr, Number);
1140         
1141         // Sanity Check
1142         if(VAddr < HW_MAP_ADDR || VAddr+Number*0x1000 > HW_MAP_MAX)     return;
1143         
1144         i = VAddr >> 12;
1145         
1146         Mutex_Acquire( &glTempMappings );       // Temp and HW share a directory, so they share a lock
1147         
1148         for( j = 0; j < Number; j++ )
1149         {
1150                 MM_DerefPhys( gaPageTable[ i + j ] & ~0xFFF );
1151                 gaPageTable[ i + j ] = 0;
1152                 INVLPG( (tVAddr)(i+j) << 12 );
1153         }
1154         
1155         Mutex_Release( &glTempMappings );
1156 }
1157
